Diffstat (limited to 'drivers/net/ethernet')
406 files changed, 15143 insertions, 6565 deletions
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index ecdea58e6a21..2227c83a4862 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -1547,9 +1547,8 @@ static const struct ethtool_ops netdev_ethtool_ops = { .set_msglevel = netdev_set_msglevel, }; - #ifdef MODULE -void cleanup_module(void) +static void __exit corkscrew_exit_module(void) { while (!list_empty(&root_corkscrew_dev)) { struct net_device *dev; @@ -1563,4 +1562,5 @@ void cleanup_module(void) free_netdev(dev); } } +module_exit(corkscrew_exit_module); #endif /* MODULE */ diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index aead145dd91d..4a1b368ca7e6 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -129,6 +129,7 @@ source "drivers/net/ethernet/microchip/Kconfig" source "drivers/net/ethernet/mscc/Kconfig" source "drivers/net/ethernet/microsoft/Kconfig" source "drivers/net/ethernet/moxa/Kconfig" +source "drivers/net/ethernet/mucse/Kconfig" source "drivers/net/ethernet/myricom/Kconfig" config FEALNX diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 998dd628b202..2e18df8ca8ec 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -65,6 +65,7 @@ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ +obj-$(CONFIG_NET_VENDOR_MUCSE) += mucse/ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c index 433a646e9831..75893c90a0a1 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.c +++ b/drivers/net/ethernet/airoha/airoha_eth.c @@ -137,11 +137,11 @@ static void airoha_fe_maccr_init(struct airoha_eth *eth) for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) airoha_fe_set(eth, REG_GDM_FWD_CFG(p), - GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM | - GDM_DROP_CRC_ERR); + GDM_TCP_CKSUM_MASK | GDM_UDP_CKSUM_MASK | + GDM_IP4_CKSUM_MASK | GDM_DROP_CRC_ERR_MASK); - airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK, - FIELD_PREP(CDM1_VLAN_MASK, 0x8100)); + airoha_fe_rmw(eth, REG_CDM_VLAN_CTRL(1), CDM_VLAN_MASK, + FIELD_PREP(CDM_VLAN_MASK, 0x8100)); airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD); } @@ -297,8 +297,11 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth) int q; all_rsv = airoha_fe_get_pse_all_rsv(eth); - /* hw misses PPE2 oq rsv */ - all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]; + if (airoha_ppe_is_enabled(eth, 1)) { + /* hw misses PPE2 oq rsv */ + all_rsv += PSE_RSV_PAGES * + pse_port_num_queues[FE_PSE_PORT_PPE2]; + } airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv); /* CMD1 */ @@ -335,13 +338,17 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth) for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++) airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q, PSE_QUEUE_RSV_PAGES); - /* PPE2 */ - for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) { - if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2) - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, - PSE_QUEUE_RSV_PAGES); - else - airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0); + if (airoha_ppe_is_enabled(eth, 1)) { + /* PPE2 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) { + if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2) + 
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, + q, + PSE_QUEUE_RSV_PAGES); + else + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, + q, 0); + } } /* GMD4 */ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++) @@ -396,46 +403,46 @@ static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth) static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth) { /* CDM1_CRSN_QSEL */ - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2), - CDM1_CRSN_QSEL_REASON_MASK(CRSN_22), - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_22 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_22), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22), CDM_CRSN_QSEL_Q1)); - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2), - CDM1_CRSN_QSEL_REASON_MASK(CRSN_08), - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_08 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_08), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08), CDM_CRSN_QSEL_Q1)); - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2), - CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_21 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_21), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21), CDM_CRSN_QSEL_Q1)); - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2), - CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_24 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_24), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24), CDM_CRSN_QSEL_Q6)); - airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2), - CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), - FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_25 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_25), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25), CDM_CRSN_QSEL_Q1)); /* CDM2_CRSN_QSEL */ - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2), - CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_08 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_08), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08), CDM_CRSN_QSEL_Q1)); - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2), - CDM2_CRSN_QSEL_REASON_MASK(CRSN_21), - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_21 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_21), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21), CDM_CRSN_QSEL_Q1)); - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2), - CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_22 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_22), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22), CDM_CRSN_QSEL_Q1)); - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2), - CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_24 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_24), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24), CDM_CRSN_QSEL_Q6)); - airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2), - CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), - FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), + airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_25 >> 2), + CDM_CRSN_QSEL_REASON_MASK(CRSN_25), + FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25), CDM_CRSN_QSEL_Q1)); } @@ -455,18 +462,18 @@ static int airoha_fe_init(struct airoha_eth 
*eth) airoha_fe_wr(eth, REG_FE_PCE_CFG, PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK); /* set vip queue selection to ring 1 */ - airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK, - FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4)); - airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK, - FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4)); + airoha_fe_rmw(eth, REG_CDM_FWD_CFG(1), CDM_VIP_QSEL_MASK, + FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4)); + airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_VIP_QSEL_MASK, + FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4)); /* set GDM4 source interface offset to 8 */ - airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET, - GDM4_SPORT_OFF2_MASK | - GDM4_SPORT_OFF1_MASK | - GDM4_SPORT_OFF0_MASK, - FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) | - FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) | - FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8)); + airoha_fe_rmw(eth, REG_GDM_SRC_PORT_SET(4), + GDM_SPORT_OFF2_MASK | + GDM_SPORT_OFF1_MASK | + GDM_SPORT_OFF0_MASK, + FIELD_PREP(GDM_SPORT_OFF2_MASK, 8) | + FIELD_PREP(GDM_SPORT_OFF1_MASK, 8) | + FIELD_PREP(GDM_SPORT_OFF0_MASK, 8)); /* set PSE Page as 128B */ airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG, @@ -492,8 +499,8 @@ static int airoha_fe_init(struct airoha_eth *eth) airoha_fe_set(eth, REG_GDM_MISC_CFG, GDM2_RDM_ACK_WAIT_PREF_MASK | GDM2_CHN_VLD_MODE_MASK); - airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, - FIELD_PREP(CDM2_OAM_QSEL_MASK, 15)); + airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_OAM_QSEL_MASK, + FIELD_PREP(CDM_OAM_QSEL_MASK, 15)); /* init fragment and assemble Force Port */ /* NPU Core-3, NPU Bridge Channel-3 */ @@ -507,8 +514,8 @@ static int airoha_fe_init(struct airoha_eth *eth) FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) | FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22)); - airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK); - airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK); + airoha_fe_set(eth, REG_GDM_FWD_CFG(3), GDM_PAD_EN_MASK); + airoha_fe_set(eth, REG_GDM_FWD_CFG(4), GDM_PAD_EN_MASK); airoha_fe_crsn_qsel_init(eth); @@ -516,7 +523,7 @@ static int airoha_fe_init(struct airoha_eth *eth) airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK); /* default aging mode for mbi unlock issue */ - airoha_fe_rmw(eth, REG_GDM2_CHN_RLS, + airoha_fe_rmw(eth, REG_GDM_CHN_RLS(2), MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK, FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) | FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3)); @@ -524,25 +531,6 @@ static int airoha_fe_init(struct airoha_eth *eth) /* disable IFC by default */ airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK); - airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), - FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM1) | - FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM1)); - airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(1), - FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM2) | - FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM2)); - /* enable 1:N vlan action, init vlan table */ airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK); @@ -904,19 +892,13 @@ static int 
airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget) dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, DMA_TO_DEVICE); - memset(e, 0, sizeof(*e)); + e->dma_addr = 0; + list_add_tail(&e->list, &q->tx_list); + WRITE_ONCE(desc->msg0, 0); WRITE_ONCE(desc->msg1, 0); q->queued--; - /* completion ring can report out-of-order indexes if hw QoS - * is enabled and packets with different priority are queued - * to same DMA ring. Take into account possible out-of-order - * reports incrementing DMA ring tail pointer - */ - while (q->tail != q->head && !q->entry[q->tail].dma_addr) - q->tail = (q->tail + 1) % q->ndesc; - if (skb) { u16 queue = skb_get_queue_mapping(skb); struct netdev_queue *txq; @@ -961,6 +943,7 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q, q->ndesc = size; q->qdma = qdma; q->free_thr = 1 + MAX_SKB_FRAGS; + INIT_LIST_HEAD(&q->tx_list); q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), GFP_KERNEL); @@ -973,9 +956,9 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q, return -ENOMEM; for (i = 0; i < q->ndesc; i++) { - u32 val; + u32 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1); - val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1); + list_add_tail(&q->entry[i].list, &q->tx_list); WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val)); } @@ -985,9 +968,9 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q, airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr); airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, - FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); + FIELD_PREP(TX_RING_CPU_IDX_MASK, 0)); airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK, - FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head)); + FIELD_PREP(TX_RING_DMA_IDX_MASK, 0)); return 0; } @@ -1043,17 +1026,21 @@ static int airoha_qdma_init_tx(struct airoha_qdma *qdma) static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q) { struct airoha_eth *eth = q->qdma->eth; + int i; spin_lock_bh(&q->lock); - while (q->queued) { - struct airoha_queue_entry *e = &q->entry[q->tail]; + for (i = 0; i < q->ndesc; i++) { + struct airoha_queue_entry *e = &q->entry[i]; + + if (!e->dma_addr) + continue; dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, DMA_TO_DEVICE); dev_kfree_skb_any(e->skb); + e->dma_addr = 0; e->skb = NULL; - - q->tail = (q->tail + 1) % q->ndesc; + list_add_tail(&e->list, &q->tx_list); q->queued--; } spin_unlock_bh(&q->lock); @@ -1387,8 +1374,7 @@ static int airoha_hw_init(struct platform_device *pdev, int err, i; /* disable xsi */ - err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), - eth->xsi_rsts); + err = reset_control_bulk_assert(eth->soc->num_xsi_rsts, eth->xsi_rsts); if (err) return err; @@ -1695,19 +1681,23 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p) return 0; } -static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port) +static int airhoha_set_gdm2_loopback(struct airoha_gdm_port *port) { - u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4; struct airoha_eth *eth = port->qdma->eth; - u32 chan = port->id == 3 ? 4 : 0; + u32 val, pse_port, chan, nbq; + int src_port; /* Forward the traffic to the proper GDM port */ + pse_port = port->id == AIROHA_GDM3_IDX ? 
FE_PSE_PORT_GDM3 + : FE_PSE_PORT_GDM4; airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port); - airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC); + airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC_MASK); /* Enable GDM2 loopback */ airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff); airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff); + + chan = port->id == AIROHA_GDM3_IDX ? airoha_is_7581(eth) ? 4 : 3 : 0; airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2), LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK, FIELD_PREP(LPBK_CHAN_MASK, chan) | @@ -1722,36 +1712,36 @@ static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port) airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2)); airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2)); - if (port->id == 3) { - /* FIXME: handle XSI_PCE1_PORT */ - airoha_fe_rmw(eth, REG_FE_WAN_PORT, - WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, - FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT)); - airoha_fe_rmw(eth, - REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3), - SP_CPORT_PCIE0_MASK, - FIELD_PREP(SP_CPORT_PCIE0_MASK, - FE_PSE_PORT_CDM2)); - } else { - /* FIXME: handle XSI_USB_PORT */ + /* XXX: handle XSI_USB_PORT and XSI_PCE1_PORT */ + nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0; + src_port = eth->soc->ops.get_src_port_id(port, nbq); + if (src_port < 0) + return src_port; + + airoha_fe_rmw(eth, REG_FE_WAN_PORT, + WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, + FIELD_PREP(WAN0_MASK, src_port)); + val = src_port & SP_CPORT_DFT_MASK; + airoha_fe_rmw(eth, + REG_SP_DFT_CPORT(src_port >> fls(SP_CPORT_DFT_MASK)), + SP_CPORT_MASK(val), + FE_PSE_PORT_CDM2 << __ffs(SP_CPORT_MASK(val))); + + if (port->id != AIROHA_GDM3_IDX && airoha_is_7581(eth)) airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6, FC_ID_OF_SRC_PORT24_MASK, FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2)); - airoha_fe_rmw(eth, REG_FE_WAN_PORT, - WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, - FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT)); - airoha_fe_rmw(eth, - REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3), - SP_CPORT_ETH_MASK, - FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2)); - } + + return 0; } static int airoha_dev_init(struct net_device *dev) { struct airoha_gdm_port *port = netdev_priv(dev); - struct airoha_eth *eth = port->qdma->eth; - u32 pse_port; + struct airoha_qdma *qdma = port->qdma; + struct airoha_eth *eth = qdma->eth; + u32 pse_port, fe_cpu_port; + u8 ppe_id; airoha_set_macaddr(port, dev->dev_addr); @@ -1759,18 +1749,37 @@ static int airoha_dev_init(struct net_device *dev) case 3: case 4: /* If GDM2 is active we can't enable loopback */ - if (!eth->ports[1]) - airhoha_set_gdm2_loopback(port); + if (!eth->ports[1]) { + int err; + + err = airhoha_set_gdm2_loopback(port); + if (err) + return err; + } fallthrough; case 2: - pse_port = FE_PSE_PORT_PPE2; - break; - default: + if (airoha_ppe_is_enabled(eth, 1)) { + /* For PPE2 always use secondary cpu port. */ + fe_cpu_port = FE_PSE_PORT_CDM2; + pse_port = FE_PSE_PORT_PPE2; + break; + } + fallthrough; + default: { + u8 qdma_id = qdma - &eth->qdma[0]; + + /* For PPE1 select cpu port according to the running QDMA. */ + fe_cpu_port = qdma_id ? FE_PSE_PORT_CDM2 : FE_PSE_PORT_CDM1; pse_port = FE_PSE_PORT_PPE1; break; } + } airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port); + ppe_id = pse_port == FE_PSE_PORT_PPE2 ?
1 : 0; + airoha_fe_rmw(eth, REG_PPE_DFT_CPORT0(ppe_id), + DFT_CPORT_MASK(port->id), + fe_cpu_port << __ffs(DFT_CPORT_MASK(port->id))); return 0; } @@ -1873,18 +1882,20 @@ static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev) #endif } -static bool airoha_dev_tx_queue_busy(struct airoha_queue *q, u32 nr_frags) +static int airoha_get_fe_port(struct airoha_gdm_port *port) { - u32 tail = q->tail <= q->head ? q->tail + q->ndesc : q->tail; - u32 index = q->head + nr_frags; + struct airoha_qdma *qdma = port->qdma; + struct airoha_eth *eth = qdma->eth; - /* completion napi can free out-of-order tx descriptors if hw QoS is - * enabled and packets with different priorities are queued to the same - * DMA ring. Take into account possible out-of-order reports checking - * if the tx queue is full using circular buffer head/tail pointers - * instead of the number of queued packets. - */ - return index >= tail; + switch (eth->soc->version) { + case 0x7583: + return port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3 + : port->id; + case 0x7581: + default: + return port->id == AIROHA_GDM4_IDX ? FE_PSE_PORT_GDM4 + : port->id; + } } static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, @@ -1893,8 +1904,10 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, struct airoha_gdm_port *port = netdev_priv(dev); struct airoha_qdma *qdma = port->qdma; u32 nr_frags, tag, msg0, msg1, len; + struct airoha_queue_entry *e; struct netdev_queue *txq; struct airoha_queue *q; + LIST_HEAD(tx_list); void *data; int i, qid; u16 index; @@ -1927,7 +1940,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, } } - fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id; + fport = airoha_get_fe_port(port); msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) | FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f); @@ -1940,7 +1953,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, txq = netdev_get_tx_queue(dev, qid); nr_frags = 1 + skb_shinfo(skb)->nr_frags; - if (airoha_dev_tx_queue_busy(q, nr_frags)) { + if (q->queued + nr_frags >= q->ndesc) { /* not enough space in the queue */ netif_tx_stop_queue(txq); spin_unlock_bh(&q->lock); @@ -1949,11 +1962,13 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, len = skb_headlen(skb); data = skb->data; - index = q->head; + + e = list_first_entry(&q->tx_list, struct airoha_queue_entry, + list); + index = e - q->entry; for (i = 0; i < nr_frags; i++) { struct airoha_qdma_desc *desc = &q->desc[index]; - struct airoha_queue_entry *e = &q->entry[index]; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t addr; u32 val; @@ -1963,7 +1978,14 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, if (unlikely(dma_mapping_error(dev->dev.parent, addr))) goto error_unmap; - index = (index + 1) % q->ndesc; + list_move_tail(&e->list, &tx_list); + e->skb = i ? NULL : skb; + e->dma_addr = addr; + e->dma_len = len; + + e = list_first_entry(&q->tx_list, struct airoha_queue_entry, + list); + index = e - q->entry; val = FIELD_PREP(QDMA_DESC_LEN_MASK, len); if (i < nr_frags - 1) @@ -1976,15 +1998,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, WRITE_ONCE(desc->msg1, cpu_to_le32(msg1)); WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff)); - e->skb = i ? 
NULL : skb; - e->dma_addr = addr; - e->dma_len = len; - data = skb_frag_address(frag); len = skb_frag_size(frag); } - - q->head = index; q->queued += i; skb_tx_timestamp(skb); @@ -1993,7 +2009,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, if (netif_xmit_stopped(txq) || !netdev_xmit_more()) airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, - FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); + FIELD_PREP(TX_RING_CPU_IDX_MASK, index)); if (q->ndesc - q->queued < q->free_thr) netif_tx_stop_queue(txq); @@ -2003,10 +2019,13 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, return NETDEV_TX_OK; error_unmap: - for (i--; i >= 0; i--) { - index = (q->head + i) % q->ndesc; - dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr, - q->entry[index].dma_len, DMA_TO_DEVICE); + while (!list_empty(&tx_list)) { + e = list_first_entry(&tx_list, struct airoha_queue_entry, + list); + dma_unmap_single(dev->dev.parent, e->dma_addr, e->dma_len, + DMA_TO_DEVICE); + e->dma_addr = 0; + list_move_tail(&e->list, &q->tx_list); } spin_unlock_bh(&q->lock); @@ -2036,8 +2055,12 @@ static void airoha_ethtool_get_mac_stats(struct net_device *dev, airoha_update_hw_stats(port); do { start = u64_stats_fetch_begin(&port->stats.syncp); + stats->FramesTransmittedOK = port->stats.tx_ok_pkts; + stats->OctetsTransmittedOK = port->stats.tx_ok_bytes; stats->MulticastFramesXmittedOK = port->stats.tx_multicast; stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast; + stats->FramesReceivedOK = port->stats.rx_ok_pkts; + stats->OctetsReceivedOK = port->stats.rx_ok_bytes; stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast; } while (u64_stats_fetch_retry(&port->stats.syncp, start)); } @@ -2780,6 +2803,7 @@ static const struct ethtool_ops airoha_ethtool_ops = { .get_drvinfo = airoha_ethtool_get_drvinfo, .get_eth_mac_stats = airoha_ethtool_get_mac_stats, .get_rmon_stats = airoha_ethtool_get_rmon_stats, + .get_link = ethtool_op_get_link, }; static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port) @@ -2917,6 +2941,7 @@ free_metadata_dst: static int airoha_probe(struct platform_device *pdev) { + struct reset_control_bulk_data *xsi_rsts; struct device_node *np; struct airoha_eth *eth; int i, err; @@ -2925,6 +2950,10 @@ static int airoha_probe(struct platform_device *pdev) if (!eth) return -ENOMEM; + eth->soc = of_device_get_match_data(&pdev->dev); + if (!eth->soc) + return -EINVAL; + eth->dev = &pdev->dev; err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32)); @@ -2949,13 +2978,18 @@ static int airoha_probe(struct platform_device *pdev) return err; } - eth->xsi_rsts[0].id = "xsi-mac"; - eth->xsi_rsts[1].id = "hsi0-mac"; - eth->xsi_rsts[2].id = "hsi1-mac"; - eth->xsi_rsts[3].id = "hsi-mac"; - eth->xsi_rsts[4].id = "xfp-mac"; + xsi_rsts = devm_kcalloc(eth->dev, + eth->soc->num_xsi_rsts, sizeof(*xsi_rsts), + GFP_KERNEL); + if (!xsi_rsts) + return -ENOMEM; + + eth->xsi_rsts = xsi_rsts; + for (i = 0; i < eth->soc->num_xsi_rsts; i++) + eth->xsi_rsts[i].id = eth->soc->xsi_rsts_names[i]; + err = devm_reset_control_bulk_get_exclusive(eth->dev, - ARRAY_SIZE(eth->xsi_rsts), + eth->soc->num_xsi_rsts, eth->xsi_rsts); if (err) { dev_err(eth->dev, "failed to get bulk xsi reset lines\n"); @@ -3043,8 +3077,90 @@ static void airoha_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); } +static const char * const en7581_xsi_rsts_names[] = { + "xsi-mac", + "hsi0-mac", + "hsi1-mac", + "hsi-mac", + "xfp-mac", +}; + +static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, 
int nbq) +{ + switch (port->id) { + case 3: + /* 7581 SoC supports PCIe serdes on GDM3 port */ + if (nbq == 4) + return HSGMII_LAN_7581_PCIE0_SRCPORT; + if (nbq == 5) + return HSGMII_LAN_7581_PCIE1_SRCPORT; + break; + case 4: + /* 7581 SoC supports eth and usb serdes on GDM4 port */ + if (!nbq) + return HSGMII_LAN_7581_ETH_SRCPORT; + if (nbq == 1) + return HSGMII_LAN_7581_USB_SRCPORT; + break; + default: + break; + } + + return -EINVAL; +} + +static const char * const an7583_xsi_rsts_names[] = { + "xsi-mac", + "hsi0-mac", + "hsi1-mac", + "xfp-mac", +}; + +static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq) +{ + switch (port->id) { + case 3: + /* 7583 SoC supports eth serdes on GDM3 port */ + if (!nbq) + return HSGMII_LAN_7583_ETH_SRCPORT; + break; + case 4: + /* 7583 SoC supports PCIe and USB serdes on GDM4 port */ + if (!nbq) + return HSGMII_LAN_7583_PCIE_SRCPORT; + if (nbq == 1) + return HSGMII_LAN_7583_USB_SRCPORT; + break; + default: + break; + } + + return -EINVAL; +} + +static const struct airoha_eth_soc_data en7581_soc_data = { + .version = 0x7581, + .xsi_rsts_names = en7581_xsi_rsts_names, + .num_xsi_rsts = ARRAY_SIZE(en7581_xsi_rsts_names), + .num_ppe = 2, + .ops = { + .get_src_port_id = airoha_en7581_get_src_port_id, + }, +}; + +static const struct airoha_eth_soc_data an7583_soc_data = { + .version = 0x7583, + .xsi_rsts_names = an7583_xsi_rsts_names, + .num_xsi_rsts = ARRAY_SIZE(an7583_xsi_rsts_names), + .num_ppe = 1, + .ops = { + .get_src_port_id = airoha_an7583_get_src_port_id, + }, +}; + static const struct of_device_id of_airoha_match[] = { - { .compatible = "airoha,en7581-eth" }, + { .compatible = "airoha,en7581-eth", .data = &en7581_soc_data }, + { .compatible = "airoha,an7583-eth", .data = &an7583_soc_data }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_airoha_match); diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h index cd13c1c1224f..fbbc58133364 100644 --- a/drivers/net/ethernet/airoha/airoha_eth.h +++ b/drivers/net/ethernet/airoha/airoha_eth.h @@ -21,7 +21,6 @@ #define AIROHA_MAX_NUM_IRQ_BANKS 4 #define AIROHA_MAX_DSA_PORTS 7 #define AIROHA_MAX_NUM_RSTS 3 -#define AIROHA_MAX_NUM_XSI_RSTS 5 #define AIROHA_MAX_MTU 9216 #define AIROHA_MAX_PACKET_SIZE 2048 #define AIROHA_NUM_QOS_CHANNELS 4 @@ -48,20 +47,9 @@ #define QDMA_METER_IDX(_n) ((_n) & 0xff) #define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3) -#define PPE_NUM 2 -#define PPE1_SRAM_NUM_ENTRIES (8 * 1024) -#define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES) -#ifdef CONFIG_NET_AIROHA_FLOW_STATS -#define PPE1_STATS_NUM_ENTRIES (4 * 1024) -#else -#define PPE1_STATS_NUM_ENTRIES 0 -#endif /* CONFIG_NET_AIROHA_FLOW_STATS */ -#define PPE_STATS_NUM_ENTRIES (2 * PPE1_STATS_NUM_ENTRIES) -#define PPE1_SRAM_NUM_DATA_ENTRIES (PPE1_SRAM_NUM_ENTRIES - PPE1_STATS_NUM_ENTRIES) -#define PPE_SRAM_NUM_DATA_ENTRIES (2 * PPE1_SRAM_NUM_DATA_ENTRIES) +#define PPE_SRAM_NUM_ENTRIES (8 * 1024) +#define PPE_STATS_NUM_ENTRIES (4 * 1024) #define PPE_DRAM_NUM_ENTRIES (16 * 1024) -#define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES) -#define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1) #define PPE_ENTRY_SIZE 80 #define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10)) @@ -79,10 +67,16 @@ enum { }; enum { - HSGMII_LAN_PCIE0_SRCPORT = 0x16, - HSGMII_LAN_PCIE1_SRCPORT, - HSGMII_LAN_ETH_SRCPORT, - HSGMII_LAN_USB_SRCPORT, + HSGMII_LAN_7581_PCIE0_SRCPORT = 0x16, + HSGMII_LAN_7581_PCIE1_SRCPORT, + HSGMII_LAN_7581_ETH_SRCPORT, + HSGMII_LAN_7581_USB_SRCPORT, +}; + +enum 
{ + HSGMII_LAN_7583_ETH_SRCPORT = 0x16, + HSGMII_LAN_7583_PCIE_SRCPORT = 0x18, + HSGMII_LAN_7583_USB_SRCPORT, }; enum { @@ -111,6 +105,13 @@ enum { CRSN_25 = 0x19, }; +enum airoha_gdm_index { + AIROHA_GDM1_IDX = 1, + AIROHA_GDM2_IDX = 2, + AIROHA_GDM3_IDX = 3, + AIROHA_GDM4_IDX = 4, +}; + enum { FE_PSE_PORT_CDM1, FE_PSE_PORT_GDM1, @@ -168,7 +169,10 @@ enum trtcm_param { struct airoha_queue_entry { union { void *buf; - struct sk_buff *skb; + struct { + struct list_head list; + struct sk_buff *skb; + }; }; dma_addr_t dma_addr; u16 dma_len; @@ -192,6 +196,8 @@ struct airoha_queue { struct napi_struct napi; struct page_pool *page_pool; struct sk_buff *skb; + + struct list_head tx_list; }; struct airoha_tx_irq_queue { @@ -554,7 +560,7 @@ struct airoha_ppe { struct rhashtable l2_flows; struct hlist_head *foe_flow; - u16 foe_check_time[PPE_NUM_ENTRIES]; + u16 *foe_check_time; struct airoha_foe_stats *foe_stats; dma_addr_t foe_stats_dma; @@ -562,9 +568,21 @@ struct airoha_ppe { struct dentry *debugfs_dir; }; +struct airoha_eth_soc_data { + u16 version; + const char * const *xsi_rsts_names; + int num_xsi_rsts; + int num_ppe; + struct { + int (*get_src_port_id)(struct airoha_gdm_port *port, int nbq); + } ops; +}; + struct airoha_eth { struct device *dev; + const struct airoha_eth_soc_data *soc; + unsigned long state; void __iomem *fe_regs; @@ -574,7 +592,7 @@ struct airoha_eth { struct rhashtable flow_table; struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS]; - struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS]; + struct reset_control_bulk_data *xsi_rsts; struct net_device *napi_dev; @@ -617,15 +635,27 @@ static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port) return port->id == 1; } +static inline bool airoha_is_7581(struct airoha_eth *eth) +{ + return eth->soc->version == 0x7581; +} + +static inline bool airoha_is_7583(struct airoha_eth *eth) +{ + return eth->soc->version == 0x7583; +} + bool airoha_is_valid_gdm_port(struct airoha_eth *eth, struct airoha_gdm_port *port); +bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index); void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb, u16 hash, bool rx_wlan); int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data); int airoha_ppe_init(struct airoha_eth *eth); void airoha_ppe_deinit(struct airoha_eth *eth); void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port); +u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe); struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, u32 hash); void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash, diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c index 8c883f2b2d36..68b7f9684dc7 100644 --- a/drivers/net/ethernet/airoha/airoha_npu.c +++ b/drivers/net/ethernet/airoha/airoha_npu.c @@ -16,6 +16,8 @@ #define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin" #define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin" +#define NPU_AN7583_FIRMWARE_DATA "airoha/an7583_npu_data.bin" +#define NPU_AN7583_FIRMWARE_RV32 "airoha/an7583_npu_rv32.bin" #define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000 #define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE 0x10000 #define NPU_DUMP_SIZE 512 @@ -103,6 +105,16 @@ enum { QDMA_WAN_PON_XDSL, }; +struct airoha_npu_fw { + const char *name; + int max_size; +}; + +struct airoha_npu_soc_data { + struct airoha_npu_fw fw_rv32; + struct airoha_npu_fw fw_data; +}; + #define MBOX_MSG_FUNC_ID GENMASK(14, 11) #define 
MBOX_MSG_STATIC_BUF BIT(5) #define MBOX_MSG_STATUS GENMASK(4, 2) @@ -182,49 +194,53 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id, return ret; } -static int airoha_npu_run_firmware(struct device *dev, void __iomem *base, - struct resource *res) +static int airoha_npu_load_firmware(struct device *dev, void __iomem *addr, + const struct airoha_npu_fw *fw_info) { const struct firmware *fw; - void __iomem *addr; int ret; - ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_RV32, dev); + ret = request_firmware(&fw, fw_info->name, dev); if (ret) return ret == -ENOENT ? -EPROBE_DEFER : ret; - if (fw->size > NPU_EN7581_FIRMWARE_RV32_MAX_SIZE) { + if (fw->size > fw_info->max_size) { dev_err(dev, "%s: fw size too overlimit (%zu)\n", - NPU_EN7581_FIRMWARE_RV32, fw->size); + fw_info->name, fw->size); ret = -E2BIG; goto out; } - addr = devm_ioremap_resource(dev, res); - if (IS_ERR(addr)) { - ret = PTR_ERR(addr); - goto out; - } - memcpy_toio(addr, fw->data, fw->size); +out: release_firmware(fw); - ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_DATA, dev); - if (ret) - return ret == -ENOENT ? -EPROBE_DEFER : ret; + return ret; +} - if (fw->size > NPU_EN7581_FIRMWARE_DATA_MAX_SIZE) { - dev_err(dev, "%s: fw size too overlimit (%zu)\n", - NPU_EN7581_FIRMWARE_DATA, fw->size); - ret = -E2BIG; - goto out; - } +static int airoha_npu_run_firmware(struct device *dev, void __iomem *base, + struct resource *res) +{ + const struct airoha_npu_soc_data *soc; + void __iomem *addr; + int ret; - memcpy_toio(base + REG_NPU_LOCAL_SRAM, fw->data, fw->size); -out: - release_firmware(fw); + soc = of_device_get_match_data(dev); + if (!soc) + return -EINVAL; - return ret; + addr = devm_ioremap_resource(dev, res); + if (IS_ERR(addr)) + return PTR_ERR(addr); + + /* Load rv32 npu firmware */ + ret = airoha_npu_load_firmware(dev, addr, &soc->fw_rv32); + if (ret) + return ret; + + /* Load data npu firmware */ + return airoha_npu_load_firmware(dev, base + REG_NPU_LOCAL_SRAM, + &soc->fw_data); } static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance) @@ -597,8 +613,31 @@ void airoha_npu_put(struct airoha_npu *npu) } EXPORT_SYMBOL_GPL(airoha_npu_put); +static const struct airoha_npu_soc_data en7581_npu_soc_data = { + .fw_rv32 = { + .name = NPU_EN7581_FIRMWARE_RV32, + .max_size = NPU_EN7581_FIRMWARE_RV32_MAX_SIZE, + }, + .fw_data = { + .name = NPU_EN7581_FIRMWARE_DATA, + .max_size = NPU_EN7581_FIRMWARE_DATA_MAX_SIZE, + }, +}; + +static const struct airoha_npu_soc_data an7583_npu_soc_data = { + .fw_rv32 = { + .name = NPU_AN7583_FIRMWARE_RV32, + .max_size = NPU_EN7581_FIRMWARE_RV32_MAX_SIZE, + }, + .fw_data = { + .name = NPU_AN7583_FIRMWARE_DATA, + .max_size = NPU_EN7581_FIRMWARE_DATA_MAX_SIZE, + }, +}; + static const struct of_device_id of_airoha_npu_match[] = { - { .compatible = "airoha,en7581-npu" }, + { .compatible = "airoha,en7581-npu", .data = &en7581_npu_soc_data }, + { .compatible = "airoha,an7583-npu", .data = &an7583_npu_soc_data }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_airoha_npu_match); @@ -737,6 +776,8 @@ module_platform_driver(airoha_npu_driver); MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA); MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32); +MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_DATA); +MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_RV32); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); MODULE_DESCRIPTION("Airoha Network Processor Unit driver"); diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c index 
c0e17035db18..0caabb0c3aa0 100644 --- a/drivers/net/ethernet/airoha/airoha_ppe.c +++ b/drivers/net/ethernet/airoha/airoha_ppe.c @@ -32,9 +32,50 @@ static const struct rhashtable_params airoha_l2_flow_table_params = { .automatic_shrinking = true, }; -static bool airoha_ppe2_is_enabled(struct airoha_eth *eth) +static int airoha_ppe_get_num_stats_entries(struct airoha_ppe *ppe) { - return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK; + if (!IS_ENABLED(CONFIG_NET_AIROHA_FLOW_STATS)) + return -EOPNOTSUPP; + + if (airoha_is_7583(ppe->eth)) + return -EOPNOTSUPP; + + return PPE_STATS_NUM_ENTRIES; +} + +static int airoha_ppe_get_total_num_stats_entries(struct airoha_ppe *ppe) +{ + int num_stats = airoha_ppe_get_num_stats_entries(ppe); + + if (num_stats > 0) { + struct airoha_eth *eth = ppe->eth; + + num_stats = num_stats * eth->soc->num_ppe; + } + + return num_stats; +} + +static u32 airoha_ppe_get_total_sram_num_entries(struct airoha_ppe *ppe) +{ + struct airoha_eth *eth = ppe->eth; + + return PPE_SRAM_NUM_ENTRIES * eth->soc->num_ppe; +} + +u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe) +{ + u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); + + return sram_num_entries + PPE_DRAM_NUM_ENTRIES; +} + +bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index) +{ + if (index >= eth->soc->num_ppe) + return false; + + return airoha_fe_rr(eth, REG_PPE_GLO_CFG(index)) & PPE_GLO_CFG_EN_MASK; } static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe) @@ -46,14 +87,22 @@ static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe) static void airoha_ppe_hw_init(struct airoha_ppe *ppe) { - u32 sram_tb_size, sram_num_entries, dram_num_entries; + u32 sram_ppe_num_data_entries = PPE_SRAM_NUM_ENTRIES, sram_num_entries; + u32 sram_tb_size, dram_num_entries; struct airoha_eth *eth = ppe->eth; - int i; + int i, sram_num_stats_entries; - sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry); + sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); + sram_tb_size = sram_num_entries * sizeof(struct airoha_foe_entry); dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES); - for (i = 0; i < PPE_NUM; i++) { + sram_num_stats_entries = airoha_ppe_get_num_stats_entries(ppe); + if (sram_num_stats_entries > 0) + sram_ppe_num_data_entries -= sram_num_stats_entries; + sram_ppe_num_data_entries = + PPE_RAM_NUM_ENTRIES_SHIFT(sram_ppe_num_data_entries); + + for (i = 0; i < eth->soc->num_ppe; i++) { int p; airoha_fe_wr(eth, REG_PPE_TB_BASE(i), @@ -85,10 +134,16 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe) airoha_fe_rmw(eth, REG_PPE_TB_CFG(i), PPE_TB_CFG_SEARCH_MISS_MASK | + PPE_SRAM_TB_NUM_ENTRY_MASK | + PPE_DRAM_TB_NUM_ENTRY_MASK | PPE_TB_CFG_KEEPALIVE_MASK | PPE_TB_ENTRY_SIZE_MASK, FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) | - FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0)); + FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0) | + FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, + sram_ppe_num_data_entries) | + FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, + dram_num_entries)); airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED); @@ -101,35 +156,6 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe) FIELD_PREP(FP1_EGRESS_MTU_MASK, AIROHA_MAX_MTU)); } - - if (airoha_ppe2_is_enabled(eth)) { - sram_num_entries = - PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES); - airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), - PPE_SRAM_TB_NUM_ENTRY_MASK | - PPE_DRAM_TB_NUM_ENTRY_MASK, - FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, - sram_num_entries) | - 
FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, - dram_num_entries)); - airoha_fe_rmw(eth, REG_PPE_TB_CFG(1), - PPE_SRAM_TB_NUM_ENTRY_MASK | - PPE_DRAM_TB_NUM_ENTRY_MASK, - FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, - sram_num_entries) | - FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, - dram_num_entries)); - } else { - sram_num_entries = - PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES); - airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), - PPE_SRAM_TB_NUM_ENTRY_MASK | - PPE_DRAM_TB_NUM_ENTRY_MASK, - FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, - sram_num_entries) | - FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, - dram_num_entries)); - } } static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth) @@ -428,9 +454,11 @@ static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe, return 0; } -static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe) +static u32 airoha_ppe_foe_get_entry_hash(struct airoha_ppe *ppe, + struct airoha_foe_entry *hwe) { int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); + u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1; u32 hash, hv1, hv2, hv3; switch (type) { @@ -468,25 +496,31 @@ static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe) case PPE_PKT_TYPE_IPV6_6RD: default: WARN_ON_ONCE(1); - return PPE_HASH_MASK; + return ppe_hash_mask; } hash = (hv1 & hv2) | ((~hv1) & hv3); hash = (hash >> 24) | ((hash & 0xffffff) << 8); hash ^= hv1 ^ hv2 ^ hv3; hash ^= hash >> 16; - hash &= PPE_NUM_ENTRIES - 1; + hash &= ppe_hash_mask; return hash; } -static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash) +static int airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, + u32 hash, u32 *index) { - if (!airoha_ppe2_is_enabled(ppe->eth)) - return hash; + int ppe_num_stats_entries; + + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); + if (ppe_num_stats_entries < 0) + return ppe_num_stats_entries; - return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES - : hash; + *index = hash >= ppe_num_stats_entries ? 
hash - PPE_STATS_NUM_ENTRIES + : hash; + + return 0; } static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe, @@ -500,9 +534,13 @@ static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe, static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe, struct airoha_npu *npu) { - int i; + int i, ppe_num_stats_entries; - for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++) + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); + if (ppe_num_stats_entries < 0) + return; + + for (i = 0; i < ppe_num_stats_entries; i++) airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i); } @@ -513,10 +551,17 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe, { int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); u32 index, pse_port, val, *data, *ib2, *meter; + int ppe_num_stats_entries; u8 nbq; - index = airoha_ppe_foe_get_flow_stats_index(ppe, hash); - if (index >= PPE_STATS_NUM_ENTRIES) + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); + if (ppe_num_stats_entries < 0) + return; + + if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index)) + return; + + if (index >= ppe_num_stats_entries) return; if (type == PPE_PKT_TYPE_BRIDGE) { @@ -557,17 +602,17 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe, static struct airoha_foe_entry * airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash) { + u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); + lockdep_assert_held(&ppe_lock); - if (hash < PPE_SRAM_NUM_ENTRIES) { + if (hash < sram_num_entries) { u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry); + bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES; struct airoha_eth *eth = ppe->eth; - bool ppe2; u32 val; int i; - ppe2 = airoha_ppe2_is_enabled(ppe->eth) && - hash >= PPE1_SRAM_NUM_ENTRIES; airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2), FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) | PPE_SRAM_CTRL_REQ_MASK); @@ -577,7 +622,8 @@ airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash) REG_PPE_RAM_CTRL(ppe2))) return NULL; - for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++) + for (i = 0; i < sizeof(struct airoha_foe_entry) / sizeof(*hwe); + i++) hwe[i] = airoha_fe_rr(eth, REG_PPE_RAM_ENTRY(ppe2, i)); } @@ -614,10 +660,32 @@ static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e, return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1)); } +static int airoha_ppe_foe_commit_sram_entry(struct airoha_ppe *ppe, u32 hash) +{ + struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe); + bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES; + u32 *ptr = (u32 *)hwe, val; + int i; + + for (i = 0; i < sizeof(*hwe) / sizeof(*ptr); i++) + airoha_fe_wr(ppe->eth, REG_PPE_RAM_ENTRY(ppe2, i), ptr[i]); + + wmb(); + airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2), + FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) | + PPE_SRAM_CTRL_WR_MASK | PPE_SRAM_CTRL_REQ_MASK); + + return read_poll_timeout_atomic(airoha_fe_rr, val, + val & PPE_SRAM_CTRL_ACK_MASK, + 10, 100, false, ppe->eth, + REG_PPE_RAM_CTRL(ppe2)); +} + static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe, struct airoha_foe_entry *e, u32 hash, bool rx_wlan) { + u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe); u32 ts = airoha_ppe_get_timestamp(ppe); struct airoha_eth *eth = ppe->eth; @@ -642,14 +710,8 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe, if (!rx_wlan) airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, 
hash); - if (hash < PPE_SRAM_NUM_ENTRIES) { - dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe); - bool ppe2 = airoha_ppe2_is_enabled(eth) && - hash >= PPE1_SRAM_NUM_ENTRIES; - - err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe), - hash, ppe2); - } + if (hash < sram_num_entries) + err = airoha_ppe_foe_commit_sram_entry(ppe, hash); unlock: rcu_read_unlock(); @@ -772,7 +834,7 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, if (state == AIROHA_FOE_STATE_BIND) goto unlock; - index = airoha_ppe_foe_get_entry_hash(hwe); + index = airoha_ppe_foe_get_entry_hash(ppe, hwe); hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) { if (e->type == FLOW_TYPE_L2_SUBFLOW) { state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1); @@ -832,7 +894,7 @@ static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe, if (type == PPE_PKT_TYPE_BRIDGE) return airoha_ppe_foe_l2_flow_commit_entry(ppe, e); - hash = airoha_ppe_foe_get_entry_hash(&e->data); + hash = airoha_ppe_foe_get_entry_hash(ppe, &e->data); e->type = FLOW_TYPE_L4; e->hash = 0xffff; @@ -1158,11 +1220,19 @@ static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth, void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash, struct airoha_foe_stats64 *stats) { - u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash); struct airoha_eth *eth = ppe->eth; + int ppe_num_stats_entries; struct airoha_npu *npu; + u32 index; - if (index >= PPE_STATS_NUM_ENTRIES) + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); + if (ppe_num_stats_entries < 0) + return; + + if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index)) + return; + + if (index >= ppe_num_stats_entries) return; rcu_read_lock(); @@ -1225,20 +1295,22 @@ static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth, return -EOPNOTSUPP; } -static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe, - struct airoha_npu *npu) +static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe) { - int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES; + u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe); struct airoha_foe_entry *hwe = ppe->foe; + int i, err = 0; - if (airoha_ppe2_is_enabled(ppe->eth)) - sram_num_entries = sram_num_entries / 2; + for (i = 0; i < sram_num_entries; i++) { + int err; - for (i = 0; i < sram_num_entries; i++) memset(&hwe[i], 0, sizeof(*hwe)); + err = airoha_ppe_foe_commit_sram_entry(ppe, i); + if (err) + break; + } - return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma, - PPE_SRAM_NUM_ENTRIES); + return err; } static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth) @@ -1257,7 +1329,7 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth) { struct airoha_npu *npu = airoha_ppe_npu_get(eth); struct airoha_ppe *ppe = eth->ppe; - int err; + int err, ppe_num_stats_entries; if (IS_ERR(npu)) return PTR_ERR(npu); @@ -1266,18 +1338,15 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth) if (err) goto error_npu_put; - if (PPE_STATS_NUM_ENTRIES) { + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); + if (ppe_num_stats_entries > 0) { err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma, - PPE_STATS_NUM_ENTRIES); + ppe_num_stats_entries); if (err) goto error_npu_put; } airoha_ppe_hw_init(ppe); - err = airoha_ppe_flush_sram_entries(ppe, npu); - if (err) - goto error_npu_put; - airoha_ppe_foe_flow_stats_reset(ppe, npu); rcu_assign_pointer(eth->npu, npu); @@ -1313,9 +1382,10 @@ void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff 
*skb, u16 hash, bool rx_wlan) { struct airoha_ppe *ppe = dev->priv; + u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1; u16 now, diff; - if (hash > PPE_HASH_MASK) + if (hash > ppe_hash_mask) return; now = (u16)jiffies; @@ -1405,8 +1475,9 @@ EXPORT_SYMBOL_GPL(airoha_ppe_put_dev); int airoha_ppe_init(struct airoha_eth *eth) { + int foe_size, err, ppe_num_stats_entries; + u32 ppe_num_entries; struct airoha_ppe *ppe; - int foe_size, err; ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL); if (!ppe) @@ -1415,24 +1486,25 @@ int airoha_ppe_init(struct airoha_eth *eth) ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb; ppe->dev.ops.check_skb = airoha_ppe_check_skb; ppe->dev.priv = ppe; + ppe->eth = eth; + eth->ppe = ppe; - foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry); + ppe_num_entries = airoha_ppe_get_total_num_entries(ppe); + foe_size = ppe_num_entries * sizeof(struct airoha_foe_entry); ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma, GFP_KERNEL); if (!ppe->foe) return -ENOMEM; - ppe->eth = eth; - eth->ppe = ppe; - ppe->foe_flow = devm_kzalloc(eth->dev, - PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow), + ppe_num_entries * sizeof(*ppe->foe_flow), GFP_KERNEL); if (!ppe->foe_flow) return -ENOMEM; - foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats); - if (foe_size) { + ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe); + if (ppe_num_stats_entries > 0) { + foe_size = ppe_num_stats_entries * sizeof(*ppe->foe_stats); ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_stats_dma, GFP_KERNEL); @@ -1440,6 +1512,15 @@ int airoha_ppe_init(struct airoha_eth *eth) return -ENOMEM; } + ppe->foe_check_time = devm_kzalloc(eth->dev, ppe_num_entries, + GFP_KERNEL); + if (!ppe->foe_check_time) + return -ENOMEM; + + err = airoha_ppe_flush_sram_entries(ppe); + if (err) + return err; + err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params); if (err) return err; diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c index 05a756233f6a..0112c41150bb 100644 --- a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c +++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c @@ -53,9 +53,10 @@ static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private, [AIROHA_FOE_STATE_FIN] = "FIN", }; struct airoha_ppe *ppe = m->private; + u32 ppe_num_entries = airoha_ppe_get_total_num_entries(ppe); int i; - for (i = 0; i < PPE_NUM_ENTRIES; i++) { + for (i = 0; i < ppe_num_entries; i++) { const char *state_str, *type_str = "UNKNOWN"; void *src_addr = NULL, *dest_addr = NULL; u16 *src_port = NULL, *dest_port = NULL; diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h index 69c5a143db8c..ed4e3407f4a0 100644 --- a/drivers/net/ethernet/airoha/airoha_regs.h +++ b/drivers/net/ethernet/airoha/airoha_regs.h @@ -23,6 +23,8 @@ #define GDM3_BASE 0x1100 #define GDM4_BASE 0x2500 +#define CDM_BASE(_n) \ + ((_n) == 2 ? CDM2_BASE : CDM1_BASE) #define GDM_BASE(_n) \ ((_n) == 4 ? GDM4_BASE : \ (_n) == 3 ?
GDM3_BASE : \ @@ -109,30 +111,24 @@ #define PATN_DP_MASK GENMASK(31, 16) #define PATN_SP_MASK GENMASK(15, 0) -#define REG_CDM1_VLAN_CTRL CDM1_BASE -#define CDM1_VLAN_MASK GENMASK(31, 16) +#define REG_CDM_VLAN_CTRL(_n) CDM_BASE(_n) +#define CDM_VLAN_MASK GENMASK(31, 16) -#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08) -#define CDM1_VIP_QSEL_MASK GENMASK(24, 20) +#define REG_CDM_FWD_CFG(_n) (CDM_BASE(_n) + 0x08) +#define CDM_OAM_QSEL_MASK GENMASK(31, 27) +#define CDM_VIP_QSEL_MASK GENMASK(24, 20) -#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2)) -#define CDM1_CRSN_QSEL_REASON_MASK(_n) \ - GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) - -#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08) -#define CDM2_OAM_QSEL_MASK GENMASK(31, 27) -#define CDM2_VIP_QSEL_MASK GENMASK(24, 20) - -#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2)) -#define CDM2_CRSN_QSEL_REASON_MASK(_n) \ +#define REG_CDM_CRSN_QSEL(_n, _m) (CDM_BASE(_n) + 0x10 + ((_m) << 2)) +#define CDM_CRSN_QSEL_REASON_MASK(_n) \ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) #define REG_GDM_FWD_CFG(_n) GDM_BASE(_n) -#define GDM_DROP_CRC_ERR BIT(23) -#define GDM_IP4_CKSUM BIT(22) -#define GDM_TCP_CKSUM BIT(21) -#define GDM_UDP_CKSUM BIT(20) -#define GDM_STRIP_CRC BIT(16) +#define GDM_PAD_EN_MASK BIT(28) +#define GDM_DROP_CRC_ERR_MASK BIT(23) +#define GDM_IP4_CKSUM_MASK BIT(22) +#define GDM_TCP_CKSUM_MASK BIT(21) +#define GDM_UDP_CKSUM_MASK BIT(20) +#define GDM_STRIP_CRC_MASK BIT(16) #define GDM_UCFQ_MASK GENMASK(15, 12) #define GDM_BCFQ_MASK GENMASK(11, 8) #define GDM_MCFQ_MASK GENMASK(7, 4) @@ -156,6 +152,10 @@ #define LBK_CHAN_MODE_MASK BIT(1) #define LPBK_EN_MASK BIT(0) +#define REG_GDM_CHN_RLS(_n) (GDM_BASE(_n) + 0x20) +#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25) +#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17) + #define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24) #define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28) @@ -168,10 +168,10 @@ #define FE_GDM_MIB_RX_CLEAR_MASK BIT(1) #define FE_GDM_MIB_TX_CLEAR_MASK BIT(0) -#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4) +#define REG_FE_GDM_MIB_CFG(_n) (GDM_BASE(_n) + 0xf4) #define FE_STRICT_RFC2819_MODE_MASK BIT(31) -#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17) -#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16) +#define FE_GDM_TX_MIB_SPLIT_EN_MASK BIT(17) +#define FE_GDM_RX_MIB_SPLIT_EN_MASK BIT(16) #define FE_TX_MIB_ID_MASK GENMASK(15, 8) #define FE_RX_MIB_ID_MASK GENMASK(7, 0) @@ -214,6 +214,33 @@ #define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198) #define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c) +#define REG_GDM_SRC_PORT_SET(_n) (GDM_BASE(_n) + 0x23c) +#define GDM_SPORT_OFF2_MASK GENMASK(19, 16) +#define GDM_SPORT_OFF1_MASK GENMASK(15, 12) +#define GDM_SPORT_OFF0_MASK GENMASK(11, 8) + +#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280) +#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284) +#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288) +#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c) + +#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290) +#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294) +#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298) +#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c) +#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8) +#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc) +#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0) +#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4) +#define 
REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8) +#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc) +#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8) +#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec) +#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0) +#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4) +#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8) +#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc) + #define REG_PPE_GLO_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200) #define PPE_GLO_CFG_BUSY_MASK BIT(31) #define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9) @@ -326,44 +353,6 @@ #define REG_UPDMEM_DATA(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x374) -#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280) -#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284) -#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288) -#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c) - -#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290) -#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294) -#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298) -#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c) -#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8) -#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc) -#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0) -#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4) -#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8) -#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc) -#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8) -#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec) -#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0) -#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4) -#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8) -#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc) - -#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20) -#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25) -#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17) - -#define REG_GDM3_FWD_CFG GDM3_BASE -#define GDM3_PAD_EN_MASK BIT(28) - -#define REG_GDM4_FWD_CFG GDM4_BASE -#define GDM4_PAD_EN_MASK BIT(28) -#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8) - -#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c) -#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16) -#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12) -#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8) - #define REG_IP_FRAG_FP 0x2010 #define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21) #define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16) @@ -383,10 +372,8 @@ #define REG_MC_VLAN_DATA 0x2108 #define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2)) -#define SP_CPORT_PCIE1_MASK GENMASK(31, 28) -#define SP_CPORT_PCIE0_MASK GENMASK(27, 24) -#define SP_CPORT_USB_MASK GENMASK(7, 4) -#define SP_CPORT_ETH_MASK GENMASK(7, 4) +#define SP_CPORT_DFT_MASK GENMASK(2, 0) +#define SP_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2)) #define REG_SRC_PORT_FC_MAP6 0x2298 #define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24) diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h index 82f2363a45cd..e5a56bb989da 100644 --- a/drivers/net/ethernet/altera/altera_tse.h +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -401,9 +401,6 @@ struct altera_tse_private { /* MAC address space */ struct altera_tse_mac __iomem *mac_dev; - /* TSE Revision */ - u32 revision; - /* mSGDMA 
Rx Dispatcher address space */ void __iomem *rx_dma_csr; void __iomem *rx_dma_desc; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 3f6204de9e6b..ca55c5fd11df 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -892,9 +892,6 @@ static int tse_open(struct net_device *dev) netdev_warn(dev, "device MAC address %pM\n", dev->dev_addr); - if ((priv->revision < 0xd00) || (priv->revision > 0xe00)) - netdev_warn(dev, "TSE revision %x\n", priv->revision); - spin_lock(&priv->mac_cfg_lock); ret = reset_mac(priv); @@ -1142,6 +1139,7 @@ static int altera_tse_probe(struct platform_device *pdev) struct net_device *ndev; void __iomem *descmap; int ret = -ENODEV; + u32 revision; ndev = alloc_etherdev(sizeof(struct altera_tse_private)); if (!ndev) { @@ -1150,6 +1148,7 @@ static int altera_tse_probe(struct platform_device *pdev) } SET_NETDEV_DEV(ndev, &pdev->dev); + platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); priv->device = &pdev->dev; @@ -1387,25 +1386,7 @@ static int altera_tse_probe(struct platform_device *pdev) spin_lock_init(&priv->tx_lock); spin_lock_init(&priv->rxdma_irq_lock); - netif_carrier_off(ndev); - ret = register_netdev(ndev); - if (ret) { - dev_err(&pdev->dev, "failed to register TSE net device\n"); - goto err_register_netdev; - } - - platform_set_drvdata(pdev, ndev); - - priv->revision = ioread32(&priv->mac_dev->megacore_revision); - - if (netif_msg_probe(priv)) - dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n", - (priv->revision >> 8) & 0xff, - priv->revision & 0xff, - (unsigned long) control_port->start, priv->rx_irq, - priv->tx_irq); - - snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", ndev->name); + snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", dev_name(&pdev->dev)); pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc); if (IS_ERR(pcs_bus)) { ret = PTR_ERR(pcs_bus); @@ -1442,12 +1423,30 @@ static int altera_tse_probe(struct platform_device *pdev) goto err_init_phylink; } + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "failed to register TSE net device\n"); + goto err_register_netdev; + } + + revision = ioread32(&priv->mac_dev->megacore_revision); + + if (revision < 0xd00 || revision > 0xe00) + netdev_warn(ndev, "TSE revision %x\n", revision); + + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n", + (revision >> 8) & 0xff, revision & 0xff, + (unsigned long)control_port->start, priv->rx_irq, + priv->tx_irq); + return 0; + +err_register_netdev: + phylink_destroy(priv->phylink); err_init_phylink: lynx_pcs_destroy(priv->pcs); err_init_pcs: - unregister_netdev(ndev); -err_register_netdev: netif_napi_del(&priv->napi); altera_tse_mdio_destroy(ndev); err_free_netdev: diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index b39c6f3e1eda..d54dca3074eb 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -165,6 +165,7 @@ config AMD_XGBE select CRC32 select PHYLIB select AMD_XGBE_HAVE_ECC if X86 + select NET_SELFTESTS help This driver supports the AMD 10GbE Ethernet device found on an AMD SoC. 
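The altera_tse rework above is the classic "publish last" probe ordering: register_netdev() moves from the middle of probe to the very end, so the interface only becomes visible to userspace once the PCS, phylink and MDIO resources it depends on exist, and the new error labels unwind in strict reverse order of acquisition. Below is a minimal sketch of that shape, not the driver's actual code; the demo_* names and struct demo_priv are hypothetical stand-ins for the real setup and teardown helpers.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

struct demo_priv { int unused; };	/* hypothetical private data */

/* Hypothetical stand-ins for the DMA/PCS/phylink setup and teardown. */
static int demo_setup_resources(struct net_device *ndev);
static void demo_teardown_resources(struct net_device *ndev);

static int demo_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	int ret;

	ndev = alloc_etherdev(sizeof(struct demo_priv));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);

	ret = demo_setup_resources(ndev);
	if (ret)
		goto err_free_netdev;

	/* Register last: the netdev is live as soon as this returns. */
	ret = register_netdev(ndev);
	if (ret)
		goto err_teardown;

	return 0;

err_teardown:
	demo_teardown_resources(ndev);
err_free_netdev:
	free_netdev(ndev);
	return ret;
}

Registering last also closes the window in which userspace could bring up an interface whose PCS and phylink had not been created yet, which is why the revision read and probe banner move after register_netdev() in the hunk above.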
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h index 0b53a1fab46d..4a6b35c84dab 100644 --- a/drivers/net/ethernet/amd/pds_core/core.h +++ b/drivers/net/ethernet/amd/pds_core/core.h @@ -255,7 +255,8 @@ int pdsc_dl_flash_update(struct devlink *dl, struct devlink_flash_update_params *params, struct netlink_ext_ack *extack); int pdsc_dl_enable_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx); + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack); int pdsc_dl_enable_set(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx, struct netlink_ext_ack *extack); diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c index d8dc39da4161..b576be626a29 100644 --- a/drivers/net/ethernet/amd/pds_core/devlink.c +++ b/drivers/net/ethernet/amd/pds_core/devlink.c @@ -22,7 +22,8 @@ pdsc_viftype *pdsc_dl_find_viftype_by_id(struct pdsc *pdsc, } int pdsc_dl_enable_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct pdsc *pdsc = devlink_priv(dl); struct pdsc_viftype *vt_entry; diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile index 980e27652237..5992f7fd4d9b 100644 --- a/drivers/net/ethernet/amd/xgbe/Makefile +++ b/drivers/net/ethernet/amd/xgbe/Makefile @@ -5,7 +5,7 @@ amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \ xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \ xgbe-hwtstamp.o xgbe-ptp.o xgbe-pps.o \ xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \ - xgbe-platform.o + xgbe-platform.o xgbe-selftest.o amd-xgbe-$(CONFIG_PCI) += xgbe-pci.o amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index e5391a2eca51..b646ae575e6a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -211,6 +211,7 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) } XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); + pdata->sph = true; } static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata) @@ -223,6 +224,7 @@ static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata) XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0); } + pdata->sph = false; } static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, @@ -3578,3 +3580,20 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) DBGPR("<--xgbe_init_function_ptrs\n"); } + +int xgbe_enable_mac_loopback(struct xgbe_prv_data *pdata) +{ + /* Enable MAC loopback mode */ + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 1); + + /* Wait for loopback to stabilize */ + usleep_range(10, 15); + + return 0; +} + +void xgbe_disable_mac_loopback(struct xgbe_prv_data *pdata) +{ + /* Disable MAC loopback mode */ + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 0); +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 4dc631af7933..3ddd896d6987 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -367,10 +367,11 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data) static void xgbe_isr_bh_work(struct work_struct *work) { struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work); + unsigned int mac_isr, mac_tssr, mac_mdioisr; struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_channel *channel; + bool per_ch_irq, ti, ri, rbu, 
fbe; unsigned int dma_isr, dma_ch_isr; - unsigned int mac_isr, mac_tssr, mac_mdioisr; + struct xgbe_channel *channel; unsigned int i; /* The DMA interrupt status register also reports MAC and MTL @@ -384,43 +385,73 @@ static void xgbe_isr_bh_work(struct work_struct *work) netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr); for (i = 0; i < pdata->channel_count; i++) { + bool schedule_napi = false; + struct napi_struct *napi; + if (!(dma_isr & (1 << i))) continue; channel = pdata->channel[i]; dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); + + /* Precompute flags once */ + ti = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI); + ri = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI); + rbu = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU); + fbe = !!XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE); + netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", i, dma_ch_isr); - /* The TI or RI interrupt bits may still be set even if using - * per channel DMA interrupts. Check to be sure those are not - * enabled before using the private data napi structure. + per_ch_irq = pdata->per_channel_irq; + + /* + * Decide which NAPI to use and whether to schedule: + * - When not using per-channel IRQs: schedule on global NAPI + * if TI or RI are set. + * - RBU should also trigger NAPI (either per-channel or global) + * to allow refill. */ - if (!pdata->per_channel_irq && - (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || - XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) { - if (napi_schedule_prep(&pdata->napi)) { - /* Disable Tx and Rx interrupts */ - xgbe_disable_rx_tx_ints(pdata); + if (!per_ch_irq && (ti || ri)) + schedule_napi = true; + + if (rbu) { + schedule_napi = true; + pdata->ext_stats.rx_buffer_unavailable++; + } + + napi = per_ch_irq ? &channel->napi : &pdata->napi; - /* Turn on polling */ - __napi_schedule(&pdata->napi); + if (schedule_napi && napi_schedule_prep(napi)) { + /* Disable interrupts appropriately before polling */ + if (per_ch_irq) { + if (pdata->channel_irq_mode) + xgbe_disable_rx_tx_int(pdata, channel); + else + disable_irq_nosync(channel->dma_irq); + } else { + xgbe_disable_rx_tx_ints(pdata); } + + /* Turn on polling */ + __napi_schedule(napi); } else { - /* Don't clear Rx/Tx status if doing per channel DMA - * interrupts, these will be cleared by the ISR for - * per channel DMA interrupts. + /* + * Don't clear Rx/Tx status if doing per-channel DMA + * interrupts; those bits will be serviced/cleared by + * the per-channel ISR/NAPI. In non-per-channel mode + * when we're not scheduling NAPI here, ensure we don't + * accidentally clear TI/RI in HW: zero them in the + * local copy so that the eventual write-back does not + * clear TI/RI. 
*/ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0); XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0); } - if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU)) - pdata->ext_stats.rx_buffer_unavailable++; - /* Restart the device on a Fatal Bus Error */ - if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE)) + if (fbe) schedule_work(&pdata->restart_work); /* Clear interrupt signals */ @@ -1259,6 +1290,11 @@ static int xgbe_start(struct xgbe_prv_data *pdata) udp_tunnel_nic_reset_ntf(netdev); + /* Reset the phy settings */ + ret = xgbe_phy_reset(pdata); + if (ret) + goto err_txrx; + netif_tx_start_all_queues(netdev); xgbe_start_timers(pdata); @@ -1268,6 +1304,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata) return 0; +err_txrx: + hw_if->disable_rx(pdata); + hw_if->disable_tx(pdata); + err_irqs: xgbe_free_irqs(pdata); @@ -1574,11 +1614,6 @@ static int xgbe_open(struct net_device *netdev) goto err_dev_wq; } - /* Reset the phy settings */ - ret = xgbe_phy_reset(pdata); - if (ret) - goto err_an_wq; - /* Enable the clocks */ ret = clk_prepare_enable(pdata->sysclk); if (ret) { @@ -1754,27 +1789,6 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr) return 0; } -static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd) -{ - struct xgbe_prv_data *pdata = netdev_priv(netdev); - int ret; - - switch (cmd) { - case SIOCGHWTSTAMP: - ret = xgbe_get_hwtstamp_settings(pdata, ifreq); - break; - - case SIOCSHWTSTAMP: - ret = xgbe_set_hwtstamp_settings(pdata, ifreq); - break; - - default: - ret = -EOPNOTSUPP; - } - - return ret; -} - static int xgbe_change_mtu(struct net_device *netdev, int mtu) { struct xgbe_prv_data *pdata = netdev_priv(netdev); @@ -2020,7 +2034,6 @@ static const struct net_device_ops xgbe_netdev_ops = { .ndo_set_rx_mode = xgbe_set_rx_mode, .ndo_set_mac_address = xgbe_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_eth_ioctl = xgbe_ioctl, .ndo_change_mtu = xgbe_change_mtu, .ndo_tx_timeout = xgbe_tx_timeout, .ndo_get_stats64 = xgbe_get_stats64, @@ -2033,6 +2046,8 @@ static const struct net_device_ops xgbe_netdev_ops = { .ndo_fix_features = xgbe_fix_features, .ndo_set_features = xgbe_set_features, .ndo_features_check = xgbe_features_check, + .ndo_hwtstamp_get = xgbe_get_hwtstamp_settings, + .ndo_hwtstamp_set = xgbe_set_hwtstamp_settings, }; const struct net_device_ops *xgbe_get_netdev_ops(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index b6e1b67a2d0e..0d19b09497a0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -85,6 +85,9 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) int i; switch (stringset) { + case ETH_SS_TEST: + xgbe_selftest_get_strings(pdata, data); + break; case ETH_SS_STATS: for (i = 0; i < XGBE_STATS_COUNT; i++) ethtool_puts(&data, xgbe_gstring_stats[i].stat_string); @@ -131,6 +134,9 @@ static int xgbe_get_sset_count(struct net_device *netdev, int stringset) int ret; switch (stringset) { + case ETH_SS_TEST: + ret = xgbe_selftest_get_count(pdata); + break; case ETH_SS_STATS: ret = XGBE_STATS_COUNT + (pdata->tx_ring_count * 2) + @@ -760,6 +766,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = { .set_ringparam = xgbe_set_ringparam, .get_channels = xgbe_get_channels, .set_channels = xgbe_set_channels, + .self_test = xgbe_selftest_run, }; const struct ethtool_ops *xgbe_get_ethtool_ops(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c 
b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c index bc52e5ec6420..0127988e10be 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c @@ -157,26 +157,24 @@ unlock: spin_unlock_irqrestore(&pdata->tstamp_lock, flags); } -int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq) +int xgbe_get_hwtstamp_settings(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config, - sizeof(pdata->tstamp_config))) - return -EFAULT; + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + *config = pdata->tstamp_config; return 0; } -int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq) +int xgbe_set_hwtstamp_settings(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; - unsigned int mac_tscr; - - if (copy_from_user(&config, ifreq->ifr_data, sizeof(config))) - return -EFAULT; - - mac_tscr = 0; + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int mac_tscr = 0; - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: break; @@ -188,7 +186,7 @@ int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: break; @@ -290,7 +288,7 @@ int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq) xgbe_config_tstamp(pdata, mac_tscr); - memcpy(&pdata->tstamp_config, &config, sizeof(config)); + pdata->tstamp_config = *config; return 0; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index a56efc1bee33..a68757e8fd22 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -668,7 +668,7 @@ static int xgbe_phy_mii_read_c45(struct mii_bus *mii, int addr, int devad, else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO) ret = xgbe_phy_mdio_mii_read_c45(pdata, addr, devad, reg); else - ret = -ENOTSUPP; + ret = -EOPNOTSUPP; xgbe_phy_put_comm_ownership(pdata); @@ -989,6 +989,7 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) return ret; } phy_data->phydev = phydev; + phy_data->phydev->mac_managed_pm = true; xgbe_phy_external_phy_quirks(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c b/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c new file mode 100644 index 000000000000..55e5e467facd --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-selftest.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) +/* + * Copyright (c) 2014-2025, Advanced Micro Devices, Inc. + * Copyright (c) 2014, Synopsys, Inc. 
+ * All rights reserved + * + * Author: Raju Rangoju <Raju.Rangoju@amd.com> + */ +#include <linux/crc32.h> +#include <linux/ip.h> +#include <linux/udp.h> +#include <net/tcp.h> +#include <net/udp.h> +#include <net/checksum.h> +#include <net/selftests.h> + +#include "xgbe.h" +#include "xgbe-common.h" + +#define XGBE_LOOPBACK_NONE 0 +#define XGBE_LOOPBACK_MAC 1 +#define XGBE_LOOPBACK_PHY 2 + +struct xgbe_test { + char name[ETH_GSTRING_LEN]; + int lb; + int (*fn)(struct xgbe_prv_data *pdata); +}; + +static u8 xgbe_test_id; + +static int xgbe_test_loopback_validate(struct sk_buff *skb, + struct net_device *ndev, + struct packet_type *pt, + struct net_device *orig_ndev) +{ + struct net_test_priv *tdata = pt->af_packet_priv; + const unsigned char *dst = tdata->packet->dst; + const unsigned char *src = tdata->packet->src; + struct netsfhdr *hdr; + struct ethhdr *eh; + struct tcphdr *th; + struct udphdr *uh; + struct iphdr *ih; + int eat; + + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) + goto out; + + eat = (skb->tail + skb->data_len) - skb->end; + if (eat > 0 && skb_shared(skb)) { + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + goto out; + } + + if (skb_linearize(skb)) + goto out; + + if (skb_headlen(skb) < (NET_TEST_PKT_SIZE - ETH_HLEN)) + goto out; + + eh = (struct ethhdr *)skb_mac_header(skb); + if (dst) { + if (!ether_addr_equal_unaligned(eh->h_dest, dst)) + goto out; + } + if (src) { + if (!ether_addr_equal_unaligned(eh->h_source, src)) + goto out; + } + + ih = ip_hdr(skb); + + if (tdata->packet->tcp) { + if (ih->protocol != IPPROTO_TCP) + goto out; + + th = (struct tcphdr *)((u8 *)ih + 4 * ih->ihl); + if (th->dest != htons(tdata->packet->dport)) + goto out; + + hdr = (struct netsfhdr *)((u8 *)th + sizeof(*th)); + } else { + if (ih->protocol != IPPROTO_UDP) + goto out; + + uh = (struct udphdr *)((u8 *)ih + 4 * ih->ihl); + if (uh->dest != htons(tdata->packet->dport)) + goto out; + + hdr = (struct netsfhdr *)((u8 *)uh + sizeof(*uh)); + } + + if (hdr->magic != cpu_to_be64(NET_TEST_PKT_MAGIC)) + goto out; + if (tdata->packet->id != hdr->id) + goto out; + + tdata->ok = true; + complete(&tdata->comp); +out: + kfree_skb(skb); + return 0; +} + +static int __xgbe_test_loopback(struct xgbe_prv_data *pdata, + struct net_packet_attrs *attr) +{ + struct net_test_priv *tdata; + struct sk_buff *skb = NULL; + int ret = 0; + + tdata = kzalloc(sizeof(*tdata), GFP_KERNEL); + if (!tdata) + return -ENOMEM; + + tdata->ok = false; + init_completion(&tdata->comp); + + tdata->pt.type = htons(ETH_P_IP); + tdata->pt.func = xgbe_test_loopback_validate; + tdata->pt.dev = pdata->netdev; + tdata->pt.af_packet_priv = tdata; + tdata->packet = attr; + + dev_add_pack(&tdata->pt); + + skb = net_test_get_skb(pdata->netdev, xgbe_test_id, attr); + if (!skb) { + ret = -ENOMEM; + goto cleanup; + } + + xgbe_test_id++; + ret = dev_direct_xmit(skb, attr->queue_mapping); + if (ret) + goto cleanup; + + if (!attr->timeout) + attr->timeout = NET_LB_TIMEOUT; + + wait_for_completion_timeout(&tdata->comp, attr->timeout); + ret = tdata->ok ? 
0 : -ETIMEDOUT; + + if (ret) + netdev_err(pdata->netdev, "Response timedout: ret %d\n", ret); +cleanup: + dev_remove_pack(&tdata->pt); + kfree(tdata); + return ret; +} + +static int xgbe_test_mac_loopback(struct xgbe_prv_data *pdata) +{ + struct net_packet_attrs attr = {}; + + attr.dst = pdata->netdev->dev_addr; + return __xgbe_test_loopback(pdata, &attr); +} + +static int xgbe_test_phy_loopback(struct xgbe_prv_data *pdata) +{ + struct net_packet_attrs attr = {}; + int ret; + + if (!pdata->netdev->phydev) { + netdev_err(pdata->netdev, "phydev not found: cannot start PHY loopback test\n"); + return -EOPNOTSUPP; + } + + ret = phy_loopback(pdata->netdev->phydev, true, 0); + if (ret) + return ret; + + attr.dst = pdata->netdev->dev_addr; + ret = __xgbe_test_loopback(pdata, &attr); + + phy_loopback(pdata->netdev->phydev, false, 0); + return ret; +} + +static int xgbe_test_sph(struct xgbe_prv_data *pdata) +{ + struct net_packet_attrs attr = {}; + unsigned long cnt_end, cnt_start; + int ret; + + cnt_start = pdata->ext_stats.rx_split_header_packets; + + if (!pdata->sph) { + netdev_err(pdata->netdev, "Split Header not enabled\n"); + return -EOPNOTSUPP; + } + + /* UDP test */ + attr.dst = pdata->netdev->dev_addr; + attr.tcp = false; + + ret = __xgbe_test_loopback(pdata, &attr); + if (ret) + return ret; + + cnt_end = pdata->ext_stats.rx_split_header_packets; + if (cnt_end <= cnt_start) + return -EINVAL; + + /* TCP test */ + cnt_start = cnt_end; + + attr.dst = pdata->netdev->dev_addr; + attr.tcp = true; + + ret = __xgbe_test_loopback(pdata, &attr); + if (ret) + return ret; + + cnt_end = pdata->ext_stats.rx_split_header_packets; + if (cnt_end <= cnt_start) + return -EINVAL; + + return 0; +} + +static int xgbe_test_jumbo(struct xgbe_prv_data *pdata) +{ + struct net_packet_attrs attr = {}; + int size = pdata->rx_buf_size; + + attr.dst = pdata->netdev->dev_addr; + attr.max_size = size - ETH_FCS_LEN; + + return __xgbe_test_loopback(pdata, &attr); +} + +static const struct xgbe_test xgbe_selftests[] = { + { + .name = "MAC Loopback ", + .lb = XGBE_LOOPBACK_MAC, + .fn = xgbe_test_mac_loopback, + }, { + .name = "PHY Loopback ", + .lb = XGBE_LOOPBACK_NONE, + .fn = xgbe_test_phy_loopback, + }, { + .name = "Split Header ", + .lb = XGBE_LOOPBACK_PHY, + .fn = xgbe_test_sph, + }, { + .name = "Jumbo Frame ", + .lb = XGBE_LOOPBACK_PHY, + .fn = xgbe_test_jumbo, + }, +}; + +void xgbe_selftest_run(struct net_device *dev, + struct ethtool_test *etest, u64 *buf) +{ + struct xgbe_prv_data *pdata = netdev_priv(dev); + int count = xgbe_selftest_get_count(pdata); + int i, ret; + + memset(buf, 0, sizeof(*buf) * count); + xgbe_test_id = 0; + + if (etest->flags != ETH_TEST_FL_OFFLINE) { + netdev_err(pdata->netdev, "Only offline tests are supported\n"); + etest->flags |= ETH_TEST_FL_FAILED; + return; + } else if (!netif_carrier_ok(dev)) { + netdev_err(pdata->netdev, + "Invalid link, cannot execute tests\n"); + etest->flags |= ETH_TEST_FL_FAILED; + return; + } + + /* Wait for queues drain */ + msleep(200); + + for (i = 0; i < count; i++) { + ret = 0; + + switch (xgbe_selftests[i].lb) { + case XGBE_LOOPBACK_PHY: + ret = -EOPNOTSUPP; + if (dev->phydev) + ret = phy_loopback(dev->phydev, true, 0); + if (!ret) + break; + fallthrough; + case XGBE_LOOPBACK_MAC: + ret = xgbe_enable_mac_loopback(pdata); + break; + case XGBE_LOOPBACK_NONE: + break; + default: + ret = -EOPNOTSUPP; + break; + } + + /* + * First tests will always be MAC / PHY loopback. + * If any of them is not supported we abort earlier. 
+ */ + if (ret) { + netdev_err(pdata->netdev, "Loopback not supported\n"); + etest->flags |= ETH_TEST_FL_FAILED; + break; + } + + ret = xgbe_selftests[i].fn(pdata); + if (ret && (ret != -EOPNOTSUPP)) + etest->flags |= ETH_TEST_FL_FAILED; + buf[i] = ret; + + switch (xgbe_selftests[i].lb) { + case XGBE_LOOPBACK_PHY: + ret = -EOPNOTSUPP; + if (dev->phydev) + ret = phy_loopback(dev->phydev, false, 0); + if (!ret) + break; + fallthrough; + case XGBE_LOOPBACK_MAC: + xgbe_disable_mac_loopback(pdata); + break; + default: + break; + } + } +} + +void xgbe_selftest_get_strings(struct xgbe_prv_data *pdata, u8 *data) +{ + u8 *p = data; + int i; + + for (i = 0; i < xgbe_selftest_get_count(pdata); i++) + ethtool_puts(&p, xgbe_selftests[i].name); +} + +int xgbe_selftest_get_count(struct xgbe_prv_data *pdata) +{ + return ARRAY_SIZE(xgbe_selftests); +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index e8bbb6805901..03ef0f548483 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -1146,7 +1146,7 @@ struct xgbe_prv_data { spinlock_t tstamp_lock; struct ptp_clock_info ptp_clock_info; struct ptp_clock *ptp_clock; - struct hwtstamp_config tstamp_config; + struct kernel_hwtstamp_config tstamp_config; unsigned int tstamp_addend; struct work_struct tx_tstamp_work; struct sk_buff *tx_tstamp_skb; @@ -1246,6 +1246,7 @@ struct xgbe_prv_data { int rx_adapt_retries; bool rx_adapt_done; bool mode_set; + bool sph; }; /* Function prototypes*/ @@ -1307,10 +1308,11 @@ void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata, void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec, unsigned int nsec); void xgbe_tx_tstamp(struct work_struct *work); -int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata, - struct ifreq *ifreq); -int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, - struct ifreq *ifreq); +int xgbe_get_hwtstamp_settings(struct net_device *netdev, + struct kernel_hwtstamp_config *config); +int xgbe_set_hwtstamp_settings(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata, struct sk_buff *skb, struct xgbe_packet_data *packet); @@ -1321,6 +1323,16 @@ void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec, int xgbe_pps_config(struct xgbe_prv_data *pdata, struct xgbe_pps_config *cfg, int index, bool on); +/* Selftest functions */ +void xgbe_selftest_run(struct net_device *dev, + struct ethtool_test *etest, u64 *buf); +void xgbe_selftest_get_strings(struct xgbe_prv_data *pdata, u8 *data); +int xgbe_selftest_get_count(struct xgbe_prv_data *pdata); + +/* Loopback control */ +int xgbe_enable_mac_loopback(struct xgbe_prv_data *pdata); +void xgbe_disable_mac_loopback(struct xgbe_prv_data *pdata); + #ifdef CONFIG_DEBUG_FS void xgbe_debugfs_init(struct xgbe_prv_data *); void xgbe_debugfs_exit(struct xgbe_prv_data *); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index b565189e5913..4ef4fe64b8ac 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -258,10 +258,15 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev) (void)aq_nic_set_multicast_list(aq_nic, ndev); } -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) -static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic, - struct hwtstamp_config *config) +static int aq_ndev_hwtstamp_set(struct 
net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { + struct aq_nic_s *aq_nic = netdev_priv(netdev); + + if (!IS_REACHABLE(CONFIG_PTP_1588_CLOCK) || !aq_nic->aq_ptp) + return -EOPNOTSUPP; + switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: @@ -290,59 +295,17 @@ static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic, return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config); } -#endif - -static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr) -{ - struct hwtstamp_config config; -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) - int ret_val; -#endif - - if (!aq_nic->aq_ptp) - return -EOPNOTSUPP; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) - ret_val = aq_ndev_config_hwtstamp(aq_nic, &config); - if (ret_val) - return ret_val; -#endif - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) -static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr) +static int aq_ndev_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config config; + struct aq_nic_s *aq_nic = netdev_priv(netdev); if (!aq_nic->aq_ptp) return -EOPNOTSUPP; - aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, &config); - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} -#endif - -static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - struct aq_nic_s *aq_nic = netdev_priv(netdev); - - switch (cmd) { - case SIOCSHWTSTAMP: - return aq_ndev_hwtstamp_set(aq_nic, ifr); - -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) - case SIOCGHWTSTAMP: - return aq_ndev_hwtstamp_get(aq_nic, ifr); -#endif - } - - return -EOPNOTSUPP; + aq_ptp_hwtstamp_config_get(aq_nic->aq_ptp, config); + return 0; } static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, @@ -500,12 +463,13 @@ static const struct net_device_ops aq_ndev_ops = { .ndo_set_mac_address = aq_ndev_set_mac_address, .ndo_set_features = aq_ndev_set_features, .ndo_fix_features = aq_ndev_fix_features, - .ndo_eth_ioctl = aq_ndev_ioctl, .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid, .ndo_setup_tc = aq_ndo_setup_tc, .ndo_bpf = aq_xdp, .ndo_xdp_xmit = aq_xdp_xmit, + .ndo_hwtstamp_get = aq_ndev_hwtstamp_get, + .ndo_hwtstamp_set = aq_ndev_hwtstamp_set, }; static int __init aq_ndev_init_module(void) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index 5acb3e16b567..0fa0f891c0e0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -51,7 +51,7 @@ struct ptp_tx_timeout { struct aq_ptp_s { struct aq_nic_s *aq_nic; - struct hwtstamp_config hwtstamp_config; + struct kernel_hwtstamp_config hwtstamp_config; spinlock_t ptp_lock; spinlock_t ptp_ring_lock; struct ptp_clock *ptp_clock; @@ -567,7 +567,7 @@ static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct skb_shared_hwtsta } void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config) { *config = aq_ptp->hwtstamp_config; } @@ -588,7 +588,7 @@ static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp) } int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config) { struct aq_nic_s *aq_nic 
= aq_ptp->aq_nic; const struct aq_hw_ops *hw_ops; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h index 210b723f2207..5e643ec7cc06 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h @@ -60,9 +60,9 @@ void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp); /* Must be to check available of PTP before call */ void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp, - struct hwtstamp_config *config); + struct kernel_hwtstamp_config *config); int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp, - struct hwtstamp_config *config); + struct kernel_hwtstamp_config *config); /* Return either ring is belong to PTP or not*/ bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring); @@ -130,9 +130,9 @@ static inline int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb) static inline void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp) {} static inline void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp, - struct hwtstamp_config *config) {} + struct kernel_hwtstamp_config *config) {} static inline int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp, - struct hwtstamp_config *config) + struct kernel_hwtstamp_config *config) { return 0; } diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 9fdef874f5ca..666522d64775 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -25,6 +25,7 @@ config B44 select SSB select MII select PHYLIB + select FIXED_PHY if BCM47XX help If you have a network (Ethernet) controller of this type, say Y or M here. diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c index 63f1a8c3a7fb..dd80ccfca19d 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c @@ -163,11 +163,30 @@ static void bcmasp_set_msglevel(struct net_device *dev, u32 level) static void bcmasp_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmasp_intf *intf = netdev_priv(dev); + struct bcmasp_priv *priv = intf->parent; + struct device *kdev = &priv->pdev->dev; + u32 phy_wolopts = 0; + + if (dev->phydev) { + phy_ethtool_get_wol(dev->phydev, wol); + phy_wolopts = wol->wolopts; + } + + /* MAC is not wake-up capable, return what the PHY does */ + if (!device_can_wakeup(kdev)) + return; + + /* Overlay MAC capabilities with that of the PHY queried before */ + wol->supported |= BCMASP_SUPPORTED_WAKE; + wol->wolopts |= intf->wolopts; + + /* Return the PHY configured magic password */ + if (phy_wolopts & WAKE_MAGICSECURE) + return; - wol->supported = BCMASP_SUPPORTED_WAKE; - wol->wolopts = intf->wolopts; memset(wol->sopass, 0, sizeof(wol->sopass)); + /* Otherwise the MAC one */ if (wol->wolopts & WAKE_MAGICSECURE) memcpy(wol->sopass, intf->sopass, sizeof(intf->sopass)); } @@ -177,10 +196,21 @@ static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct bcmasp_intf *intf = netdev_priv(dev); struct bcmasp_priv *priv = intf->parent; struct device *kdev = &priv->pdev->dev; + int ret = 0; + + /* Try Wake-on-LAN from the PHY first */ + if (dev->phydev) { + ret = phy_ethtool_set_wol(dev->phydev, wol); + if (ret != -EOPNOTSUPP && wol->wolopts) + return ret; + } if (!device_can_wakeup(kdev)) return -EOPNOTSUPP; + if (wol->wolopts & ~BCMASP_SUPPORTED_WAKE) + return -EINVAL; + /* Interface Specific 
*/ intf->wolopts = wol->wolopts; if (intf->wolopts & WAKE_MAGICSECURE) diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 0353359c3fe9..888f28f11406 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -31,6 +31,7 @@ #include <linux/ssb/ssb.h> #include <linux/slab.h> #include <linux/phy.h> +#include <linux/phy_fixed.h> #include <linux/uaccess.h> #include <asm/io.h> @@ -2233,7 +2234,6 @@ static int b44_register_phy_one(struct b44 *bp) struct mii_bus *mii_bus; struct ssb_device *sdev = bp->sdev; struct phy_device *phydev; - char bus_id[MII_BUS_ID_SIZE + 3]; struct ssb_sprom *sprom = &sdev->bus->sprom; int err; @@ -2260,27 +2260,26 @@ static int b44_register_phy_one(struct b44 *bp) goto err_out_mdiobus; } - if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) && - (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) { - + phydev = mdiobus_get_phy(bp->mii_bus, bp->phy_addr); + if (!phydev && + sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM)) { dev_info(sdev->dev, "could not find PHY at %i, use fixed one\n", bp->phy_addr); - bp->phy_addr = 0; - snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0", - bp->phy_addr); - } else { - snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id, - bp->phy_addr); + phydev = fixed_phy_register_100fd(); + if (!IS_ERR(phydev)) + bp->phy_addr = phydev->mdio.addr; } - phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link, - PHY_INTERFACE_MODE_MII); - if (IS_ERR(phydev)) { + if (IS_ERR_OR_NULL(phydev)) + err = -ENODEV; + else + err = phy_connect_direct(bp->dev, phydev, &b44_adjust_link, + PHY_INTERFACE_MODE_MII); + if (err) { dev_err(sdev->dev, "could not attach PHY at %i\n", bp->phy_addr); - err = PTR_ERR(phydev); goto err_out_mdiobus_unregister; } @@ -2293,7 +2292,6 @@ static int b44_register_phy_one(struct b44 *bp) linkmode_copy(phydev->advertising, phydev->supported); bp->old_link = 0; - bp->phy_addr = phydev->mdio.addr; phy_attached_info(phydev); @@ -2311,10 +2309,15 @@ err_out: static void b44_unregister_phy_one(struct b44 *bp) { - struct net_device *dev = bp->dev; struct mii_bus *mii_bus = bp->mii_bus; + struct net_device *dev = bp->dev; + struct phy_device *phydev; + + phydev = dev->phydev; - phy_disconnect(dev->phydev); + phy_disconnect(phydev); + if (phy_is_pseudo_fixed_link(phydev)) + fixed_phy_unregister(phydev); mdiobus_unregister(mii_bus); mdiobus_free(mii_bus); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index fc8dec37a9e4..3d853eeb976f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3355,19 +3355,11 @@ static int bnx2x_get_rxfh_fields(struct net_device *dev, return 0; } -static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - u32 *rules __always_unused) +static u32 bnx2x_get_rx_ring_count(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); - switch (info->cmd) { - case ETHTOOL_GRXRINGS: - info->data = BNX2X_NUM_ETH_QUEUES(bp); - return 0; - default: - DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); - return -EOPNOTSUPP; - } + return BNX2X_NUM_ETH_QUEUES(bp); } static int bnx2x_set_rxfh_fields(struct net_device *dev, @@ -3674,7 +3666,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_strings = bnx2x_get_strings, .set_phys_id = bnx2x_set_phys_id, .get_ethtool_stats = bnx2x_get_ethtool_stats, - .get_rxnfc = 
bnx2x_get_rxnfc, + .get_rx_ring_count = bnx2x_get_rx_ring_count, .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, .get_rxfh = bnx2x_get_rxfh, .set_rxfh = bnx2x_set_rxfh, @@ -3702,7 +3694,7 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = { .get_sset_count = bnx2x_get_sset_count, .get_strings = bnx2x_get_strings, .get_ethtool_stats = bnx2x_get_ethtool_stats, - .get_rxnfc = bnx2x_get_rxnfc, + .get_rx_ring_count = bnx2x_get_rx_ring_count, .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, .get_rxfh = bnx2x_get_rxfh, .set_rxfh = bnx2x_set_rxfh, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index f0f05d7315ac..aca4267babc8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -308,8 +308,11 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp); /**************************************************************************** * General service functions ****************************************************************************/ - -static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr); +static int bnx2x_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); +static int bnx2x_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config); static void __storm_memset_dma_mapping(struct bnx2x *bp, u32 addr, dma_addr_t mapping) @@ -12813,14 +12816,9 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!netif_running(dev)) return -EAGAIN; - switch (cmd) { - case SIOCSHWTSTAMP: - return bnx2x_hwtstamp_ioctl(bp, ifr); - default: - DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", - mdio->phy_id, mdio->reg_num, mdio->val_in); - return mdio_mii_ioctl(&bp->mdio, mdio, cmd); - } + DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", + mdio->phy_id, mdio->reg_num, mdio->val_in); + return mdio_mii_ioctl(&bp->mdio, mdio, cmd); } static int bnx2x_validate_addr(struct net_device *dev) @@ -13036,6 +13034,8 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_get_phys_port_id = bnx2x_get_phys_port_id, .ndo_set_vf_link_state = bnx2x_set_vf_link_state, .ndo_features_check = bnx2x_features_check, + .ndo_hwtstamp_get = bnx2x_hwtstamp_get, + .ndo_hwtstamp_set = bnx2x_hwtstamp_set, }; static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, @@ -15350,31 +15350,57 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp) return 0; } -static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr) +static int bnx2x_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; + struct bnx2x *bp = netdev_priv(dev); int rc; - DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n"); + DP(BNX2X_MSG_PTP, "HWTSTAMP SET called\n"); - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; + if (!netif_running(dev)) { + NL_SET_ERR_MSG_MOD(extack, "Device is down"); + return -EAGAIN; + } DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n", - config.tx_type, config.rx_filter); + config->tx_type, config->rx_filter); + + switch (config->tx_type) { + case HWTSTAMP_TX_ON: + case HWTSTAMP_TX_OFF: + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "One-step timestamping is not supported"); + return -ERANGE; + } bp->hwtstamp_ioctl_called = true; - bp->tx_type = config.tx_type; - bp->rx_filter = config.rx_filter; + 
bp->tx_type = config->tx_type; + bp->rx_filter = config->rx_filter; rc = bnx2x_configure_ptp_filters(bp); - if (rc) + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "HW configuration failure"); return rc; + } + + config->rx_filter = bp->rx_filter; + + return 0; +} + +static int bnx2x_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) +{ + struct bnx2x *bp = netdev_priv(dev); - config.rx_filter = bp->rx_filter; + config->rx_filter = bp->rx_filter; + config->tx_type = bp->tx_type; - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; } /* Configures HW for PTP */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a625e7c311dd..d17d0ea89c36 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -877,7 +877,7 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, next_tx_int: cons = NEXT_TX(cons); - dev_consume_skb_any(skb); + napi_consume_skb(skb, budget); } WRITE_ONCE(txr->tx_cons, cons); @@ -4479,7 +4479,14 @@ static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp, ring->fw_ring_id = INVALID_HW_RING_ID; if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | - RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; + RX_BD_TYPE_RX_AGG_BD; + + /* On P7, setting EOP will cause the chip to disable + * Relaxed Ordering (RO) for TPA data. Disable EOP for + * potentially higher performance with RO. + */ + if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA)) + type |= RX_BD_FLAGS_AGG_EOP; bnxt_init_rxbd_pages(ring, type); } @@ -5688,6 +5695,10 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, u16 cmd = bnxt_vf_req_snif[i]; unsigned int bit, idx; + if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) && + cmd == HWRM_PORT_PHY_QCFG) + continue; + idx = cmd / 32; bit = cmd % 32; data[idx] |= 1 << bit; @@ -8506,6 +8517,11 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV) bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV; + if (resp->roce_bidi_opt_mode & + FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED) + bp->cos0_cos1_shared = 1; + else + bp->cos0_cos1_shared = 0; switch (resp->port_partition_type) { case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: @@ -9653,6 +9669,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->flags |= BNXT_FLAG_ROCEV1_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) bp->flags |= BNXT_FLAG_ROCEV2_CAP; + if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN; if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) @@ -14020,11 +14038,19 @@ static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) { - struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - int i = bnapi->index; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2; + int i = bnapi->index, j; netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); + for (j = 0; j < cpr->cp_ring_count; j++) { + cpr2 = &cpr->cp_ring_arr[j]; + if (!cpr2->bnapi) + continue; + netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n", + i, j, cpr2->cp_ring_struct.fw_ring_id, + cpr2->cp_raw_cons); + } } static void bnxt_dbg_dump_states(struct bnxt *bp) diff 
--git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 3613a172483a..f5f07a7e6b29 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -131,6 +131,7 @@ struct rx_bd { #define RX_BD_TYPE_48B_BD_SIZE (2 << 4) #define RX_BD_TYPE_64B_BD_SIZE (3 << 4) #define RX_BD_FLAGS_SOP (1 << 6) + #define RX_BD_FLAGS_AGG_EOP (1 << 6) #define RX_BD_FLAGS_EOP (1 << 7) #define RX_BD_FLAGS_BUFFERS (3 << 8) #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8) @@ -2424,6 +2425,7 @@ struct bnxt { u8 tc_to_qidx[BNXT_MAX_QUEUE]; u8 q_ids[BNXT_MAX_QUEUE]; u8 max_q; + u8 cos0_cos1_shared; u8 num_tc; u16 max_pfcwd_tmo_ms; @@ -2482,6 +2484,7 @@ struct bnxt { #define BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED BIT_ULL(6) #define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7) #define BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT BIT_ULL(8) + #define BNXT_FW_CAP_LINK_ADMIN BIT_ULL(9) #define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10) #define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11) #define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 67ca02d84c97..15de802bbac4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -1086,7 +1086,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, } static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct bnxt *bp = bnxt_get_bp_from_dl(dl); struct hwrm_nvm_get_variable_input *req; @@ -1168,7 +1169,8 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, } static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct bnxt *bp = bnxt_get_bp_from_dl(dl); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 41686a6f84b5..068e191ede19 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -688,16 +688,22 @@ skip_ring_stats: buf[j] = *(rx_port_stats_ext + n); } for (i = 0; i < 8; i++, j++) { - long n = bnxt_tx_bytes_pri_arr[i].base_off + - bp->pri2cos_idx[i]; + u8 cos_idx = bp->pri2cos_idx[i]; + long n; + n = bnxt_tx_bytes_pri_arr[i].base_off + cos_idx; buf[j] = *(tx_port_stats_ext + n); + if (bp->cos0_cos1_shared && !cos_idx) + buf[j] += *(tx_port_stats_ext + n + 1); } for (i = 0; i < 8; i++, j++) { - long n = bnxt_tx_pkts_pri_arr[i].base_off + - bp->pri2cos_idx[i]; + u8 cos_idx = bp->pri2cos_idx[i]; + long n; + n = bnxt_tx_pkts_pri_arr[i].base_off + cos_idx; buf[j] = *(tx_port_stats_ext + n); + if (bp->cos0_cos1_shared && !cos_idx) + buf[j] += *(tx_port_stats_ext + n + 1); } } } @@ -1764,6 +1770,13 @@ static int bnxt_set_rxfh_fields(struct net_device *dev, return rc; } +static u32 bnxt_get_rx_ring_count(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + return bp->rx_nr_rings; +} + static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { @@ -1771,10 +1784,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, int rc = 0; switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = bp->rx_nr_rings; - break; - case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = bp->ntp_fltr_count; 
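The hunk above repeats the conversion already applied to bnx2x and tg3 in this series (and to bcmgenet below): the ETHTOOL_GRXRINGS case is dropped from .get_rxnfc and the RX ring count is reported through the dedicated .get_rx_ring_count ethtool callback instead, leaving .get_rxnfc with only the classification-rule commands. A minimal sketch of the converted shape, using hypothetical demo_* names rather than the bnxt code itself:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct demo_priv { u32 rx_ring_count; };	/* hypothetical private data */

static u32 demo_get_rx_ring_count(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	return priv->rx_ring_count;
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_rx_ring_count = demo_get_rx_ring_count,
	/* .get_rxnfc now only handles classification-rule queries */
};

The pattern suggests the ethtool core answers ETHTOOL_GRXRINGS generically from this callback, so each driver sheds the boilerplate switch case seen on the removed lines.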
cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL; @@ -4617,6 +4626,11 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extac PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) return 0; + if (bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET || + bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE){ + NL_SET_ERR_MSG_MOD(extack, "Operation not supported as PHY type is Base-T"); + return -EOPNOTSUPP; + } switch (bp->link_info.module_status) { case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down"); @@ -5605,6 +5619,7 @@ const struct ethtool_ops bnxt_ethtool_ops = { .set_channels = bnxt_set_channels, .get_rxnfc = bnxt_get_rxnfc, .set_rxnfc = bnxt_set_rxnfc, + .get_rx_ring_count = bnxt_get_rx_ring_count, .get_rxfh_indir_size = bnxt_get_rxfh_indir_size, .get_rxfh_key_size = bnxt_get_rxfh_key_size, .get_rxfh = bnxt_get_rxfh, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 0abaa2bbe357..a8a74f07bb54 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -952,7 +952,6 @@ static int bnxt_ptp_pps_init(struct bnxt *bp) snprintf(ptp_info->pin_config[i].name, sizeof(ptp_info->pin_config[i].name), "bnxt_pps%d", i); ptp_info->pin_config[i].index = i; - ptp_info->pin_config[i].chan = i; if (*pin_usg == BNXT_PPS_PIN_PPS_IN) ptp_info->pin_config[i].func = PTP_PF_EXTTS; else if (*pin_usg == BNXT_PPS_PIN_PPS_OUT) @@ -969,6 +968,8 @@ static int bnxt_ptp_pps_init(struct bnxt *bp) ptp_info->n_per_out = 1; ptp_info->pps = 1; ptp_info->verify = bnxt_ptp_verify; + ptp_info->supported_extts_flags = PTP_RISING_EDGE | PTP_STRICT_FLAGS; + ptp_info->supported_perout_flags = PTP_PEROUT_DUTY_CYCLE; return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 80fed2c07b9e..be7deb9cc410 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -332,6 +332,38 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, return rc; } +static int bnxt_set_vf_link_admin_state(struct bnxt *bp, int vf_id) +{ + struct hwrm_func_cfg_input *req; + struct bnxt_vf_info *vf; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) + return 0; + + vf = &bp->pf.vf[vf_id]; + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (rc) + return rc; + + req->fid = cpu_to_le16(vf->fw_fid); + switch (vf->flags & (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP)) { + case BNXT_VF_LINK_FORCED: + req->options = + FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN; + break; + case (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP): + req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP; + break; + default: + req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO; + break; + } + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE); + return hwrm_req_send(bp, req); +} + int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link) { struct bnxt *bp = netdev_priv(dev); @@ -357,10 +389,11 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link) break; default: netdev_err(bp->dev, "Invalid link option\n"); - rc = -EINVAL; - break; + return -EINVAL; } - if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED)) + if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) + rc = bnxt_set_vf_link_admin_state(bp, vf_id); + else if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED)) rc = 
bnxt_hwrm_fwd_async_event_cmpl(bp, vf, ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE); return rc; @@ -666,15 +699,21 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) hwrm_req_hold(bp, req); for (i = 0; i < num_vfs; i++) { + struct bnxt_vf_info *vf = &pf->vf[i]; + + vf->fw_fid = pf->first_vf_id + i; + rc = bnxt_set_vf_link_admin_state(bp, i); + if (rc) + break; + if (reset) __bnxt_set_vf_params(bp, i); - req->vf_id = cpu_to_le16(pf->first_vf_id + i); + req->vf_id = cpu_to_le16(vf->fw_fid); rc = hwrm_req_send(bp, req); if (rc) break; pf->active_vfs = i + 1; - pf->vf[i].fw_fid = pf->first_vf_id + i; } if (pf->active_vfs) { @@ -741,6 +780,12 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) FUNC_CFG_REQ_ENABLES_NUM_VNICS | FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); + if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) { + req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO; + req->enables |= + cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE); + } + mtu = bp->dev->mtu + VLAN_ETH_HLEN; req->mru = cpu_to_le16(mtu); req->admin_mtu = cpu_to_le16(mtu); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index f8c2c72b382d..927971c362f1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -142,7 +142,6 @@ int bnxt_register_dev(struct bnxt_en_dev *edev, edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp); bnxt_fill_msix_vecs(bp, bp->edev->msix_entries); - edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; exit: mutex_unlock(&edev->en_dev_lock); netdev_unlock(dev); @@ -159,8 +158,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev) ulp = edev->ulp_tbl; netdev_lock(dev); mutex_lock(&edev->en_dev_lock); - if (ulp->msix_requested) - edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; edev->ulp_tbl->msix_requested = 0; if (ulp->max_async_event_id) @@ -298,7 +295,7 @@ void bnxt_ulp_irq_stop(struct bnxt *bp) struct bnxt_ulp_ops *ops; bool reset = false; - if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) + if (!edev) return; if (bnxt_ulp_registered(bp->edev)) { @@ -321,7 +318,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err) struct bnxt_en_dev *edev = bp->edev; struct bnxt_ulp_ops *ops; - if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) + if (!edev) return; if (bnxt_ulp_registered(bp->edev)) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index 7b9dd8ebe4bc..3c5b8a53f715 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -58,7 +58,6 @@ struct bnxt_en_dev { #define BNXT_EN_FLAG_ROCEV2_CAP 0x2 #define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \ BNXT_EN_FLAG_ROCEV2_CAP) - #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4 #define BNXT_EN_FLAG_ULP_STOPPED 0x8 #define BNXT_EN_FLAG_VF 0x10 #define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 98971ae4f87d..05512aa10c20 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -35,7 +35,6 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/phy.h> -#include <linux/platform_data/bcmgenet.h> #include <linux/unaligned.h> @@ -1641,6 +1640,13 @@ static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv) return res; } +static u32 bcmgenet_get_rx_ring_count(struct net_device *dev) +{ + 
struct bcmgenet_priv *priv = netdev_priv(dev); + + return priv->hw_params->rx_queues ?: 1; +} + static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { @@ -1650,9 +1656,6 @@ static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, int i = 0; switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = priv->hw_params->rx_queues ?: 1; - break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = bcmgenet_get_num_flows(priv); cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL; @@ -1701,6 +1704,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .get_rxnfc = bcmgenet_get_rxnfc, .set_rxnfc = bcmgenet_set_rxnfc, + .get_rx_ring_count = bcmgenet_get_rx_ring_count, .get_pauseparam = bcmgenet_get_pauseparam, .set_pauseparam = bcmgenet_set_pauseparam, }; @@ -3926,7 +3930,6 @@ MODULE_DEVICE_TABLE(of, bcmgenet_match); static int bcmgenet_probe(struct platform_device *pdev) { - struct bcmgenet_platform_data *pd = pdev->dev.platform_data; const struct bcmgenet_plat_data *pdata; struct bcmgenet_priv *priv; struct net_device *dev; @@ -4010,9 +4013,6 @@ static int bcmgenet_probe(struct platform_device *pdev) priv->version = pdata->version; priv->dma_max_burst_length = pdata->dma_max_burst_length; priv->flags = pdata->flags; - } else { - priv->version = pd->genet_version; - priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; } priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet"); @@ -4062,16 +4062,13 @@ static int bcmgenet_probe(struct platform_device *pdev) if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); - if (pd && !IS_ERR_OR_NULL(pd->mac_address)) - eth_hw_addr_set(dev, pd->mac_address); - else - if (device_get_ethdev_address(&pdev->dev, dev)) - if (has_acpi_companion(&pdev->dev)) { - u8 addr[ETH_ALEN]; + if (device_get_ethdev_address(&pdev->dev, dev)) + if (has_acpi_companion(&pdev->dev)) { + u8 addr[ETH_ALEN]; - bcmgenet_get_hw_addr(priv, addr); - eth_hw_addr_set(dev, addr); - } + bcmgenet_get_hw_addr(priv, addr); + eth_hw_addr_set(dev, addr); + } if (!is_valid_ether_addr(dev->dev_addr)) { dev_warn(&pdev->dev, "using random Ethernet MAC\n"); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 573e8b279e52..38f854b94a79 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -20,7 +20,6 @@ #include <linux/of.h> #include <linux/of_net.h> #include <linux/of_mdio.h> -#include <linux/platform_data/bcmgenet.h> #include <linux/platform_data/mdio-bcm-unimac.h> #include "bcmgenet.h" @@ -436,23 +435,6 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) return priv->mdio_dn; } -static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv, - struct unimac_mdio_pdata *ppd) -{ - struct device *kdev = &priv->pdev->dev; - struct bcmgenet_platform_data *pd = kdev->platform_data; - - if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { - /* - * Internal or external PHY with MDIO access - */ - if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - ppd->phy_mask = 1 << pd->phy_address; - else - ppd->phy_mask = 0; - } -} - static int bcmgenet_mii_wait(void *wait_func_data) { struct bcmgenet_priv *priv = wait_func_data; @@ -467,7 +449,6 @@ static int bcmgenet_mii_wait(void *wait_func_data) static int bcmgenet_mii_register(struct bcmgenet_priv *priv) { struct platform_device 
*pdev = priv->pdev; - struct bcmgenet_platform_data *pdata = pdev->dev.platform_data; struct device_node *dn = pdev->dev.of_node; struct unimac_mdio_pdata ppd; struct platform_device *ppdev; @@ -511,8 +492,6 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv) ppdev->dev.parent = &pdev->dev; if (dn) ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv); - else if (pdata) - bcmgenet_mii_pdata_init(priv, &ppd); else ppd.phy_mask = ~0; @@ -594,58 +573,6 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) return 0; } -static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) -{ - struct device *kdev = &priv->pdev->dev; - struct bcmgenet_platform_data *pd = kdev->platform_data; - char phy_name[MII_BUS_ID_SIZE + 3]; - char mdio_bus_id[MII_BUS_ID_SIZE]; - struct phy_device *phydev; - - snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", - UNIMAC_MDIO_DRV_NAME, priv->pdev->id); - - if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { - snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, - mdio_bus_id, pd->phy_address); - - /* - * Internal or external PHY with MDIO access - */ - phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); - if (IS_ERR(phydev)) { - dev_err(kdev, "failed to register PHY device\n"); - return PTR_ERR(phydev); - } - } else { - /* - * MoCA port or no MDIO access. - * Use fixed PHY to represent the link layer. - */ - struct fixed_phy_status fphy_status = { - .link = 1, - .speed = pd->phy_speed, - .duplex = pd->phy_duplex, - .pause = 0, - .asym_pause = 0, - }; - - phydev = fixed_phy_register(&fphy_status, NULL); - if (IS_ERR(phydev)) { - dev_err(kdev, "failed to register fixed PHY device\n"); - return PTR_ERR(phydev); - } - - /* Make sure we initialize MoCA PHYs with a link down */ - phydev->link = 0; - - } - - priv->phy_interface = pd->phy_interface; - - return 0; -} - static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; @@ -656,7 +583,7 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) else if (has_acpi_companion(kdev)) return bcmgenet_phy_interface_init(priv); else - return bcmgenet_mii_pd_init(priv); + return -EINVAL; } int bcmgenet_mii_init(struct net_device *dev) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index d78cafdb2094..e21f7c6a6de7 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -12719,29 +12719,17 @@ static int tg3_get_sset_count(struct net_device *dev, int sset) } } -static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - u32 *rules __always_unused) +static u32 tg3_get_rx_ring_count(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); if (!tg3_flag(tp, SUPPORT_MSIX)) - return -EOPNOTSUPP; + return 1; - switch (info->cmd) { - case ETHTOOL_GRXRINGS: - if (netif_running(tp->dev)) - info->data = tp->rxq_cnt; - else { - info->data = num_online_cpus(); - if (info->data > TG3_RSS_MAX_NUM_QS) - info->data = TG3_RSS_MAX_NUM_QS; - } + if (netif_running(tp->dev)) + return tp->rxq_cnt; - return 0; - - default: - return -EOPNOTSUPP; - } + return min_t(u32, netif_get_num_default_rss_queues(), tp->rxq_max); } static u32 tg3_get_rxfh_indir_size(struct net_device *dev) @@ -14268,7 +14256,7 @@ static const struct ethtool_ops tg3_ethtool_ops = { .get_coalesce = tg3_get_coalesce, .set_coalesce = tg3_set_coalesce, .get_sset_count = tg3_get_sset_count, - .get_rxnfc = tg3_get_rxnfc, + .get_rx_ring_count = tg3_get_rx_ring_count, .get_rxfh_indir_size = 
tg3_get_rxfh_indir_size, .get_rxfh = tg3_get_rxfh, .set_rxfh = tg3_set_rxfh, diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 0830c48973aa..87414a2ddf6e 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -15,10 +15,6 @@ #include <linux/phy/phy.h> #include <linux/workqueue.h> -#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP) -#define MACB_EXT_DESC -#endif - #define MACB_GREGS_NBR 16 #define MACB_GREGS_VERSION 2 #define MACB_MAX_QUEUES 8 @@ -541,6 +537,8 @@ /* Bitfields in DCFG6. */ #define GEM_PBUF_LSO_OFFSET 27 #define GEM_PBUF_LSO_SIZE 1 +#define GEM_PBUF_RSC_OFFSET 26 +#define GEM_PBUF_RSC_SIZE 1 #define GEM_PBUF_CUTTHRU_OFFSET 25 #define GEM_PBUF_CUTTHRU_SIZE 1 #define GEM_DAW64_OFFSET 23 @@ -756,27 +754,31 @@ #define MACB_MAN_C45_CODE 2 /* Capability mask bits */ -#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 -#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 -#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 -#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 -#define MACB_CAPS_USRIO_DISABLED 0x00000010 -#define MACB_CAPS_JUMBO 0x00000020 -#define MACB_CAPS_GEM_HAS_PTP 0x00000040 -#define MACB_CAPS_BD_RD_PREFETCH 0x00000080 -#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 -#define MACB_CAPS_MIIONRGMII 0x00000200 -#define MACB_CAPS_NEED_TSUCLK 0x00000400 -#define MACB_CAPS_QUEUE_DISABLE 0x00000800 -#define MACB_CAPS_QBV 0x00001000 -#define MACB_CAPS_PCS 0x01000000 -#define MACB_CAPS_HIGH_SPEED 0x02000000 -#define MACB_CAPS_CLK_HW_CHG 0x04000000 -#define MACB_CAPS_MACB_IS_EMAC 0x08000000 -#define MACB_CAPS_FIFO_MODE 0x10000000 -#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 -#define MACB_CAPS_SG_DISABLED 0x40000000 -#define MACB_CAPS_MACB_IS_GEM 0x80000000 +#define MACB_CAPS_ISR_CLEAR_ON_WRITE BIT(0) +#define MACB_CAPS_USRIO_HAS_CLKEN BIT(1) +#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII BIT(2) +#define MACB_CAPS_NO_GIGABIT_HALF BIT(3) +#define MACB_CAPS_USRIO_DISABLED BIT(4) +#define MACB_CAPS_JUMBO BIT(5) +#define MACB_CAPS_GEM_HAS_PTP BIT(6) +#define MACB_CAPS_BD_RD_PREFETCH BIT(7) +#define MACB_CAPS_NEEDS_RSTONUBR BIT(8) +#define MACB_CAPS_MIIONRGMII BIT(9) +#define MACB_CAPS_NEED_TSUCLK BIT(10) +#define MACB_CAPS_QUEUE_DISABLE BIT(11) +#define MACB_CAPS_QBV BIT(12) +#define MACB_CAPS_PCS BIT(13) +#define MACB_CAPS_HIGH_SPEED BIT(14) +#define MACB_CAPS_CLK_HW_CHG BIT(15) +#define MACB_CAPS_MACB_IS_EMAC BIT(16) +#define MACB_CAPS_FIFO_MODE BIT(17) +#define MACB_CAPS_GIGABIT_MODE_AVAILABLE BIT(18) +#define MACB_CAPS_SG_DISABLED BIT(19) +#define MACB_CAPS_MACB_IS_GEM BIT(20) +#define MACB_CAPS_DMA_64B BIT(21) +#define MACB_CAPS_DMA_PTP BIT(22) +#define MACB_CAPS_RSC BIT(23) +#define MACB_CAPS_NO_LSO BIT(24) /* LSO settings */ #define MACB_LSO_UFO_ENABLE 0x01 @@ -853,12 +855,6 @@ struct macb_dma_desc { u32 ctrl; }; -#ifdef MACB_EXT_DESC -#define HW_DMA_CAP_32B 0 -#define HW_DMA_CAP_64B (1 << 0) -#define HW_DMA_CAP_PTP (1 << 1) -#define HW_DMA_CAP_64B_PTP (HW_DMA_CAP_64B | HW_DMA_CAP_PTP) - struct macb_dma_desc_64 { u32 addrh; u32 resvd; @@ -868,7 +864,6 @@ struct macb_dma_desc_ptp { u32 ts_1; u32 ts_2; }; -#endif /* DMA descriptor bitfields */ #define MACB_RX_USED_OFFSET 0 @@ -1299,7 +1294,6 @@ struct macb { unsigned int tx_ring_size; unsigned int num_queues; - unsigned int queue_mask; struct macb_queue queues[MACB_MAX_QUEUES]; spinlock_t lock; @@ -1347,11 +1341,8 @@ struct macb { struct macb_ptp_info *ptp_info; /* macb-ptp interface */ - struct phy *sgmii_phy; /* for ZynqMP 
SGMII mode */ + struct phy *phy; -#ifdef MACB_EXT_DESC - uint8_t hw_dma_cap; -#endif spinlock_t tsu_clk_lock; /* gem tsu clock locking */ unsigned int tsu_rate; struct ptp_clock *ptp_clock; @@ -1443,6 +1434,18 @@ static inline u64 enst_max_hw_interval(u32 speed_mbps) ENST_TIME_GRANULARITY_NS * 1000, (speed_mbps)); } +static inline bool macb_dma64(struct macb *bp) +{ + return IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && + bp->caps & MACB_CAPS_DMA_64B; +} + +static inline bool macb_dma_ptp(struct macb *bp) +{ + return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) && + bp->caps & MACB_CAPS_DMA_PTP; +} + /** * struct macb_platform_data - platform data for MACB Ethernet used for PCI registration * @pclk: platform clock diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index ca2386b83473..e461f5072884 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -6,36 +6,36 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/clk.h> +#include <linux/circ_buf.h> #include <linux/clk-provider.h> +#include <linux/clk.h> #include <linux/crc32.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/circ_buf.h> -#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/firmware/xlnx-zynqmp.h> +#include <linux/inetdevice.h> #include <linux/init.h> -#include <linux/io.h> #include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/ip.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/dma-mapping.h> -#include <linux/platform_device.h> -#include <linux/phylink.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/ip.h> -#include <linux/udp.h> -#include <linux/tcp.h> -#include <linux/iopoll.h> #include <linux/phy/phy.h> +#include <linux/phylink.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/ptp_classify.h> #include <linux/reset.h> -#include <linux/firmware/xlnx-zynqmp.h> -#include <linux/inetdevice.h> +#include <linux/slab.h> +#include <linux/tcp.h> +#include <linux/types.h> +#include <linux/udp.h> #include <net/pkt_sched.h> #include "macb.h" @@ -121,56 +121,26 @@ struct sifive_fu540_macb_mgmt { */ static unsigned int macb_dma_desc_get_size(struct macb *bp) { -#ifdef MACB_EXT_DESC - unsigned int desc_size; + unsigned int desc_size = sizeof(struct macb_dma_desc); + + if (macb_dma64(bp)) + desc_size += sizeof(struct macb_dma_desc_64); + if (macb_dma_ptp(bp)) + desc_size += sizeof(struct macb_dma_desc_ptp); - switch (bp->hw_dma_cap) { - case HW_DMA_CAP_64B: - desc_size = sizeof(struct macb_dma_desc) - + sizeof(struct macb_dma_desc_64); - break; - case HW_DMA_CAP_PTP: - desc_size = sizeof(struct macb_dma_desc) - + sizeof(struct macb_dma_desc_ptp); - break; - case HW_DMA_CAP_64B_PTP: - desc_size = sizeof(struct macb_dma_desc) - + sizeof(struct macb_dma_desc_64) - + sizeof(struct macb_dma_desc_ptp); - break; - default: - desc_size = sizeof(struct macb_dma_desc); - } return desc_size; -#endif - return sizeof(struct macb_dma_desc); } static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) { -#ifdef MACB_EXT_DESC - switch (bp->hw_dma_cap) { - case HW_DMA_CAP_64B: - case HW_DMA_CAP_PTP: - desc_idx <<= 1; - break; - case HW_DMA_CAP_64B_PTP: - desc_idx *= 
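/*
 * Aside, a minimal illustrative sketch (names hypothetical, not part of
 * the patch): the rework above replaces the old HW_DMA_CAP_* switch
 * statements with arithmetic. The base descriptor counts as one slot and
 * each enabled extension (64-bit address word, PTP timestamp word) adds
 * one more; since bool promotes to 0 or 1 in C, both the descriptor size
 * and the descriptor index scale by the same factor of 1, 2 or 3.
 */
static unsigned int example_desc_stride(bool dma64, bool dma_ptp)
{
	return 1 + dma64 + dma_ptp;	/* 1x, 2x or 3x the base slot */
}

static unsigned int example_adj_desc_idx(bool dma64, bool dma_ptp,
					 unsigned int idx)
{
	return idx * example_desc_stride(dma64, dma_ptp);
}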
3; - break; - default: - break; - } -#endif - return desc_idx; + return desc_idx * (1 + macb_dma64(bp) + macb_dma_ptp(bp)); } -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) { return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc)); } -#endif /* Ring buffer accessors */ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) @@ -357,7 +327,6 @@ static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); mdio_read_exit: - pm_runtime_mark_last_busy(&bp->pdev->dev); pm_runtime_put_autosuspend(&bp->pdev->dev); mdio_pm_exit: return status; @@ -403,7 +372,6 @@ static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad, status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); mdio_read_exit: - pm_runtime_mark_last_busy(&bp->pdev->dev); pm_runtime_put_autosuspend(&bp->pdev->dev); mdio_pm_exit: return status; @@ -435,7 +403,6 @@ static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, goto mdio_write_exit; mdio_write_exit: - pm_runtime_mark_last_busy(&bp->pdev->dev); pm_runtime_put_autosuspend(&bp->pdev->dev); mdio_pm_exit: return status; @@ -481,7 +448,6 @@ static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id, goto mdio_write_exit; mdio_write_exit: - pm_runtime_mark_last_busy(&bp->pdev->dev); pm_runtime_put_autosuspend(&bp->pdev->dev); mdio_pm_exit: return status; @@ -492,15 +458,13 @@ static void macb_init_buffers(struct macb *bp) struct macb_queue *queue; unsigned int q; -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT /* Single register for all queues' high 32 bits. */ - if (bp->hw_dma_cap & HW_DMA_CAP_64B) { + if (macb_dma64(bp)) { macb_writel(bp, RBQPH, upper_32_bits(bp->queues[0].rx_ring_dma)); macb_writel(bp, TBQPH, upper_32_bits(bp->queues[0].tx_ring_dma)); } -#endif for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); @@ -1025,10 +989,9 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budge static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) { -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - struct macb_dma_desc_64 *desc_64; + if (macb_dma64(bp)) { + struct macb_dma_desc_64 *desc_64; - if (bp->hw_dma_cap & HW_DMA_CAP_64B) { desc_64 = macb_64b_desc(bp, desc); desc_64->addrh = upper_32_bits(addr); /* The low bits of RX address contain the RX_USED bit, clearing @@ -1037,26 +1000,23 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_ */ dma_wmb(); } -#endif + desc->addr = lower_32_bits(addr); } static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) { dma_addr_t addr = 0; -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - struct macb_dma_desc_64 *desc_64; - if (bp->hw_dma_cap & HW_DMA_CAP_64B) { + if (macb_dma64(bp)) { + struct macb_dma_desc_64 *desc_64; + desc_64 = macb_64b_desc(bp, desc); addr = ((u64)(desc_64->addrh) << 32); } -#endif addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); -#ifdef CONFIG_MACB_USE_HWSTAMP - if (bp->hw_dma_cap & HW_DMA_CAP_PTP) + if (macb_dma_ptp(bp)) addr &= ~GEM_BIT(DMA_RXVALID); -#endif return addr; } @@ -1336,8 +1296,19 @@ static void gem_rx_refill(struct macb_queue *queue) dma_wmb(); macb_set_addr(bp, desc, paddr); - /* properly align Ethernet header */ - skb_reserve(skb, NET_IP_ALIGN); + /* Properly align Ethernet header. 
+ * + * Hardware can add dummy bytes if asked using the RBOF + * field inside the NCFGR register. That feature isn't + * available if hardware is RSC capable. + * + * We cannot fallback to doing the 2-byte shift before + * DMA mapping because the address field does not allow + * setting the low 2/3 bits. + * It is 3 bits if HW_DMA_CAP_PTP, else 2 bits. + */ + if (!(bp->caps & MACB_CAPS_RSC)) + skb_reserve(skb, NET_IP_ALIGN); } else { desc->ctrl = 0; dma_wmb(); @@ -2024,14 +1995,14 @@ static unsigned int macb_tx_map(struct macb *bp, struct sk_buff *skb, unsigned int hdrlen) { - dma_addr_t mapping; - unsigned int len, entry, i, tx_head = queue->tx_head; - struct macb_tx_skb *tx_skb = NULL; - struct macb_dma_desc *desc; - unsigned int offset, size, count = 0; unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; - unsigned int eof = 1, mss_mfs = 0; + unsigned int len, i, tx_head = queue->tx_head; u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; + unsigned int eof = 1, mss_mfs = 0; + struct macb_tx_skb *tx_skb = NULL; + struct macb_dma_desc *desc; + unsigned int offset, size; + dma_addr_t mapping; /* LSO */ if (skb_shinfo(skb)->gso_size != 0) { @@ -2051,8 +2022,7 @@ static unsigned int macb_tx_map(struct macb *bp, offset = 0; while (len) { - entry = macb_tx_ring_wrap(bp, tx_head); - tx_skb = &queue->tx_skb[entry]; + tx_skb = macb_tx_skb(queue, tx_head); mapping = dma_map_single(&bp->pdev->dev, skb->data + offset, @@ -2068,10 +2038,9 @@ static unsigned int macb_tx_map(struct macb *bp, len -= size; offset += size; - count++; tx_head++; - size = min(len, bp->max_tx_length); + size = umin(len, bp->max_tx_length); } /* Then, map paged data from fragments */ @@ -2081,9 +2050,8 @@ static unsigned int macb_tx_map(struct macb *bp, len = skb_frag_size(frag); offset = 0; while (len) { - size = min(len, bp->max_tx_length); - entry = macb_tx_ring_wrap(bp, tx_head); - tx_skb = &queue->tx_skb[entry]; + size = umin(len, bp->max_tx_length); + tx_skb = macb_tx_skb(queue, tx_head); mapping = skb_frag_dma_map(&bp->pdev->dev, frag, offset, size, DMA_TO_DEVICE); @@ -2098,7 +2066,6 @@ static unsigned int macb_tx_map(struct macb *bp, len -= size; offset += size; - count++; tx_head++; } } @@ -2120,9 +2087,8 @@ static unsigned int macb_tx_map(struct macb *bp, * to set the end of TX queue */ i = tx_head; - entry = macb_tx_ring_wrap(bp, i); ctrl = MACB_BIT(TX_USED); - desc = macb_tx_desc(queue, entry); + desc = macb_tx_desc(queue, i); desc->ctrl = ctrl; if (lso_ctrl) { @@ -2142,16 +2108,15 @@ static unsigned int macb_tx_map(struct macb *bp, do { i--; - entry = macb_tx_ring_wrap(bp, i); - tx_skb = &queue->tx_skb[entry]; - desc = macb_tx_desc(queue, entry); + tx_skb = macb_tx_skb(queue, i); + desc = macb_tx_desc(queue, i); ctrl = (u32)tx_skb->size; if (eof) { ctrl |= MACB_BIT(TX_LAST); eof = 0; } - if (unlikely(entry == (bp->tx_ring_size - 1))) + if (unlikely(macb_tx_ring_wrap(bp, i) == bp->tx_ring_size - 1)) ctrl |= MACB_BIT(TX_WRAP); /* First descriptor is header descriptor */ @@ -2179,7 +2144,7 @@ static unsigned int macb_tx_map(struct macb *bp, queue->tx_head = tx_head; - return count; + return 0; dma_error: netdev_err(bp->dev, "TX DMA map failed\n"); @@ -2190,7 +2155,7 @@ dma_error: macb_tx_unmap(bp, tx_skb, 0); } - return 0; + return -ENOMEM; } static netdev_features_t macb_features_check(struct sk_buff *skb, @@ -2318,11 +2283,9 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } -#ifdef CONFIG_MACB_USE_HWSTAMP - if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - (bp->hw_dma_cap 
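/*
 * Aside, a hedged sketch of the return-convention change to macb_tx_map()
 * above: it used to return the number of mapped descriptors, with 0
 * doubling as the error value, and now follows the usual kernel convention
 * of 0 on success and -ENOMEM when a DMA mapping fails, so the caller in
 * macb_start_xmit() tests for non-zero instead of zero. Illustrative
 * names only:
 */
static int example_map(bool dma_mapping_ok)
{
	return dma_mapping_ok ? 0 : -ENOMEM;
}

static int example_xmit(bool dma_mapping_ok)
{
	if (example_map(dma_mapping_ok))	/* non-zero now means failure */
		return -ENOMEM;			/* the real driver drops the skb */
	return 0;
}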
& HW_DMA_CAP_PTP)) + if (macb_dma_ptp(bp) && + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; -#endif is_lso = (skb_shinfo(skb)->gso_size != 0); @@ -2339,7 +2302,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } } else - hdrlen = min(skb_headlen(skb), bp->max_tx_length); + hdrlen = umin(skb_headlen(skb), bp->max_tx_length); #if defined(DEBUG) && defined(VERBOSE_DEBUG) netdev_vdbg(bp->dev, @@ -2378,7 +2341,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* Map socket buffer for DMA transfer */ - if (!macb_tx_map(bp, queue, skb, hdrlen)) { + if (macb_tx_map(bp, queue, skb, hdrlen)) { dev_kfree_skb_any(skb); goto unlock; } @@ -2799,14 +2762,10 @@ static void macb_configure_dma(struct macb *bp) dmacfg &= ~GEM_BIT(TXCOEN); dmacfg &= ~GEM_BIT(ADDR64); -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - if (bp->hw_dma_cap & HW_DMA_CAP_64B) + if (macb_dma64(bp)) dmacfg |= GEM_BIT(ADDR64); -#endif -#ifdef CONFIG_MACB_USE_HWSTAMP - if (bp->hw_dma_cap & HW_DMA_CAP_PTP) + if (macb_dma_ptp(bp)) dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); -#endif netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", dmacfg); gem_writel(bp, DMACFG, dmacfg); @@ -2821,7 +2780,11 @@ static void macb_init_hw(struct macb *bp) macb_set_hwaddr(bp); config = macb_mdc_clk_div(bp); - config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ + /* Make eth data aligned. + * If RSC capable, that offset is ignored by HW. + */ + if (!(bp->caps & MACB_CAPS_RSC)) + config |= MACB_BF(RBOF, NET_IP_ALIGN); config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ if (bp->caps & MACB_CAPS_JUMBO) config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ @@ -2998,7 +2961,11 @@ static int macb_open(struct net_device *dev) macb_init_hw(bp); - err = phy_power_on(bp->sgmii_phy); + err = phy_set_mode_ext(bp->phy, PHY_MODE_ETHERNET, bp->phy_interface); + if (err) + goto reset_hw; + + err = phy_power_on(bp->phy); if (err) goto reset_hw; @@ -3014,7 +2981,7 @@ static int macb_open(struct net_device *dev) return 0; phy_off: - phy_power_off(bp->sgmii_phy); + phy_power_off(bp->phy); reset_hw: macb_reset_hw(bp); @@ -3046,7 +3013,7 @@ static int macb_close(struct net_device *dev) phylink_stop(bp->phylink); phylink_disconnect_phy(bp->phylink); - phy_power_off(bp->sgmii_phy); + phy_power_off(bp->phy); spin_lock_irqsave(&bp->lock, flags); macb_reset_hw(bp); @@ -3582,7 +3549,7 @@ static int gem_get_ts_info(struct net_device *dev, { struct macb *bp = netdev_priv(dev); - if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { + if (!macb_dma_ptp(bp)) { ethtool_op_get_ts_info(dev, info); return 0; } @@ -4108,6 +4075,8 @@ static int macb_taprio_setup_replace(struct net_device *ndev, struct macb *bp = netdev_priv(ndev); struct ethtool_link_ksettings kset; struct macb_queue *queue; + u32 queue_mask; + u8 queue_id; size_t i; int err; @@ -4159,8 +4128,9 @@ static int macb_taprio_setup_replace(struct net_device *ndev, goto cleanup; } - /* gate_mask must not select queues outside the valid queue_mask */ - if (entry->gate_mask & ~bp->queue_mask) { + /* gate_mask must not select queues outside the valid queues */ + queue_id = order_base_2(entry->gate_mask); + if (queue_id >= bp->num_queues) { netdev_err(ndev, "Entry %zu: gate_mask 0x%x exceeds queue range (max_queues=%d)\n", i, entry->gate_mask, bp->num_queues); err = -EINVAL; @@ -4194,7 +4164,7 @@ static int macb_taprio_setup_replace(struct net_device *ndev, goto cleanup; } - enst_queue[i].queue_id = 
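/*
 * Aside on the gate_mask handling above, an illustrative sketch: each
 * taprio entry is expected to gate exactly one queue, i.e. gate_mask is
 * BIT(q), and order_base_2() (a ceiling log2 from linux/log2.h) maps a
 * power of two back to its bit index, so order_base_2(BIT(q)) == q.
 * Comparing that index against num_queues replaces the old test against
 * the removed queue_mask field.
 */
static unsigned int example_gate_mask_to_queue(u32 gate_mask)
{
	return order_base_2(gate_mask);	/* e.g. 0x8 (BIT(3)) -> queue 3 */
}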
order_base_2(entry->gate_mask); + enst_queue[i].queue_id = queue_id; enst_queue[i].start_time_mask = (start_time_sec << GEM_START_TIME_SEC_OFFSET) | start_time_nsec; @@ -4222,8 +4192,9 @@ static int macb_taprio_setup_replace(struct net_device *ndev, /* All validations passed - proceed with hardware configuration */ scoped_guard(spinlock_irqsave, &bp->lock) { /* Disable ENST queues if running before configuring */ + queue_mask = BIT_U32(bp->num_queues) - 1; gem_writel(bp, ENST_CONTROL, - bp->queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET); + queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET); for (i = 0; i < conf->num_entries; i++) { queue = &bp->queues[enst_queue[i].queue_id]; @@ -4252,15 +4223,16 @@ static void macb_taprio_destroy(struct net_device *ndev) { struct macb *bp = netdev_priv(ndev); struct macb_queue *queue; - u32 enst_disable_mask; + u32 queue_mask; unsigned int q; netdev_reset_tc(ndev); - enst_disable_mask = bp->queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET; + queue_mask = BIT_U32(bp->num_queues) - 1; scoped_guard(spinlock_irqsave, &bp->lock) { /* Single disable command for all queues */ - gem_writel(bp, ENST_CONTROL, enst_disable_mask); + gem_writel(bp, ENST_CONTROL, + queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET); /* Clear all queue ENST registers in batch */ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { @@ -4364,13 +4336,15 @@ static void macb_configure_caps(struct macb *bp, dcfg = gem_readl(bp, DCFG2); if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) bp->caps |= MACB_CAPS_FIFO_MODE; + if (GEM_BFEXT(PBUF_RSC, gem_readl(bp, DCFG6))) + bp->caps |= MACB_CAPS_RSC; if (gem_has_ptp(bp)) { if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) dev_err(&bp->pdev->dev, "GEM doesn't support hardware ptp.\n"); else { #ifdef CONFIG_MACB_USE_HWSTAMP - bp->hw_dma_cap |= HW_DMA_CAP_PTP; + bp->caps |= MACB_CAPS_DMA_PTP; bp->ptp_info = &gem_ptp_info; #endif } @@ -4383,26 +4357,25 @@ static void macb_configure_caps(struct macb *bp, dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); } -static void macb_probe_queues(void __iomem *mem, - bool native_io, - unsigned int *queue_mask, - unsigned int *num_queues) +static int macb_probe_queues(struct device *dev, void __iomem *mem, bool native_io) { - *queue_mask = 0x1; - *num_queues = 1; + /* BIT(0) is never set but queue 0 always exists. */ + unsigned int queue_mask = 0x1; - /* is it macb or gem ? - * - * We need to read directly from the hardware here because - * we are early in the probe process and don't have the - * MACB_CAPS_MACB_IS_GEM flag positioned - */ - if (!hw_is_gem(mem, native_io)) - return; + /* Use hw_is_gem() as MACB_CAPS_MACB_IS_GEM is not yet positioned. */ + if (hw_is_gem(mem, native_io)) { + if (native_io) + queue_mask |= __raw_readl(mem + GEM_DCFG6) & 0xFF; + else + queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xFF; - /* bit 0 is never set but queue 0 always exists */ - *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff; - *num_queues = hweight32(*queue_mask); + if (fls(queue_mask) != ffz(queue_mask)) { + dev_err(dev, "queue mask %#x has a hole\n", queue_mask); + return -EINVAL; + } + } + + return hweight32(queue_mask); } static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk, @@ -4520,10 +4493,7 @@ static int macb_init(struct platform_device *pdev) * register mapping but we don't want to test the queue index then * compute the corresponding register offset at run time. 
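/*
 * Aside, a sketch of the hole detection added to macb_probe_queues(): for
 * a mask that is contiguous from bit 0, fls() (1-based index of the
 * highest set bit) equals ffz() (0-based index of the lowest clear bit),
 * e.g. 0x7 -> fls = 3, ffz = 3, while a gap breaks the equality, e.g.
 * 0x5 -> fls = 3 but ffz = 1. Assumes a non-zero mask that is not all
 * ones, which holds for the 9-bit queue masks read from DCFG6.
 */
static bool example_queue_mask_has_hole(unsigned long mask)
{
	return fls(mask) != ffz(mask);
}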
*/ - for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { - if (!(bp->queue_mask & (1 << hw_q))) - continue; - + for (hw_q = 0, q = 0; hw_q < bp->num_queues; ++hw_q) { queue = &bp->queues[q]; queue->bp = bp; spin_lock_init(&queue->tx_ptr_lock); @@ -4594,8 +4564,11 @@ static int macb_init(struct platform_device *pdev) /* Set features */ dev->hw_features = NETIF_F_SG; - /* Check LSO capability */ - if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) + /* Check LSO capability; runtime detection can be overridden by a cap + * flag if the hardware is known to be buggy + */ + if (!(bp->caps & MACB_CAPS_NO_LSO) && + GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) dev->hw_features |= MACB_NETIF_LSO; /* Checksum offload is only available on gem with packet buffer */ @@ -4614,8 +4587,8 @@ static int macb_init(struct platform_device *pdev) * each 4-tuple define requires 1 T2 screener reg + 3 compare regs */ reg = gem_readl(bp, DCFG8); - bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), - GEM_BFEXT(T2SCR, reg)); + bp->max_tuples = umin((GEM_BFEXT(SCR2CMP, reg) / 3), + GEM_BFEXT(T2SCR, reg)); INIT_LIST_HEAD(&bp->rx_fs_list.list); if (bp->max_tuples > 0) { /* also needs one ethtype match to check IPv4 */ @@ -5168,13 +5141,13 @@ static int init_reset_optional(struct platform_device *pdev) if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { /* Ensure PHY device used in SGMII mode is ready */ - bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL); + bp->phy = devm_phy_optional_get(&pdev->dev, NULL); - if (IS_ERR(bp->sgmii_phy)) - return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy), + if (IS_ERR(bp->phy)) + return dev_err_probe(&pdev->dev, PTR_ERR(bp->phy), "failed to get SGMII PHY\n"); - ret = phy_init(bp->sgmii_phy); + ret = phy_init(bp->phy); if (ret) return dev_err_probe(&pdev->dev, ret, "failed to init SGMII PHY\n"); @@ -5203,7 +5176,7 @@ static int init_reset_optional(struct platform_device *pdev) /* Fully reset controller at hardware level if mapped in device tree */ ret = device_reset_optional(&pdev->dev); if (ret) { - phy_exit(bp->sgmii_phy); + phy_exit(bp->phy); return dev_err_probe(&pdev->dev, ret, "failed to reset controller"); } @@ -5211,8 +5184,30 @@ static int init_reset_optional(struct platform_device *pdev) err_out_phy_exit: if (ret) - phy_exit(bp->sgmii_phy); + phy_exit(bp->phy); + + return ret; +} + +static int eyeq5_init(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + struct macb *bp = netdev_priv(netdev); + struct device *dev = &pdev->dev; + int ret; + + bp->phy = devm_phy_get(dev, NULL); + if (IS_ERR(bp->phy)) + return dev_err_probe(dev, PTR_ERR(bp->phy), + "failed to get PHY\n"); + + ret = phy_init(bp->phy); + if (ret) + return dev_err_probe(dev, ret, "failed to init PHY\n"); + ret = macb_init(pdev); + if (ret) + phy_exit(bp->phy); return ret; } @@ -5370,6 +5365,17 @@ static const struct macb_config versal_config = { .usrio = &macb_default_usrio, }; +static const struct macb_config eyeq5_config = { + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | + MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_QUEUE_DISABLE | + MACB_CAPS_NO_LSO, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = eyeq5_init, + .jumbo_max_len = 10240, + .usrio = &macb_default_usrio, +}; + static const struct macb_config raspberrypi_rp1_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG | MACB_CAPS_JUMBO | @@ -5401,6 +5407,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "microchip,mpfs-macb", .data = 
&mpfs_config }, { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config }, { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config }, + { .compatible = "mobileye,eyeq5-gem", .data = &eyeq5_config }, { .compatible = "raspberrypi,rp1-gem", .data = &raspberrypi_rp1_config }, { .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config}, { .compatible = "xlnx,zynq-gem", .data = &zynq_config }, @@ -5424,21 +5431,17 @@ static const struct macb_config default_gem_config = { static int macb_probe(struct platform_device *pdev) { const struct macb_config *macb_config = &default_gem_config; - int (*clk_init)(struct platform_device *, struct clk **, - struct clk **, struct clk **, struct clk **, - struct clk **) = macb_config->clk_init; - int (*init)(struct platform_device *) = macb_config->init; struct device_node *np = pdev->dev.of_node; struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; struct clk *tsu_clk = NULL; - unsigned int queue_mask, num_queues; - bool native_io; phy_interface_t interface; struct net_device *dev; struct resource *regs; u32 wtrmrk_rst_val; void __iomem *mem; struct macb *bp; + int num_queues; + bool native_io; int err, val; mem = devm_platform_get_and_ioremap_resource(pdev, 0, ®s); @@ -5449,14 +5452,11 @@ static int macb_probe(struct platform_device *pdev) const struct of_device_id *match; match = of_match_node(macb_dt_ids, np); - if (match && match->data) { + if (match && match->data) macb_config = match->data; - clk_init = macb_config->clk_init; - init = macb_config->init; - } } - err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); + err = macb_config->clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); if (err) return err; @@ -5467,7 +5467,12 @@ static int macb_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); native_io = hw_is_native_io(mem); - macb_probe_queues(mem, native_io, &queue_mask, &num_queues); + num_queues = macb_probe_queues(&pdev->dev, mem, native_io); + if (num_queues < 0) { + err = num_queues; + goto err_disable_clocks; + } + dev = alloc_etherdev_mq(sizeof(*bp), num_queues); if (!dev) { err = -ENOMEM; @@ -5491,16 +5496,13 @@ static int macb_probe(struct platform_device *pdev) bp->macb_reg_writel = hw_writel; } bp->num_queues = num_queues; - bp->queue_mask = queue_mask; - if (macb_config) - bp->dma_burst_length = macb_config->dma_burst_length; + bp->dma_burst_length = macb_config->dma_burst_length; bp->pclk = pclk; bp->hclk = hclk; bp->tx_clk = tx_clk; bp->rx_clk = rx_clk; bp->tsu_clk = tsu_clk; - if (macb_config) - bp->jumbo_max_len = macb_config->jumbo_max_len; + bp->jumbo_max_len = macb_config->jumbo_max_len; if (!hw_is_gem(bp->regs, bp->native_io)) bp->max_tx_length = MACB_MAX_TX_LEN; @@ -5546,7 +5548,7 @@ static int macb_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to set DMA mask\n"); goto err_out_free_netdev; } - bp->hw_dma_cap |= HW_DMA_CAP_64B; + bp->caps |= MACB_CAPS_DMA_64B; } #endif platform_set_drvdata(pdev, dev); @@ -5594,7 +5596,7 @@ static int macb_probe(struct platform_device *pdev) bp->phy_interface = interface; /* IP specific init */ - err = init(pdev); + err = macb_config->init(pdev); if (err) goto err_out_free_netdev; @@ -5616,7 +5618,6 @@ static int macb_probe(struct platform_device *pdev) macb_is_gem(bp) ? 
"GEM" : "MACB", macb_readl(bp, MID), dev->base_addr, dev->irq, dev->dev_addr); - pm_runtime_mark_last_busy(&bp->pdev->dev); pm_runtime_put_autosuspend(&bp->pdev->dev); return 0; @@ -5626,7 +5627,7 @@ err_out_unregister_mdio: mdiobus_free(bp->mii_bus); err_out_phy_exit: - phy_exit(bp->sgmii_phy); + phy_exit(bp->phy); err_out_free_netdev: free_netdev(dev); @@ -5650,7 +5651,7 @@ static void macb_remove(struct platform_device *pdev) if (dev) { bp = netdev_priv(dev); unregister_netdev(dev); - phy_exit(bp->sgmii_phy); + phy_exit(bp->phy); mdiobus_unregister(bp->mii_bus); mdiobus_free(bp->mii_bus); @@ -5677,7 +5678,7 @@ static int __maybe_unused macb_suspend(struct device *dev) u32 tmp; if (!device_may_wakeup(&bp->dev->dev)) - phy_exit(bp->sgmii_phy); + phy_exit(bp->phy); if (!netif_running(netdev)) return 0; @@ -5806,7 +5807,7 @@ static int __maybe_unused macb_resume(struct device *dev) int err; if (!device_may_wakeup(&bp->dev->dev)) - phy_init(bp->sgmii_phy); + phy_init(bp->phy); if (!netif_running(netdev)) return 0; diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index a63bf29c4fa8..c9e77819196e 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -28,14 +28,16 @@ static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp, struct macb_dma_desc *desc) { - if (bp->hw_dma_cap == HW_DMA_CAP_PTP) - return (struct macb_dma_desc_ptp *) - ((u8 *)desc + sizeof(struct macb_dma_desc)); - if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP) + if (!macb_dma_ptp(bp)) + return NULL; + + if (macb_dma64(bp)) return (struct macb_dma_desc_ptp *) ((u8 *)desc + sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64)); - return NULL; + else + return (struct macb_dma_desc_ptp *) + ((u8 *)desc + sizeof(struct macb_dma_desc)); } static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts, @@ -380,7 +382,7 @@ int gem_get_hwtst(struct net_device *dev, struct macb *bp = netdev_priv(dev); *tstamp_config = bp->tstamp_config; - if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) + if (!macb_dma_ptp(bp)) return -EOPNOTSUPP; return 0; @@ -407,7 +409,7 @@ int gem_set_hwtst(struct net_device *dev, struct macb *bp = netdev_priv(dev); u32 regval; - if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) + if (!macb_dma_ptp(bp)) return -EOPNOTSUPP; switch (tstamp_config->tx_type) { diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 8e2fcec26ea1..0732440eeacd 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2107,20 +2107,16 @@ liquidio_get_stats64(struct net_device *netdev, lstats->tx_fifo_errors; } -/** - * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl - * @netdev: network device - * @ifr: interface request - */ -static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) +static int liquidio_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *conf, + struct netlink_ext_ack *extack) { - struct hwtstamp_config conf; struct lio *lio = GET_LIO(netdev); - if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) - return -EFAULT; + if (!lio->oct_dev->ptp_enable) + return -EOPNOTSUPP; - switch (conf.tx_type) { + switch (conf->tx_type) { case HWTSTAMP_TX_ON: case HWTSTAMP_TX_OFF: break; @@ -2128,7 +2124,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) return -ERANGE; } - switch (conf.rx_filter) { + switch (conf->rx_filter) { case 
HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_ALL: @@ -2146,39 +2142,32 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - conf.rx_filter = HWTSTAMP_FILTER_ALL; + conf->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } - if (conf.rx_filter == HWTSTAMP_FILTER_ALL) + if (conf->rx_filter == HWTSTAMP_FILTER_ALL) ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); else ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); - return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; + return 0; } -/** - * liquidio_ioctl - ioctl handler - * @netdev: network device - * @ifr: interface request - * @cmd: command - */ -static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int liquidio_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *conf) { struct lio *lio = GET_LIO(netdev); - switch (cmd) { - case SIOCSHWTSTAMP: - if (lio->oct_dev->ptp_enable) - return hwtstamp_ioctl(netdev, ifr); - fallthrough; - default: - return -EOPNOTSUPP; - } + /* TX timestamping is technically always on */ + conf->tx_type = HWTSTAMP_TX_ON; + conf->rx_filter = ifstate_check(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED) ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + + return 0; } /** @@ -3227,7 +3216,6 @@ static const struct net_device_ops lionetdevops = { .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, .ndo_change_mtu = liquidio_change_mtu, - .ndo_eth_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, .ndo_set_features = liquidio_set_features, .ndo_set_vf_mac = liquidio_set_vf_mac, @@ -3238,6 +3226,8 @@ static const struct net_device_ops lionetdevops = { .ndo_set_vf_link_state = liquidio_set_vf_link_state, .ndo_get_vf_stats = liquidio_get_vf_stats, .ndo_get_port_parent_id = liquidio_get_port_parent_id, + .ndo_hwtstamp_get = liquidio_hwtstamp_get, + .ndo_hwtstamp_set = liquidio_hwtstamp_set, }; /** diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 3230dff5ba05..e02942dbbcce 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1236,20 +1236,13 @@ liquidio_get_stats64(struct net_device *netdev, lstats->tx_carrier_errors; } -/** - * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl - * @netdev: network device - * @ifr: interface request - */ -static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) +static int liquidio_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *conf, + struct netlink_ext_ack *extack) { struct lio *lio = GET_LIO(netdev); - struct hwtstamp_config conf; - - if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) - return -EFAULT; - switch (conf.tx_type) { + switch (conf->tx_type) { case HWTSTAMP_TX_ON: case HWTSTAMP_TX_OFF: break; @@ -1257,7 +1250,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) return -ERANGE; } - switch (conf.rx_filter) { + switch (conf->rx_filter) { case HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_ALL: @@ -1275,35 +1268,31 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - conf.rx_filter = HWTSTAMP_FILTER_ALL; + conf->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return 
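/*
 * Aside: the liquidio, octeon_mgmt, nicvf and cxgb4 changes in this
 * series all follow the same migration, from handling SIOCSHWTSTAMP in
 * .ndo_eth_ioctl to the dedicated .ndo_hwtstamp_get/.ndo_hwtstamp_set
 * callbacks. The core now copies the uapi struct hwtstamp_config to and
 * from user space and hands drivers a kernel-space struct
 * kernel_hwtstamp_config, which is why the per-driver
 * copy_from_user()/copy_to_user() calls disappear. A minimal sketch of
 * the driver-side shape, with hypothetical names:
 */
static int example_hwtstamp_set(struct net_device *ndev,
				struct kernel_hwtstamp_config *cfg,
				struct netlink_ext_ack *extack)
{
	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;
	/* program the hardware RX filter from cfg->rx_filter here */
	return 0;
}

static int example_hwtstamp_get(struct net_device *ndev,
				struct kernel_hwtstamp_config *cfg)
{
	cfg->tx_type = HWTSTAMP_TX_ON;		/* report current state */
	cfg->rx_filter = HWTSTAMP_FILTER_NONE;
	return 0;
}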
-ERANGE; } - if (conf.rx_filter == HWTSTAMP_FILTER_ALL) + if (conf->rx_filter == HWTSTAMP_FILTER_ALL) ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); else ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); - return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; + return 0; } -/** - * liquidio_ioctl - ioctl handler - * @netdev: network device - * @ifr: interface request - * @cmd: command - */ -static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int liquidio_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *conf) { - switch (cmd) { - case SIOCSHWTSTAMP: - return hwtstamp_ioctl(netdev, ifr); - default: - return -EOPNOTSUPP; - } + struct lio *lio = GET_LIO(netdev); + + /* TX timestamping is technically always on */ + conf->tx_type = HWTSTAMP_TX_ON; + conf->rx_filter = ifstate_check(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED) ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + return 0; } static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) @@ -1881,9 +1870,10 @@ static const struct net_device_ops lionetdevops = { .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, .ndo_change_mtu = liquidio_change_mtu, - .ndo_eth_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, .ndo_set_features = liquidio_set_features, + .ndo_hwtstamp_get = liquidio_hwtstamp_get, + .ndo_hwtstamp_set = liquidio_hwtstamp_set, }; static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 393b9951490a..c190fc6538d4 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -690,19 +690,16 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id) return IRQ_HANDLED; } -static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, - struct ifreq *rq, int cmd) +static int octeon_mgmt_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct octeon_mgmt *p = netdev_priv(netdev); - struct hwtstamp_config config; - union cvmx_mio_ptp_clock_cfg ptp; union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; + union cvmx_mio_ptp_clock_cfg ptp; bool have_hw_timestamps = false; - if (copy_from_user(&config, rq->ifr_data, sizeof(config))) - return -EFAULT; - - /* Check the status of hardware for tiemstamps */ + /* Check the status of hardware for timestamps */ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { /* Get the current state of the PTP clock */ ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG); @@ -733,10 +730,12 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, have_hw_timestamps = true; } - if (!have_hw_timestamps) + if (!have_hw_timestamps) { + NL_SET_ERR_MSG_MOD(extack, "HW doesn't support timestamping"); return -EINVAL; + } - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; @@ -744,7 +743,7 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: p->has_rx_tstamp = false; rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); @@ -766,33 +765,34 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - p->has_rx_tstamp
= have_hw_timestamps; - config.rx_filter = HWTSTAMP_FILTER_ALL; - if (p->has_rx_tstamp) { - rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); - rxx_frm_ctl.s.ptp_mode = 1; - cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); - } + p->has_rx_tstamp = true; + config->rx_filter = HWTSTAMP_FILTER_ALL; + rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); + rxx_frm_ctl.s.ptp_mode = 1; + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); break; default: return -ERANGE; } - if (copy_to_user(rq->ifr_data, &config, sizeof(config))) - return -EFAULT; - return 0; } -static int octeon_mgmt_ioctl(struct net_device *netdev, - struct ifreq *rq, int cmd) +static int octeon_mgmt_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - switch (cmd) { - case SIOCSHWTSTAMP: - return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd); - default: - return phy_do_ioctl(netdev, rq, cmd); - } + struct octeon_mgmt *p = netdev_priv(netdev); + + /* Check the status of hardware for timestamps */ + if (!OCTEON_IS_MODEL(OCTEON_CN6XXX)) + return -EINVAL; + + config->tx_type = HWTSTAMP_TX_ON; + config->rx_filter = p->has_rx_tstamp ? + HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; + + return 0; } static void octeon_mgmt_disable_link(struct octeon_mgmt *p) @@ -1370,11 +1370,13 @@ static const struct net_device_ops octeon_mgmt_ops = { .ndo_start_xmit = octeon_mgmt_xmit, .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, .ndo_set_mac_address = octeon_mgmt_set_mac_address, - .ndo_eth_ioctl = octeon_mgmt_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_change_mtu = octeon_mgmt_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = octeon_mgmt_poll_controller, #endif + .ndo_hwtstamp_get = octeon_mgmt_hwtstamp_get, + .ndo_hwtstamp_set = octeon_mgmt_hwtstamp_set, }; static int octeon_mgmt_probe(struct platform_device *pdev) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index fc6053414b7d..413028bdcacb 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -541,21 +541,11 @@ static int nicvf_get_rxfh_fields(struct net_device *dev, return 0; } -static int nicvf_get_rxnfc(struct net_device *dev, - struct ethtool_rxnfc *info, u32 *rules) +static u32 nicvf_get_rx_ring_count(struct net_device *dev) { struct nicvf *nic = netdev_priv(dev); - int ret = -EOPNOTSUPP; - switch (info->cmd) { - case ETHTOOL_GRXRINGS: - info->data = nic->rx_queues; - ret = 0; - break; - default: - break; - } - return ret; + return nic->rx_queues; } static int nicvf_set_rxfh_fields(struct net_device *dev, @@ -861,7 +851,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = { .get_coalesce = nicvf_get_coalesce, .get_ringparam = nicvf_get_ringparam, .set_ringparam = nicvf_set_ringparam, - .get_rxnfc = nicvf_get_rxnfc, + .get_rx_ring_count = nicvf_get_rx_ring_count, .get_rxfh_key_size = nicvf_get_rxfh_key_size, .get_rxfh_indir_size = nicvf_get_rxfh_indir_size, .get_rxfh = nicvf_get_rxfh, diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 1be2dc40a1a6..0b6e30a8feb0 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1899,18 +1899,18 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) } } -static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) +static int 
nicvf_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; struct nicvf *nic = netdev_priv(netdev); - if (!nic->ptp_clock) + if (!nic->ptp_clock) { + NL_SET_ERR_MSG_MOD(extack, "HW timestamping is not supported"); return -ENODEV; + } - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; @@ -1918,7 +1918,7 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: nic->hw_rx_tstamp = false; break; @@ -1937,7 +1937,7 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: nic->hw_rx_tstamp = true; - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; @@ -1946,20 +1946,24 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) if (netif_running(netdev)) nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp); - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; - return 0; } -static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +static int nicvf_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - switch (cmd) { - case SIOCSHWTSTAMP: - return nicvf_config_hwtstamp(netdev, req); - default: - return -EOPNOTSUPP; - } + struct nicvf *nic = netdev_priv(netdev); + + if (!nic->ptp_clock) + return -ENODEV; + + /* TX timestamping is technically always on */ + config->tx_type = HWTSTAMP_TX_ON; + config->rx_filter = nic->hw_rx_tstamp ? 
+ HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; + + return 0; } static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, @@ -2081,8 +2085,9 @@ static const struct net_device_ops nicvf_netdev_ops = { .ndo_fix_features = nicvf_fix_features, .ndo_set_features = nicvf_set_features, .ndo_bpf = nicvf_xdp, - .ndo_eth_ioctl = nicvf_ioctl, .ndo_set_rx_mode = nicvf_set_rx_mode, + .ndo_hwtstamp_get = nicvf_hwtstamp_get, + .ndo_hwtstamp_set = nicvf_hwtstamp_set, }; static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0d85198fb03d..f20f4bc58492 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -674,7 +674,7 @@ struct port_info { struct cxgb_fcoe fcoe; #endif /* CONFIG_CHELSIO_T4_FCOE */ bool rxtstamp; /* Enable TS */ - struct hwtstamp_config tstamp_config; + struct kernel_hwtstamp_config tstamp_config; bool ptp_enable; struct sched_table *sched_tbl; u32 eth_flags; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 392723ef14e5..66b8854e059f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3042,12 +3042,87 @@ static void cxgb_get_stats(struct net_device *dev, ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; } +static int cxgb_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) +{ + struct port_info *pi = netdev_priv(dev); + + *config = pi->tstamp_config; + return 0; +} + +static int cxgb_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + if (is_t4(adapter->params.chip)) { + /* For T4 Adapters */ + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + pi->rxtstamp = false; + break; + case HWTSTAMP_FILTER_ALL: + pi->rxtstamp = true; + break; + default: + return -ERANGE; + } + pi->tstamp_config = *config; + return 0; + } + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + pi->rxtstamp = false; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + cxgb4_ptprx_timestamping(pi, pi->port_id, PTP_TS_L4); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + cxgb4_ptprx_timestamping(pi, pi->port_id, PTP_TS_L2_L4); + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + pi->rxtstamp = true; + break; + default: + return -ERANGE; + } + + if (config->tx_type == HWTSTAMP_TX_OFF && + config->rx_filter == HWTSTAMP_FILTER_NONE) { + if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0) + pi->ptp_enable = false; + } + + if (config->rx_filter != HWTSTAMP_FILTER_NONE) { + if (cxgb4_ptp_redirect_rx_packet(adapter, pi) >= 0) + pi->ptp_enable = true; + } + pi->tstamp_config = *config; + return 0; +} + static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { unsigned int mbox; int ret = 0, prtad, devad; struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; switch (cmd) { @@ 
-3076,81 +3151,6 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, data->reg_num, data->val_in); break; - case SIOCGHWTSTAMP: - return copy_to_user(req->ifr_data, &pi->tstamp_config, - sizeof(pi->tstamp_config)) ? - -EFAULT : 0; - case SIOCSHWTSTAMP: - if (copy_from_user(&pi->tstamp_config, req->ifr_data, - sizeof(pi->tstamp_config))) - return -EFAULT; - - if (!is_t4(adapter->params.chip)) { - switch (pi->tstamp_config.tx_type) { - case HWTSTAMP_TX_OFF: - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - switch (pi->tstamp_config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - pi->rxtstamp = false; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - cxgb4_ptprx_timestamping(pi, pi->port_id, - PTP_TS_L4); - break; - case HWTSTAMP_FILTER_PTP_V2_EVENT: - cxgb4_ptprx_timestamping(pi, pi->port_id, - PTP_TS_L2_L4); - break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - pi->rxtstamp = true; - break; - default: - pi->tstamp_config.rx_filter = - HWTSTAMP_FILTER_NONE; - return -ERANGE; - } - - if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) && - (pi->tstamp_config.rx_filter == - HWTSTAMP_FILTER_NONE)) { - if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0) - pi->ptp_enable = false; - } - - if (pi->tstamp_config.rx_filter != - HWTSTAMP_FILTER_NONE) { - if (cxgb4_ptp_redirect_rx_packet(adapter, - pi) >= 0) - pi->ptp_enable = true; - } - } else { - /* For T4 Adapters */ - switch (pi->tstamp_config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - pi->rxtstamp = false; - break; - case HWTSTAMP_FILTER_ALL: - pi->rxtstamp = true; - break; - default: - pi->tstamp_config.rx_filter = - HWTSTAMP_FILTER_NONE; - return -ERANGE; - } - } - return copy_to_user(req->ifr_data, &pi->tstamp_config, - sizeof(pi->tstamp_config)) ? 
- -EFAULT : 0; default: return -EOPNOTSUPP; } @@ -3485,7 +3485,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) struct adapter *adap = pi->adapter; struct ch_sched_queue qe = { 0 }; struct ch_sched_params p = { 0 }; - struct sched_class *e; + struct ch_sched_class *e; u32 req_rate; int err = 0; @@ -3875,6 +3875,8 @@ static const struct net_device_ops cxgb4_netdev_ops = { .ndo_setup_tc = cxgb_setup_tc, .ndo_features_check = cxgb_features_check, .ndo_fix_features = cxgb_fix_features, + .ndo_hwtstamp_get = cxgb_hwtstamp_get, + .ndo_hwtstamp_set = cxgb_hwtstamp_set, }; #ifdef CONFIG_PCI_IOV diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 0765d000eaef..e2b5554531b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -161,20 +161,9 @@ static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap, static void cxgb4_process_flow_match(struct net_device *dev, struct flow_rule *rule, + u16 addr_type, struct ch_filter_specification *fs) { - u16 addr_type = 0; - - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_match_control match; - - flow_rule_match_control(rule, &match); - addr_type = match.key->addr_type; - } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { - addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; - } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { - addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; - } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; @@ -327,9 +316,6 @@ static int cxgb4_validate_flow_match(struct netlink_ext_ack *extack, return -EOPNOTSUPP; } - if (flow_rule_match_has_control_flags(rule, extack)) - return -EOPNOTSUPP; - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; @@ -858,6 +844,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule, { struct adapter *adap = netdev2adap(dev); struct filter_ctx ctx; + u16 addr_type = 0; u8 inet_family; int fidx, ret; @@ -867,7 +854,28 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule, if (cxgb4_validate_flow_match(extack, rule)) return -EOPNOTSUPP; - cxgb4_process_flow_match(dev, rule, fs); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; + + if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { + fs->val.frag = match.key->flags & FLOW_DIS_IS_FRAGMENT; + fs->mask.frag = true; + } + + if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT, + match.mask->flags, extack)) + return -EOPNOTSUPP; + + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + } + + cxgb4_process_flow_match(dev, rule, addr_type, fs); cxgb4_process_flow_actions(dev, &rule->action, fs); fs->hash = is_filter_exact_match(adap, fs); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index 1672d3afe5be..f8dcf0b4abcd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -56,7 +56,7 @@ static int cxgb4_matchall_egress_validate(struct 
net_device *dev, struct port_info *pi = netdev2pinfo(dev); struct flow_action_entry *entry; struct ch_sched_queue qe; - struct sched_class *e; + struct ch_sched_class *e; u64 max_link_rate; u32 i, speed; int ret; @@ -180,7 +180,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev, struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); struct flow_action_entry *entry; - struct sched_class *e; + struct ch_sched_class *e; int ret; u32 i; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c index 338b04f339b3..a2dcd2e24263 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c @@ -330,7 +330,7 @@ static int cxgb4_mqprio_alloc_tc(struct net_device *dev, struct cxgb4_tc_port_mqprio *tc_port_mqprio; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); - struct sched_class *e; + struct ch_sched_class *e; int ret; u8 i; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index a1b14468d1ff..38a30aeee122 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -44,7 +44,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi, { struct adapter *adap = pi->adapter; struct sched_table *s = pi->sched_tbl; - struct sched_class *e; + struct ch_sched_class *e; int err = 0; e = &s->tab[p->u.params.class]; @@ -122,7 +122,7 @@ static void *t4_sched_entry_lookup(struct port_info *pi, const u32 val) { struct sched_table *s = pi->sched_tbl; - struct sched_class *e, *end; + struct ch_sched_class *e, *end; void *found = NULL; /* Look for an entry with matching @val */ @@ -166,8 +166,8 @@ static void *t4_sched_entry_lookup(struct port_info *pi, return found; } -struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev, - struct ch_sched_queue *p) +struct ch_sched_class *cxgb4_sched_queue_lookup(struct net_device *dev, + struct ch_sched_queue *p) { struct port_info *pi = netdev2pinfo(dev); struct sched_queue_entry *qe = NULL; @@ -187,7 +187,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) struct sched_queue_entry *qe = NULL; struct adapter *adap = pi->adapter; struct sge_eth_txq *txq; - struct sched_class *e; + struct ch_sched_class *e; int err = 0; if (p->queue < 0 || p->queue >= pi->nqsets) @@ -218,7 +218,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) struct sched_queue_entry *qe = NULL; struct adapter *adap = pi->adapter; struct sge_eth_txq *txq; - struct sched_class *e; + struct ch_sched_class *e; unsigned int qid; int err = 0; @@ -260,7 +260,7 @@ static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p) { struct sched_flowc_entry *fe = NULL; struct adapter *adap = pi->adapter; - struct sched_class *e; + struct ch_sched_class *e; int err = 0; if (p->tid < 0 || p->tid >= adap->tids.neotids) @@ -288,7 +288,7 @@ static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p) struct sched_table *s = pi->sched_tbl; struct sched_flowc_entry *fe = NULL; struct adapter *adap = pi->adapter; - struct sched_class *e; + struct ch_sched_class *e; int err = 0; if (p->tid < 0 || p->tid >= adap->tids.neotids) @@ -322,7 +322,7 @@ out_err: } static void t4_sched_class_unbind_all(struct port_info *pi, - struct sched_class *e, + struct ch_sched_class *e, enum sched_bind_type type) { if (!e) @@ -476,12 +476,12 
@@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, } /* If @p is NULL, fetch any available unused class */ -static struct sched_class *t4_sched_class_lookup(struct port_info *pi, - const struct ch_sched_params *p) +static struct ch_sched_class *t4_sched_class_lookup(struct port_info *pi, + const struct ch_sched_params *p) { struct sched_table *s = pi->sched_tbl; - struct sched_class *found = NULL; - struct sched_class *e, *end; + struct ch_sched_class *found = NULL; + struct ch_sched_class *e, *end; if (!p) { /* Get any available unused class */ @@ -522,10 +522,10 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, return found; } -static struct sched_class *t4_sched_class_alloc(struct port_info *pi, - struct ch_sched_params *p) +static struct ch_sched_class *t4_sched_class_alloc(struct port_info *pi, + struct ch_sched_params *p) { - struct sched_class *e = NULL; + struct ch_sched_class *e = NULL; u8 class_id; int err; @@ -579,8 +579,8 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, * scheduling class with matching @p is found, then the matching class is * returned. */ -struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev, - struct ch_sched_params *p) +struct ch_sched_class *cxgb4_sched_class_alloc(struct net_device *dev, + struct ch_sched_params *p) { struct port_info *pi = netdev2pinfo(dev); u8 class_id; @@ -607,7 +607,7 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid) struct port_info *pi = netdev2pinfo(dev); struct sched_table *s = pi->sched_tbl; struct ch_sched_params p; - struct sched_class *e; + struct ch_sched_class *e; u32 speed; int ret; @@ -640,7 +640,7 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid) } } -static void t4_sched_class_free(struct net_device *dev, struct sched_class *e) +static void t4_sched_class_free(struct net_device *dev, struct ch_sched_class *e) { struct port_info *pi = netdev2pinfo(dev); @@ -660,7 +660,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size) s->sched_size = sched_size; for (i = 0; i < s->sched_size; i++) { - memset(&s->tab[i], 0, sizeof(struct sched_class)); + memset(&s->tab[i], 0, sizeof(struct ch_sched_class)); s->tab[i].idx = i; s->tab[i].state = SCHED_STATE_UNUSED; INIT_LIST_HEAD(&s->tab[i].entry_list); @@ -682,7 +682,7 @@ void t4_cleanup_sched(struct adapter *adap) continue; for (i = 0; i < s->sched_size; i++) { - struct sched_class *e; + struct ch_sched_class *e; e = &s->tab[i]; if (e->state == SCHED_STATE_ACTIVE) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h index 6b3c778815f0..4d3b5a757536 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.h +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -71,7 +71,7 @@ struct sched_flowc_entry { struct ch_sched_flowc param; }; -struct sched_class { +struct ch_sched_class { u8 state; u8 idx; struct ch_sched_params info; @@ -82,7 +82,7 @@ struct sched_class { struct sched_table { /* per port scheduling table */ u8 sched_size; - struct sched_class tab[] __counted_by(sched_size); + struct ch_sched_class tab[] __counted_by(sched_size); }; static inline bool can_sched(struct net_device *dev) @@ -103,15 +103,15 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id) return true; } -struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev, - struct ch_sched_queue *p); +struct ch_sched_class *cxgb4_sched_queue_lookup(struct net_device *dev, + struct ch_sched_queue *p); int 
cxgb4_sched_class_bind(struct net_device *dev, void *arg, enum sched_bind_type type); int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, enum sched_bind_type type); -struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev, - struct ch_sched_params *p); +struct ch_sched_class *cxgb4_sched_class_alloc(struct net_device *dev, + struct ch_sched_params *p); void cxgb4_sched_class_free(struct net_device *dev, u8 classid); struct sched_table *t4_init_sched(unsigned int size); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index 4ee970f3bad6..ee0154337a9c 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -1199,12 +1199,12 @@ static struct sock *chtls_recv_sock(struct sock *lsk, struct ipv6_pinfo *newnp = inet6_sk(newsk); struct ipv6_pinfo *np = inet6_sk(lsk); - inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; + newinet->pinet6 = &newtcp6sk->inet6; + newinet->ipv6_fl_list = NULL; memcpy(newnp, np, sizeof(struct ipv6_pinfo)); newsk->sk_v6_daddr = treq->ir_v6_rmt_addr; newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr; inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr; - newnp->ipv6_fl_list = NULL; newnp->pktoptions = NULL; newsk->sk_bound_dev_if = treq->ir_iif; newinet->inet_opt = NULL; diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c index 4036db466e18..ee19933e2cca 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c @@ -159,19 +159,13 @@ static u8 tcp_state_to_flowc_state(u8 state) int send_tx_flowc_wr(struct sock *sk, int compl, u32 snd_nxt, u32 rcv_nxt) { - struct flowc_packed { - struct fw_flowc_wr fc; - struct fw_flowc_mnemval mnemval[FW_FLOWC_MNEM_MAX]; - } __packed sflowc; + DEFINE_RAW_FLEX(struct fw_flowc_wr, flowc, mnemval, FW_FLOWC_MNEM_MAX); int nparams, paramidx, flowclen16, flowclen; - struct fw_flowc_wr *flowc; struct chtls_sock *csk; struct tcp_sock *tp; csk = rcu_dereference_sk_user_data(sk); tp = tcp_sk(sk); - memset(&sflowc, 0, sizeof(sflowc)); - flowc = &sflowc.fc; #define FLOWC_PARAM(__m, __v) \ do { \ diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 6e4f17142519..846d58c769ea 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -41,7 +41,7 @@ module_param(tx_flow, int, 0); module_param(rx_flow, int, 0); module_param(copy_thresh, int, 0); module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */ -module_param(rx_timeout, int, 0); /* Rx DMA wait time in 64ns increments */ +module_param(rx_timeout, int, 0); /* Rx DMA wait time in 640ns increments */ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */ @@ -262,7 +262,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) np->link_status = 0; /* Set media and reset PHY */ if (np->phy_media) { - /* default Auto-Negotiation for fiber deivices */ + /* default Auto-Negotiation for fiber devices */ if (np->an_enable == 2) { np->an_enable = 1; } @@ -887,7 +887,7 @@ tx_error (struct net_device *dev, int tx_status) frame_id = (tx_status & 0xffff0000); printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", dev->name, tx_status, frame_id); - /* Ttransmit Underrun */ + /* Transmit Underrun */ if (tx_status & 0x10) { dev->stats.tx_fifo_errors++; 
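[Editor's note: the chtls_io.c hunk above replaces a hand-rolled on-stack flexible-array container with DEFINE_RAW_FLEX() from <linux/overflow.h>. A minimal sketch of the two patterns follows; the struct and field names are illustrative assumptions, not the chtls definitions.]

	#include <linux/overflow.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct demo_param {
		u8 mnemonic;
		u32 val;
	};

	struct demo_wr {
		u32 op_to_nparams;
		struct demo_param mnemval[];	/* flexible array member */
	};

	static void demo_old_style(void)
	{
		/* Old pattern: wrap the flex-array struct in a packed
		 * container sized for the worst case and memset() it,
		 * then work through a pointer to the embedded struct.
		 */
		struct {
			struct demo_wr wr;
			struct demo_param mnemval[9];
		} __packed swr;
		struct demo_wr *wr = &swr.wr;

		memset(&swr, 0, sizeof(swr));
		wr->op_to_nparams = 9;
	}

	static void demo_new_style(void)
	{
		/* New pattern: one macro declares zero-initialized stack
		 * storage for a demo_wr with room for nine mnemval[] entries,
		 * and 'wr' is already a struct demo_wr pointer into it.
		 */
		DEFINE_RAW_FLEX(struct demo_wr, wr, mnemval, 9);

		wr->op_to_nparams = 9;
	}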
dw16(TxStartThresh, dr16(TxStartThresh) + 0x10); @@ -1083,7 +1083,7 @@ rio_error (struct net_device *dev, int int_status) get_stats (dev); } - /* PCI Error, a catastronphic error related to the bus interface + /* PCI Error, a catastrophic error related to the bus interface occurs, set GlobalReset and HostReset to reset. */ if (int_status & HostError) { printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n", diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 4788cc94639d..9ebf7a6db93e 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -270,7 +270,7 @@ enum _pcs_reg { PCS_ESR = 15, }; -/* IEEE Extened Status Register */ +/* IEEE Extended Status Register */ enum _mii_esr { MII_ESR_1000BX_FD = 0x8000, MII_ESR_1000BX_HD = 0x4000, diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h index f188fba021a6..03e19aea9ea4 100644 --- a/drivers/net/ethernet/engleder/tsnep.h +++ b/drivers/net/ethernet/engleder/tsnep.h @@ -176,7 +176,7 @@ struct tsnep_adapter { struct tsnep_gcl gcl[2]; int next_gcl; - struct hwtstamp_config hwtstamp_config; + struct kernel_hwtstamp_config hwtstamp_config; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; /* ptp clock lock */ @@ -203,7 +203,11 @@ extern const struct ethtool_ops tsnep_ethtool_ops; int tsnep_ptp_init(struct tsnep_adapter *adapter); void tsnep_ptp_cleanup(struct tsnep_adapter *adapter); -int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +int tsnep_ptp_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config); +int tsnep_ptp_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); int tsnep_tc_init(struct tsnep_adapter *adapter); void tsnep_tc_cleanup(struct tsnep_adapter *adapter); diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index eba73246f986..b118407c30e8 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -2168,16 +2168,6 @@ static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb, return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); } -static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr, - int cmd) -{ - if (!netif_running(netdev)) - return -EINVAL; - if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP) - return tsnep_ptp_ioctl(netdev, ifr, cmd); - return phy_mii_ioctl(netdev->phydev, ifr, cmd); -} - static void tsnep_netdev_set_multicast(struct net_device *netdev) { struct tsnep_adapter *adapter = netdev_priv(netdev); @@ -2384,7 +2374,7 @@ static const struct net_device_ops tsnep_netdev_ops = { .ndo_open = tsnep_netdev_open, .ndo_stop = tsnep_netdev_close, .ndo_start_xmit = tsnep_netdev_xmit_frame, - .ndo_eth_ioctl = tsnep_netdev_ioctl, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_rx_mode = tsnep_netdev_set_multicast, .ndo_get_stats64 = tsnep_netdev_get_stats64, .ndo_set_mac_address = tsnep_netdev_set_mac_address, @@ -2394,6 +2384,8 @@ static const struct net_device_ops tsnep_netdev_ops = { .ndo_bpf = tsnep_netdev_bpf, .ndo_xdp_xmit = tsnep_netdev_xdp_xmit, .ndo_xsk_wakeup = tsnep_netdev_xsk_wakeup, + .ndo_hwtstamp_get = tsnep_ptp_hwtstamp_get, + .ndo_hwtstamp_set = tsnep_ptp_hwtstamp_set, }; static int tsnep_mac_init(struct tsnep_adapter *adapter) diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c index 
54fbf0126815..ae1308eb813d 100644 --- a/drivers/net/ethernet/engleder/tsnep_ptp.c +++ b/drivers/net/ethernet/engleder/tsnep_ptp.c @@ -19,57 +19,53 @@ void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time) *time = (((u64)high) << 32) | ((u64)low); } -int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +int tsnep_ptp_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { struct tsnep_adapter *adapter = netdev_priv(netdev); - struct hwtstamp_config config; - - if (!ifr) - return -EINVAL; - - if (cmd == SIOCSHWTSTAMP) { - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - case HWTSTAMP_FILTER_NTP_ALL: - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - return -ERANGE; - } - - memcpy(&adapter->hwtstamp_config, &config, - sizeof(adapter->hwtstamp_config)); + + *config = adapter->hwtstamp_config; + return 0; +} + +int tsnep_ptp_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct tsnep_adapter *adapter = netdev_priv(netdev); + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; } - if (copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, - sizeof(adapter->hwtstamp_config))) - return -EFAULT; + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + adapter->hwtstamp_config = *config; return 0; } diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 6ac8547ef9b8..3c9961806f75 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -196,7 +196,7 @@ enum intr_status_bits { ERI = 0x00000080, /* receive early int */ CNTOVF = 0x00000040, /* counter overflow */ RBU = 0x00000020, /* receive buffer unavailable */ - TBU = 0x00000010, /* transmit buffer unavilable */ + TBU = 0x00000010, /* transmit buffer unavailable */ TI = 0x00000008, /* transmit interrupt */ RI = 0x00000004, /* receive interrupt */ RxErr = 0x00000002, /* receive error */ @@ -215,7 +215,7 @@ enum rx_mode_bits { CR_W_RXMODEMASK = 0x000000e0, CR_W_PROM = 0x00000080, /* promiscuous mode */ CR_W_AB = 0x00000040, /* accept broadcast */ - 
CR_W_AM = 0x00000020, /* accept mutlicast */ + CR_W_AM = 0x00000020, /* accept multicast */ CR_W_ARP = 0x00000008, /* receive runt pkt */ CR_W_ALP = 0x00000004, /* receive long pkt */ CR_W_SEP = 0x00000002, /* receive error pkt */ diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index bbef47c3480c..e2a591cf9601 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -28,6 +28,7 @@ config FEC depends on PTP_1588_CLOCK_OPTIONAL select CRC32 select PHYLIB + select FIXED_PHY if M5272 select PAGE_POOL imply PAGE_POOL_STATS imply NET_SELFTESTS diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index d09e456f14c0..ed3fa80af8c3 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -467,6 +467,47 @@ revert_values: return res; } +static void dpaa_get_pause_stats(struct net_device *net_dev, + struct ethtool_pause_stats *s) +{ + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + + if (mac_dev->get_pause_stats) + mac_dev->get_pause_stats(mac_dev->fman_mac, s); +} + +static void dpaa_get_rmon_stats(struct net_device *net_dev, + struct ethtool_rmon_stats *s, + const struct ethtool_rmon_hist_range **ranges) +{ + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + + if (mac_dev->get_rmon_stats) + mac_dev->get_rmon_stats(mac_dev->fman_mac, s, ranges); +} + +static void dpaa_get_eth_ctrl_stats(struct net_device *net_dev, + struct ethtool_eth_ctrl_stats *s) +{ + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + + if (mac_dev->get_eth_ctrl_stats) + mac_dev->get_eth_ctrl_stats(mac_dev->fman_mac, s); +} + +static void dpaa_get_eth_mac_stats(struct net_device *net_dev, + struct ethtool_eth_mac_stats *s) +{ + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + + if (mac_dev->get_eth_mac_stats) + mac_dev->get_eth_mac_stats(mac_dev->fman_mac, s); +} + const struct ethtool_ops dpaa_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_RX_MAX_FRAMES, @@ -487,4 +528,8 @@ const struct ethtool_ops dpaa_ethtool_ops = { .get_ts_info = dpaa_get_ts_info, .get_coalesce = dpaa_get_coalesce, .set_coalesce = dpaa_set_coalesce, + .get_pause_stats = dpaa_get_pause_stats, + .get_rmon_stats = dpaa_get_rmon_stats, + .get_eth_ctrl_stats = dpaa_get_eth_ctrl_stats, + .get_eth_mac_stats = dpaa_get_eth_mac_stats, }; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index 00474ed11d53..baab4f1c908d 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -711,6 +711,13 @@ static int dpaa2_eth_update_cls_rule(struct net_device *net_dev, return 0; } +static u32 dpaa2_eth_get_rx_ring_count(struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + return dpaa2_eth_queue_count(priv); +} + static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, struct ethtool_rxnfc *rxnfc, u32 *rule_locs) { @@ -719,9 +726,6 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, int i, j = 0; switch (rxnfc->cmd) { - case ETHTOOL_GRXRINGS: - rxnfc->data = dpaa2_eth_queue_count(priv); - break; case ETHTOOL_GRXCLSRLCNT: rxnfc->rule_cnt = 0; rxnfc->rule_cnt = 
dpaa2_eth_num_cls_rules(priv); @@ -949,6 +953,7 @@ const struct ethtool_ops dpaa2_ethtool_ops = { .get_strings = dpaa2_eth_get_strings, .get_rxnfc = dpaa2_eth_get_rxnfc, .set_rxnfc = dpaa2_eth_set_rxnfc, + .get_rx_ring_count = dpaa2_eth_get_rx_ring_count, .get_rxfh_fields = dpaa2_eth_get_rxfh_fields, .set_rxfh_fields = dpaa2_eth_set_rxfh_fields, .get_ts_info = dpaa2_eth_get_ts_info, diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 0535e92404e3..d5e5800b84ef 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -14,12 +14,21 @@ u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg) { + /* ENETC with pseudo MAC does not have Ethernet MAC + * port registers. + */ + if (enetc_is_pseudo_mac(si)) + return 0; + return enetc_port_rd(&si->hw, reg); } EXPORT_SYMBOL_GPL(enetc_port_mac_rd); void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val) { + if (enetc_is_pseudo_mac(si)) + return; + enetc_port_wr(&si->hw, reg, val); if (si->hw_features & ENETC_SI_F_QBU) enetc_port_wr(&si->hw, reg + si->drvdata->pmac_offset, val); @@ -3367,7 +3376,8 @@ int enetc_hwtstamp_set(struct net_device *ndev, new_offloads |= ENETC_F_TX_TSTAMP; break; case HWTSTAMP_TX_ONESTEP_SYNC: - if (!enetc_si_is_pf(priv->si)) + if (!enetc_si_is_pf(priv->si) || + enetc_is_pseudo_mac(priv->si)) return -EOPNOTSUPP; new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; @@ -3708,6 +3718,13 @@ static const struct enetc_drvdata enetc4_pf_data = { .eth_ops = &enetc4_pf_ethtool_ops, }; +static const struct enetc_drvdata enetc4_ppm_data = { + .sysclk_freq = ENETC_CLK_333M, + .tx_csum = true, + .max_frags = ENETC4_MAX_SKB_FRAGS, + .eth_ops = &enetc4_ppm_ethtool_ops, +}; + static const struct enetc_drvdata enetc_vf_data = { .sysclk_freq = ENETC_CLK_400M, .max_frags = ENETC_MAX_SKB_FRAGS, @@ -3727,6 +3744,15 @@ static const struct enetc_platform_info enetc_info[] = { .dev_id = ENETC_DEV_ID_VF, .data = &enetc_vf_data, }, + { + .revision = ENETC_REV_4_3, + .dev_id = NXP_ENETC_PPM_DEV_ID, + .data = &enetc4_ppm_data, + }, + { .revision = ENETC_REV_4_3, + .dev_id = NXP_ENETC_PF_DEV_ID, + .data = &enetc4_pf_data, + }, }; int enetc_get_driver_data(struct enetc_si *si) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index f279fa597991..dce27bd67a7d 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -273,6 +273,7 @@ enum enetc_errata { #define ENETC_SI_F_QBV BIT(1) #define ENETC_SI_F_QBU BIT(2) #define ENETC_SI_F_LSO BIT(3) +#define ENETC_SI_F_PPM BIT(4) /* pseudo MAC */ struct enetc_drvdata { u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */ @@ -362,6 +363,11 @@ static inline int enetc_pf_to_port(struct pci_dev *pf_pdev) } } +static inline bool enetc_is_pseudo_mac(struct enetc_si *si) +{ + return si->hw_features & ENETC_SI_F_PPM; +} + #define ENETC_MAX_NUM_TXQS 8 #define ENETC_INT_NAME_MAX (IFNAMSIZ + 8) @@ -534,6 +540,8 @@ int enetc_hwtstamp_set(struct net_device *ndev, extern const struct ethtool_ops enetc_pf_ethtool_ops; extern const struct ethtool_ops enetc4_pf_ethtool_ops; extern const struct ethtool_ops enetc_vf_ethtool_ops; +extern const struct ethtool_ops enetc4_ppm_ethtool_ops; + void enetc_set_ethtool_ops(struct net_device *ndev); void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link); void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv); diff --git 
a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h index 19bf0e89cdc2..3ed0f7a02767 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h @@ -11,6 +11,7 @@ #define NXP_ENETC_VENDOR_ID 0x1131 #define NXP_ENETC_PF_DEV_ID 0xe101 +#define NXP_ENETC_PPM_DEV_ID 0xe110 /**********************Station interface registers************************/ /* Station interface LSO segmentation flag mask register 0/1 */ @@ -115,6 +116,10 @@ #define PMCAPR_HD BIT(8) #define PMCAPR_FP GENMASK(10, 9) +/* Port capability register */ +#define ENETC4_PCAPR 0x4000 +#define PCAPR_LINK_TYPE BIT(4) + /* Port configuration register */ #define ENETC4_PCR 0x4010 #define PCR_HDR_FMT BIT(0) @@ -165,6 +170,9 @@ /* Port MAC 0/1 Maximum Frame Length Register */ #define ENETC4_PM_MAXFRM(mac) (0x5014 + (mac) * 0x400) +/* Port internal MDIO base address, use to access PCS */ +#define ENETC4_PM_IMDIO_BASE 0x5030 + /* Port MAC 0/1 Pause Quanta Register */ #define ENETC4_PM_PAUSE_QUANTA(mac) (0x5054 + (mac) * 0x400) @@ -193,4 +201,32 @@ #define SSP_1G 2 #define PM_IF_MODE_ENA BIT(15) +/* Port external MDIO Base address, use to access off-chip PHY */ +#define ENETC4_EMDIO_BASE 0x5c00 + +/**********************ENETC Pseudo MAC port registers************************/ +/* Port pseudo MAC receive octets counter (64-bit) */ +#define ENETC4_PPMROCR 0x5080 + +/* Port pseudo MAC receive unicast frame counter register (64-bit) */ +#define ENETC4_PPMRUFCR 0x5088 + +/* Port pseudo MAC receive multicast frame counter register (64-bit) */ +#define ENETC4_PPMRMFCR 0x5090 + +/* Port pseudo MAC receive broadcast frame counter register (64-bit) */ +#define ENETC4_PPMRBFCR 0x5098 + +/* Port pseudo MAC transmit octets counter (64-bit) */ +#define ENETC4_PPMTOCR 0x50c0 + +/* Port pseudo MAC transmit unicast frame counter register (64-bit) */ +#define ENETC4_PPMTUFCR 0x50c8 + +/* Port pseudo MAC transmit multicast frame counter register (64-bit) */ +#define ENETC4_PPMTMFCR 0x50d0 + +/* Port pseudo MAC transmit broadcast frame counter register (64-bit) */ +#define ENETC4_PPMTBFCR 0x50d8 + #endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c index 82c443b28b15..498346dd996a 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c @@ -41,6 +41,16 @@ static void enetc4_get_port_caps(struct enetc_pf *pf) pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE; } +static void enetc4_get_psi_hw_features(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + u32 val; + + val = enetc_port_rd(hw, ENETC4_PCAPR); + if (val & PCAPR_LINK_TYPE) + si->hw_features |= ENETC_SI_F_PPM; +} + static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si, const u8 *addr) { @@ -277,6 +287,7 @@ static int enetc4_pf_struct_init(struct enetc_si *si) pf->ops = &enetc4_pf_ops; enetc4_get_port_caps(pf); + enetc4_get_psi_hw_features(si); return 0; } @@ -589,6 +600,9 @@ static void enetc4_mac_config(struct enetc_pf *pf, unsigned int mode, struct enetc_si *si = pf->si; u32 val; + if (enetc_is_pseudo_mac(si)) + return; + val = enetc_port_mac_rd(si, ENETC4_PM_IF_MODE(0)); val &= ~(PM_IF_MODE_IFMODE | PM_IF_MODE_ENA); @@ -1071,6 +1085,7 @@ static void enetc4_pf_remove(struct pci_dev *pdev) static const struct pci_device_id enetc4_pf_id_table[] = { { PCI_DEVICE(NXP_ENETC_VENDOR_ID, NXP_ENETC_PF_DEV_ID) }, + { PCI_DEVICE(NXP_ENETC_VENDOR_ID, 
NXP_ENETC_PPM_DEV_ID) }, { 0, } /* End of table. */ }; MODULE_DEVICE_TABLE(pci, enetc4_pf_id_table); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 71d052de669a..fed89d4f1e1d 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -435,6 +435,48 @@ static void enetc_get_eth_mac_stats(struct net_device *ndev, } } +static void enetc_ppm_mac_stats(struct enetc_si *si, + struct ethtool_eth_mac_stats *s) +{ + struct enetc_hw *hw = &si->hw; + u64 rufcr, rmfcr, rbfcr; + u64 tufcr, tmfcr, tbfcr; + + rufcr = enetc_port_rd64(hw, ENETC4_PPMRUFCR); + rmfcr = enetc_port_rd64(hw, ENETC4_PPMRMFCR); + rbfcr = enetc_port_rd64(hw, ENETC4_PPMRBFCR); + + tufcr = enetc_port_rd64(hw, ENETC4_PPMTUFCR); + tmfcr = enetc_port_rd64(hw, ENETC4_PPMTMFCR); + tbfcr = enetc_port_rd64(hw, ENETC4_PPMTBFCR); + + s->FramesTransmittedOK = tufcr + tmfcr + tbfcr; + s->FramesReceivedOK = rufcr + rmfcr + rbfcr; + s->OctetsTransmittedOK = enetc_port_rd64(hw, ENETC4_PPMTOCR); + s->OctetsReceivedOK = enetc_port_rd64(hw, ENETC4_PPMROCR); + s->MulticastFramesXmittedOK = tmfcr; + s->BroadcastFramesXmittedOK = tbfcr; + s->MulticastFramesReceivedOK = rmfcr; + s->BroadcastFramesReceivedOK = rbfcr; +} + +static void enetc_ppm_get_eth_mac_stats(struct net_device *ndev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + switch (mac_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + enetc_ppm_mac_stats(priv->si, mac_stats); + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + break; + case ETHTOOL_MAC_STATS_SRC_AGGREGATE: + ethtool_aggregate_mac_stats(ndev, mac_stats); + break; + } +} + static void enetc_get_eth_ctrl_stats(struct net_device *ndev, struct ethtool_eth_ctrl_stats *ctrl_stats) { @@ -591,6 +633,13 @@ done: return enetc_set_fs_entry(si, &rfse, fs->location); } +static u32 enetc_get_rx_ring_count(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + return priv->num_rx_rings; +} + static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, u32 *rule_locs) { @@ -598,9 +647,6 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, int i, j; switch (rxnfc->cmd) { - case ETHTOOL_GRXRINGS: - rxnfc->data = priv->num_rx_rings; - break; case ETHTOOL_GRXCLSRLCNT: /* total number of entries */ rxnfc->data = priv->si->num_fs_entries; @@ -639,27 +685,6 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, return 0; } -/* i.MX95 ENETC does not support RFS table, but we can use ingress port - * filter table to implement Wake-on-LAN filter or drop the matched flow, - * so the implementation will be different from enetc_get_rxnfc() and - * enetc_set_rxnfc(). Therefore, add enetc4_get_rxnfc() for ENETC v4 PF. 
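[Editor's note: the removal below is one instance of a conversion repeated across dpaa2-eth, enetc and gianfar in this section — the ETHTOOL_GRXRINGS case moves out of .get_rxnfc into the dedicated .get_rx_ring_count ethtool operation added by this series. A minimal sketch of the resulting driver shape, using hypothetical 'foo' names:]

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	struct foo_priv {
		u32 num_rx_rings;	/* hypothetical private state */
	};

	static u32 foo_get_rx_ring_count(struct net_device *ndev)
	{
		struct foo_priv *priv = netdev_priv(ndev);

		return priv->num_rx_rings;
	}

	static const struct ethtool_ops foo_ethtool_ops = {
		/* ETHTOOL_GRXRINGS is answered here; .get_rxnfc keeps only
		 * the classification-rule cases.
		 */
		.get_rx_ring_count = foo_get_rx_ring_count,
	};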
- */ -static int enetc4_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, - u32 *rule_locs) -{ - struct enetc_ndev_priv *priv = netdev_priv(ndev); - - switch (rxnfc->cmd) { - case ETHTOOL_GRXRINGS: - rxnfc->data = priv->num_rx_rings; - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc) { struct enetc_ndev_priv *priv = netdev_priv(ndev); @@ -894,6 +919,9 @@ static int enetc_get_phc_index_by_pdev(struct enetc_si *si) case ENETC_REV_4_1: devfn = PCI_DEVFN(24, 0); break; + case ENETC_REV_4_3: + devfn = PCI_DEVFN(0, 1); + break; default: return -1; } @@ -1290,6 +1318,7 @@ const struct ethtool_ops enetc_pf_ethtool_ops = { .get_rmon_stats = enetc_get_rmon_stats, .get_eth_ctrl_stats = enetc_get_eth_ctrl_stats, .get_eth_mac_stats = enetc_get_eth_mac_stats, + .get_rx_ring_count = enetc_get_rx_ring_count, .get_rxnfc = enetc_get_rxnfc, .set_rxnfc = enetc_set_rxnfc, .get_rxfh_key_size = enetc_get_rxfh_key_size, @@ -1313,6 +1342,25 @@ const struct ethtool_ops enetc_pf_ethtool_ops = { .get_mm_stats = enetc_get_mm_stats, }; +const struct ethtool_ops enetc4_ppm_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .get_eth_mac_stats = enetc_ppm_get_eth_mac_stats, + .get_rx_ring_count = enetc_get_rx_ring_count, + .get_rxfh_key_size = enetc_get_rxfh_key_size, + .get_rxfh_indir_size = enetc_get_rxfh_indir_size, + .get_rxfh = enetc_get_rxfh, + .set_rxfh = enetc_set_rxfh, + .get_rxfh_fields = enetc_get_rxfh_fields, + .get_ringparam = enetc_get_ringparam, + .get_coalesce = enetc_get_coalesce, + .set_coalesce = enetc_set_coalesce, + .get_link_ksettings = enetc_get_link_ksettings, + .set_link_ksettings = enetc_set_link_ksettings, + .get_link = ethtool_op_get_link, +}; + const struct ethtool_ops enetc_vf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | @@ -1322,6 +1370,7 @@ const struct ethtool_ops enetc_vf_ethtool_ops = { .get_sset_count = enetc_get_sset_count, .get_strings = enetc_get_strings, .get_ethtool_stats = enetc_get_ethtool_stats, + .get_rx_ring_count = enetc_get_rx_ring_count, .get_rxnfc = enetc_get_rxnfc, .set_rxnfc = enetc_set_rxnfc, .get_rxfh_indir_size = enetc_get_rxfh_indir_size, @@ -1349,7 +1398,7 @@ const struct ethtool_ops enetc4_pf_ethtool_ops = { .set_wol = enetc_set_wol, .get_pauseparam = enetc_get_pauseparam, .set_pauseparam = enetc_set_pauseparam, - .get_rxnfc = enetc4_get_rxnfc, + .get_rx_ring_count = enetc_get_rx_ring_count, .get_rxfh_key_size = enetc_get_rxfh_key_size, .get_rxfh_indir_size = enetc_get_rxfh_indir_size, .get_rxfh = enetc_get_rxfh, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 377c96325814..7b882b8921fe 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -378,6 +378,7 @@ enum enetc_bdr_type {TX, RX}; #define EIPBRR0_REVISION GENMASK(15, 0) #define ENETC_REV_1_0 0x0100 #define ENETC_REV_4_1 0X0401 +#define ENETC_REV_4_3 0x0403 #define ENETC_G_EIPBRR1 0x0bfc #define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n)) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c index edf14a95cab7..76263b8566bb 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c @@ -109,7 +109,7 
@@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4; ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | @@ -133,6 +133,9 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->features |= NETIF_F_RXHASH; } + if (!enetc_is_pseudo_mac(si)) + ndev->hw_features |= NETIF_F_LOOPBACK; + /* TODO: currently, i.MX95 ENETC driver does not support advanced features */ if (!is_enetc_rev1(si)) goto end; @@ -173,7 +176,12 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np) bus->parent = dev; mdio_priv = bus->priv; mdio_priv->hw = &pf->si->hw; - mdio_priv->mdio_base = ENETC_EMDIO_BASE; + + if (is_enetc_rev1(pf->si)) + mdio_priv->mdio_base = ENETC_EMDIO_BASE; + else + mdio_priv->mdio_base = ENETC4_EMDIO_BASE; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); err = of_mdiobus_register(bus, np); @@ -218,7 +226,12 @@ static int enetc_imdio_create(struct enetc_pf *pf) bus->phy_mask = ~0; mdio_priv = bus->priv; mdio_priv->hw = &pf->si->hw; - mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE; + + if (is_enetc_rev1(pf->si)) + mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE; + else + mdio_priv->mdio_base = ENETC4_PM_IMDIO_BASE; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev)); err = mdiobus_register(bus); diff --git a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c index bcb8eefeb93c..443983fdecd9 100644 --- a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c +++ b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c @@ -47,6 +47,13 @@ #define PCS_PROT_SFI BIT(4) #define PCS_PROT_10G_SXGMII BIT(6) +#define IMX94_EXT_PIN_CONTROL 0x10 +#define MAC2_MAC3_SEL BIT(1) + +#define IMX94_NETC_LINK_CFG(a) (0x4c + (a) * 4) +#define NETC_LINK_CFG_MII_PROT GENMASK(3, 0) +#define NETC_LINK_CFG_IO_VAR GENMASK(19, 16) + /* NETC privileged register block register */ #define PRB_NETCRR 0x100 #define NETCRR_SR BIT(0) @@ -59,6 +66,10 @@ /* NETC integrated endpoint register block register */ #define IERB_EMDIOFAUXR 0x344 #define IERB_T0FAUXR 0x444 +#define IERB_ETBCR(a) (0x300c + 0x100 * (a)) +#define IERB_LBCR(a) (0x1010 + 0x40 * (a)) +#define LBCR_MDIO_PHYAD_PRTAD(addr) (((addr) & 0x1f) << 8) + #define IERB_EFAUXR(a) (0x3044 + 0x100 * (a)) #define IERB_VFAUXR(a) (0x4004 + 0x40 * (a)) #define FAUXR_LDID GENMASK(3, 0) @@ -68,6 +79,19 @@ #define IMX95_ENETC1_BUS_DEVFN 0x40 #define IMX95_ENETC2_BUS_DEVFN 0x80 +#define IMX94_ENETC0_BUS_DEVFN 0x100 +#define IMX94_ENETC1_BUS_DEVFN 0x140 +#define IMX94_ENETC2_BUS_DEVFN 0x180 +#define IMX94_TIMER0_BUS_DEVFN 0x1 +#define IMX94_TIMER1_BUS_DEVFN 0x101 +#define IMX94_TIMER2_BUS_DEVFN 0x181 +#define IMX94_ENETC0_LINK 3 +#define IMX94_ENETC1_LINK 4 +#define IMX94_ENETC2_LINK 5 + +#define NETC_ENETC_ID(a) (a) +#define NETC_TIMER_ID(a) (a) + /* Flags for different platforms */ #define NETC_HAS_NETCMIX BIT(0) @@ -192,6 +216,90 @@ static int imx95_netcmix_init(struct platform_device *pdev) return 0; } +static int imx94_enetc_get_link_id(struct device_node *np) +{ + int bus_devfn = netc_of_pci_get_bus_devfn(np); + + /* Parse ENETC link number */ + switch (bus_devfn) { + case IMX94_ENETC0_BUS_DEVFN: + return IMX94_ENETC0_LINK; + case IMX94_ENETC1_BUS_DEVFN: + return IMX94_ENETC1_LINK; + case 
IMX94_ENETC2_BUS_DEVFN: + return IMX94_ENETC2_LINK; + default: + return -EINVAL; + } +} + +static int imx94_link_config(struct netc_blk_ctrl *priv, + struct device_node *np, int link_id) +{ + phy_interface_t interface; + int mii_proto; + u32 val; + + /* The node may be disabled and does not have a 'phy-mode' + * or 'phy-connection-type' property. + */ + if (of_get_phy_mode(np, &interface)) + return 0; + + mii_proto = netc_get_link_mii_protocol(interface); + if (mii_proto < 0) + return mii_proto; + + val = mii_proto & NETC_LINK_CFG_MII_PROT; + if (val == MII_PROT_SERIAL) + val = u32_replace_bits(val, IO_VAR_16FF_16G_SERDES, + NETC_LINK_CFG_IO_VAR); + + netc_reg_write(priv->netcmix, IMX94_NETC_LINK_CFG(link_id), val); + + return 0; +} + +static int imx94_enetc_link_config(struct netc_blk_ctrl *priv, + struct device_node *np) +{ + int link_id = imx94_enetc_get_link_id(np); + + if (link_id < 0) + return link_id; + + return imx94_link_config(priv, np, link_id); +} + +static int imx94_netcmix_init(struct platform_device *pdev) +{ + struct netc_blk_ctrl *priv = platform_get_drvdata(pdev); + struct device_node *np = pdev->dev.of_node; + u32 val; + int err; + + for_each_child_of_node_scoped(np, child) { + for_each_child_of_node_scoped(child, gchild) { + if (!of_device_is_compatible(gchild, "pci1131,e101")) + continue; + + err = imx94_enetc_link_config(priv, gchild); + if (err) + return err; + } + } + + /* ENETC 0 and switch port 2 share the same parallel interface. + * Currently, the switch is not supported, so this interface is + * used by ENETC 0 by default. + */ + val = netc_reg_read(priv->netcmix, IMX94_EXT_PIN_CONTROL); + val |= MAC2_MAC3_SEL; + netc_reg_write(priv->netcmix, IMX94_EXT_PIN_CONTROL, val); + + return 0; +} + static bool netc_ierb_is_locked(struct netc_blk_ctrl *priv) { return !!(netc_reg_read(priv->prb, PRB_NETCRR) & NETCRR_LOCK); @@ -217,6 +325,142 @@ static int netc_unlock_ierb_with_warm_reset(struct netc_blk_ctrl *priv) 1000, 100000, true, priv->prb, PRB_NETCRR); } +static int netc_get_phy_addr(struct device_node *np) +{ + struct device_node *mdio_node, *phy_node; + u32 addr = 0; + int err = 0; + + mdio_node = of_get_child_by_name(np, "mdio"); + if (!mdio_node) + return 0; + + phy_node = of_get_next_child(mdio_node, NULL); + if (!phy_node) + goto of_put_mdio_node; + + err = of_property_read_u32(phy_node, "reg", &addr); + if (err) + goto of_put_phy_node; + + if (addr >= PHY_MAX_ADDR) + err = -EINVAL; + +of_put_phy_node: + of_node_put(phy_node); + +of_put_mdio_node: + of_node_put(mdio_node); + + return err ? 
err : addr; +} + +static int netc_parse_emdio_phy_mask(struct device_node *np, u32 *phy_mask) +{ + u32 mask = 0; + + for_each_child_of_node_scoped(np, child) { + u32 addr; + int err; + + err = of_property_read_u32(child, "reg", &addr); + if (err) + return err; + + if (addr >= PHY_MAX_ADDR) + return -EINVAL; + + mask |= BIT(addr); + } + + *phy_mask = mask; + + return 0; +} + +static int netc_get_emdio_phy_mask(struct device_node *np, u32 *phy_mask) +{ + for_each_child_of_node_scoped(np, child) { + for_each_child_of_node_scoped(child, gchild) { + if (!of_device_is_compatible(gchild, "pci1131,ee00")) + continue; + + return netc_parse_emdio_phy_mask(gchild, phy_mask); + } + } + + return 0; +} + +static int imx95_enetc_mdio_phyaddr_config(struct platform_device *pdev) +{ + struct netc_blk_ctrl *priv = platform_get_drvdata(pdev); + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + int bus_devfn, addr, err; + u32 phy_mask = 0; + + err = netc_get_emdio_phy_mask(np, &phy_mask); + if (err) { + dev_err(dev, "Failed to get PHY address mask\n"); + return err; + } + + /* Update the port EMDIO PHY address through parsing phy properties. + * This is needed when using the port EMDIO but it's harmless when + * using the central EMDIO. So apply it on all cases. + */ + for_each_child_of_node_scoped(np, child) { + for_each_child_of_node_scoped(child, gchild) { + if (!of_device_is_compatible(gchild, "pci1131,e101")) + continue; + + bus_devfn = netc_of_pci_get_bus_devfn(gchild); + if (bus_devfn < 0) { + dev_err(dev, "Failed to get BDF number\n"); + return bus_devfn; + } + + addr = netc_get_phy_addr(gchild); + if (addr < 0) { + dev_err(dev, "Failed to get PHY address\n"); + return addr; + } + + if (phy_mask & BIT(addr)) { + dev_err(dev, + "Find same PHY address in EMDIO and ENETC node\n"); + return -EINVAL; + } + + /* The default value of LaBCR[MDIO_PHYAD_PRTAD ] is + * 0, so no need to set the register. + */ + if (!addr) + continue; + + switch (bus_devfn) { + case IMX95_ENETC0_BUS_DEVFN: + netc_reg_write(priv->ierb, IERB_LBCR(0), + LBCR_MDIO_PHYAD_PRTAD(addr)); + break; + case IMX95_ENETC1_BUS_DEVFN: + netc_reg_write(priv->ierb, IERB_LBCR(1), + LBCR_MDIO_PHYAD_PRTAD(addr)); + break; + case IMX95_ENETC2_BUS_DEVFN: + netc_reg_write(priv->ierb, IERB_LBCR(2), + LBCR_MDIO_PHYAD_PRTAD(addr)); + break; + default: + break; + } + } + } + + return 0; +} + static int imx95_ierb_init(struct platform_device *pdev) { struct netc_blk_ctrl *priv = platform_get_drvdata(pdev); @@ -244,6 +488,155 @@ static int imx95_ierb_init(struct platform_device *pdev) /* NETC TIMER */ netc_reg_write(priv->ierb, IERB_T0FAUXR, 7); + return imx95_enetc_mdio_phyaddr_config(pdev); +} + +static int imx94_get_enetc_id(struct device_node *np) +{ + int bus_devfn = netc_of_pci_get_bus_devfn(np); + + /* Parse ENETC offset */ + switch (bus_devfn) { + case IMX94_ENETC0_BUS_DEVFN: + return NETC_ENETC_ID(0); + case IMX94_ENETC1_BUS_DEVFN: + return NETC_ENETC_ID(1); + case IMX94_ENETC2_BUS_DEVFN: + return NETC_ENETC_ID(2); + default: + return -EINVAL; + } +} + +static int imx94_get_timer_id(struct device_node *np) +{ + int bus_devfn = netc_of_pci_get_bus_devfn(np); + + /* Parse NETC PTP timer ID, the timer0 is on bus 0, + * the timer 1 and timer2 is on bus 1. 
+ */ + switch (bus_devfn) { + case IMX94_TIMER0_BUS_DEVFN: + return NETC_TIMER_ID(0); + case IMX94_TIMER1_BUS_DEVFN: + return NETC_TIMER_ID(1); + case IMX94_TIMER2_BUS_DEVFN: + return NETC_TIMER_ID(2); + default: + return -EINVAL; + } +} + +static int imx94_enetc_update_tid(struct netc_blk_ctrl *priv, + struct device_node *np) +{ + struct device *dev = &priv->pdev->dev; + struct device_node *timer_np; + int eid, tid; + + eid = imx94_get_enetc_id(np); + if (eid < 0) { + dev_err(dev, "Failed to get ENETC ID\n"); + return eid; + } + + timer_np = of_parse_phandle(np, "ptp-timer", 0); + if (!timer_np) { + /* If 'ptp-timer' is not present, the timer1 is the default + * timer of all standalone ENETCs, which is on the same PCIe + * bus as these ENETCs. + */ + tid = NETC_TIMER_ID(1); + goto end; + } + + tid = imx94_get_timer_id(timer_np); + of_node_put(timer_np); + if (tid < 0) { + dev_err(dev, "Failed to get NETC Timer ID\n"); + return tid; + } + +end: + netc_reg_write(priv->ierb, IERB_ETBCR(eid), tid); + + return 0; +} + +static int imx94_enetc_mdio_phyaddr_config(struct netc_blk_ctrl *priv, + struct device_node *np, + u32 phy_mask) +{ + struct device *dev = &priv->pdev->dev; + int bus_devfn, addr; + + bus_devfn = netc_of_pci_get_bus_devfn(np); + if (bus_devfn < 0) { + dev_err(dev, "Failed to get BDF number\n"); + return bus_devfn; + } + + addr = netc_get_phy_addr(np); + if (addr <= 0) { + dev_err(dev, "Failed to get PHY address\n"); + return addr; + } + + if (phy_mask & BIT(addr)) { + dev_err(dev, + "Find same PHY address in EMDIO and ENETC node\n"); + return -EINVAL; + } + + switch (bus_devfn) { + case IMX94_ENETC0_BUS_DEVFN: + netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC0_LINK), + LBCR_MDIO_PHYAD_PRTAD(addr)); + break; + case IMX94_ENETC1_BUS_DEVFN: + netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC1_LINK), + LBCR_MDIO_PHYAD_PRTAD(addr)); + break; + case IMX94_ENETC2_BUS_DEVFN: + netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC2_LINK), + LBCR_MDIO_PHYAD_PRTAD(addr)); + break; + default: + break; + } + + return 0; +} + +static int imx94_ierb_init(struct platform_device *pdev) +{ + struct netc_blk_ctrl *priv = platform_get_drvdata(pdev); + struct device_node *np = pdev->dev.of_node; + u32 phy_mask = 0; + int err; + + err = netc_get_emdio_phy_mask(np, &phy_mask); + if (err) { + dev_err(&pdev->dev, "Failed to get PHY address mask\n"); + return err; + } + + for_each_child_of_node_scoped(np, child) { + for_each_child_of_node_scoped(child, gchild) { + if (!of_device_is_compatible(gchild, "pci1131,e101")) + continue; + + err = imx94_enetc_update_tid(priv, gchild); + if (err) + return err; + + err = imx94_enetc_mdio_phyaddr_config(priv, gchild, + phy_mask); + if (err) + return err; + } + } + return 0; } @@ -340,8 +733,15 @@ static const struct netc_devinfo imx95_devinfo = { .ierb_init = imx95_ierb_init, }; +static const struct netc_devinfo imx94_devinfo = { + .flags = NETC_HAS_NETCMIX, + .netcmix_init = imx94_netcmix_init, + .ierb_init = imx94_ierb_init, +}; + static const struct of_device_id netc_blk_ctrl_match[] = { { .compatible = "nxp,imx95-netc-blk-ctrl", .data = &imx95_devinfo }, + { .compatible = "nxp,imx94-netc-blk-ctrl", .data = &imx94_devinfo }, {}, }; MODULE_DEVICE_TABLE(of, netc_blk_ctrl_match); diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index abf1ef8e76c6..fd9a93d02f8e 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -24,9 +24,7 @@ #include <linux/timecounter.h> #include <net/xdp.h> -#if 
defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ - defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) +#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST) /* * Just figures, Motorola would have to change the offsets for * registers in the same peripheral device on different models @@ -242,23 +240,6 @@ struct bufdesc_ex { __fec16 res0[4]; }; -/* - * The following definitions courtesy of commproc.h, which where - * Copyright (c) 1997 Dan Malek (dmalek@jlc.net). - */ -#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */ -#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */ -#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */ -#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */ -#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */ -#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */ -#define BD_SC_P ((ushort)0x0100) /* xmt preamble */ -#define BD_SC_BR ((ushort)0x0020) /* Break received */ -#define BD_SC_FR ((ushort)0x0010) /* Framing error */ -#define BD_SC_PR ((ushort)0x0008) /* Parity error */ -#define BD_SC_OV ((ushort)0x0002) /* Overrun */ -#define BD_SC_CD ((ushort)0x0001) /* ?? */ - /* Buffer descriptor control/status used by Ethernet receive. */ #define BD_ENET_RX_EMPTY ((ushort)0x8000) @@ -530,12 +511,6 @@ struct bufdesc_prop { unsigned char dsize_log2; }; -struct fec_enet_priv_txrx_info { - int offset; - struct page *page; - struct sk_buff *skb; -}; - enum { RX_XDP_REDIRECT = 0, RX_XDP_PASS, @@ -575,7 +550,7 @@ struct fec_enet_priv_tx_q { struct fec_enet_priv_rx_q { struct bufdesc_prop bd; - struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE]; + struct page *rx_buf[RX_RING_SIZE]; /* page_pool */ struct page_pool *page_pool; @@ -668,7 +643,6 @@ struct fec_enet_private { struct pm_qos_request pm_qos_req; unsigned int tx_align; - unsigned int rx_align; /* hw interrupt coalesce */ unsigned int rx_pkts_itr; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3222359ac15b..c685a5c0cc51 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -52,6 +52,7 @@ #include <linux/of_net.h> #include <linux/phy.h> #include <linux/pinctrl/consumer.h> +#include <linux/phy_fixed.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/prefetch.h> @@ -252,9 +253,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); * size bits. Other FEC hardware does not, so we need to take that into * account when setting it. 
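[Editor's note: the guard rewritten just below mirrors the fec.h change above — an allow-list of every platform whose FEC has the extended register set collapses to a test for the single exception, CONFIG_M5272. A schematic comparison with hypothetical macro names:]

	/* Before: enumerate the haves; a newly added arch is silently
	 * treated as lacking the feature until the list is updated.
	 */
	#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || /* ... */ \
	    defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	#define DEMO_OLD_HAS_MAX_FL 1
	#else
	#define DEMO_OLD_HAS_MAX_FL 0
	#endif

	/* After: name the one have-not; every other platform, present or
	 * future, gets the feature by default.
	 */
	#ifndef CONFIG_M5272
	#define DEMO_NEW_HAS_MAX_FL 1
	#else
	#define DEMO_NEW_HAS_MAX_FL 0
	#endif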
*/ -#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ - defined(CONFIG_ARM64) +#ifndef CONFIG_M5272 #define OPT_ARCH_HAS_MAX_FL 1 #else #define OPT_ARCH_HAS_MAX_FL 0 @@ -1011,7 +1010,7 @@ static void fec_enet_bd_init(struct net_device *dev) /* Set the last buffer to wrap */ bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); - bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); + bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP); rxq->bd.cur = rxq->bd.base; } @@ -1061,7 +1060,7 @@ static void fec_enet_bd_init(struct net_device *dev) /* Set the last buffer to wrap */ bdp = fec_enet_get_prevdesc(bdp, &txq->bd); - bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); + bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP); txq->dirty_tx = bdp; } } @@ -1656,8 +1655,7 @@ static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq, if (unlikely(!new_page)) return -ENOMEM; - rxq->rx_skb_info[index].page = new_page; - rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM; + rxq->rx_buf[index] = new_page; phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM; bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); @@ -1772,7 +1770,6 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget) __fec32 cbd_bufaddr; u32 sub_len = 4; -#if !defined(CONFIG_M5272) /*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of * FEC_RACC_SHIFT16 is set by default in the probe function. */ @@ -1780,7 +1777,6 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget) data_start += 2; sub_len += 2; } -#endif #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA) /* @@ -1839,7 +1835,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget) ndev->stats.rx_bytes -= 2; index = fec_enet_get_bd_index(bdp, &rxq->bd); - page = rxq->rx_skb_info[index].page; + page = rxq->rx_buf[index]; cbd_bufaddr = bdp->cbd_bufaddr; if (fec_enet_update_cbd(rxq, bdp, index)) { ndev->stats.rx_dropped++; @@ -2233,7 +2229,6 @@ static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); out: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; @@ -2282,7 +2277,6 @@ static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id, ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); out: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; @@ -2314,7 +2308,6 @@ static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, if (ret) netdev_err(fep->netdev, "MDIO write timeout\n"); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; @@ -2358,7 +2351,6 @@ static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id, netdev_err(fep->netdev, "MDIO write timeout\n"); out: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; @@ -2478,11 +2470,8 @@ static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep, static int fec_enet_mii_probe(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phy_dev = NULL; - char mdio_bus_id[MII_BUS_ID_SIZE]; - char phy_name[MII_BUS_ID_SIZE + 3]; - int phy_id; - int dev_id = fep->dev_id; + struct phy_device *phy_dev; + int ret; if (fep->phy_node) { phy_dev = of_phy_connect(ndev, fep->phy_node, @@ -2494,30 +2483,28 @@ static int fec_enet_mii_probe(struct net_device *ndev) } } else { /* check for attached phy */ - for (phy_id = 0; (phy_id < 
PHY_MAX_ADDR); phy_id++) { - if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) - continue; - if (dev_id--) - continue; - strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); - break; - } + phy_dev = phy_find_first(fep->mii_bus); + if (fep->dev_id && phy_dev) + phy_dev = phy_find_next(fep->mii_bus, phy_dev); - if (phy_id >= PHY_MAX_ADDR) { + if (!phy_dev) { netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); - strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); - phy_id = 0; + phy_dev = fixed_phy_register_100fd(); + if (IS_ERR(phy_dev)) { + netdev_err(ndev, "could not register fixed PHY\n"); + return PTR_ERR(phy_dev); + } } - snprintf(phy_name, sizeof(phy_name), - PHY_ID_FMT, mdio_bus_id, phy_id); - phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, - fep->phy_interface); - } + ret = phy_connect_direct(ndev, phy_dev, &fec_enet_adjust_link, + fep->phy_interface); + if (ret) { + if (phy_is_pseudo_fixed_link(phy_dev)) + fixed_phy_unregister(phy_dev); + netdev_err(ndev, "could not attach to PHY\n"); + return ret; + } - if (IS_ERR(phy_dev)) { - netdev_err(ndev, "could not attach to PHY\n"); - return PTR_ERR(phy_dev); } /* mask with MAC supported features */ @@ -2525,9 +2512,7 @@ static int fec_enet_mii_probe(struct net_device *ndev) phy_set_max_speed(phy_dev, 1000); phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); -#if !defined(CONFIG_M5272) phy_support_sym_pause(phy_dev); -#endif } else phy_set_max_speed(phy_dev, 100); @@ -2554,7 +2539,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) int err = -ENXIO; u32 mii_speed, holdtime; u32 bus_freq; - int addr; /* * The i.MX28 dual fec interfaces are not equal. @@ -2669,11 +2653,8 @@ static int fec_enet_mii_init(struct platform_device *pdev) of_node_put(node); /* find all the PHY devices on the bus and set mac_managed_pm to true */ - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - phydev = mdiobus_get_phy(fep->mii_bus, addr); - if (phydev) - phydev->mac_managed_pm = true; - } + mdiobus_for_each_phy(fep->mii_bus, phydev) + phydev->mac_managed_pm = true; mii_cnt++; @@ -2722,9 +2703,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) } /* List of registers that can be safety be read to dump them with ethtool */ -#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ - defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) +#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST) static __u32 fec_enet_register_version = 2; static u32 fec_enet_register_offset[] = { FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, @@ -2798,30 +2777,22 @@ static u32 fec_enet_register_offset[] = { static void fec_enet_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *regbuf) { + u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); struct fec_enet_private *fep = netdev_priv(ndev); u32 __iomem *theregs = (u32 __iomem *)fep->hwp; + u32 *reg_list = fec_enet_register_offset; struct device *dev = &fep->pdev->dev; u32 *buf = (u32 *)regbuf; u32 i, off; int ret; -#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ - defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) - u32 *reg_list; - u32 reg_cnt; - - if (!of_machine_is_compatible("fsl,imx6ul")) { - reg_list = fec_enet_register_offset; - reg_cnt = ARRAY_SIZE(fec_enet_register_offset); - } else { + +#if 
!defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST) + if (of_machine_is_compatible("fsl,imx6ul")) { reg_list = fec_enet_register_offset_6ul; reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul); } -#else - /* coldfire */ - static u32 *reg_list = fec_enet_register_offset; - static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); #endif + ret = pm_runtime_resume_and_get(dev); if (ret < 0) return; @@ -2841,7 +2812,6 @@ static void fec_enet_get_regs(struct net_device *ndev, buf[off] = readl(&theregs[off]); } - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } @@ -3341,7 +3311,8 @@ static void fec_enet_free_buffers(struct net_device *ndev) for (q = 0; q < fep->num_rx_queues; q++) { rxq = fep->rx_queue[q]; for (i = 0; i < rxq->bd.ring_size; i++) - page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false); + page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i], + false); for (i = 0; i < XDP_STATS_TOTAL; i++) rxq->stats[i] = 0; @@ -3467,6 +3438,19 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) return err; } + /* Some platforms require the RX buffer must be 64 bytes alignment. + * Some platforms require 16 bytes alignment. And some platforms + * require 4 bytes alignment. But since the page pool have been + * introduced into the driver, the address of RX buffer is always + * the page address plus FEC_ENET_XDP_HEADROOM, and + * FEC_ENET_XDP_HEADROOM is 256 bytes. Therefore, this address can + * satisfy all platforms. To prevent future modifications to + * FEC_ENET_XDP_HEADROOM from ignoring this hardware limitation, a + * BUILD_BUG_ON() test has been added, which ensures that + * FEC_ENET_XDP_HEADROOM provides the required alignment. + */ + BUILD_BUG_ON(FEC_ENET_XDP_HEADROOM & 0x3f); + for (i = 0; i < rxq->bd.ring_size; i++) { page = page_pool_dev_alloc_pages(rxq->page_pool); if (!page) @@ -3475,8 +3459,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM; bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); - rxq->rx_skb_info[i].page = page; - rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM; + rxq->rx_buf[i] = page; bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); if (fep->bufdesc_ex) { @@ -3489,7 +3472,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) /* Set the last buffer to wrap. */ bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); - bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); + bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP); return 0; err_alloc: @@ -3525,7 +3508,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) /* Set the last buffer to wrap. 
*/ bdp = fec_enet_get_prevdesc(bdp, &txq->bd); - bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); + bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP); return 0; @@ -3618,7 +3601,6 @@ err_enet_mii_probe: err_enet_alloc: fec_enet_clk_enable(ndev, false); clk_enable: - pm_runtime_mark_last_busy(&fep->pdev->dev); pm_runtime_put_autosuspend(&fep->pdev->dev); pinctrl_pm_select_sleep_state(&fep->pdev->dev); return ret; @@ -3628,8 +3610,9 @@ static int fec_enet_close(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phy_dev = ndev->phydev; - phy_stop(ndev->phydev); + phy_stop(phy_dev); if (netif_device_present(ndev)) { napi_disable(&fep->napi); @@ -3637,7 +3620,10 @@ fec_enet_close(struct net_device *ndev) fec_stop(ndev); } - phy_disconnect(ndev->phydev); + phy_disconnect(phy_dev); + + if (!fep->phy_node && phy_is_pseudo_fixed_link(phy_dev)) + fixed_phy_unregister(phy_dev); if (fep->quirks & FEC_QUIRK_ERR006687) imx6q_cpuidle_fec_irqs_unused(); @@ -3649,7 +3635,6 @@ fec_enet_close(struct net_device *ndev) cpu_latency_qos_remove_request(&fep->pm_qos_req); pinctrl_pm_select_sleep_state(&fep->pdev->dev); - pm_runtime_mark_last_busy(&fep->pdev->dev); pm_runtime_put_autosuspend(&fep->pdev->dev); fec_enet_free_buffers(ndev); @@ -4100,10 +4085,8 @@ static int fec_enet_init(struct net_device *ndev) WARN_ON(dsize != (1 << dsize_log2)); #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) - fep->rx_align = 0xf; fep->tx_align = 0xf; #else - fep->rx_align = 0x3; fep->tx_align = 0x3; #endif fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT; @@ -4192,10 +4175,8 @@ static int fec_enet_init(struct net_device *ndev) fep->csum_flags |= FLAG_RX_CSUM_ENABLED; } - if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { + if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) fep->tx_align = 0; - fep->rx_align = 0x3f; - } ndev->hw_features = ndev->features; @@ -4413,11 +4394,9 @@ fec_probe(struct platform_device *pdev) fep->num_rx_queues = num_rx_qs; fep->num_tx_queues = num_tx_qs; -#if !defined(CONFIG_M5272) /* default enable pause frame auto negotiation */ if (fep->quirks & FEC_QUIRK_HAS_GBIT) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; -#endif /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); @@ -4618,7 +4597,6 @@ fec_probe(struct platform_device *pdev) INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); return 0; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 0291093f2e4e..c84f0336c94c 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -649,6 +649,7 @@ static u32 memac_if_mode(phy_interface_t interface) return IF_MODE_GMII | IF_MODE_RGMII; case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: case PHY_INTERFACE_MODE_QSGMII: return IF_MODE_GMII; case PHY_INTERFACE_MODE_10GBASER: @@ -667,6 +668,7 @@ static struct phylink_pcs *memac_select_pcs(struct phylink_config *config, switch (iface) { case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: return memac->sgmii_pcs; case PHY_INTERFACE_MODE_QSGMII: return memac->qsgmii_pcs; @@ -685,6 +687,7 @@ static int memac_prepare(struct phylink_config *config, unsigned int mode, switch (iface) { case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: case PHY_INTERFACE_MODE_QSGMII: 
case PHY_INTERFACE_MODE_10GBASER: return phy_set_mode_ext(memac->serdes, PHY_MODE_ETHERNET, @@ -897,6 +900,89 @@ static int memac_set_exception(struct fman_mac *memac, return 0; } +static u64 memac_read64(void __iomem *reg) +{ + u32 low, high, tmp; + + do { + high = ioread32be(reg + 4); + low = ioread32be(reg); + tmp = ioread32be(reg + 4); + } while (high != tmp); + + return ((u64)high << 32) | low; +} + +static void memac_get_pause_stats(struct fman_mac *memac, + struct ethtool_pause_stats *s) +{ + s->tx_pause_frames = memac_read64(&memac->regs->txpf_l); + s->rx_pause_frames = memac_read64(&memac->regs->rxpf_l); +} + +static const struct ethtool_rmon_hist_range memac_rmon_ranges[] = { + { 64, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 9600 }, + {}, +}; + +static void memac_get_rmon_stats(struct fman_mac *memac, + struct ethtool_rmon_stats *s, + const struct ethtool_rmon_hist_range **ranges) +{ + s->undersize_pkts = memac_read64(&memac->regs->rund_l); + s->oversize_pkts = memac_read64(&memac->regs->rovr_l); + s->fragments = memac_read64(&memac->regs->rfrg_l); + s->jabbers = memac_read64(&memac->regs->rjbr_l); + + s->hist[0] = memac_read64(&memac->regs->r64_l); + s->hist[1] = memac_read64(&memac->regs->r127_l); + s->hist[2] = memac_read64(&memac->regs->r255_l); + s->hist[3] = memac_read64(&memac->regs->r511_l); + s->hist[4] = memac_read64(&memac->regs->r1023_l); + s->hist[5] = memac_read64(&memac->regs->r1518_l); + s->hist[6] = memac_read64(&memac->regs->r1519x_l); + + s->hist_tx[0] = memac_read64(&memac->regs->t64_l); + s->hist_tx[1] = memac_read64(&memac->regs->t127_l); + s->hist_tx[2] = memac_read64(&memac->regs->t255_l); + s->hist_tx[3] = memac_read64(&memac->regs->t511_l); + s->hist_tx[4] = memac_read64(&memac->regs->t1023_l); + s->hist_tx[5] = memac_read64(&memac->regs->t1518_l); + s->hist_tx[6] = memac_read64(&memac->regs->t1519x_l); + + *ranges = memac_rmon_ranges; +} + +static void memac_get_eth_ctrl_stats(struct fman_mac *memac, + struct ethtool_eth_ctrl_stats *s) +{ + s->MACControlFramesTransmitted = memac_read64(&memac->regs->tcnp_l); + s->MACControlFramesReceived = memac_read64(&memac->regs->rcnp_l); +} + +static void memac_get_eth_mac_stats(struct fman_mac *memac, + struct ethtool_eth_mac_stats *s) +{ + s->FramesTransmittedOK = memac_read64(&memac->regs->tfrm_l); + s->FramesReceivedOK = memac_read64(&memac->regs->rfrm_l); + s->FrameCheckSequenceErrors = memac_read64(&memac->regs->rfcs_l); + s->AlignmentErrors = memac_read64(&memac->regs->raln_l); + s->OctetsTransmittedOK = memac_read64(&memac->regs->teoct_l); + s->FramesLostDueToIntMACXmitError = memac_read64(&memac->regs->terr_l); + s->OctetsReceivedOK = memac_read64(&memac->regs->reoct_l); + s->FramesLostDueToIntMACRcvError = memac_read64(&memac->regs->rdrntp_l); + s->MulticastFramesXmittedOK = memac_read64(&memac->regs->tmca_l); + s->BroadcastFramesXmittedOK = memac_read64(&memac->regs->tbca_l); + s->MulticastFramesReceivedOK = memac_read64(&memac->regs->rmca_l); + s->BroadcastFramesReceivedOK = memac_read64(&memac->regs->rbca_l); +} + static int memac_init(struct fman_mac *memac) { struct memac_cfg *memac_drv_param; @@ -1089,6 +1175,10 @@ int memac_initialization(struct mac_device *mac_dev, mac_dev->set_tstamp = memac_set_tstamp; mac_dev->enable = memac_enable; mac_dev->disable = memac_disable; + mac_dev->get_pause_stats = memac_get_pause_stats; + mac_dev->get_rmon_stats = memac_get_rmon_stats; + mac_dev->get_eth_ctrl_stats = memac_get_eth_ctrl_stats; + 
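memac_read64() above implements the classic split-counter read: a 64-bit statistic lives in two 32-bit registers, so the high word is sampled, then the low word, then the high word again, retrying until both high samples match so a carry between the two reads cannot tear the value. A self-contained sketch of the same idiom (register layout is an assumption):

#include <linux/io.h>
#include <linux/types.h>

/* Read a free-running 64-bit counter exposed as two 32-bit halves. */
static u64 read_split_counter(void __iomem *lo_reg, void __iomem *hi_reg)
{
	u32 hi, lo, hi2;

	do {
		hi = ioread32(hi_reg);	/* sample high word */
		lo = ioread32(lo_reg);	/* sample low word */
		hi2 = ioread32(hi_reg);	/* re-check for a carry */
	} while (hi != hi2);		/* low word wrapped: retry */

	return ((u64)hi << 32) | lo;
}

The loop almost never iterates more than once; it only retries in the rare window where the low word wraps between the two high-word reads.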
mac_dev->get_eth_mac_stats = memac_get_eth_mac_stats; mac_dev->fman_mac = memac_config(mac_dev, params); if (!mac_dev->fman_mac) @@ -1226,6 +1316,7 @@ int memac_initialization(struct mac_device *mac_dev, * those configuration modes don't use in-band autonegotiation. */ if (!of_property_present(mac_node, "managed") && + mac_dev->phy_if != PHY_INTERFACE_MODE_2500BASEX && mac_dev->phy_if != PHY_INTERFACE_MODE_MII && !phy_interface_mode_is_rgmii(mac_dev->phy_if)) mac_dev->phylink_config.default_an_inband = true; diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index 955ace338965..63c2c5b4f99e 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -16,6 +16,11 @@ #include "fman.h" #include "fman_mac.h" +struct ethtool_eth_ctrl_stats; +struct ethtool_eth_mac_stats; +struct ethtool_pause_stats; +struct ethtool_rmon_stats; +struct ethtool_rmon_hist_range; struct fman_mac; struct mac_priv_s; @@ -46,6 +51,15 @@ struct mac_device { enet_addr_t *eth_addr); int (*remove_hash_mac_addr)(struct fman_mac *mac_dev, enet_addr_t *eth_addr); + void (*get_pause_stats)(struct fman_mac *memac, + struct ethtool_pause_stats *s); + void (*get_rmon_stats)(struct fman_mac *memac, + struct ethtool_rmon_stats *s, + const struct ethtool_rmon_hist_range **ranges); + void (*get_eth_ctrl_stats)(struct fman_mac *memac, + struct ethtool_eth_ctrl_stats *s); + void (*get_eth_mac_stats)(struct fman_mac *memac, + struct ethtool_eth_mac_stats *s); void (*update_speed)(struct mac_device *mac_dev, int speed); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 5fd1f7327680..6fa752d3b60d 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1431,6 +1431,13 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static u32 gfar_get_rx_ring_count(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + return priv->num_rx_queues; +} + static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { @@ -1438,9 +1445,6 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, int ret = 0; switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = priv->num_rx_queues; - break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = priv->rx_list.count; break; @@ -1519,6 +1523,7 @@ const struct ethtool_ops gfar_ethtool_ops = { #endif .set_rxnfc = gfar_set_nfc, .get_rxnfc = gfar_get_nfc, + .get_rx_ring_count = gfar_get_rx_ring_count, .set_rxfh_fields = gfar_set_rxfh_fields, .get_ts_info = gfar_get_ts_info, .get_link_ksettings = phy_ethtool_get_link_ksettings, diff --git a/drivers/net/ethernet/fungible/funeth/funeth.h b/drivers/net/ethernet/fungible/funeth/funeth.h index 1250e10d21db..55e705e239f8 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth.h +++ b/drivers/net/ethernet/fungible/funeth/funeth.h @@ -4,7 +4,7 @@ #define _FUNETH_H #include <uapi/linux/if_ether.h> -#include <uapi/linux/net_tstamp.h> +#include <linux/net_tstamp.h> #include <linux/mutex.h> #include <linux/seqlock.h> #include <linux/xarray.h> @@ -121,7 +121,7 @@ struct funeth_priv { u8 rx_coal_usec; u8 rx_coal_count; - struct hwtstamp_config hwtstamp_cfg; + struct kernel_hwtstamp_config hwtstamp_cfg; /* cumulative queue stats from earlier queue instances */ u64 tx_packets; diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c 
b/drivers/net/ethernet/fungible/funeth/funeth_main.c index ac86179a0a81..792cddac6f1b 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_main.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c @@ -1014,26 +1014,25 @@ static int fun_get_port_attributes(struct net_device *netdev) return 0; } -static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +static int fun_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) { const struct funeth_priv *fp = netdev_priv(dev); - return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg, - sizeof(fp->hwtstamp_cfg)) ? -EFAULT : 0; + *config = fp->hwtstamp_cfg; + return 0; } -static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +static int fun_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct funeth_priv *fp = netdev_priv(dev); - struct hwtstamp_config cfg; - - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; /* no TX HW timestamps */ - cfg.tx_type = HWTSTAMP_TX_OFF; + config->tx_type = HWTSTAMP_TX_OFF; - switch (cfg.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_ALL: @@ -1051,26 +1050,14 @@ static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - cfg.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } - fp->hwtstamp_cfg = cfg; - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; -} - -static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCSHWTSTAMP: - return fun_hwtstamp_set(dev, ifr); - case SIOCGHWTSTAMP: - return fun_hwtstamp_get(dev, ifr); - default: - return -EOPNOTSUPP; - } + fp->hwtstamp_cfg = *config; + return 0; } /* Prepare the queues for XDP. */ @@ -1340,7 +1327,6 @@ static const struct net_device_ops fun_netdev_ops = { .ndo_change_mtu = fun_change_mtu, .ndo_set_mac_address = fun_set_macaddr, .ndo_validate_addr = eth_validate_addr, - .ndo_eth_ioctl = fun_ioctl, .ndo_uninit = fun_uninit, .ndo_bpf = fun_xdp, .ndo_xdp_xmit = fun_xdp_xmit_frames, @@ -1348,6 +1334,8 @@ static const struct net_device_ops fun_netdev_ops = { .ndo_set_vf_vlan = fun_set_vf_vlan, .ndo_set_vf_rate = fun_set_vf_rate, .ndo_get_vf_config = fun_get_vf_config, + .ndo_hwtstamp_get = fun_hwtstamp_get, + .ndo_hwtstamp_set = fun_hwtstamp_set, }; #define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \ diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 4cc6dcbfd367..970d5ca8cdde 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -59,8 +59,6 @@ #define GVE_DEFAULT_RX_BUFFER_SIZE 2048 -#define GVE_MAX_RX_BUFFER_SIZE 4096 - #define GVE_XDP_RX_BUFFER_SIZE_DQO 4096 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048 @@ -207,6 +205,13 @@ struct gve_rx_buf_state_dqo { s16 next; }; +/* Wrapper for XDP Rx metadata */ +struct gve_xdp_buff { + struct xdp_buff xdp; + struct gve_priv *gve; + const struct gve_rx_compl_desc_dqo *compl_desc; +}; + /* `head` and `tail` are indices into an array, or -1 if empty. 
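The funeth conversion above shows the general shape of the dedicated hwtstamp callbacks: the core marshals struct kernel_hwtstamp_config to and from user space, so the driver only validates and stores it. A minimal sketch for a hypothetical driver that timestamps all RX packets and no TX packets (struct my_priv and its hwtstamp_cfg field are assumptions):

static int my_hwtstamp_get(struct net_device *dev,
			   struct kernel_hwtstamp_config *config)
{
	struct my_priv *fp = netdev_priv(dev);

	*config = fp->hwtstamp_cfg;
	return 0;
}

static int my_hwtstamp_set(struct net_device *dev,
			   struct kernel_hwtstamp_config *config,
			   struct netlink_ext_ack *extack)
{
	struct my_priv *fp = netdev_priv(dev);

	if (config->tx_type != HWTSTAMP_TX_OFF) {
		NL_SET_ERR_MSG_MOD(extack, "TX timestamping not supported");
		return -ERANGE;
	}

	/* Anything finer than "all packets" is widened to ALL. */
	if (config->rx_filter != HWTSTAMP_FILTER_NONE)
		config->rx_filter = HWTSTAMP_FILTER_ALL;

	fp->hwtstamp_cfg = *config;
	return 0;
}

Wired up through .ndo_hwtstamp_get/.ndo_hwtstamp_set, this removes the copy_from_user()/copy_to_user() boilerplate and the SIOC[GS]HWTSTAMP ioctl dispatch entirely.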
*/ struct gve_index_list { s16 head; @@ -1169,6 +1174,12 @@ static inline bool gve_is_gqi(struct gve_priv *priv) priv->queue_format == GVE_GQI_QPL_FORMAT; } +static inline bool gve_is_dqo(struct gve_priv *priv) +{ + return priv->queue_format == GVE_DQO_RDA_FORMAT || + priv->queue_format == GVE_DQO_QPL_FORMAT; +} + static inline u32 gve_num_tx_queues(struct gve_priv *priv) { return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues; @@ -1249,9 +1260,12 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv, struct gve_rx_alloc_rings_cfg *cfg); void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx); void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx); -u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit); bool gve_header_split_supported(const struct gve_priv *priv); -int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split); +int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len, + struct netlink_ext_ack *extack, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg); +int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg); /* rx buffer handling */ int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs); void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs, diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 4f33d094a2ef..b72cc0fa2ba2 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -987,6 +987,10 @@ static void gve_enable_supported_features(struct gve_priv *priv, dev_info(&priv->pdev->dev, "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n", priv->max_rx_buffer_size, priv->header_buf_size); + if (gve_is_dqo(priv) && + priv->max_rx_buffer_size > GVE_DEFAULT_RX_BUFFER_SIZE) + priv->rx_cfg.packet_buffer_size = + priv->max_rx_buffer_size; } /* Read and store ring size ranges given by device */ diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h index 6eb442096e02..5871f773f0c7 100644 --- a/drivers/net/ethernet/google/gve/gve_dqo.h +++ b/drivers/net/ethernet/google/gve/gve_dqo.h @@ -36,6 +36,7 @@ netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev); netdev_features_t gve_features_check_dqo(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); +int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp); bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean); bool gve_xdp_poll_dqo(struct gve_notify_block *block); bool gve_xsk_tx_poll_dqo(struct gve_notify_block *block, int budget); diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index d0a223250845..52500ae8348e 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -529,6 +529,8 @@ static void gve_get_ringparam(struct net_device *netdev, cmd->rx_pending = priv->rx_desc_cnt; cmd->tx_pending = priv->tx_desc_cnt; + kernel_cmd->rx_buf_len = priv->rx_cfg.packet_buffer_size; + if (!gve_header_split_supported(priv)) kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN; else if (priv->header_split_enabled) @@ -537,34 +539,6 @@ static void gve_get_ringparam(struct net_device *netdev, kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED; } -static int gve_adjust_ring_sizes(struct gve_priv *priv, - u16 new_tx_desc_cnt, - u16 
new_rx_desc_cnt) -{ - struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; - struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; - int err; - - /* get current queue configuration */ - gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); - - /* copy over the new ring_size from ethtool */ - tx_alloc_cfg.ring_size = new_tx_desc_cnt; - rx_alloc_cfg.ring_size = new_rx_desc_cnt; - - if (netif_running(priv->dev)) { - err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); - if (err) - return err; - } - - /* Set new ring_size for the next up */ - priv->tx_desc_cnt = new_tx_desc_cnt; - priv->rx_desc_cnt = new_rx_desc_cnt; - - return 0; -} - static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt, u16 new_rx_desc_cnt) { @@ -584,34 +558,68 @@ static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt return 0; } +static int gve_set_ring_sizes_config(struct gve_priv *priv, u16 new_tx_desc_cnt, + u16 new_rx_desc_cnt, + struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) +{ + if (new_tx_desc_cnt == priv->tx_desc_cnt && + new_rx_desc_cnt == priv->rx_desc_cnt) + return 0; + + if (!priv->modify_ring_size_enabled) { + dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n"); + return -EOPNOTSUPP; + } + + if (gve_validate_req_ring_size(priv, new_tx_desc_cnt, new_rx_desc_cnt)) + return -EINVAL; + + tx_alloc_cfg->ring_size = new_tx_desc_cnt; + rx_alloc_cfg->ring_size = new_rx_desc_cnt; + return 0; +} + static int gve_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *cmd, struct kernel_ethtool_ringparam *kernel_cmd, struct netlink_ext_ack *extack) { + struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; + struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; struct gve_priv *priv = netdev_priv(netdev); - u16 new_tx_cnt, new_rx_cnt; int err; - err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split); + gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); + + err = gve_set_rx_buf_len_config(priv, kernel_cmd->rx_buf_len, extack, + &rx_alloc_cfg); if (err) return err; - if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt) - return 0; - - if (!priv->modify_ring_size_enabled) { - dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n"); - return -EOPNOTSUPP; - } - - new_tx_cnt = cmd->tx_pending; - new_rx_cnt = cmd->rx_pending; + err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split, + &rx_alloc_cfg); + if (err) + return err; - if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt)) - return -EINVAL; + err = gve_set_ring_sizes_config(priv, cmd->tx_pending, cmd->rx_pending, + &tx_alloc_cfg, &rx_alloc_cfg); + if (err) + return err; - return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt); + if (netif_running(priv->dev)) { + err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); + if (err) + return err; + } else { + /* Set ring params for the next up */ + priv->rx_cfg.packet_buffer_size = + rx_alloc_cfg.packet_buffer_size; + priv->header_split_enabled = rx_alloc_cfg.enable_header_split; + priv->tx_desc_cnt = tx_alloc_cfg.ring_size; + priv->rx_desc_cnt = rx_alloc_cfg.ring_size; + } + return 0; } static int gve_user_reset(struct net_device *netdev, u32 *flags) @@ -946,7 +954,8 @@ static int gve_get_ts_info(struct net_device *netdev, const struct ethtool_ops gve_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, - .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, + .supported_ring_params = 
ETHTOOL_RING_USE_TCP_DATA_SPLIT | + ETHTOOL_RING_USE_RX_BUF_LEN, .get_drvinfo = gve_get_drvinfo, .get_strings = gve_get_strings, .get_sset_count = gve_get_sset_count, diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 1be1b1ef31ee..a5a2b18d309b 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1707,18 +1707,28 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) return 0; } -static int verify_xdp_configuration(struct net_device *dev) +static int gve_verify_xdp_configuration(struct net_device *dev, + struct netlink_ext_ack *extack) { struct gve_priv *priv = netdev_priv(dev); u16 max_xdp_mtu; if (dev->features & NETIF_F_LRO) { - netdev_warn(dev, "XDP is not supported when LRO is on.\n"); + NL_SET_ERR_MSG_MOD(extack, + "XDP is not supported when LRO is on."); return -EOPNOTSUPP; } if (priv->header_split_enabled) { - netdev_warn(dev, "XDP is not supported when header-data split is enabled.\n"); + NL_SET_ERR_MSG_MOD(extack, + "XDP is not supported when header-data split is enabled."); + return -EOPNOTSUPP; + } + + if (priv->rx_cfg.packet_buffer_size != SZ_2K) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "XDP is not supported for Rx buf len %d, only %d supported.", + priv->rx_cfg.packet_buffer_size, SZ_2K); return -EOPNOTSUPP; } @@ -1727,17 +1737,20 @@ static int verify_xdp_configuration(struct net_device *dev) max_xdp_mtu -= GVE_RX_PAD; if (dev->mtu > max_xdp_mtu) { - netdev_warn(dev, "XDP is not supported for mtu %d.\n", - dev->mtu); + NL_SET_ERR_MSG_FMT_MOD(extack, + "XDP is not supported for mtu %d.", + dev->mtu); return -EOPNOTSUPP; } if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues || (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) { - netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d", - priv->rx_cfg.num_queues, - priv->tx_cfg.num_queues, + netdev_warn(dev, + "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d.", + priv->rx_cfg.num_queues, priv->tx_cfg.num_queues, priv->tx_cfg.max_queues); + NL_SET_ERR_MSG_MOD(extack, + "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues"); return -EINVAL; } return 0; @@ -1748,7 +1761,7 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) struct gve_priv *priv = netdev_priv(dev); int err; - err = verify_xdp_configuration(dev); + err = gve_verify_xdp_configuration(dev, xdp->extack); if (err) return err; switch (xdp->command) { @@ -2041,14 +2054,6 @@ static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue) priv->tx_timeo_cnt++; } -u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit) -{ - if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE) - return GVE_MAX_RX_BUFFER_SIZE; - else - return GVE_DEFAULT_RX_BUFFER_SIZE; -} - /* Header split is only supported on DQ RDA queue format. If XDP is enabled, * header split is not allowed. 
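gve_verify_xdp_configuration() above switches from netdev_warn() to extack messages, which travel back to the netlink caller (and are printed by tools such as iproute2) instead of landing only in the kernel log. The pattern in isolation, with an illustrative helper and limit:

static int my_validate_xdp(struct net_device *dev, int max_mtu,
			   struct netlink_ext_ack *extack)
{
	if (dev->features & NETIF_F_LRO) {
		NL_SET_ERR_MSG_MOD(extack, "XDP is not supported with LRO");
		return -EOPNOTSUPP;
	}

	if (dev->mtu > max_mtu) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "XDP not supported for MTU %u (max %d)",
				       dev->mtu, max_mtu);
		return -EOPNOTSUPP;
	}

	return 0;
}

NL_SET_ERR_MSG_FMT_MOD allows formatted messages; the plain variant takes a constant string only.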
*/ @@ -2058,12 +2063,42 @@ bool gve_header_split_supported(const struct gve_priv *priv) priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog; } -int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split) +int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len, + struct netlink_ext_ack *extack, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) +{ + u32 old_rx_buf_len = rx_alloc_cfg->packet_buffer_size; + + if (rx_buf_len == old_rx_buf_len) + return 0; + + /* device options may not always contain support for 4K buffers */ + if (!gve_is_dqo(priv) || priv->max_rx_buffer_size < SZ_4K) { + NL_SET_ERR_MSG_MOD(extack, + "Modifying Rx buf len is not supported"); + return -EOPNOTSUPP; + } + + if (priv->xdp_prog && rx_buf_len != SZ_2K) { + NL_SET_ERR_MSG_MOD(extack, + "Rx buf len can only be 2048 when XDP is on"); + return -EINVAL; + } + + if (rx_buf_len != SZ_2K && rx_buf_len != SZ_4K) { + NL_SET_ERR_MSG_MOD(extack, + "Rx buf len can only be 2048 or 4096"); + return -EINVAL; + } + rx_alloc_cfg->packet_buffer_size = rx_buf_len; + + return 0; +} + +int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) { - struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; - struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; bool enable_hdr_split; - int err = 0; if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN) return 0; @@ -2081,14 +2116,9 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split) if (enable_hdr_split == priv->header_split_enabled) return 0; - gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); - - rx_alloc_cfg.enable_header_split = enable_hdr_split; - rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split); + rx_alloc_cfg->enable_header_split = enable_hdr_split; - if (netif_running(priv->dev)) - err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); - return err; + return 0; } static int gve_set_features(struct net_device *netdev, @@ -2158,10 +2188,6 @@ static int gve_set_ts_config(struct net_device *dev, } kernel_config->rx_filter = HWTSTAMP_FILTER_ALL; - gve_clock_nic_ts_read(priv); - ptp_schedule_worker(priv->ptp->clock, 0); - } else { - ptp_cancel_worker_sync(priv->ptp->clock); } priv->ts_config.rx_filter = kernel_config->rx_filter; @@ -2322,6 +2348,10 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv) xdp_set_features_flag_locked(priv->dev, xdp_features); } +static const struct xdp_metadata_ops gve_xdp_metadata_ops = { + .xmo_rx_timestamp = gve_xdp_rx_timestamp, +}; + static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) { int num_ntfy; @@ -2417,6 +2447,9 @@ setup_device: } gve_set_netdev_xdp_features(priv); + if (!gve_is_gqi(priv)) + priv->dev->xdp_metadata_ops = &gve_xdp_metadata_ops; + err = gve_setup_device_resources(priv); if (err) goto err_free_xsk_bitmap; diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c index a384a9ed4914..073677d82ee8 100644 --- a/drivers/net/ethernet/google/gve/gve_ptp.c +++ b/drivers/net/ethernet/google/gve/gve_ptp.c @@ -133,9 +133,21 @@ int gve_init_clock(struct gve_priv *priv) err = -ENOMEM; goto release_ptp; } + err = gve_clock_nic_ts_read(priv); + if (err) { + dev_err(&priv->pdev->dev, "failed to read NIC clock %d\n", err); + goto release_nic_ts_report; + } + ptp_schedule_worker(priv->ptp->clock, + msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS)); return 0; +release_nic_ts_report: + dma_free_coherent(&priv->pdev->dev, + sizeof(struct 
gve_nic_ts_report), + priv->nic_ts_report, priv->nic_ts_report_bus); + priv->nic_ts_report = NULL; release_ptp: gve_ptp_release(priv); return err; } diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index 1aff3bbb8cfc..f1bd8f5d5732 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -240,6 +240,11 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv, rx->rx_headroom = 0; } + /* struct gve_xdp_buff is overlaid on struct xdp_buff_xsk and utilizes + * the 24-byte cb field to store gve-specific data. + */ + XSK_CHECK_PRIV_TYPE(struct gve_xdp_buff); + rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots : gve_get_rx_pages_per_qpl_dqo(cfg->ring_size); rx->dqo.buf_states = kvcalloc_node(rx->dqo.num_buf_states, @@ -456,20 +461,38 @@ static void gve_rx_skb_hash(struct sk_buff *skb, * Note that this means if the time delta between packet reception and the last * clock read is greater than ~2 seconds, this will provide invalid results. */ +static ktime_t gve_rx_get_hwtstamp(struct gve_priv *gve, u32 hwts) +{ + u64 last_read = READ_ONCE(gve->last_sync_nic_counter); + u32 low = (u32)last_read; + s32 diff = hwts - low; + + return ns_to_ktime(last_read + diff); +} + static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx, const struct gve_rx_compl_desc_dqo *desc) { - u64 last_read = READ_ONCE(rx->gve->last_sync_nic_counter); struct sk_buff *skb = rx->ctx.skb_head; - u32 ts, low; - s32 diff; - - if (desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID) { - ts = le32_to_cpu(desc->ts); - low = (u32)last_read; - diff = ts - low; - skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff); - } + + if (desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID) + skb_hwtstamps(skb)->hwtstamp = + gve_rx_get_hwtstamp(rx->gve, le32_to_cpu(desc->ts)); +} + +int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) +{ + const struct gve_xdp_buff *ctx = (void *)_ctx; + + if (!ctx->gve->nic_ts_report) + return -ENODATA; + + if (!(ctx->compl_desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID)) + return -ENODATA; + + *timestamp = gve_rx_get_hwtstamp(ctx->gve, + le32_to_cpu(ctx->compl_desc->ts)); + return 0; } static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx) @@ -683,16 +706,23 @@ err: } static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, - struct gve_rx_buf_state_dqo *buf_state, int buf_len, + const struct gve_rx_compl_desc_dqo *compl_desc, + struct gve_rx_buf_state_dqo *buf_state, struct bpf_prog *xprog) { struct xdp_buff *xdp = buf_state->xsk_buff; + int buf_len = compl_desc->packet_len; struct gve_priv *priv = rx->gve; + struct gve_xdp_buff *gve_xdp; int xdp_act; xdp->data_end = xdp->data + buf_len; xsk_buff_dma_sync_for_cpu(xdp); + gve_xdp = (void *)xdp; + gve_xdp->gve = priv; + gve_xdp->compl_desc = compl_desc; + if (xprog) { xdp_act = bpf_prog_run_xdp(xprog, xdp); buf_len = xdp->data_end - xdp->data; @@ -782,7 +812,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, xprog = READ_ONCE(priv->xdp_prog); if (buf_state->xsk_buff) - return gve_rx_xsk_dqo(napi, rx, buf_state, buf_len, xprog); + return gve_rx_xsk_dqo(napi, rx, compl_desc, buf_state, xprog); /* Page might not have been used for a while and was likely last written * by a different thread. 
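Two techniques meet in gve_xdp_rx_timestamp() above. First, the cast from xdp_md back to the driver wrapper works because struct gve_xdp_buff embeds the xdp_buff as its first member, and XSK_CHECK_PRIV_TYPE verifies the wrapper still fits the xsk cb area. Second, gve_rx_get_hwtstamp() widens a 32-bit hardware timestamp against a recent 64-bit reference using a signed delta. The arithmetic in isolation:

/* Widen a 32-bit hw timestamp against a 64-bit reference counter.
 * The signed 32-bit delta handles wrap in either direction, as long
 * as the reference was read within ~2^31 ns (about 2 seconds).
 */
static u64 widen_hwts(u64 ref_ns, u32 hwts)
{
	s32 delta = hwts - (u32)ref_ns;

	return ref_ns + delta;
}

For example, with ref_ns = 0x100000010 and hwts = 0x8, delta is -8 and the result is 0x100000008: a timestamp slightly older than the reference is still recovered correctly.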
@@ -840,23 +870,26 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, } if (xprog) { - struct xdp_buff xdp; + struct gve_xdp_buff gve_xdp; void *old_data; int xdp_act; - xdp_init_buff(&xdp, buf_state->page_info.buf_size, + xdp_init_buff(&gve_xdp.xdp, buf_state->page_info.buf_size, &rx->xdp_rxq); - xdp_prepare_buff(&xdp, + xdp_prepare_buff(&gve_xdp.xdp, buf_state->page_info.page_address + buf_state->page_info.page_offset, buf_state->page_info.pad, buf_len, false); - old_data = xdp.data; - xdp_act = bpf_prog_run_xdp(xprog, &xdp); - buf_state->page_info.pad += xdp.data - old_data; - buf_len = xdp.data_end - xdp.data; + gve_xdp.gve = priv; + gve_xdp.compl_desc = compl_desc; + + old_data = gve_xdp.xdp.data; + xdp_act = bpf_prog_run_xdp(xprog, &gve_xdp.xdp); + buf_state->page_info.pad += gve_xdp.xdp.data - old_data; + buf_len = gve_xdp.xdp.data_end - gve_xdp.xdp.data; if (xdp_act != XDP_PASS) { - gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act, + gve_xdp_done_dqo(priv, rx, &gve_xdp.xdp, xprog, xdp_act, buf_state); return 0; } diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index c6ff0968929d..97efc8d27e6f 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -730,7 +730,9 @@ unmap_drop: gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]); } drop: + u64_stats_update_begin(&tx->statss); tx->dropped_pkt++; + u64_stats_update_end(&tx->statss); return 0; } diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index 6f1d515673d2..40b89b3e5a31 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -1002,7 +1002,9 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx, return 0; drop: + u64_stats_update_begin(&tx->statss); tx->dropped_pkt++; + u64_stats_update_end(&tx->statss); dev_kfree_skb_any(skb); return 0; } @@ -1324,7 +1326,11 @@ static void remove_miss_completions(struct gve_priv *priv, /* This indicates the packet was dropped. */ dev_kfree_skb_any(pending_packet->skb); pending_packet->skb = NULL; + + u64_stats_update_begin(&tx->statss); tx->dropped_pkt++; + u64_stats_update_end(&tx->statss); + net_err_ratelimited("%s: No reinjection completion was received for: %d.\n", priv->dev->name, (int)(pending_packet - tx->dqo.pending_packets)); diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 38875c196cb6..18eca7d12c20 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -151,6 +151,7 @@ config HIBMCGE select FIXED_PHY select MOTORCOMM_PHY select REALTEK_PHY + select PAGE_POOL help If you wish to compile a kernel for a BMC with HIBMC-xx_gmac then you should answer Y to this. This makes this driver suitable for use diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile index 1a9da564b306..d6610ba16855 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile +++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile @@ -3,6 +3,7 @@ # Makefile for the HISILICON BMC GE network device drivers. 
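The gve changes above bracket each dropped_pkt increment with u64_stats_update_begin()/end() so that readers on 32-bit kernels cannot observe a torn 64-bit value. The matching reader side looks like this (struct my_ring is an assumption):

#include <linux/u64_stats_sync.h>

struct my_ring {
	u64 dropped_pkt;
	struct u64_stats_sync statss;
};

static u64 my_read_dropped(const struct my_ring *ring)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&ring->statss);
		val = ring->dropped_pkt;
	} while (u64_stats_fetch_retry(&ring->statss, start));

	return val;
}

On 64-bit kernels the sync collapses to nothing; on 32-bit it is a seqcount, which is why writer-side begin/end around a simple increment is not dead code.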
# +ccflags-y += -I$(src) obj-$(CONFIG_HIBMCGE) += hibmcge.o hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \ diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h index 2097e4c2b3d7..8e134da3e217 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h @@ -7,6 +7,7 @@ #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/pci.h> +#include <net/page_pool/helpers.h> #include "hbg_reg.h" #define HBG_STATUS_DISABLE 0x0 @@ -55,6 +56,12 @@ struct hbg_buffer { dma_addr_t skb_dma; u32 skb_len; + struct page *page; + void *page_addr; + dma_addr_t page_dma; + u32 page_size; + u32 page_offset; + enum hbg_dir dir; struct hbg_ring *ring; struct hbg_priv *priv; @@ -78,6 +85,7 @@ struct hbg_ring { struct hbg_priv *priv; struct napi_struct napi; char *tout_log_buf; /* tx timeout log buffer */ + struct page_pool *page_pool; /* only for rx */ }; enum hbg_hw_event_type { diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c index 0b92a2e5e986..068da2fd1fea 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -472,6 +472,22 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; } +static void hbg_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + rtnl_lock(); + if (netif_running(netdev)) + dev_close(netdev); + rtnl_unlock(); + + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + if (system_state == SYSTEM_POWER_OFF) + pci_set_power_state(pdev, PCI_D3hot); +} + static const struct pci_device_id hbg_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, 0x3730), 0}, { } @@ -482,6 +498,7 @@ static struct pci_driver hbg_driver = { .name = "hibmcge", .id_table = hbg_pci_tbl, .probe = hbg_probe, + .shutdown = hbg_shutdown, }; static int __init hbg_module_init(void) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h index a39d1e796e4a..30b3903c8f2d 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h @@ -252,6 +252,8 @@ struct hbg_rx_desc { #define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16) #define HBG_RX_DESC_W2_PORT_NUM_M GENMASK(15, 12) +#define HBG_RX_DESC_W3_IP_OFFSET_M GENMASK(23, 16) +#define HBG_RX_DESC_W3_VLAN_M GENMASK(15, 0) #define HBG_RX_DESC_W4_IP_TCP_UDP_M GENMASK(31, 30) #define HBG_RX_DESC_W4_IPSEC_B BIT(29) #define HBG_RX_DESC_W4_IP_VERSION_B BIT(28) @@ -269,6 +271,8 @@ struct hbg_rx_desc { #define HBG_RX_DESC_W4_L3_ERR_CODE_M GENMASK(12, 9) #define HBG_RX_DESC_W4_L2_ERR_B BIT(8) #define HBG_RX_DESC_W4_IDX_MATCH_B BIT(7) +#define HBG_RX_DESC_W4_PARSE_MODE_M GENMASK(6, 5) +#define HBG_RX_DESC_W5_VALID_SIZE_M GENMASK(15, 0) enum hbg_l3_err_code { HBG_L3_OK = 0, diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h new file mode 100644 index 000000000000..b70fd960da8d --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 Hisilicon Limited. 
*/ + +/* This must be outside ifdef _HBG_TRACE_H */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hibmcge + +#if !defined(_HBG_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _HBG_TRACE_H_ + +#include <linux/bitfield.h> +#include <linux/pci.h> +#include <linux/tracepoint.h> +#include <linux/types.h> +#include "hbg_reg.h" + +TRACE_EVENT(hbg_rx_desc, + TP_PROTO(struct hbg_priv *priv, u32 index, + struct hbg_rx_desc *rx_desc), + TP_ARGS(priv, index, rx_desc), + + TP_STRUCT__entry(__field(u32, index) + __field(u8, port_num) + __field(u8, ip_offset) + __field(u8, parse_mode) + __field(u8, l4_error_code) + __field(u8, l3_error_code) + __field(u8, l2_error_code) + __field(u16, packet_len) + __field(u16, valid_size) + __field(u16, vlan) + __string(pciname, pci_name(priv->pdev)) + __string(devname, priv->netdev->name) + ), + + TP_fast_assign(__entry->index = index, + __entry->packet_len = + FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, + rx_desc->word2); + __entry->port_num = + FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M, + rx_desc->word2); + __entry->ip_offset = + FIELD_GET(HBG_RX_DESC_W3_IP_OFFSET_M, + rx_desc->word3); + __entry->vlan = + FIELD_GET(HBG_RX_DESC_W3_VLAN_M, + rx_desc->word3); + __entry->parse_mode = + FIELD_GET(HBG_RX_DESC_W4_PARSE_MODE_M, + rx_desc->word4); + __entry->l4_error_code = + FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, + rx_desc->word4); + __entry->l3_error_code = + FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, + rx_desc->word4); + __entry->l2_error_code = + FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B, + rx_desc->word4); + __entry->valid_size = + FIELD_GET(HBG_RX_DESC_W5_VALID_SIZE_M, + rx_desc->word5); + __assign_str(pciname); + __assign_str(devname); + ), + + TP_printk("%s %s index:%u, port num:%u, len:%u, valid size:%u, ip_offset:%u, vlan:0x%04x, parse mode:%u, l4_err:0x%x, l3_err:0x%x, l2_err:0x%x", + __get_str(pciname), __get_str(devname), __entry->index, + __entry->port_num, __entry->packet_len, + __entry->valid_size, __entry->ip_offset, __entry->vlan, + __entry->parse_mode, __entry->l4_error_code, + __entry->l3_error_code, __entry->l2_error_code + ) +); + +#endif /* _HBG_TRACE_H_ */ + +/* This must be outside ifdef _HBG_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
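The trailing boilerplate here and just below (TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE, then trace/define_trace.h) lets the trace infrastructure re-read this header with different macro expansions. The consumer side is split in two: exactly one translation unit instantiates the tracepoints, every other user includes the header plainly. A condensed sketch with hypothetical names:

/* In exactly one .c file (hbg_txrx.c does this further down): */
#define CREATE_TRACE_POINTS
#include "my_trace.h"

/* Everywhere else, include it plainly and call the generated hook: */
static void my_handle_rx_desc(struct my_priv *priv, u32 index,
			      struct my_rx_desc *desc)
{
	trace_my_rx_desc(priv, index, desc);	/* static-branch gated */
}

Defining CREATE_TRACE_POINTS in more than one file produces duplicate-symbol errors, which is why the define sits next to a single #include.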
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE hbg_trace +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c index 8d814c8f19ea..a4ea92c31c2f 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c @@ -7,6 +7,9 @@ #include "hbg_reg.h" #include "hbg_txrx.h" +#define CREATE_TRACE_POINTS +#include "hbg_trace.h" + #define netdev_get_tx_ring(netdev) \ (&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring)) @@ -28,6 +31,11 @@ typeof(ring) _ring = (ring); \ _ring->p = hbg_queue_next_prt(_ring->p, _ring); }) +#define hbg_get_page_order(ring) ({ \ + typeof(ring) _ring = (ring); \ + get_order(hbg_spec_max_frame_len(_ring->priv, _ring->dir)); }) +#define hbg_get_page_size(ring) (PAGE_SIZE << hbg_get_page_order((ring))) + #define HBG_TX_STOP_THRS 2 #define HBG_TX_START_THRS (2 * HBG_TX_STOP_THRS) @@ -62,6 +70,43 @@ static void hbg_dma_unmap(struct hbg_buffer *buffer) buffer->skb_dma = 0; } +static void hbg_buffer_free_page(struct hbg_buffer *buffer) +{ + struct hbg_ring *ring = buffer->ring; + + if (unlikely(!buffer->page)) + return; + + page_pool_put_full_page(ring->page_pool, buffer->page, false); + + buffer->page = NULL; + buffer->page_dma = 0; + buffer->page_addr = NULL; + buffer->page_size = 0; + buffer->page_offset = 0; +} + +static int hbg_buffer_alloc_page(struct hbg_buffer *buffer) +{ + struct hbg_ring *ring = buffer->ring; + u32 len = hbg_get_page_size(ring); + u32 offset; + + if (unlikely(!ring->page_pool)) + return 0; + + buffer->page = page_pool_dev_alloc_frag(ring->page_pool, &offset, len); + if (unlikely(!buffer->page)) + return -ENOMEM; + + buffer->page_dma = page_pool_get_dma_addr(buffer->page) + offset; + buffer->page_addr = page_address(buffer->page) + offset; + buffer->page_size = len; + buffer->page_offset = offset; + + return 0; +} + static void hbg_init_tx_desc(struct hbg_buffer *buffer, struct hbg_tx_desc *tx_desc) { @@ -135,24 +180,14 @@ static void hbg_buffer_free_skb(struct hbg_buffer *buffer) buffer->skb = NULL; } -static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer) -{ - u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir); - struct hbg_priv *priv = buffer->priv; - - buffer->skb = netdev_alloc_skb(priv->netdev, len); - if (unlikely(!buffer->skb)) - return -ENOMEM; - - buffer->skb_len = len; - memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE); - return 0; -} - static void hbg_buffer_free(struct hbg_buffer *buffer) { - hbg_dma_unmap(buffer); - hbg_buffer_free_skb(buffer); + if (buffer->skb) { + hbg_dma_unmap(buffer); + return hbg_buffer_free_skb(buffer); + } + + hbg_buffer_free_page(buffer); } static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget) @@ -374,25 +409,44 @@ static int hbg_rx_fill_one_buffer(struct hbg_priv *priv) struct hbg_buffer *buffer; int ret; - if (hbg_queue_is_full(ring->ntc, ring->ntu, ring)) + if (hbg_queue_is_full(ring->ntc, ring->ntu, ring) || + hbg_fifo_is_full(priv, ring->dir)) return 0; buffer = &ring->queue[ring->ntu]; - ret = hbg_buffer_alloc_skb(buffer); + ret = hbg_buffer_alloc_page(buffer); if (unlikely(ret)) return ret; - ret = hbg_dma_map(buffer); - if (unlikely(ret)) { - hbg_buffer_free_skb(buffer); - return ret; - } + memset(buffer->page_addr, 0, HBG_PACKET_HEAD_SIZE); + dma_sync_single_for_device(&priv->pdev->dev, buffer->page_dma, + HBG_PACKET_HEAD_SIZE, DMA_TO_DEVICE); - hbg_hw_fill_buffer(priv, buffer->skb_dma); + hbg_hw_fill_buffer(priv, 
buffer->page_dma); hbg_queue_move_next(ntu, ring); return 0; } +static int hbg_rx_fill_buffers(struct hbg_priv *priv) +{ + u32 remained = hbg_hw_get_fifo_used_num(priv, HBG_DIR_RX); + u32 max_count = priv->dev_specs.rx_fifo_num; + u32 refill_count; + int ret; + + if (unlikely(remained >= max_count)) + return 0; + + refill_count = max_count - remained; + while (refill_count--) { + ret = hbg_rx_fill_one_buffer(priv); + if (unlikely(ret)) + break; + } + + return ret; +} + static bool hbg_sync_data_from_hw(struct hbg_priv *priv, struct hbg_buffer *buffer) { @@ -401,13 +455,29 @@ static bool hbg_sync_data_from_hw(struct hbg_priv *priv, /* make sure HW write desc complete */ dma_rmb(); - dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma, - buffer->skb_len, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(&priv->pdev->dev, buffer->page_dma, + buffer->page_size, DMA_FROM_DEVICE); - rx_desc = (struct hbg_rx_desc *)buffer->skb->data; + rx_desc = (struct hbg_rx_desc *)buffer->page_addr; return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0; } +static int hbg_build_skb(struct hbg_priv *priv, + struct hbg_buffer *buffer, u32 pkt_len) +{ + net_prefetch(buffer->page_addr); + + buffer->skb = napi_build_skb(buffer->page_addr, buffer->page_size); + if (unlikely(!buffer->skb)) + return -ENOMEM; + skb_mark_for_recycle(buffer->skb); + + /* page will be freed together with the skb */ + buffer->page = NULL; + + return 0; +} + static int hbg_napi_rx_poll(struct napi_struct *napi, int budget) { struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi); @@ -417,33 +487,39 @@ static int hbg_napi_rx_poll(struct napi_struct *napi, int budget) u32 packet_done = 0; u32 pkt_len; + hbg_rx_fill_buffers(priv); while (packet_done < budget) { if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring))) break; buffer = &ring->queue[ring->ntc]; - if (unlikely(!buffer->skb)) + if (unlikely(!buffer->page)) goto next_buffer; if (unlikely(!hbg_sync_data_from_hw(priv, buffer))) break; - rx_desc = (struct hbg_rx_desc *)buffer->skb->data; + rx_desc = (struct hbg_rx_desc *)buffer->page_addr; pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2); + trace_hbg_rx_desc(priv, ring->ntc, rx_desc); + + if (unlikely(hbg_build_skb(priv, buffer, pkt_len))) { + hbg_buffer_free_page(buffer); + goto next_buffer; + } if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) { - hbg_buffer_free(buffer); + hbg_buffer_free_skb(buffer); goto next_buffer; } - hbg_dma_unmap(buffer); skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN); skb_put(buffer->skb, pkt_len); buffer->skb->protocol = eth_type_trans(buffer->skb, priv->netdev); - dev_sw_netstats_rx_add(priv->netdev, pkt_len); napi_gro_receive(napi, buffer->skb); buffer->skb = NULL; + buffer->page = NULL; next_buffer: hbg_rx_fill_one_buffer(priv); @@ -458,6 +534,42 @@ next_buffer: return packet_done; } +static void hbg_ring_page_pool_destory(struct hbg_ring *ring) +{ + if (!ring->page_pool) + return; + + page_pool_destroy(ring->page_pool); + ring->page_pool = NULL; +} + +static int hbg_ring_page_pool_init(struct hbg_priv *priv, struct hbg_ring *ring) +{ + u32 buf_size = hbg_spec_max_frame_len(priv, ring->dir); + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .order = hbg_get_page_order(ring), + .pool_size = ring->len * buf_size / hbg_get_page_size(ring), + .nid = dev_to_node(&priv->pdev->dev), + .dev = &priv->pdev->dev, + .napi = &ring->napi, + .dma_dir = DMA_FROM_DEVICE, + .offset = 0, + .max_len = 
hbg_get_page_size(ring), + }; + int ret = 0; + + ring->page_pool = page_pool_create(&pp_params); + if (IS_ERR(ring->page_pool)) { + ret = PTR_ERR(ring->page_pool); + dev_err(&priv->pdev->dev, + "failed to create page pool, ret = %d\n", ret); + ring->page_pool = NULL; + } + + return ret; +} + static void hbg_ring_uninit(struct hbg_ring *ring) { struct hbg_buffer *buffer; @@ -476,6 +588,7 @@ static void hbg_ring_uninit(struct hbg_ring *ring) buffer->priv = NULL; } + hbg_ring_page_pool_destory(ring); dma_free_coherent(&ring->priv->pdev->dev, ring->len * sizeof(*ring->queue), ring->queue, ring->queue_dma); @@ -491,8 +604,19 @@ static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring, { struct hbg_buffer *buffer; u32 i, len; + int ret; len = hbg_get_spec_fifo_max_num(priv, dir) + 1; + /* To improve receiving performance under high-stress scenarios, + * in the `hbg_napi_rx_poll()`, we first use the other half of + * the buffer to receive packets from the hardware via the + * `hbg_rx_fill_buffers()`, and then process the packets in the + * original half of the buffer to avoid packet loss caused by + * hardware overflow as much as possible. + */ + if (dir == HBG_DIR_RX) + len += hbg_get_spec_fifo_max_num(priv, dir); + ring->queue = dma_alloc_coherent(&priv->pdev->dev, len * sizeof(*ring->queue), &ring->queue_dma, GFP_KERNEL); @@ -514,11 +638,23 @@ static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring, ring->ntu = 0; ring->len = len; - if (dir == HBG_DIR_TX) + if (dir == HBG_DIR_TX) { netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll); - else + } else { netif_napi_add(priv->netdev, &ring->napi, napi_poll); + ret = hbg_ring_page_pool_init(priv, ring); + if (ret) { + netif_napi_del(&ring->napi); + dma_free_coherent(&ring->priv->pdev->dev, + ring->len * sizeof(*ring->queue), + ring->queue, ring->queue_dma); + ring->queue = NULL; + ring->len = 0; + return ret; + } + } + napi_enable(&ring->napi); return 0; } @@ -541,21 +677,16 @@ static int hbg_tx_ring_init(struct hbg_priv *priv) static int hbg_rx_ring_init(struct hbg_priv *priv) { int ret; - u32 i; ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX); if (ret) return ret; - for (i = 0; i < priv->rx_ring.len - 1; i++) { - ret = hbg_rx_fill_one_buffer(priv); - if (ret) { - hbg_ring_uninit(&priv->rx_ring); - return ret; - } - } + ret = hbg_rx_fill_buffers(priv); + if (ret) + hbg_ring_uninit(&priv->rx_ring); - return 0; + return ret; } int hbg_txrx_init(struct hbg_priv *priv) diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 3b548f71fa8a..d7c3df1958f3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -804,6 +804,11 @@ struct hnae3_ae_ops { int (*dbg_get_read_func)(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd, read_func *func); + int (*hwtstamp_get)(struct hnae3_handle *handle, + struct kernel_hwtstamp_config *config); + int (*hwtstamp_set)(struct hnae3_handle *handle, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); }; struct hnae3_dcb_ops { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index bfa5568baa92..7a0654e2d3dd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2419,6 +2419,35 @@ static int hns3_nic_do_ioctl(struct net_device *netdev, return h->ae_algo->ops->do_ioctl(h, ifr, cmd); } +static int 
hns3_nic_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!h->ae_algo->ops->hwtstamp_get) + return -EOPNOTSUPP; + + return h->ae_algo->ops->hwtstamp_get(h, config); +} + +static int hns3_nic_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + if (!h->ae_algo->ops->hwtstamp_set) + return -EOPNOTSUPP; + + return h->ae_algo->ops->hwtstamp_set(h, config, extack); +} + static int hns3_nic_set_features(struct net_device *netdev, netdev_features_t features) { @@ -3048,6 +3077,8 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_set_vf_rate = hns3_nic_set_vf_rate, .ndo_set_vf_mac = hns3_nic_set_vf_mac, .ndo_select_queue = hns3_nic_select_queue, + .ndo_hwtstamp_get = hns3_nic_hwtstamp_get, + .ndo_hwtstamp_set = hns3_nic_hwtstamp_set, }; bool hns3_is_phys_func(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 782bb48c9f3d..cf8abbe01840 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9444,15 +9444,8 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - switch (cmd) { - case SIOCGHWTSTAMP: - return hclge_ptp_get_cfg(hdev, ifr); - case SIOCSHWTSTAMP: - return hclge_ptp_set_cfg(hdev, ifr); - default: - if (!hdev->hw.mac.phydev) - return hclge_mii_ioctl(hdev, ifr, cmd); - } + if (!hdev->hw.mac.phydev) + return hclge_mii_ioctl(hdev, ifr, cmd); return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); } @@ -12900,6 +12893,8 @@ static const struct hnae3_ae_ops hclge_ops = { .get_dscp_prio = hclge_get_dscp_prio, .get_wol = hclge_get_wol, .set_wol = hclge_set_wol, + .hwtstamp_get = hclge_ptp_get_cfg, + .hwtstamp_set = hclge_ptp_set_cfg, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index 4bd52eab3914..0081c5281455 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -204,13 +204,17 @@ static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } -int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr) +int hclge_ptp_get_cfg(struct hnae3_handle *handle, + struct kernel_hwtstamp_config *config) { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) return -EOPNOTSUPP; - return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg, - sizeof(struct hwtstamp_config)) ? 
-EFAULT : 0; + *config = hdev->ptp->ts_cfg; + return 0; } static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en) @@ -269,7 +273,7 @@ static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg) return ret; } -static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg, +static int hclge_ptp_set_tx_mode(struct kernel_hwtstamp_config *cfg, unsigned long *flags, u32 *ptp_cfg) { switch (cfg->tx_type) { @@ -287,7 +291,7 @@ static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg, return 0; } -static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg, +static int hclge_ptp_set_rx_mode(struct kernel_hwtstamp_config *cfg, unsigned long *flags, u32 *ptp_cfg) { int rx_filter = cfg->rx_filter; @@ -332,7 +336,7 @@ static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg, } static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev, - struct hwtstamp_config *cfg) + struct kernel_hwtstamp_config *cfg) { unsigned long flags = hdev->ptp->flags; u32 ptp_cfg = 0; @@ -359,9 +363,12 @@ static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev, return 0; } -int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr) +int hclge_ptp_set_cfg(struct hnae3_handle *handle, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config cfg; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; int ret; if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) { @@ -369,16 +376,13 @@ int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr) return -EOPNOTSUPP; } - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - - ret = hclge_ptp_set_ts_mode(hdev, &cfg); + ret = hclge_ptp_set_ts_mode(hdev, config); if (ret) return ret; - hdev->ptp->ts_cfg = cfg; + hdev->ptp->ts_cfg = *config; - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; + return 0; } int hclge_ptp_get_ts_info(struct hnae3_handle *handle, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h index 61faddcc3dd0..0162fa5ac146 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h @@ -62,7 +62,7 @@ struct hclge_ptp { unsigned long flags; void __iomem *io_base; struct ptp_clock_info info; - struct hwtstamp_config ts_cfg; + struct kernel_hwtstamp_config ts_cfg; spinlock_t lock; /* protects ptp registers */ u32 ptp_cfg; u32 last_tx_seqid; @@ -133,8 +133,11 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb); void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev); void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb, u32 nsec, u32 sec); -int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr); -int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr); +int hclge_ptp_get_cfg(struct hnae3_handle *handle, + struct kernel_hwtstamp_config *config); +int hclge_ptp_set_cfg(struct hnae3_handle *handle, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); int hclge_ptp_init(struct hclge_dev *hdev); void hclge_ptp_uninit(struct hclge_dev *hdev); int hclge_ptp_get_ts_info(struct hnae3_handle *handle, diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c index 0fa3c7900225..bbf22811a029 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c @@ -304,7 +304,7 @@ static int hinic3_open_channel(struct net_device *netdev) err = hinic3_configure(netdev); if (err) { - netdev_err(netdev, "Failed to init txrxq irq\n"); + netdev_err(netdev, "Failed to configure device resources\n"); goto err_uninit_qps_irq; } diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 122ee23497e6..288fa8ce53af 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -296,6 +296,7 @@ config ICE depends on GNSS || GNSS = n select AUXILIARY_BUS select DIMLIB + select LIBETH_XDP select LIBIE select LIBIE_ADMINQ select LIBIE_FWLOG if DEBUG_FS diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 018e61aea787..aa08f397988e 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -461,6 +461,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); #define FLAG2_CHECK_RX_HWTSTAMP BIT(13) #define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14) #define FLAG2_ENABLE_S0IX_FLOWS BIT(15) +#define FLAG2_DISABLE_K1 BIT(16) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 8e40bb50a01e..7b1ac90b3de4 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -26,6 +26,8 @@ struct e1000_stats { static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = { #define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0) "s0ix-enabled", +#define E1000E_PRIV_FLAGS_DISABLE_K1 BIT(1) + "disable-k1", }; #define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings) @@ -549,9 +551,9 @@ static int e1000_set_eeprom(struct net_device *netdev, { struct e1000_adapter *adapter = 
netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - size_t total_len, max_len; u16 *eeprom_buff; int ret_val = 0; + size_t max_len; int first_word; int last_word; void *ptr; @@ -569,10 +571,6 @@ static int e1000_set_eeprom(struct net_device *netdev, max_len = hw->nvm.word_size * 2; - if (check_add_overflow(eeprom->offset, eeprom->len, &total_len) || - total_len > max_len) - return -EFBIG; - first_word = eeprom->offset >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1; eeprom_buff = kmalloc(max_len, GFP_KERNEL); @@ -2301,26 +2299,59 @@ static u32 e1000e_get_priv_flags(struct net_device *netdev) if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS) priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED; + if (adapter->flags2 & FLAG2_DISABLE_K1) + priv_flags |= E1000E_PRIV_FLAGS_DISABLE_K1; + return priv_flags; } static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; unsigned int flags2 = adapter->flags2; + unsigned int changed; - flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS; - if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) { - struct e1000_hw *hw = &adapter->hw; + flags2 &= ~(FLAG2_ENABLE_S0IX_FLOWS | FLAG2_DISABLE_K1); - if (hw->mac.type < e1000_pch_cnp) + if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) { + if (hw->mac.type < e1000_pch_cnp) { + e_err("S0ix is not supported on this device\n"); return -EINVAL; + } + flags2 |= FLAG2_ENABLE_S0IX_FLOWS; } - if (flags2 != adapter->flags2) + if (priv_flags & E1000E_PRIV_FLAGS_DISABLE_K1) { + if (hw->mac.type < e1000_ich8lan) { + e_err("Disabling K1 is not supported on this device\n"); + return -EINVAL; + } + + flags2 |= FLAG2_DISABLE_K1; + } + + changed = adapter->flags2 ^ flags2; + if (changed) adapter->flags2 = flags2; + if (changed & FLAG2_DISABLE_K1) { + /* reset the hardware to apply the changes */ + while (test_and_set_bit(__E1000_RESETTING, + &adapter->state)) + usleep_range(1000, 2000); + + if (netif_running(adapter->netdev)) { + e1000e_down(adapter, true); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->state); + } + return 0; } diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index df4e7d781cb1..0ff8688ac3b8 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -286,21 +286,26 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) } /** - * e1000_reconfigure_k1_exit_timeout - reconfigure K1 exit timeout to - * align to MTP and later platform requirements. + * e1000_reconfigure_k1_params - reconfigure Kumeran K1 parameters. * @hw: pointer to the HW structure * + * By default K1 is enabled after MAC reset, so this function only + * disables it. + * * Context: PHY semaphore must be held by caller. 
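The new "disable-k1" entry extends e1000e's private-flags table, which relies on a positional pairing: the BIT() value defined next to each string must match that string's index in the array. A stripped-down sketch of the idiom (all names illustrative):

/* Hypothetical adapter state; flags2 mirrors the ethtool bit values. */
struct my_adapter {
	u32 flags2;
};

static const char my_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define MY_PRIV_FLAGS_FEATURE_A	BIT(0)	/* must be string index 0 */
	"feature-a",
#define MY_PRIV_FLAGS_FEATURE_B	BIT(1)	/* must be string index 1 */
	"feature-b",
};

static u32 my_get_priv_flags(struct net_device *netdev)
{
	struct my_adapter *adapter = netdev_priv(netdev);

	return adapter->flags2 &
	       (MY_PRIV_FLAGS_FEATURE_A | MY_PRIV_FLAGS_FEATURE_B);
}

Keeping the defines interleaved with the strings, as e1000e does, makes an index/bit mismatch hard to introduce silently.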
* Return: 0 on success, negative on failure */ -static s32 e1000_reconfigure_k1_exit_timeout(struct e1000_hw *hw) +static s32 e1000_reconfigure_k1_params(struct e1000_hw *hw) { u16 phy_timeout; u32 fextnvm12; s32 ret_val; - if (hw->mac.type < e1000_pch_mtp) + if (hw->mac.type < e1000_pch_mtp) { + if (hw->adapter->flags2 & FLAG2_DISABLE_K1) + return e1000_configure_k1_ich8lan(hw, false); return 0; + } /* Change Kumeran K1 power down state from P0s to P1 */ fextnvm12 = er32(FEXTNVM12); @@ -310,6 +315,8 @@ static s32 e1000_reconfigure_k1_exit_timeout(struct e1000_hw *hw) /* Wait for the interface to settle */ usleep_range(1000, 1100); + if (hw->adapter->flags2 & FLAG2_DISABLE_K1) + return e1000_configure_k1_ich8lan(hw, false); /* Change K1 exit timeout */ ret_val = e1e_rphy_locked(hw, I217_PHY_TIMEOUTS_REG, @@ -373,8 +380,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) /* At this point the PHY might be inaccessible so don't * propagate the failure */ - if (e1000_reconfigure_k1_exit_timeout(hw)) - e_dbg("Failed to reconfigure K1 exit timeout\n"); + if (e1000_reconfigure_k1_params(hw)) + e_dbg("Failed to reconfigure K1 parameters\n"); fallthrough; case e1000_pch_lpt: @@ -473,10 +480,10 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) if (hw->mac.type >= e1000_pch_mtp) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) { - e_err("Failed to reconfigure K1 exit timeout\n"); + e_err("Failed to reconfigure K1 parameters\n"); goto out; } - ret_val = e1000_reconfigure_k1_exit_timeout(hw); + ret_val = e1000_reconfigure_k1_params(hw); hw->phy.ops.release(hw); } } @@ -4948,17 +4955,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) u16 i; e1000_initialize_hw_bits_ich8lan(hw); - if (hw->mac.type >= e1000_pch_mtp) { - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; - ret_val = e1000_reconfigure_k1_exit_timeout(hw); - hw->phy.ops.release(hw); - if (ret_val) { - e_dbg("Error failed to reconfigure K1 exit timeout\n"); - return ret_val; - } + ret_val = e1000_reconfigure_k1_params(hw); + hw->phy.ops.release(hw); + if (ret_val) { + e_dbg("Error failed to reconfigure K1 parameters\n"); + return ret_val; } /* Initialize identification LED */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 201322dac233..116f3c92b5bc 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7675,6 +7675,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* init PTP hardware clock */ e1000e_ptp_init(adapter); + if (hw->mac.type >= e1000_pch_mtp) + adapter->flags2 |= FLAG2_DISABLE_K1; + /* reset the hardware with the new settings */ e1000e_reset(adapter); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index bf2029144c1d..76e42abca965 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -734,22 +734,11 @@ static int fm10k_get_rssh_fields(struct net_device *dev, return 0; } -static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, - u32 __always_unused *rule_locs) +static u32 fm10k_get_rx_ring_count(struct net_device *dev) { struct fm10k_intfc *interface = netdev_priv(dev); - int ret = -EOPNOTSUPP; - switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = interface->num_rx_queues; - ret = 0; - break; -
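/* Illustrative userspace sketch (not part of the patch): toggling the
 * new e1000e "disable-k1" private flag added above through the legacy
 * ETHTOOL_GPFLAGS/ETHTOOL_SPFLAGS ioctls, which drive
 * e1000e_set_priv_flags() and its reset path. Bit 1 matches the
 * position of "disable-k1" in e1000e_priv_flags_strings[]; "eth0" is
 * an assumption. */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value ev = { .cmd = ETHTOOL_GPFLAGS };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ev;
	if (ioctl(fd, SIOCETHTOOL, &ifr))	/* read current private flags */
		return 1;
	ev.data |= 1u << 1;			/* set the "disable-k1" bit */
	ev.cmd = ETHTOOL_SPFLAGS;
	if (ioctl(fd, SIOCETHTOOL, &ifr))	/* triggers the reset path above */
		return 1;
	close(fd);
	return 0;
}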
default: - break; - } - - return ret; + return interface->num_rx_queues; } static int fm10k_set_rssh_fields(struct net_device *dev, @@ -1160,7 +1149,7 @@ static const struct ethtool_ops fm10k_ethtool_ops = { .set_ringparam = fm10k_set_ringparam, .get_coalesce = fm10k_get_coalesce, .set_coalesce = fm10k_set_coalesce, - .get_rxnfc = fm10k_get_rxnfc, + .get_rx_ring_count = fm10k_get_rx_ring_count, .get_regs = fm10k_get_regs, .get_regs_len = fm10k_get_regs_len, .self_test = fm10k_self_test, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 801a57a925da..d2d03db2acec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -574,6 +574,10 @@ struct i40e_pf { struct i40e_vf *vf; int num_alloc_vfs; /* actual number of VFs allocated */ u32 vf_aq_requests; + /* If set to non-zero, the device uses this value + * as maximum number of MAC filters per VF. + */ + u32 max_mac_per_vf; u32 arq_overflows; /* Not fatal, possibly indicative of problems */ struct ratelimit_state mdd_message_rate_limit; /* DCBx/DCBNL capability for PF that indicates diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.c b/drivers/net/ethernet/intel/i40e/i40e_devlink.c index cc4e9e2addb7..229179ccc131 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devlink.c +++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.c @@ -5,6 +5,42 @@ #include "i40e.h" #include "i40e_devlink.h" +static int i40e_max_mac_per_vf_set(struct devlink *devlink, + u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + struct i40e_pf *pf = devlink_priv(devlink); + + if (pf->num_alloc_vfs > 0) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot change max_mac_per_vf while SR-IOV is enabled"); + return -EBUSY; + } + + pf->max_mac_per_vf = ctx->val.vu32; + return 0; +} + +static int i40e_max_mac_per_vf_get(struct devlink *devlink, + u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + struct i40e_pf *pf = devlink_priv(devlink); + + ctx->val.vu32 = pf->max_mac_per_vf; + return 0; +} + +static const struct devlink_param i40e_dl_params[] = { + DEVLINK_PARAM_GENERIC(MAX_MAC_PER_VF, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + i40e_max_mac_per_vf_get, + i40e_max_mac_per_vf_set, + NULL), +}; + static void i40e_info_get_dsn(struct i40e_pf *pf, char *buf, size_t len) { u8 dsn[8]; @@ -165,7 +201,18 @@ void i40e_free_pf(struct i40e_pf *pf) **/ void i40e_devlink_register(struct i40e_pf *pf) { - devlink_register(priv_to_devlink(pf)); + struct devlink *dl = priv_to_devlink(pf); + struct device *dev = &pf->pdev->dev; + int err; + + err = devlink_params_register(dl, i40e_dl_params, + ARRAY_SIZE(i40e_dl_params)); + if (err) + dev_err(dev, + "devlink params register failed with error %d", err); + + devlink_register(dl); + } /** @@ -176,7 +223,11 @@ void i40e_devlink_register(struct i40e_pf *pf) **/ void i40e_devlink_unregister(struct i40e_pf *pf) { - devlink_unregister(priv_to_devlink(pf)); + struct devlink *dl = priv_to_devlink(pf); + + devlink_unregister(dl); + devlink_params_unregister(dl, i40e_dl_params, + ARRAY_SIZE(i40e_dl_params)); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 86c72596617a..f2c2646ea298 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -3522,6 +3522,20 @@ no_input_set: } /** + * i40e_get_rx_ring_count - get RX ring count + * @netdev: network interface device structure + * + * 
Return: number of RX rings. + **/ +static u32 i40e_get_rx_ring_count(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + return vsi->rss_size; +} + +/** * i40e_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -3538,10 +3552,6 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, int ret = -EOPNOTSUPP; switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = vsi->rss_size; - ret = 0; - break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = pf->fdir_pf_active_filters; /* report total rule count */ @@ -5819,6 +5829,7 @@ static const struct ethtool_ops i40e_ethtool_ops = { .set_msglevel = i40e_set_msglevel, .get_rxnfc = i40e_get_rxnfc, .set_rxnfc = i40e_set_rxnfc, + .get_rx_ring_count = i40e_get_rx_ring_count, .self_test = i40e_diag_test, .get_strings = i40e_get_strings, .get_eee = i40e_get_eee, diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 081a4526a2f0..8b30a3accd31 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2935,33 +2935,48 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, if (!f) ++mac_add_cnt; } - - /* If this VF is not privileged, then we can't add more than a limited - * number of addresses. + /* Determine the maximum number of MAC addresses this VF may use. + * + * - For untrusted VFs: use a fixed small limit. + * + * - For trusted VFs: limit is calculated by dividing total MAC + * filter pool across all VFs/ports. * - * If this VF is trusted, it can use more resources than untrusted. - * However to ensure that every trusted VF has appropriate number of - * resources, divide whole pool of resources per port and then across - * all VFs. + * - User can override this by devlink param "max_mac_per_vf". + * If set its value is used as a strict cap for both trusted and + * untrusted VFs. + * Note: + * even when overridden, this is a theoretical maximum; hardware + * may reject additional MACs if the absolute HW limit is reached. */ if (!vf_trusted) mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF; else mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports); + if (pf->max_mac_per_vf > 0) + mac_add_max = pf->max_mac_per_vf; + /* VF can replace all its filters in one step, in this case mac_add_max * will be added as active and another mac_add_max will be in * a to-be-removed state. Account for that. 
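/* Hedged restatement of the accounting rule described above, as two
 * illustrative helpers (names invented, not part of the patch): the
 * effective cap is the devlink "max_mac_per_vf" override when non-zero,
 * otherwise the trusted/untrusted default, and an add request passes
 * only while active filters stay within the cap and active plus
 * to-be-removed filters stay within twice the cap. */
#include <stdbool.h>

static unsigned int vf_mac_cap(unsigned int override, bool trusted,
			       unsigned int untrusted_max,
			       unsigned int trusted_max)
{
	unsigned int cap = trusted ? trusted_max : untrusted_max;

	return override ? override : cap;	/* devlink param wins when set */
}

static bool vf_mac_add_ok(unsigned int active, unsigned int all,
			  unsigned int add_cnt, unsigned int cap)
{
	return active + add_cnt <= cap && all + add_cnt <= 2 * cap;
}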
*/ if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max || (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) { + if (pf->max_mac_per_vf == mac_add_max && mac_add_max > 0) { + dev_err(&pf->pdev->dev, + "Cannot add more MAC addresses: VF reached its maximum allowed limit (%d)\n", + mac_add_max); + return -EPERM; + } if (!vf_trusted) { dev_err(&pf->pdev->dev, "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n"); return -EPERM; } else { dev_err(&pf->pdev->dev, - "Cannot add more MAC addresses, trusted VF exhausted it's resources\n"); + "Cannot add more MAC addresses: trusted VF reached its maximum allowed limit (%d)\n", + mac_add_max); return -EPERM; } } @@ -4788,6 +4803,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) unsigned long q_map; struct i40e_vf *vf; int abs_vf_id; + int old_link; int ret = 0; int tmp; @@ -4806,6 +4822,17 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) vf = &pf->vf[vf_id]; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + /* skip VF link state change if requested state is already set */ + if (!vf->link_forced) + old_link = IFLA_VF_LINK_STATE_AUTO; + else if (vf->link_up) + old_link = IFLA_VF_LINK_STATE_ENABLE; + else + old_link = IFLA_VF_LINK_STATE_DISABLE; + + if (link == old_link) + goto error_out; + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c index a9e1da35e248..4d12dfe1b481 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c @@ -91,6 +91,55 @@ iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) } /** + * iavf_fill_adv_rss_gtp_hdr - Fill GTP-related RSS protocol headers + * @proto_hdrs: pointer to the virtchnl protocol headers structure to populate + * @packet_hdrs: bitmask of packet header types to configure + * @hash_flds: RSS hash field configuration + * + * This function populates the virtchnl protocol header structure with + * appropriate GTP-related header types based on the specified packet_hdrs. + * It supports GTPC, GTPU with extension headers, and uplink/downlink PDU + * types. For certain GTPU types, it also appends an IPv4 header to enable + * hashing on the destination IP address. + * + * Return: 0 on success or -EOPNOTSUPP if the packet_hdrs value is unsupported. 
+ */ +static int +iavf_fill_adv_rss_gtp_hdr(struct virtchnl_proto_hdrs *proto_hdrs, + u32 packet_hdrs, u64 hash_flds) +{ + struct virtchnl_proto_hdr *hdr; + + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + + switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) { + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID: + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC: + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPC); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH: + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP: + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_UP); + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + iavf_fill_adv_rss_ip4_hdr(hdr, IAVF_ADV_RSS_HASH_FLD_IPV4_DA); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN: + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH_PDU_DWN); + fallthrough; + case IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP: + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + iavf_fill_adv_rss_ip4_hdr(hdr, IAVF_ADV_RSS_HASH_FLD_IPV4_DA); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +/** * iavf_fill_adv_rss_cfg_msg - fill the RSS configuration into virtchnl message * @rss_cfg: the virtchnl message to be filled with RSS configuration setting * @packet_hdrs: the RSS configuration protocol header types @@ -103,6 +152,8 @@ int iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, u32 packet_hdrs, u64 hash_flds, bool symm) { + const u32 packet_l3_hdrs = packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3; + const u32 packet_l4_hdrs = packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4; struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs; struct virtchnl_proto_hdr *hdr; @@ -113,31 +164,41 @@ iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, proto_hdrs->tunnel_level = 0; /* always outer layer */ - hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; - switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3) { - case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4: - iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds); - break; - case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6: - iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds); - break; - default: - return -EINVAL; + if (packet_l3_hdrs) { + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + switch (packet_l3_hdrs) { + case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4: + iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6: + iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds); + break; + default: + return -EINVAL; + } } - hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; - switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4) { - case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP: - iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds); - break; - case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP: - iavf_fill_adv_rss_udp_hdr(hdr, hash_flds); - break; - case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP: - iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds); - break; - default: - return -EINVAL; + if (packet_l4_hdrs) { + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + switch (packet_l4_hdrs) { + case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP: + iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP: + iavf_fill_adv_rss_udp_hdr(hdr, hash_flds); + break; + case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP: + iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds); + break; + default: + return -EINVAL; + } + } + + if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) { + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + if (iavf_fill_adv_rss_gtp_hdr(proto_hdrs, packet_hdrs, hash_flds)) + return -EINVAL; } return 0; @@ -186,6 +247,8 @@ iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, 
struct iavf_adv_rss *rss, proto = "UDP"; else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP) proto = "SCTP"; + else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_GTP) + proto = "GTP"; else return; @@ -211,6 +274,16 @@ iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss, IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT | IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT)) strcat(hash_opt, "dst port,"); + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPC_TEID) + strcat(hash_opt, "gtp-c,"); + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID) + strcat(hash_opt, "gtp-u ip,"); + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID) + strcat(hash_opt, "gtp-u ext,"); + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID) + strcat(hash_opt, "gtp-u ul,"); + if (hash_flds & IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID) + strcat(hash_opt, "gtp-u dl,"); if (!action) action = ""; diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h index e31eb2afebea..74cc9e0d528c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h @@ -22,6 +22,12 @@ enum iavf_adv_rss_flow_seg_hdr { IAVF_ADV_RSS_FLOW_SEG_HDR_TCP = 0x00000004, IAVF_ADV_RSS_FLOW_SEG_HDR_UDP = 0x00000008, IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP = 0x00000010, + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC = 0x00000400, + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID = 0x00000800, + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP = 0x00001000, + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH = 0x00002000, + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN = 0x00004000, + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP = 0x00008000, }; #define IAVF_ADV_RSS_FLOW_SEG_HDR_L3 \ @@ -33,6 +39,14 @@ enum iavf_adv_rss_flow_seg_hdr { IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | \ IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP) +#define IAVF_ADV_RSS_FLOW_SEG_HDR_GTP \ + (IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN | \ + IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP) + enum iavf_adv_rss_flow_field { /* L3 */ IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA, @@ -46,6 +60,17 @@ enum iavf_adv_rss_flow_field { IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT, IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT, IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT, + /* GTPC_TEID */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPC_TEID, + /* GTPU_IP */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_IP_TEID, + /* GTPU_EH */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_TEID, + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_QFI, + /* GTPU_UP */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_UP_TEID, + /* GTPU_DWN */ + IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_DWN_TEID, /* The total number of enums must not exceed 64 */ IAVF_ADV_RSS_FLOW_FIELD_IDX_MAX @@ -72,6 +97,12 @@ enum iavf_adv_rss_flow_field { BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT) #define IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT \ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT) +#define IAVF_ADV_RSS_HASH_FLD_GTPC_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPC_TEID) +#define IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_IP_TEID) +#define IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_EH_TEID) +#define IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_UP_TEID) +#define IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID \ + BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_GTPU_DWN_TEID) /* bookkeeping of advanced RSS configuration */ struct iavf_adv_rss { diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c 
b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 05d72be3fe80..2cc21289a707 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1336,6 +1336,56 @@ static u32 iavf_adv_rss_parse_hdrs(const struct ethtool_rxfh_fields *cmd) hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP | IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; break; + case GTPU_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case GTPC_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC | + IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case GTPC_TEID_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID | + IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case GTPU_EH_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case GTPU_UL_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case GTPU_DL_V4_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4; + break; + case GTPU_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case GTPC_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case GTPC_TEID_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case GTPU_EH_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case GTPU_UL_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; + case GTPU_DL_V6_FLOW: + hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN | + IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6; + break; default: break; } @@ -1353,6 +1403,12 @@ iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm) case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: + case GTPU_V4_FLOW: + case GTPC_V4_FLOW: + case GTPC_TEID_V4_FLOW: + case GTPU_EH_V4_FLOW: + case GTPU_UL_V4_FLOW: + case GTPU_DL_V4_FLOW: if (cmd->data & RXH_IP_SRC) hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA; if (cmd->data & RXH_IP_DST) @@ -1361,6 +1417,12 @@ iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm) case TCP_V6_FLOW: case UDP_V6_FLOW: case SCTP_V6_FLOW: + case GTPU_V6_FLOW: + case GTPC_V6_FLOW: + case GTPC_TEID_V6_FLOW: + case GTPU_EH_V6_FLOW: + case GTPU_UL_V6_FLOW: + case GTPU_DL_V6_FLOW: if (cmd->data & RXH_IP_SRC) hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA; if (cmd->data & RXH_IP_DST) @@ -1382,6 +1444,7 @@ iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm) break; case UDP_V4_FLOW: case UDP_V6_FLOW: + case GTPC_V4_FLOW: if (cmd->data & RXH_L4_B_0_1) hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT; if (cmd->data & RXH_L4_B_2_3) @@ -1398,6 +1461,32 @@ iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm) break; } } + if (cmd->data & RXH_GTP_TEID) { + switch (cmd->flow_type) { + case GTPC_TEID_V4_FLOW: + case GTPC_TEID_V6_FLOW: + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPC_TEID; + break; + case GTPU_V4_FLOW: + case GTPU_V6_FLOW: + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID; + break; + case GTPU_EH_V4_FLOW: + case GTPU_EH_V6_FLOW: + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID; + break; + case GTPU_UL_V4_FLOW: + case GTPU_UL_V6_FLOW: + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID; + break; + case GTPU_DL_V4_FLOW: + case GTPU_DL_V6_FLOW: + hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID; + break; + 
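/* Illustrative userspace sketch (not part of the patch): requesting
 * TEID plus destination-IP hashing for uplink GTP-U over IPv4 via the
 * classic ETHTOOL_SRXFH ioctl, which lands in the rxfh-fields parsing
 * above. Assumes the GTPU_UL_V4_FLOW and RXH_GTP_TEID definitions from
 * this series are present in the installed linux/ethtool.h; "eth0" is
 * an assumption. */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXFH,
		.flow_type = GTPU_UL_V4_FLOW,
		.data = RXH_IP_DST | RXH_GTP_TEID, /* maps to GTPU_UP_TEID above */
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	return ioctl(fd, SIOCETHTOOL, &ifr) ? 1 : 0;
}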
default: + break; + } + } return hfld; } @@ -1550,6 +1639,19 @@ static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) } /** + * iavf_get_rx_ring_count - get RX ring count + * @netdev: network interface device structure + * + * Return: number of RX rings. + **/ +static u32 iavf_get_rx_ring_count(struct net_device *netdev) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + return adapter->num_active_queues; +} + +/** * iavf_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -1564,10 +1666,6 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, int ret = -EOPNOTSUPP; switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = adapter->num_active_queues; - ret = 0; - break; case ETHTOOL_GRXCLSRLCNT: if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) break; @@ -1777,6 +1875,7 @@ static const struct ethtool_ops iavf_ethtool_ops = { .set_per_queue_coalesce = iavf_set_per_queue_coalesce, .set_rxnfc = iavf_set_rxnfc, .get_rxnfc = iavf_get_rxnfc, + .get_rx_ring_count = iavf_get_rx_ring_count, .get_rxfh_indir_size = iavf_get_rxfh_indir_size, .get_rxfh = iavf_get_rxfh, .set_rxfh = iavf_set_rxfh, diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.c b/drivers/net/ethernet/intel/iavf/iavf_ptp.c index b4d5eda2e84f..9cbd8c154031 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ptp.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.c @@ -252,6 +252,12 @@ static int iavf_ptp_gettimex64(struct ptp_clock_info *info, return iavf_read_phc_indirect(adapter, ts, sts); } +static int iavf_ptp_settime64(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + return -EOPNOTSUPP; +} + /** * iavf_ptp_cache_phc_time - Cache PHC time for performing timestamp extension * @adapter: private adapter structure @@ -320,6 +326,7 @@ static int iavf_ptp_register_clock(struct iavf_adapter *adapter) KBUILD_MODNAME, dev_name(dev)); ptp_info->owner = THIS_MODULE; ptp_info->gettimex64 = iavf_ptp_gettimex64; + ptp_info->settime64 = iavf_ptp_settime64; ptp_info->do_aux_work = iavf_ptp_do_aux_work; clock = ptp_clock_register(ptp_info, dev); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 34a422a4a29c..88156082a41d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -793,7 +793,8 @@ void iavf_add_vlans(struct iavf_adapter *adapter) len = virtchnl_struct_size(vvfl, vlan_id, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); + dev_info(&adapter->pdev->dev, + "virtchnl: Too many VLAN add (v1) requests; splitting into multiple messages to PF\n"); while (len > IAVF_MAX_AQ_BUF_SIZE) len = virtchnl_struct_size(vvfl, vlan_id, --count); @@ -838,7 +839,8 @@ void iavf_add_vlans(struct iavf_adapter *adapter) len = virtchnl_struct_size(vvfl_v2, filters, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); + dev_info(&adapter->pdev->dev, + "virtchnl: Too many VLAN add (v2) requests; splitting into multiple messages to PF\n"); while (len > IAVF_MAX_AQ_BUF_SIZE) len = virtchnl_struct_size(vvfl_v2, filters, --count); @@ -941,7 +943,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter) len = virtchnl_struct_size(vvfl, vlan_id, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one 
request\n"); + dev_info(&adapter->pdev->dev, + "virtchnl: Too many VLAN delete (v1) requests; splitting into multiple messages to PF\n"); while (len > IAVF_MAX_AQ_BUF_SIZE) len = virtchnl_struct_size(vvfl, vlan_id, --count); @@ -987,7 +990,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter) len = virtchnl_struct_size(vvfl_v2, filters, count); if (len > IAVF_MAX_AQ_BUF_SIZE) { - dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n"); + dev_info(&adapter->pdev->dev, + "virtchnl: Too many VLAN delete (v2) requests; splitting into multiple messages to PF\n"); while (len > IAVF_MAX_AQ_BUF_SIZE) len = virtchnl_struct_size(vvfl_v2, filters, --count); diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c index fb2de521731a..d88b7f3fd1f9 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c @@ -459,6 +459,7 @@ static void ice_devlink_reinit_down(struct ice_pf *pf) rtnl_lock(); ice_vsi_decfg(ice_get_main_vsi(pf)); rtnl_unlock(); + ice_deinit_pf(pf); ice_deinit_dev(pf); } @@ -609,11 +610,13 @@ exit_release_res: * @devlink: pointer to the devlink instance * @id: the parameter ID to set * @ctx: context to store the parameter value + * @extack: netlink extended ACK structure * * Return: zero on success and negative value on failure. */ static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); int err; @@ -1231,11 +1234,13 @@ static void ice_set_min_max_msix(struct ice_pf *pf) static int ice_devlink_reinit_up(struct ice_pf *pf) { struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct device *dev = ice_pf_to_dev(pf); + bool need_dev_deinit = false; int err; err = ice_init_hw(&pf->hw); if (err) { - dev_err(ice_pf_to_dev(pf), "ice_init_hw failed: %d\n", err); + dev_err(dev, "ice_init_hw failed: %d\n", err); return err; } @@ -1246,13 +1251,19 @@ static int ice_devlink_reinit_up(struct ice_pf *pf) if (err) goto unroll_hw_init; + err = ice_init_pf(pf); + if (err) { + dev_err(dev, "ice_init_pf failed: %d\n", err); + goto unroll_dev_init; + } + vsi->flags = ICE_VSI_FLAG_INIT; rtnl_lock(); err = ice_vsi_cfg(vsi); rtnl_unlock(); if (err) - goto err_vsi_cfg; + goto unroll_pf_init; /* No need to take devl_lock, it's already taken by devlink API */ err = ice_load(pf); @@ -1265,10 +1276,14 @@ err_load: rtnl_lock(); ice_vsi_decfg(vsi); rtnl_unlock(); -err_vsi_cfg: - ice_deinit_dev(pf); +unroll_pf_init: + ice_deinit_pf(pf); +unroll_dev_init: + need_dev_deinit = true; unroll_hw_init: ice_deinit_hw(&pf->hw); + if (need_dev_deinit) + ice_deinit_dev(pf); return err; } @@ -1336,7 +1351,8 @@ static const struct devlink_ops ice_sf_devlink_ops; static int ice_devlink_enable_roce_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); struct iidc_rdma_core_dev_info *cdev; @@ -1402,7 +1418,8 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id, static int ice_devlink_enable_iw_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); struct iidc_rdma_core_dev_info *cdev; @@ -1509,11 +1526,13 @@ static int 
ice_devlink_local_fwd_str_to_mode(const char *mode_str) * @devlink: Pointer to the devlink instance. * @id: The parameter ID to set. * @ctx: Context to store the parameter value. + * @extack: netlink extended ACK structure * * Return: Zero. */ static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); struct ice_port_info *pi; diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 22b8323ff0d0..147aaee192a7 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -351,6 +351,7 @@ struct ice_vsi { u16 num_q_vectors; /* tell if only dynamic irq allocation is allowed */ bool irq_dyn_alloc; + bool hsplit:1; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ @@ -374,6 +375,8 @@ struct ice_vsi { spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ atomic_t *arfs_last_fltr_id; + u16 max_frame; + struct ice_aqc_vsi_props info; /* VSI properties */ struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */ @@ -509,7 +512,6 @@ enum ice_pf_flags { ICE_FLAG_MOD_POWER_UNSUPPORTED, ICE_FLAG_PHY_FW_LOAD_FAILED, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ - ICE_FLAG_LEGACY_RX, ICE_FLAG_VF_TRUE_PROMISC_ENA, ICE_FLAG_MDD_AUTO_RESET_VF, ICE_FLAG_VF_VLAN_PRUNING, @@ -1029,11 +1031,15 @@ int ice_open(struct net_device *netdev); int ice_open_internal(struct net_device *netdev); int ice_stop(struct net_device *netdev); void ice_service_task_schedule(struct ice_pf *pf); +void ice_start_service_task(struct ice_pf *pf); int ice_load(struct ice_pf *pf); void ice_unload(struct ice_pf *pf); void ice_adv_lnk_speed_maps_init(void); +void ice_init_dev_hw(struct ice_pf *pf); int ice_init_dev(struct ice_pf *pf); void ice_deinit_dev(struct ice_pf *pf); +int ice_init_pf(struct ice_pf *pf); +void ice_deinit_pf(struct ice_pf *pf); int ice_change_mtu(struct net_device *netdev, int new_mtu); void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue); int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 2d35a278c555..eadb1e3d12b3 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. */ #include <net/xdp_sock_drv.h> +#include <linux/net/intel/libie/rx.h> #include "ice_base.h" #include "ice_lib.h" #include "ice_dcb_lib.h" @@ -462,19 +463,6 @@ u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring) } /** - * ice_rx_offset - Return expected offset into page to access data - * @rx_ring: Ring we are requesting offset of - * - * Returns the offset value for ring into the data buffer. 
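/* Illustrative-only sketch (names invented) of the Rx header-split
 * policy that the ice_setup_rx_ctx() change below encodes in
 * rlan_ctx.hsplit_0: split after the L4 header for TCP/UDP/SCTP, else
 * after the IPv4/IPv6 header, else after the Ethernet header, so the
 * header buffer is never left empty. */
#include <stdbool.h>

enum hsplit_point { HSPLIT_AFTER_L4, HSPLIT_AFTER_IP, HSPLIT_AFTER_L2 };

static enum hsplit_point hsplit_point(bool is_tcp_udp_sctp, bool is_ip)
{
	if (is_tcp_udp_sctp)
		return HSPLIT_AFTER_L4;	/* payload starts the data buffer */
	if (is_ip)
		return HSPLIT_AFTER_IP;	/* L3 header stays in the header buffer */
	return HSPLIT_AFTER_L2;		/* at minimum, split off the Ethernet header */
}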
- */ -static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring) -{ - if (ice_ring_uses_build_skb(rx_ring)) - return ICE_SKB_PAD; - return 0; -} - -/** * ice_setup_rx_ctx - Configure a receive ring context * @ring: The Rx ring to configure * @@ -536,8 +524,29 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) else rlan_ctx.l2tsel = 1; - rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; - rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; + if (ring->hdr_pp) { + rlan_ctx.hbuf = ring->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; + rlan_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; + + /* + * If the frame is TCP/UDP/SCTP, it will be split by the + * payload. + * If not, but it's an IPv4/IPv6 frame, it will be split by + * the IP header. + * If not IP, it will be split by the Ethernet header. + * + * In any case, the header buffer will never be left empty. + */ + rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 | + ICE_RLAN_RX_HSPLIT_0_SPLIT_IP | + ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP | + ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP; + } else { + rlan_ctx.hbuf = 0; + rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; + rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; + } + rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; /* This controls whether VLAN is stripped from inner headers @@ -549,7 +558,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Max packet size for this queue - must not be set to a larger value * than 5 x DBUF */ - rlan_ctx.rxmax = min_t(u32, ring->max_frame, + rlan_ctx.rxmax = min_t(u32, vsi->max_frame, ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len); /* Rx queue threshold in units of 64 */ @@ -586,14 +595,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) if (vsi->type == ICE_VSI_VF) return 0; - /* configure Rx buffer alignment */ - if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) - ice_clear_ring_build_skb_ena(ring); - else - ice_set_ring_build_skb_ena(ring); - - ring->rx_offset = ice_rx_offset(ring); - /* init queue specific tail register */ ring->tail = hw->hw_addr + QRX_TAIL(pf_q); writel(0, ring->tail); @@ -601,36 +602,51 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) return 0; } -static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring) +static int ice_rxq_pp_create(struct ice_rx_ring *rq) { - void *ctx_ptr = &ring->pkt_ctx; - struct xsk_cb_desc desc = {}; - - XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff); - desc.src = &ctx_ptr; - desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) - - sizeof(struct xdp_buff); - desc.bytes = sizeof(ctx_ptr); - xsk_pool_fill_cb(ring->xsk_pool, &desc); -} + struct libeth_fq fq = { + .count = rq->count, + .nid = NUMA_NO_NODE, + .hsplit = rq->vsi->hsplit, + .xdp = ice_is_xdp_ena_vsi(rq->vsi), + .buf_len = LIBIE_MAX_RX_BUF_LEN, + }; + int err; -/** - * ice_get_frame_sz - calculate xdp_buff::frame_sz - * @rx_ring: the ring being configured - * - * Return frame size based on underlying PAGE_SIZE - */ -static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring) -{ - unsigned int frame_sz; + err = libeth_rx_fq_create(&fq, &rq->q_vector->napi); + if (err) + return err; + + rq->pp = fq.pp; + rq->rx_fqes = fq.fqes; + rq->truesize = fq.truesize; + rq->rx_buf_len = fq.buf_len; -#if (PAGE_SIZE >= 8192) - frame_sz = rx_ring->rx_buf_len; -#else - frame_sz = ice_rx_pg_size(rx_ring) / 2; -#endif + if (!fq.hsplit) + return 0; + + fq = (struct libeth_fq){ + .count = rq->count, + .type = LIBETH_FQE_HDR, + .nid = NUMA_NO_NODE, + .xdp = ice_is_xdp_ena_vsi(rq->vsi), + }; - return frame_sz; + err = libeth_rx_fq_create(&fq, &rq->q_vector->napi); + if (err) 
+ goto destroy; + + rq->hdr_pp = fq.pp; + rq->hdr_fqes = fq.fqes; + rq->hdr_truesize = fq.truesize; + rq->rx_hdr_len = fq.buf_len; + + return 0; + +destroy: + ice_rxq_pp_destroy(rq); + + return err; } /** @@ -642,7 +658,8 @@ static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring) static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) { struct device *dev = ice_pf_to_dev(ring->vsi->back); - u32 num_bufs = ICE_RX_DESC_UNUSED(ring); + u32 num_bufs = ICE_DESC_UNUSED(ring); + u32 rx_buf_len; int err; if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) { @@ -656,15 +673,19 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) } ice_rx_xsk_pool(ring); + err = ice_realloc_rx_xdp_bufs(ring, ring->xsk_pool); + if (err) + return err; + if (ring->xsk_pool) { xdp_rxq_info_unreg(&ring->xdp_rxq); - ring->rx_buf_len = + rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index, ring->q_vector->napi.napi_id, - ring->rx_buf_len); + rx_buf_len); if (err) return err; err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, @@ -673,36 +694,33 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) if (err) return err; xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); - ice_xsk_pool_fill_cb(ring); dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", ring->q_index); } else { + err = ice_rxq_pp_create(ring); + if (err) + return err; + if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index, ring->q_vector->napi.napi_id, ring->rx_buf_len); if (err) - return err; + goto err_destroy_fq; } - - err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, - MEM_TYPE_PAGE_SHARED, - NULL); - if (err) - return err; + xdp_rxq_info_attach_page_pool(&ring->xdp_rxq, + ring->pp); } } - xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq); ring->xdp.data = NULL; - ring->xdp_ext.pkt_ctx = &ring->pkt_ctx; err = ice_setup_rx_ctx(ring); if (err) { dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n", ring->q_index, err); - return err; + goto err_destroy_fq; } if (ring->xsk_pool) { @@ -730,9 +748,17 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) if (ring->vsi->type == ICE_VSI_CTRL) ice_init_ctrl_rx_descs(ring, num_bufs); else - ice_alloc_rx_bufs(ring, num_bufs); + err = ice_alloc_rx_bufs(ring, num_bufs); + + if (err) + goto err_destroy_fq; return 0; + +err_destroy_fq: + ice_rxq_pp_destroy(ring); + + return err; } int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) @@ -753,18 +779,10 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) */ static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring) { - if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { - ring->max_frame = ICE_MAX_FRAME_LEGACY_RX; - ring->rx_buf_len = ICE_RXBUF_1664; -#if (PAGE_SIZE < 8192) - } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && - (vsi->netdev->mtu <= ETH_DATA_LEN)) { - ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; - ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; -#endif + if (!vsi->netdev) { + vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; } else { - ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; - ring->rx_buf_len = ICE_RXBUF_3072; + vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; } } diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 2532b6f82e97..046bc9c65c51 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ 
b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1161,6 +1161,9 @@ int ice_init_hw(struct ice_hw *hw) status = ice_init_hw_tbls(hw); if (status) goto err_unroll_fltr_mgmt_struct; + + ice_init_dev_hw(hw->back); + mutex_init(&hw->tnl_lock); ice_init_chk_recipe_reuse_support(hw); @@ -3389,6 +3392,7 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw) case ICE_DEV_ID_E822L_SGMII: case ICE_DEV_ID_E823L_1GBE: case ICE_DEV_ID_E823C_SGMII: + case ICE_DEV_ID_E825C_SGMII: return true; default: return false; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index dc131779d426..969d4f8f9c02 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -10,6 +10,7 @@ #include "ice_lib.h" #include "ice_dcb_lib.h" #include <net/dcbnl.h> +#include <net/libeth/rx.h> struct ice_stats { char stat_string[ETH_GSTRING_LEN]; @@ -340,7 +341,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = { ICE_FLAG_VF_TRUE_PROMISC_ENA), ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF), ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING), - ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX), }; #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) @@ -794,8 +794,7 @@ static int ice_get_extended_regs(struct net_device *netdev, void *p) static void ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_hw *hw = &pf->hw; u32 *regs_buf = (u32 *)p; unsigned int i; @@ -810,8 +809,7 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) static u32 ice_get_msglevel(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); #ifndef CONFIG_DYNAMIC_DEBUG if (pf->hw.debug_mask) @@ -824,8 +822,7 @@ static u32 ice_get_msglevel(struct net_device *netdev) static void ice_set_msglevel(struct net_device *netdev, u32 data) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); #ifndef CONFIG_DYNAMIC_DEBUG if (ICE_DBG_USER & data) @@ -840,16 +837,14 @@ static void ice_set_msglevel(struct net_device *netdev, u32 data) static void ice_get_link_ext_stats(struct net_device *netdev, struct ethtool_link_ext_stats *stats) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); stats->link_down_events = pf->link_down_events; } static int ice_get_eeprom_len(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); return (int)pf->hw.flash.flash_size; } @@ -858,9 +853,7 @@ static int ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_hw *hw = &pf->hw; struct device *dev; int ret; @@ -959,8 +952,7 @@ static u64 ice_link_test(struct net_device *netdev) */ static u64 ice_eeprom_test(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = 
ice_netdev_to_pf(netdev); netdev_info(netdev, "EEPROM test\n"); return !!(ice_nvm_validate_checksum(&pf->hw)); @@ -1239,8 +1231,9 @@ static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size) */ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring) { - struct ice_rx_buf *rx_buf; + struct libeth_fqe *rx_buf; int valid_frames, i; + struct page *page; u8 *received_buf; valid_frames = 0; @@ -1255,8 +1248,10 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring) cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))))) continue; - rx_buf = &rx_ring->rx_buf[i]; - received_buf = page_address(rx_buf->page) + rx_buf->page_offset; + rx_buf = &rx_ring->rx_fqes[i]; + page = __netmem_to_page(rx_buf->netmem); + received_buf = page_address(page) + rx_buf->offset + + page->pp->p.offset; if (ice_lbtest_check_frame(received_buf)) valid_frames++; @@ -1274,9 +1269,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring) */ static u64 ice_loopback_test(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *orig_vsi = np->vsi, *test_vsi; - struct ice_pf *pf = orig_vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_vsi *test_vsi; u8 *tx_frame __free(kfree) = NULL; u8 broadcast[ETH_ALEN], ret = 0; int num_frames, valid_frames; @@ -1365,8 +1359,7 @@ lbtest_vsi_close: */ static u64 ice_intr_test(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); u16 swic_old = pf->sw_int_count; netdev_info(netdev, "interrupt test\n"); @@ -1394,9 +1387,8 @@ static void ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { - struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = ice_netdev_to_pf(netdev); bool if_running = netif_running(netdev); - struct ice_pf *pf = np->vsi->back; struct device *dev; dev = ice_pf_to_dev(pf); @@ -1720,9 +1712,7 @@ static int ice_nway_reset(struct net_device *netdev) */ static u32 ice_get_priv_flags(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); u32 i, ret_flags = 0; for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { @@ -1869,10 +1859,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) ice_nway_reset(netdev); } } - if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) { - /* down and up VSI so that changes of Rx cfg are reflected. */ - ice_down_up(vsi); - } /* don't allow modification of this flag when a single VF is in * promiscuous mode because it's not supported */ @@ -3098,6 +3084,20 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) } /** + * ice_get_rx_ring_count - get RX ring count + * @netdev: network interface device structure + * + * Return: number of RX rings. 
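/* Illustrative userspace sketch (not part of the patch): the Rx ring
 * count that fm10k/i40e/iavf/ice now report through the new
 * .get_rx_ring_count ethtool op should remain visible through the
 * classic ETHTOOL_GRXRINGS request, which the core is expected to
 * service from that callback. "eth0" is an assumption. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_GRXRINGS };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	if (ioctl(fd, SIOCETHTOOL, &ifr))
		return 1;
	printf("RX rings: %llu\n", (unsigned long long)nfc.data);
	return 0;
}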
+ */ +static u32 ice_get_rx_ring_count(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + + return vsi->rss_size; +} + +/** * ice_get_rxnfc - command to get Rx flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -3117,10 +3117,6 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, hw = &vsi->back->hw; switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = vsi->rss_size; - ret = 0; - break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = hw->fdir_active_fltr; /* report total rule count */ @@ -3165,6 +3161,10 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, ring->rx_jumbo_max_pending = 0; ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; + + kernel_ring->tcp_data_split = vsi->hsplit ? + ETHTOOL_TCP_DATA_SPLIT_ENABLED : + ETHTOOL_TCP_DATA_SPLIT_DISABLED; } static int @@ -3181,6 +3181,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, int i, timeout = 50, err = 0; struct ice_hw *hw = &pf->hw; u16 new_rx_cnt, new_tx_cnt; + bool hsplit; if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) || ring->tx_pending < ICE_MIN_NUM_DESC || @@ -3206,9 +3207,12 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n", new_rx_cnt); + hsplit = kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED; + /* if nothing to do return success */ if (new_tx_cnt == vsi->tx_rings[0]->count && - new_rx_cnt == vsi->rx_rings[0]->count) { + new_rx_cnt == vsi->rx_rings[0]->count && + hsplit == vsi->hsplit) { netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); return 0; } @@ -3238,6 +3242,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, vsi->xdp_rings[i]->count = new_tx_cnt; vsi->num_tx_desc = (u16)new_tx_cnt; vsi->num_rx_desc = (u16)new_rx_cnt; + vsi->hsplit = hsplit; + netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n"); goto done; } @@ -3321,7 +3327,8 @@ process_rx: rx_rings[i].count = new_rx_cnt; rx_rings[i].cached_phctime = pf->ptp.cached_phc_time; rx_rings[i].desc = NULL; - rx_rings[i].rx_buf = NULL; + rx_rings[i].xdp_buf = NULL; + /* this is to allow wr32 to have something to write to * during early allocation of Rx buffers */ @@ -3330,10 +3337,6 @@ process_rx: err = ice_setup_rx_ring(&rx_rings[i]); if (err) goto rx_unwind; - - /* allocate Rx buffers */ - err = ice_alloc_rx_bufs(&rx_rings[i], - ICE_RX_DESC_UNUSED(&rx_rings[i])); rx_unwind: if (err) { while (i) { @@ -3347,6 +3350,8 @@ rx_unwind: } process_link: + vsi->hsplit = hsplit; + /* Bring interface down, copy in the new ring info, then restore the * interface. 
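/* Usage note (assuming an ethtool(8) build recent enough for the
 * netlink ring API): the tcp_data_split knob exposed above is
 * typically toggled with "ethtool -G <ifname> tcp-data-split on|off"
 * and read back with "ethtool -g <ifname>". */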
if VSI is up, bring it down and then back up */ @@ -4417,9 +4422,7 @@ static int ice_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_hw *hw = &pf->hw; u8 sff8472_comp = 0; u8 sff8472_swap = 0; @@ -4491,12 +4494,10 @@ static int ice_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { - struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = ice_netdev_to_pf(netdev); #define SFF_READ_BLOCK_SIZE 8 u8 value[SFF_READ_BLOCK_SIZE] = { 0 }; u8 addr = ICE_I2C_EEPROM_DEV_ADDR; - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; bool is_sfp = false; unsigned int i, j; @@ -4661,6 +4662,98 @@ static void ice_get_fec_stats(struct net_device *netdev, pi->lport, err); } +static void ice_get_eth_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_hw_port_stats *ps = &pf->stats; + + mac_stats->FramesTransmittedOK = ps->eth.tx_unicast + + ps->eth.tx_multicast + + ps->eth.tx_broadcast; + mac_stats->FramesReceivedOK = ps->eth.rx_unicast + + ps->eth.rx_multicast + + ps->eth.rx_broadcast; + mac_stats->FrameCheckSequenceErrors = ps->crc_errors; + mac_stats->OctetsTransmittedOK = ps->eth.tx_bytes; + mac_stats->OctetsReceivedOK = ps->eth.rx_bytes; + mac_stats->MulticastFramesXmittedOK = ps->eth.tx_multicast; + mac_stats->BroadcastFramesXmittedOK = ps->eth.tx_broadcast; + mac_stats->MulticastFramesReceivedOK = ps->eth.rx_multicast; + mac_stats->BroadcastFramesReceivedOK = ps->eth.rx_broadcast; + mac_stats->InRangeLengthErrors = ps->rx_len_errors; + mac_stats->FrameTooLongErrors = ps->rx_oversize; +} + +static void ice_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *pause_stats) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_hw_port_stats *ps = &pf->stats; + + pause_stats->tx_pause_frames = ps->link_xon_tx + ps->link_xoff_tx; + pause_stats->rx_pause_frames = ps->link_xon_rx + ps->link_xoff_rx; +} + +static const struct ethtool_rmon_hist_range ice_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1522 }, + { 1523, 9522 }, + {} +}; + +static void ice_get_rmon_stats(struct net_device *netdev, + struct ethtool_rmon_stats *rmon, + const struct ethtool_rmon_hist_range **ranges) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_hw_port_stats *ps = &pf->stats; + + rmon->undersize_pkts = ps->rx_undersize; + rmon->oversize_pkts = ps->rx_oversize; + rmon->fragments = ps->rx_fragments; + rmon->jabbers = ps->rx_jabber; + + rmon->hist[0] = ps->rx_size_64; + rmon->hist[1] = ps->rx_size_127; + rmon->hist[2] = ps->rx_size_255; + rmon->hist[3] = ps->rx_size_511; + rmon->hist[4] = ps->rx_size_1023; + rmon->hist[5] = ps->rx_size_1522; + rmon->hist[6] = ps->rx_size_big; + + rmon->hist_tx[0] = ps->tx_size_64; + rmon->hist_tx[1] = ps->tx_size_127; + rmon->hist_tx[2] = ps->tx_size_255; + rmon->hist_tx[3] = ps->tx_size_511; + rmon->hist_tx[4] = ps->tx_size_1023; + rmon->hist_tx[5] = ps->tx_size_1522; + rmon->hist_tx[6] = ps->tx_size_big; + + *ranges = ice_rmon_ranges; +} + +/* ice_get_ts_stats - provide timestamping stats + * @netdev: the netdevice pointer from ethtool + * @ts_stats: the ethtool data structure to fill in + */ +static void 
ice_get_ts_stats(struct net_device *netdev, + struct ethtool_ts_stats *ts_stats) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_ptp *ptp = &pf->ptp; + + ts_stats->pkts = ptp->tx_hwtstamp_good; + ts_stats->err = ptp->tx_hwtstamp_skipped + + ptp->tx_hwtstamp_flushed + + ptp->tx_hwtstamp_discarded; + ts_stats->lost = ptp->tx_hwtstamp_timeouts; +} + #define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \ ETH_RESET_FILTER | ETH_RESET_OFFLOAD) @@ -4682,8 +4775,7 @@ static void ice_get_fec_stats(struct net_device *netdev, */ static int ice_ethtool_reset(struct net_device *dev, u32 *flags) { - struct ice_netdev_priv *np = netdev_priv(dev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(dev); enum ice_reset_req reset; switch (*flags) { @@ -4741,9 +4833,14 @@ static const struct ethtool_ops ice_ethtool_ops = { ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_RX_USECS_HIGH, .supported_input_xfrm = RXH_XFRM_SYM_XOR, + .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, .get_fec_stats = ice_get_fec_stats, + .get_eth_mac_stats = ice_get_eth_mac_stats, + .get_pause_stats = ice_get_pause_stats, + .get_rmon_stats = ice_get_rmon_stats, + .get_ts_stats = ice_get_ts_stats, .get_drvinfo = ice_get_drvinfo, .get_regs_len = ice_get_regs_len, .get_regs = ice_get_regs, @@ -4766,6 +4863,7 @@ static const struct ethtool_ops ice_ethtool_ops = { .get_sset_count = ice_get_sset_count, .get_rxnfc = ice_get_rxnfc, .set_rxnfc = ice_set_rxnfc, + .get_rx_ring_count = ice_get_rx_ring_count, .get_ringparam = ice_get_ringparam, .set_ringparam = ice_set_ringparam, .nway_reset = ice_nway_reset, diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index 26b357c0ae15..b29fbdec9442 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -1121,7 +1121,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, * ice_fdir_has_frag - does flow type have 2 ptypes * @flow: flow ptype * - * returns true is there is a fragment packet for this ptype + * Return: true if there is a fragment packet for this ptype */ bool ice_fdir_has_frag(enum ice_fltr_ptype flow) { diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 013c93b6605e..c0dbec369366 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -574,9 +574,7 @@ ice_destroy_tunnel_end: int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); enum ice_tunnel_type tnl_type; int status; u16 index; @@ -598,9 +596,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); enum ice_tunnel_type tnl_type; int status; @@ -3582,6 +3578,19 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, } /** + * ice_set_tcam_flags - set TCAM flag don't care mask + * @mask: mask for 
flags + * @dc_mask: pointer to the don't care mask + */ +static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ]) +{ + u16 inverted_mask = ~mask; + + /* flags are lowest u16 */ + put_unaligned_le16(inverted_mask, dc_mask); +} + +/** * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list * @hw: pointer to the HW struct * @idx: the index of the TCAM entry to remove @@ -3651,6 +3660,9 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, if (!p) return -ENOMEM; + /* set don't care masks for TCAM flags */ + ice_set_tcam_flags(tcam->attr.mask, dc_msk); + status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, tcam->ptg, vsig, 0, tcam->attr.flags, vl_msk, dc_msk, nm_msk); @@ -3677,6 +3689,34 @@ err_ice_prof_tcam_ena_dis: } /** + * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use + * @ptg_attr: pointer to the PTG and attribute pair to check + * @ptgs_used: bitmap that denotes which PTGs are in use + * @attr_used: array of PTG and attributes pairs already used + * @attr_cnt: count of entries in the attr_used array + * + * Return: true if the PTG and attribute pair is in use, false otherwise. + */ +static bool +ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, unsigned long *ptgs_used, + struct ice_tcam_inf *attr_used[], u16 attr_cnt) +{ + u16 i; + + if (!test_bit(ptg_attr->ptg, ptgs_used)) + return false; + + /* the PTG is used, so now look for correct attributes */ + for (i = 0; i < attr_cnt; i++) + if (attr_used[i]->ptg == ptg_attr->ptg && + attr_used[i]->attr.flags == ptg_attr->attr.flags && + attr_used[i]->attr.mask == ptg_attr->attr.mask) + return true; + + return false; +} + +/** * ice_adj_prof_priorities - adjust profile based on priorities * @hw: pointer to the HW struct * @blk: hardware block @@ -3688,10 +3728,16 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct list_head *chg) { DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); + struct ice_tcam_inf **attr_used; struct ice_vsig_prof *t; - int status; + u16 attr_used_cnt = 0; + int status = 0; u16 idx; + attr_used = kcalloc(ICE_MAX_PTG_ATTRS, sizeof(*attr_used), GFP_KERNEL); + if (!attr_used) + return -ENOMEM; + bitmap_zero(ptgs_used, ICE_XLT1_CNT); idx = vsig & ICE_VSIG_IDX_M; @@ -3709,11 +3755,15 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 i; for (i = 0; i < t->tcam_count; i++) { + bool used; + /* Scan the priorities from newest to oldest. * Make sure that the newest profiles take priority. 
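/* Worked example (values made up) of the don't-care convention used by
 * ice_set_tcam_flags() above: bits set in 'mask' must match exactly,
 * so they are cleared in the don't-care key; all other flag bits are
 * ignored by the TCAM lookup. The two key bytes are stored
 * little-endian, matching put_unaligned_le16(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t mask = 0x00F0;			/* only these flag bits must match */
	uint16_t dc = (uint16_t)~mask;		/* 0xFF0F: don't care everywhere else */
	uint8_t dc_key[2] = { dc & 0xFF, dc >> 8 };

	printf("dc_key = %02x %02x\n", dc_key[0], dc_key[1]);
	return 0;
}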
*/ - if (test_bit(t->tcam[i].ptg, ptgs_used) && - t->tcam[i].in_use) { + used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used, + attr_used, attr_used_cnt); + + if (used && t->tcam[i].in_use) { /* need to mark this PTG as never match, as it * was already in use and therefore duplicate * (and lower priority) @@ -3723,9 +3773,8 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, &t->tcam[i], chg); if (status) - return status; - } else if (!test_bit(t->tcam[i].ptg, ptgs_used) && - !t->tcam[i].in_use) { + goto free_attr_used; + } else if (!used && !t->tcam[i].in_use) { /* need to enable this PTG, as it is not in use + * and not enabled (highest priority) */ @@ -3734,15 +3783,21 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, &t->tcam[i], chg); if (status) - return status; + goto free_attr_used; } /* keep track of used ptgs */ - __set_bit(t->tcam[i].ptg, ptgs_used); + set_bit(t->tcam[i].ptg, ptgs_used); + if (attr_used_cnt < ICE_MAX_PTG_ATTRS) + attr_used[attr_used_cnt++] = &t->tcam[i]; + else + ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n"); } } - return 0; +free_attr_used: + kfree(attr_used); + return status; } /** @@ -3825,11 +3880,15 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, p->vsig = vsig; p->tcam_idx = t->tcam[i].tcam_idx; + /* set don't care masks for TCAM flags */ + ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk); + /* write the TCAM entry */ status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx, t->tcam[i].prof_id, - t->tcam[i].ptg, vsig, 0, 0, - vl_msk, dc_msk, nm_msk); + t->tcam[i].ptg, vsig, 0, + t->tcam[i].attr.flags, vl_msk, + dc_msk, nm_msk); if (status) { devm_kfree(ice_hw_to_dev(hw), p); goto err_ice_add_prof_id_vsig; @@ -4143,9 +4202,6 @@ ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk, u16 vsi_num; int status; - if (blk != ICE_BLK_FD) - return -EINVAL; - vsi_num = ice_get_hw_vsi_num(hw, dest_vsi); status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl); if (status) { @@ -4154,6 +4210,9 @@ ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk, return status; } + if (blk != ICE_BLK_FD) + return 0; + vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi); status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl); if (status) { diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index 817beca591e0..80c9e7c749c2 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -187,6 +187,7 @@ struct ice_prof_map { }; #define ICE_INVALID_TCAM 0xFFFF +#define ICE_MAX_PTG_ATTRS 1024 struct ice_tcam_inf { u16 tcam_idx; diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index 6d5c939dc8a5..c9b6d0a84bd1 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -5,6 +5,38 @@ #include "ice_flow.h" #include <net/gre.h> +/* Size of known protocol header fields */ +#define ICE_FLOW_FLD_SZ_ETH_TYPE 2 +#define ICE_FLOW_FLD_SZ_VLAN 2 +#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4 +#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16 +#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4 +#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6 +#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8 +#define ICE_FLOW_FLD_SZ_IPV4_ID 2 +#define ICE_FLOW_FLD_SZ_IPV6_ID 4 +#define ICE_FLOW_FLD_SZ_IP_CHKSUM 2 +#define ICE_FLOW_FLD_SZ_TCP_CHKSUM 2 +#define ICE_FLOW_FLD_SZ_UDP_CHKSUM 2 +#define ICE_FLOW_FLD_SZ_SCTP_CHKSUM 4 +#define 
ICE_FLOW_FLD_SZ_IP_DSCP 1 +#define ICE_FLOW_FLD_SZ_IP_TTL 1 +#define ICE_FLOW_FLD_SZ_IP_PROT 1 +#define ICE_FLOW_FLD_SZ_PORT 2 +#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1 +#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1 +#define ICE_FLOW_FLD_SZ_ICMP_CODE 1 +#define ICE_FLOW_FLD_SZ_ARP_OPER 2 +#define ICE_FLOW_FLD_SZ_GRE_KEYID 4 +#define ICE_FLOW_FLD_SZ_GTP_TEID 4 +#define ICE_FLOW_FLD_SZ_GTP_QFI 2 +#define ICE_FLOW_FLD_SZ_PFCP_SEID 8 +#define ICE_FLOW_FLD_SZ_ESP_SPI 4 +#define ICE_FLOW_FLD_SZ_AH_SPI 4 +#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4 +#define ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID 2 +#define ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID 2 + /* Describe properties of a protocol header field */ struct ice_flow_field_info { enum ice_flow_seg_hdr hdr; @@ -20,6 +52,7 @@ struct ice_flow_field_info { .mask = 0, \ } +/* QFI: 6-bit field in GTP-U PDU Session Container (3GPP TS 38.415) */ #define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \ .hdr = _hdr, \ .off = (_offset_bytes) * BITS_PER_BYTE, \ @@ -61,7 +94,33 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { /* ICE_FLOW_FIELD_IDX_IPV6_SA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)), /* ICE_FLOW_FIELD_IDX_IPV6_DA */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)), + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM), + /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4, + ICE_FLOW_FLD_SZ_IPV4_ID), + /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4, + ICE_FLOW_FLD_SZ_IPV6_ID), + /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, + ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, + ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, + ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, + ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, + ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR), + /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, + ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR), /* Transport */ /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)), @@ -76,7 +135,14 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)), /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1), + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS), + /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM), + /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM), + /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8, + ICE_FLOW_FLD_SZ_SCTP_CHKSUM), /* ARP */ /* ICE_FLOW_FIELD_IDX_ARP_SIP */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)), @@ -108,9 +174,17 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16), 0x3f00), /* 
ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)), + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, + ICE_FLOW_FLD_SZ_GTP_TEID), + /* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22, + ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00), /* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)), + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, + ICE_FLOW_FLD_SZ_GTP_TEID), + /* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */ + ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22, + ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00), /* PPPoE */ /* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)), @@ -128,7 +202,16 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)), /* NAT_T_ESP */ /* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */ - ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)), + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, + ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI), + /* L2TPV2 */ + /* ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 12, + ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID), + /* L2TPV2_LEN */ + /* ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID */ + ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 14, + ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID), }; /* Bitmaps indicating relevant packet types for a particular protocol header @@ -137,9 +220,9 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { */ static const u32 ice_ptypes_mac_ofos[] = { 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB, - 0x0000077E, 0x00000000, 0x00000000, 0x00000000, - 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000077E, 0x000003FF, 0x00000000, 0x00000000, + 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000707, + 0xFFFFF000, 0x000003FF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -162,10 +245,10 @@ static const u32 ice_ptypes_macvlan_il[] = { * include IPv4 other PTYPEs */ static const u32 ice_ptypes_ipv4_ofos[] = { - 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, + 0x1D800000, 0xBFBF7800, 0x000001DF, 0x00000000, 0x00000000, 0x00000155, 0x00000000, 0x00000000, - 0x00000000, 0x000FC000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000FC000, 0x000002A0, 0x00000000, + 0x00015000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -176,10 +259,10 @@ static const u32 ice_ptypes_ipv4_ofos[] = { * IPv4 other PTYPEs */ static const u32 ice_ptypes_ipv4_ofos_all[] = { - 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, + 0x1D800000, 0x27BF7800, 0x00000000, 0x00000000, 0x00000000, 0x00000155, 0x00000000, 0x00000000, - 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101, + 0x3FFD5000, 0x00000000, 0x02FBEFBC, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -191,7 +274,7 @@ static const u32 ice_ptypes_ipv4_il[] = { 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B, 0x0000000E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x001FF800, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xC0FC0000, 0x0000000F, 0xBC0BC0BC, 0x00000BC0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -202,10 +285,10 @@ static const u32 ice_ptypes_ipv4_il[] = { * include IPv6 other PTYPEs */ static const u32 ice_ptypes_ipv6_ofos[] = { - 0x00000000, 0x00000000, 0x77000000, 0x10002000, + 0x00000000, 0x00000000, 0x76000000, 0x10002000, 0x00000000, 0x000002AA, 0x00000000, 0x00000000, - 0x00000000, 0x03F00000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x03F00000, 0x00000540, 0x00000000, + 0x0002A000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -216,10 +299,10 @@ static const u32 ice_ptypes_ipv6_ofos[] = { * IPv6 other PTYPEs */ static const u32 ice_ptypes_ipv6_ofos_all[] = { - 0x00000000, 0x00000000, 0x77000000, 0x10002000, - 0x00000000, 0x000002AA, 0x00000000, 0x00000000, - 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x76000000, 0xFEFDE000, + 0x0000077E, 0x000002AA, 0x00000000, 0x00000000, + 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206, + 0xC002A000, 0x000003FF, 0xBC000000, 0x0002FBEF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -231,7 +314,7 @@ static const u32 ice_ptypes_ipv6_il[] = { 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000, 0x00000770, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x7FE00000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x3F000000, 0x000003F0, 0x02F02F00, 0x0002F02F, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -304,8 +387,8 @@ static const u32 ice_ptypes_ipv6_il_no_l4[] = { static const u32 ice_ptypes_udp_il[] = { 0x81000000, 0x20204040, 0x04000010, 0x80810102, 0x00000040, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00410000, 0x90842000, 0x00000007, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00410000, 0x908427E0, 0x00000007, + 0x0413F000, 0x00000041, 0x10410410, 0x00004104, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -317,7 +400,7 @@ static const u32 ice_ptypes_tcp_il[] = { 0x04000000, 0x80810102, 0x10000040, 0x02040408, 0x00000102, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00820000, 0x21084000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08200000, 0x00000082, 0x20820820, 0x00008208, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -329,7 +412,7 @@ static const u32 ice_ptypes_sctp_il[] = { 0x08000000, 0x01020204, 0x20000081, 0x04080810, 0x00000204, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x01040000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x10400000, 0x00000104, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -353,7 +436,7 @@ static const u32 ice_ptypes_icmp_il[] = { 0x00000000, 
0x02040408, 0x40000102, 0x08101020, 0x00000408, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x42108000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x20800000, 0x00000208, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -365,7 +448,7 @@ static const u32 ice_ptypes_gre_of[] = { 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000, 0x0000017E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -374,7 +457,7 @@ static const u32 ice_ptypes_gre_of[] = { /* Packet types for packets with an Innermost/Last MAC header */ static const u32 ice_ptypes_mac_il[] = { - 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -388,7 +471,7 @@ static const u32 ice_ptypes_mac_il[] = { static const u32 ice_ptypes_gtpc[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000180, 0x00000000, + 0x00000000, 0x00000000, 0x000001E0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, @@ -2325,6 +2408,130 @@ static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof) } /** + * ice_rss_cfg_raw_symm - Configure symmetric RSS for a raw parser profile + * @hw: device HW + * @prof: parser profile describing extracted FV (field vector) entries + * @prof_id: RSS profile identifier used to program symmetry registers + * + * The routine scans the parser profile's FV entries and looks for + * direction-sensitive pairs (L3 src/dst, L4 src/dst). When a pair is found, + * it programs XOR-based symmetry so that flows hash identically regardless + * of packet direction. This preserves CPU affinity for the same 5-tuple. + * + * Notes: + * - The size of each logical field (IPv4/IPv6 address, L4 port) is expressed + * in units of ICE_FLOW_FV_EXTRACT_SZ so we can step across fv[] correctly. + * - We guard against out-of-bounds access before looking at fv[i + len]. 
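+ * - Worked example, as an illustrative sketch only (this assumes + *   ICE_FLOW_FV_EXTRACT_SZ == 2, i.e. 2-byte extract words): with + *   fv[i] = { .proto_id = ICE_PROT_IPV4_OF_OR_S, .offset = 12 } and + *   fv[i + 2] = { .proto_id = ICE_PROT_IPV4_OF_OR_S, .offset = 16 }, + *   len = ICE_FLOW_FLD_SZ_IPV4_ADDR / ICE_FLOW_FV_EXTRACT_SZ = 2, so the + *   pair matches and XOR symmetry is programmed over both 2-word fields.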
+ */ +static void ice_rss_cfg_raw_symm(struct ice_hw *hw, + const struct ice_parser_profile *prof, + u64 prof_id) +{ + for (size_t i = 0; i < prof->fv_num; i++) { + u8 proto_id = prof->fv[i].proto_id; + u16 src_off = 0, dst_off = 0; + size_t src_idx, dst_idx; + bool is_matched = false; + unsigned int len = 0; + + switch (proto_id) { + /* IPv4 address pairs (outer/inner variants) */ + case ICE_PROT_IPV4_OF_OR_S: + case ICE_PROT_IPV4_IL: + case ICE_PROT_IPV4_IL_IL: + len = ICE_FLOW_FLD_SZ_IPV4_ADDR / + ICE_FLOW_FV_EXTRACT_SZ; + src_off = ICE_FLOW_FIELD_IPV4_SRC_OFFSET; + dst_off = ICE_FLOW_FIELD_IPV4_DST_OFFSET; + break; + + /* IPv6 address pairs (outer/inner variants) */ + case ICE_PROT_IPV6_OF_OR_S: + case ICE_PROT_IPV6_IL: + case ICE_PROT_IPV6_IL_IL: + len = ICE_FLOW_FLD_SZ_IPV6_ADDR / + ICE_FLOW_FV_EXTRACT_SZ; + src_off = ICE_FLOW_FIELD_IPV6_SRC_OFFSET; + dst_off = ICE_FLOW_FIELD_IPV6_DST_OFFSET; + break; + + /* L4 port pairs (TCP/UDP/SCTP) */ + case ICE_PROT_TCP_IL: + case ICE_PROT_UDP_IL_OR_S: + case ICE_PROT_SCTP_IL: + len = ICE_FLOW_FLD_SZ_PORT / ICE_FLOW_FV_EXTRACT_SZ; + src_off = ICE_FLOW_FIELD_SRC_PORT_OFFSET; + dst_off = ICE_FLOW_FIELD_DST_PORT_OFFSET; + break; + + default: + continue; + } + + /* Bounds check before accessing fv[i + len]. */ + if (i + len >= prof->fv_num) + continue; + + /* Verify src/dst pairing for this protocol id. */ + is_matched = prof->fv[i].offset == src_off && + prof->fv[i + len].proto_id == proto_id && + prof->fv[i + len].offset == dst_off; + if (!is_matched) + continue; + + /* Program XOR symmetry for this field pair. */ + src_idx = i; + dst_idx = i + len; + + ice_rss_config_xor(hw, prof_id, src_idx, dst_idx, len); + + /* Skip over the pair we just handled; the loop's ++i supplies + * the final step past the second field. + */ + i += 2 * len - 1; + } +} + +/* Max register index per packet profile */ +#define ICE_SYMM_REG_INDEX_MAX 6 + +/** + * ice_rss_update_raw_symm - update symmetric hash configuration for raw pattern + * @hw: pointer to the hardware structure + * @cfg: configure parameters for raw pattern + * @id: profile tracking ID + * + * Update symmetric hash configuration for raw pattern if required. + * Otherwise only clear to default. 
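+ * + * The profile ID is resolved under the RSS block's prof_map_lock, and all + * GLQF_HSYMM registers of that profile are zeroed first, so no stale + * symmetric configuration survives a reconfiguration.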
+ */ +void +ice_rss_update_raw_symm(struct ice_hw *hw, + struct ice_rss_raw_cfg *cfg, u64 id) +{ + struct ice_prof_map *map; + u8 prof_id, m; + + mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); + map = ice_search_prof_id(hw, ICE_BLK_RSS, id); + if (map) + prof_id = map->prof_id; + mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock); + if (!map) + return; + /* clear to default */ + for (m = 0; m < ICE_SYMM_REG_INDEX_MAX; m++) + wr32(hw, GLQF_HSYMM(prof_id, m), 0); + + if (cfg->symm) + ice_rss_cfg_raw_symm(hw, &cfg->prof, prof_id); +} + +/** * ice_add_rss_cfg_sync - add an RSS configuration * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 52f906d89eca..6c6cdc8addb1 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -22,6 +22,15 @@ #define ICE_FLOW_HASH_IPV6 \ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)) +#define ICE_FLOW_HASH_IPV6_PRE32 \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA)) +#define ICE_FLOW_HASH_IPV6_PRE48 \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA)) +#define ICE_FLOW_HASH_IPV6_PRE64 \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) | \ + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA)) #define ICE_FLOW_HASH_TCP_PORT \ (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)) @@ -40,6 +49,33 @@ #define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT) #define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT) +#define ICE_HASH_TCP_IPV6_PRE32 \ + (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_TCP_PORT) +#define ICE_HASH_UDP_IPV6_PRE32 \ + (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_HASH_SCTP_IPV6_PRE32 \ + (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_SCTP_PORT) +#define ICE_HASH_TCP_IPV6_PRE48 \ + (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_TCP_PORT) +#define ICE_HASH_UDP_IPV6_PRE48 \ + (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_HASH_SCTP_IPV6_PRE48 \ + (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_SCTP_PORT) +#define ICE_HASH_TCP_IPV6_PRE64 \ + (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_TCP_PORT) +#define ICE_HASH_UDP_IPV6_PRE64 \ + (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_UDP_PORT) +#define ICE_HASH_SCTP_IPV6_PRE64 \ + (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_SCTP_PORT) + +#define ICE_FLOW_HASH_GTP_TEID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) + +#define ICE_FLOW_HASH_GTP_IPV4_TEID \ + (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID) +#define ICE_FLOW_HASH_GTP_IPV6_TEID \ + (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID) + #define ICE_FLOW_HASH_GTP_C_TEID \ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) @@ -128,6 +164,23 @@ #define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI) +#define ICE_FLOW_HASH_L2TPV2_SESS_ID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID)) +#define ICE_FLOW_HASH_L2TPV2_SESS_ID_ETH \ + (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_SESS_ID) + +#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID \ + (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID)) +#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID_ETH \ + (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID) + +#define ICE_FLOW_FIELD_IPV4_SRC_OFFSET 12 +#define ICE_FLOW_FIELD_IPV4_DST_OFFSET 16 +#define ICE_FLOW_FIELD_IPV6_SRC_OFFSET 8 +#define ICE_FLOW_FIELD_IPV6_DST_OFFSET 24 +#define 
ICE_FLOW_FIELD_SRC_PORT_OFFSET 0 +#define ICE_FLOW_FIELD_DST_PORT_OFFSET 2 + /* Protocol header fields within a packet segment. A segment consists of one or * more protocol headers that make up a logical group of protocol headers. Each * logical group of protocol headers encapsulates or is encapsulated using/by @@ -160,10 +213,13 @@ enum ice_flow_seg_hdr { ICE_FLOW_SEG_HDR_AH = 0x00200000, ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000, ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000, + ICE_FLOW_SEG_HDR_GTPU_NON_IP = 0x01000000, + ICE_FLOW_SEG_HDR_L2TPV2 = 0x10000000, /* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and - * ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs + * ICE_FLOW_SEG_HDR_IPV6. */ - ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000, + ICE_FLOW_SEG_HDR_IPV_FRAG = 0x40000000, + ICE_FLOW_SEG_HDR_IPV_OTHER = 0x80000000, }; /* These segments all have the same PTYPES, but are otherwise distinguished by @@ -200,6 +256,15 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_IPV4_DA, ICE_FLOW_FIELD_IDX_IPV6_SA, ICE_FLOW_FIELD_IDX_IPV6_DA, + ICE_FLOW_FIELD_IDX_IPV4_CHKSUM, + ICE_FLOW_FIELD_IDX_IPV4_ID, + ICE_FLOW_FIELD_IDX_IPV6_ID, + ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA, + ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA, + ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA, + ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA, + ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA, + ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA, /* L4 */ ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, @@ -208,6 +273,9 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_FLAGS, + ICE_FLOW_FIELD_IDX_TCP_CHKSUM, + ICE_FLOW_FIELD_IDX_UDP_CHKSUM, + ICE_FLOW_FIELD_IDX_SCTP_CHKSUM, /* ARP */ ICE_FLOW_FIELD_IDX_ARP_SIP, ICE_FLOW_FIELD_IDX_ARP_DIP, @@ -228,13 +296,13 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, /* GTPU_UP */ ICE_FLOW_FIELD_IDX_GTPU_UP_TEID, + ICE_FLOW_FIELD_IDX_GTPU_UP_QFI, /* GTPU_DWN */ ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID, - /* PPPoE */ + ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI, ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID, /* PFCP */ ICE_FLOW_FIELD_IDX_PFCP_SEID, - /* L2TPv3 */ ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, /* ESP */ ICE_FLOW_FIELD_IDX_ESP_SPI, @@ -242,10 +310,16 @@ enum ice_flow_field { ICE_FLOW_FIELD_IDX_AH_SPI, /* NAT_T ESP */ ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI, + /* L2TPV2 SESSION ID*/ + ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID, + /* L2TPV2_LEN SESSION ID */ + ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID, /* The total number of enums must not exceed 64 */ ICE_FLOW_FIELD_IDX_MAX }; +static_assert(ICE_FLOW_FIELD_IDX_MAX <= 64, "The total number of enums must not exceed 64"); + #define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) #define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) #define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) @@ -296,6 +370,10 @@ enum ice_rss_cfg_hdr_type { /* take inner headers as inputset for packet with outer ipv6. */ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6, /* take outer headers first then inner headers as inputset */ + /* take inner as inputset for GTPoGRE with outer IPv4 + GRE. */ + ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE, + /* take inner as inputset for GTPoGRE with outer IPv6 + GRE. 
*/ + ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE, ICE_RSS_ANY_HEADERS }; @@ -406,6 +484,12 @@ struct ice_flow_prof { bool symm; /* Symmetric Hash for RSS */ }; +struct ice_rss_raw_cfg { + struct ice_parser_profile prof; + bool raw_ena; + bool symm; +}; + struct ice_rss_cfg { struct list_head l_entry; /* bitmap of VSIs added to the RSS entry */ @@ -444,4 +528,6 @@ int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg); u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm); +void ice_rss_update_raw_symm(struct ice_hw *hw, + struct ice_rss_raw_cfg *cfg, u64 id); #endif /* _ICE_FLOW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index d86db081579f..973a13d3d92a 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -534,7 +534,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, } if (completion_retval) { - dev_err(dev, "Firmware failed to erase %s (module 0x02%x), aq_err %s\n", + dev_err(dev, "Firmware failed to erase %s (module 0x%02x), aq_err %s\n", component, module, libie_aq_str((enum libie_aq_err)completion_retval)); NL_SET_ERR_MSG_MOD(extack, "Firmware failed to erase flash"); diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index aebf8e08a297..d2576d606e10 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -2177,8 +2177,7 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr) */ static void ice_lag_disable_sriov_bond(struct ice_lag *lag) { - struct ice_netdev_priv *np = netdev_priv(lag->netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(lag->netdev); ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); ice_clear_feature_support(pf, ICE_F_SRIOV_AA_LAG); diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 10c312d49e05..185672c7e17d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -342,6 +342,9 @@ enum ice_flg64_bits { /* for ice_32byte_rx_flex_desc.pkt_length member */ #define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */ +/* ice_32byte_rx_flex_desc::hdr_len_sph_flex_flags1 */ +#define ICE_RX_FLEX_DESC_HDR_LEN_M GENMASK(10, 0) + enum ice_rx_flex_desc_status_error_0_bits { /* Note: These are predefined bit offsets */ ICE_RX_FLEX_DESC_STATUS0_DD_S = 0, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 4479c824561e..15621707fbf8 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1427,7 +1427,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->reg_idx = vsi->rxq_map[i]; ring->vsi = vsi; ring->netdev = vsi->netdev; - ring->dev = dev; ring->count = vsi->num_rx_desc; ring->cached_phctime = pf->ptp.cached_phc_time; @@ -2769,7 +2768,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) * @vsi: VSI pointer * * Associate queue[s] with napi for all vectors. - * The caller must hold rtnl_lock. 
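+ * The rtnl_lock requirement still holds; it is now asserted at runtime + * via ASSERT_RTNL() instead of being documented only.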
*/ void ice_vsi_set_napi_queues(struct ice_vsi *vsi) { @@ -2779,6 +2777,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi) if (!netdev) return; + ASSERT_RTNL(); ice_for_each_rxq(vsi, q_idx) netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, &vsi->rx_rings[q_idx]->q_vector->napi); @@ -2799,7 +2798,6 @@ * @vsi: VSI pointer * * Clear the association between all VSI queue[s] and napi. - * The caller must hold rtnl_lock. */ void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) { @@ -2809,6 +2807,7 @@ void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) if (!netdev) return; + ASSERT_RTNL(); /* Clear the NAPI's interrupt number */ ice_for_each_q_vector(vsi, v_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 86f5859e88ef..2533876f1a2f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -37,6 +37,8 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_IMPORT_NS("LIBETH"); +MODULE_IMPORT_NS("LIBETH_XDP"); MODULE_IMPORT_NS("LIBIE"); MODULE_IMPORT_NS("LIBIE_ADMINQ"); MODULE_IMPORT_NS("LIBIE_FWLOG"); @@ -2957,10 +2959,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) */ static int ice_max_xdp_frame_size(struct ice_vsi *vsi) { - if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) - return ICE_RXBUF_1664; - else - return ICE_RXBUF_3072; + return ICE_RXBUF_3072; } /** @@ -3018,19 +3017,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, } } xdp_features_set_redirect_target(vsi->netdev, true); - /* reallocate Rx queues that are used for zero-copy */ - xdp_ring_err = ice_realloc_zc_buf(vsi, true); - if (xdp_ring_err) - NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed"); } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { xdp_features_clear_redirect_target(vsi->netdev); xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); - /* reallocate Rx queues that were used for zero-copy */ - xdp_ring_err = ice_realloc_zc_buf(vsi, false); - if (xdp_ring_err) - NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed"); } resume_if: @@ -3949,9 +3940,10 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf) * ice_deinit_pf - Unrolls initializations done by ice_init_pf * @pf: board private structure to initialize */ -static void ice_deinit_pf(struct ice_pf *pf) +void ice_deinit_pf(struct ice_pf *pf) { - ice_service_task_stop(pf); + /* note that we also unroll on ice_init_pf() failure here */ + mutex_destroy(&pf->lag_mutex); mutex_destroy(&pf->adev_mutex); mutex_destroy(&pf->sw_mutex); @@ -3977,6 +3969,9 @@ static void ice_deinit_pf(struct ice_pf *pf) if (pf->ptp.clock) ptp_clock_unregister(pf->ptp.clock); + if (!xa_empty(&pf->irq_tracker.entries)) + ice_free_irq_msix_misc(pf); + xa_destroy(&pf->dyn_ports); xa_destroy(&pf->sf_nums); } @@ -4030,13 +4025,25 @@ static void ice_set_pf_caps(struct ice_pf *pf) pf->max_pf_rxqs = func_caps->common_cap.num_rxq; } +void ice_start_service_task(struct ice_pf *pf) +{ + timer_setup(&pf->serv_tmr, ice_service_timer, 0); + pf->serv_tmr_period = HZ; + INIT_WORK(&pf->serv_task, ice_service_task); + clear_bit(ICE_SERVICE_SCHED, pf->state); +} + /** * ice_init_pf - Initialize general software structures (struct ice_pf) * @pf: 
board private structure to initialize + * Return: 0 on success, negative errno otherwise. */ -static int ice_init_pf(struct ice_pf *pf) +int ice_init_pf(struct ice_pf *pf) { - ice_set_pf_caps(pf); + struct udp_tunnel_nic_info *udp_tunnel_nic = &pf->hw.udp_tunnel_nic; + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + int err = -ENOMEM; mutex_init(&pf->sw_mutex); mutex_init(&pf->tc_mutex); @@ -4049,32 +4056,7 @@ static int ice_init_pf(struct ice_pf *pf) init_waitqueue_head(&pf->reset_wait_queue); - /* setup service timer and periodic service task */ - timer_setup(&pf->serv_tmr, ice_service_timer, 0); - pf->serv_tmr_period = HZ; - INIT_WORK(&pf->serv_task, ice_service_task); - clear_bit(ICE_SERVICE_SCHED, pf->state); - mutex_init(&pf->avail_q_mutex); - pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); - if (!pf->avail_txqs) - return -ENOMEM; - - pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); - if (!pf->avail_rxqs) { - bitmap_free(pf->avail_txqs); - pf->avail_txqs = NULL; - return -ENOMEM; - } - - pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); - if (!pf->txtime_txqs) { - bitmap_free(pf->avail_txqs); - pf->avail_txqs = NULL; - bitmap_free(pf->avail_rxqs); - pf->avail_rxqs = NULL; - return -ENOMEM; - } mutex_init(&pf->vfs.table_lock); hash_init(pf->vfs.table); @@ -4087,7 +4069,36 @@ static int ice_init_pf(struct ice_pf *pf) xa_init(&pf->dyn_ports); xa_init(&pf->sf_nums); + pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); + pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); + pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); + if (!pf->avail_txqs || !pf->avail_rxqs || !pf->txtime_txqs) + goto undo_init; + + udp_tunnel_nic->set_port = ice_udp_tunnel_set_port; + udp_tunnel_nic->unset_port = ice_udp_tunnel_unset_port; + udp_tunnel_nic->shared = &hw->udp_tunnel_shared; + udp_tunnel_nic->tables[0].n_entries = hw->tnl.valid_count[TNL_VXLAN]; + udp_tunnel_nic->tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN; + udp_tunnel_nic->tables[1].n_entries = hw->tnl.valid_count[TNL_GENEVE]; + udp_tunnel_nic->tables[1].tunnel_types = UDP_TUNNEL_TYPE_GENEVE; + + /* In case of MSIX we are going to setup the misc vector right here + * to handle admin queue events etc. In case of legacy and MSI + * the misc functionality and queue processing is combined in + * the same vector and that gets setup at open. 
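+ * + * Teardown is symmetric: ice_deinit_pf() frees the misc vector again + * whenever the interrupt tracker still holds entries.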
+ */ + err = ice_req_irq_msix_misc(pf); + if (err) { + dev_err(dev, "setup of misc vector failed: %d\n", err); + goto undo_init; + } + return 0; +undo_init: + /* deinit handles half-initialized pf just fine */ + ice_deinit_pf(pf); + return err; } /** @@ -4722,9 +4733,8 @@ static void ice_decfg_netdev(struct ice_vsi *vsi) vsi->netdev = NULL; } -int ice_init_dev(struct ice_pf *pf) +void ice_init_dev_hw(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int err; @@ -4744,61 +4754,28 @@ int ice_init_dev(struct ice_pf *pf) */ ice_set_safe_mode_caps(hw); } +} - err = ice_init_pf(pf); - if (err) { - dev_err(dev, "ice_init_pf failed: %d\n", err); - return err; - } - - pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; - pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; - pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; - if (pf->hw.tnl.valid_count[TNL_VXLAN]) { - pf->hw.udp_tunnel_nic.tables[0].n_entries = - pf->hw.tnl.valid_count[TNL_VXLAN]; - pf->hw.udp_tunnel_nic.tables[0].tunnel_types = - UDP_TUNNEL_TYPE_VXLAN; - } - if (pf->hw.tnl.valid_count[TNL_GENEVE]) { - pf->hw.udp_tunnel_nic.tables[1].n_entries = - pf->hw.tnl.valid_count[TNL_GENEVE]; - pf->hw.udp_tunnel_nic.tables[1].tunnel_types = - UDP_TUNNEL_TYPE_GENEVE; - } +int ice_init_dev(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + int err; + ice_set_pf_caps(pf); err = ice_init_interrupt_scheme(pf); if (err) { dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); - err = -EIO; - goto unroll_pf_init; + return -EIO; } - /* In case of MSIX we are going to setup the misc vector right here - * to handle admin queue events etc. In case of legacy and MSI - * the misc functionality and queue processing is combined in - * the same vector and that gets setup at open. - */ - err = ice_req_irq_msix_misc(pf); - if (err) { - dev_err(dev, "setup of misc vector failed: %d\n", err); - goto unroll_irq_scheme_init; - } + ice_start_service_task(pf); return 0; - -unroll_irq_scheme_init: - ice_clear_interrupt_scheme(pf); -unroll_pf_init: - ice_deinit_pf(pf); - return err; } void ice_deinit_dev(struct ice_pf *pf) { - ice_free_irq_msix_misc(pf); - ice_deinit_pf(pf); - ice_deinit_hw(&pf->hw); + ice_service_task_stop(pf); /* Service task is already stopped, so call reset directly. 
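 * Scheduling a reset through the service task would be a no-op at this * point.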
*/ ice_reset(&pf->hw, ICE_RESET_PFR); @@ -5038,21 +5015,24 @@ static void ice_deinit_devlink(struct ice_pf *pf) static int ice_init(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); int err; - err = ice_init_dev(pf); - if (err) + err = ice_init_pf(pf); + if (err) { + dev_err(dev, "ice_init_pf failed: %d\n", err); return err; + } if (pf->hw.mac_type == ICE_MAC_E830) { err = pci_enable_ptm(pf->pdev, NULL); if (err) - dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n"); + dev_dbg(dev, "PCIe PTM not supported by PCIe bus/controller\n"); } err = ice_alloc_vsis(pf); if (err) - goto err_alloc_vsis; + goto unroll_pf_init; err = ice_init_pf_sw(pf); if (err) @@ -5089,8 +5069,8 @@ err_init_link: ice_deinit_pf_sw(pf); err_init_pf_sw: ice_dealloc_vsis(pf); -err_alloc_vsis: - ice_deinit_dev(pf); +unroll_pf_init: + ice_deinit_pf(pf); return err; } @@ -5101,7 +5081,7 @@ static void ice_deinit(struct ice_pf *pf) ice_deinit_pf_sw(pf); ice_dealloc_vsis(pf); - ice_deinit_dev(pf); + ice_deinit_pf(pf); } /** @@ -5235,6 +5215,7 @@ static int ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) { struct device *dev = &pdev->dev; + bool need_dev_deinit = false; struct ice_adapter *adapter; struct ice_pf *pf; struct ice_hw *hw; @@ -5331,10 +5312,14 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) } pf->adapter = adapter; - err = ice_init(pf); + err = ice_init_dev(pf); if (err) goto unroll_adapter; + err = ice_init(pf); + if (err) + goto unroll_dev_init; + devl_lock(priv_to_devlink(pf)); err = ice_load(pf); if (err) @@ -5352,10 +5337,14 @@ unroll_load: unroll_init: devl_unlock(priv_to_devlink(pf)); ice_deinit(pf); +unroll_dev_init: + need_dev_deinit = true; unroll_adapter: ice_adapter_put(pdev); unroll_hw_init: ice_deinit_hw(hw); + if (need_dev_deinit) + ice_deinit_dev(pf); return err; } @@ -5450,10 +5439,6 @@ static void ice_remove(struct pci_dev *pdev) ice_hwmon_exit(pf); - ice_service_task_stop(pf); - ice_aq_cancel_waiting_tasks(pf); - set_bit(ICE_DOWN, pf->state); - if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); @@ -5471,6 +5456,11 @@ static void ice_remove(struct pci_dev *pdev) ice_set_wake(pf); ice_adapter_put(pdev); + ice_deinit_hw(&pf->hw); + + ice_deinit_dev(pf); + ice_aq_cancel_waiting_tasks(pf); + set_bit(ICE_DOWN, pf->state); } /** @@ -7138,6 +7128,9 @@ void ice_update_pf_stats(struct ice_pf *pf) &prev_ps->mac_remote_faults, &cur_ps->mac_remote_faults); + ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, + &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); + ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, &prev_ps->rx_undersize, &cur_ps->rx_undersize); @@ -7862,12 +7855,6 @@ int ice_change_mtu(struct net_device *netdev, int new_mtu) frame_size - ICE_ETH_PKT_HDR_PAD); return -EINVAL; } - } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { - if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) { - netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n", - ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD); - return -EINVAL; - } } /* if a reset is in progress, wait for some time for it to complete */ @@ -8071,9 +8058,7 @@ static int ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask, int nlflags) { - struct ice_netdev_priv *np = netdev_priv(dev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(dev); u16 bmode; bmode = pf->first_sw->bridge_mode; @@ -8143,8 +8128,7 @@ 
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 __always_unused flags, struct netlink_ext_ack __always_unused *extack) { - struct ice_netdev_priv *np = netdev_priv(dev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(dev); struct nlattr *attr, *br_spec; struct ice_hw *hw = &pf->hw; struct ice_sw *pf_sw; @@ -9578,8 +9562,7 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, */ int ice_open(struct net_device *netdev) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); if (ice_is_reset_in_progress(pf->state)) { netdev_err(netdev, "can't open net device while reset is in progress"); diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index 7c09ea0f03ba..725167d557a8 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -82,26 +82,46 @@ enum ice_sw_tunnel_type { enum ice_prot_id { ICE_PROT_ID_INVAL = 0, ICE_PROT_MAC_OF_OR_S = 1, + ICE_PROT_MAC_O2 = 2, ICE_PROT_MAC_IL = 4, + ICE_PROT_MAC_IN_MAC = 7, ICE_PROT_ETYPE_OL = 9, ICE_PROT_ETYPE_IL = 10, + ICE_PROT_PAY = 15, + ICE_PROT_EVLAN_O = 16, + ICE_PROT_VLAN_O = 17, + ICE_PROT_VLAN_IF = 18, + ICE_PROT_MPLS_OL_MINUS_1 = 27, + ICE_PROT_MPLS_OL_OR_OS = 28, + ICE_PROT_MPLS_IL = 29, ICE_PROT_IPV4_OF_OR_S = 32, ICE_PROT_IPV4_IL = 33, + ICE_PROT_IPV4_IL_IL = 34, ICE_PROT_IPV6_OF_OR_S = 40, ICE_PROT_IPV6_IL = 41, + ICE_PROT_IPV6_IL_IL = 42, + ICE_PROT_IPV6_NEXT_PROTO = 43, + ICE_PROT_IPV6_FRAG = 47, ICE_PROT_TCP_IL = 49, ICE_PROT_UDP_OF = 52, ICE_PROT_UDP_IL_OR_S = 53, ICE_PROT_GRE_OF = 64, + ICE_PROT_NSH_F = 84, ICE_PROT_ESP_F = 88, ICE_PROT_ESP_2 = 89, ICE_PROT_SCTP_IL = 96, ICE_PROT_ICMP_IL = 98, ICE_PROT_ICMPV6_IL = 100, + ICE_PROT_VRRP_F = 101, + ICE_PROT_OSPF = 102, ICE_PROT_PPPOE = 103, ICE_PROT_L2TPV3 = 104, + ICE_PROT_ATAOE_OF = 114, + ICE_PROT_CTRL_OF = 116, + ICE_PROT_LLDP_OF = 117, ICE_PROT_ARP_OF = 118, ICE_PROT_META_ID = 255, /* when offset == metadata */ + ICE_PROT_EAPOL_OF = 120, ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */ }; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 8ec0f7d0fceb..4c8d20f2d2c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -500,6 +500,9 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) if (tstamp) { shhwtstamps.hwtstamp = ns_to_ktime(tstamp); ice_trace(tx_tstamp_complete, skb, idx); + + /* Count the number of Tx timestamps that succeeded */ + pf->ptp.tx_hwtstamp_good++; } skb_tstamp_tx(skb, &shhwtstamps); @@ -558,6 +561,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) { struct ice_ptp_port *ptp_port; unsigned long flags; + u32 tstamp_good = 0; struct ice_pf *pf; struct ice_hw *hw; u64 tstamp_ready; @@ -658,11 +662,16 @@ skip_ts_read: if (tstamp) { shhwtstamps.hwtstamp = ns_to_ktime(tstamp); ice_trace(tx_tstamp_complete, skb, idx); + + /* Count the number of Tx timestamps that succeeded */ + tstamp_good++; } skb_tstamp_tx(skb, &shhwtstamps); dev_kfree_skb_any(skb); } + + pf->ptp.tx_hwtstamp_good += tstamp_good; } /** @@ -2206,8 +2215,7 @@ static int ice_ptp_getcrosststamp(struct ptp_clock_info *info, int ice_ptp_hwtstamp_get(struct net_device *netdev, struct kernel_hwtstamp_config *config) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = 
np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); if (pf->ptp.state != ICE_PTP_READY) return -EIO; @@ -2278,8 +2286,7 @@ int ice_ptp_hwtstamp_set(struct net_device *netdev, struct kernel_hwtstamp_config *config, struct netlink_ext_ack *extack) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); int err; if (pf->ptp.state != ICE_PTP_READY) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 137f2070a2d9..27016aac4f1e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -237,6 +237,7 @@ struct ice_ptp_pin_desc { * @clock: pointer to registered PTP clock device * @tstamp_config: hardware timestamping configuration * @reset_time: kernel time after clock stop on reset + * @tx_hwtstamp_good: number of completed Tx timestamp requests * @tx_hwtstamp_skipped: number of Tx time stamp requests skipped * @tx_hwtstamp_timeouts: number of Tx skbs discarded with no time stamp * @tx_hwtstamp_flushed: number of Tx skbs flushed due to interface closed @@ -261,6 +262,7 @@ struct ice_ptp { struct ptp_clock *clock; struct kernel_hwtstamp_config tstamp_config; u64 reset_time; + u64 tx_hwtstamp_good; u32 tx_hwtstamp_skipped; u32 tx_hwtstamp_timeouts; u32 tx_hwtstamp_flushed; diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 843e82fd3bf9..6b1126ddb561 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -1190,8 +1190,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) */ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_vsi *vf_vsi; struct device *dev; struct ice_vf *vf; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 73f08d02f9c7..ad76768a4232 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -7,6 +7,8 @@ #include <linux/netdevice.h> #include <linux/prefetch.h> #include <linux/bpf_trace.h> +#include <linux/net/intel/libie/rx.h> +#include <net/libeth/xdp.h> #include <net/dsfield.h> #include <net/mpls.h> #include <net/xdp.h> @@ -111,7 +113,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, static void ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf) { - if (dma_unmap_len(tx_buf, len)) + if (tx_buf->type != ICE_TX_BUF_XDP_TX && dma_unmap_len(tx_buf, len)) dma_unmap_page(ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), @@ -125,7 +127,7 @@ ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf) dev_kfree_skb_any(tx_buf->skb); break; case ICE_TX_BUF_XDP_TX: - page_frag_free(tx_buf->raw_buf); + libeth_xdp_return_va(tx_buf->raw_buf, false); break; case ICE_TX_BUF_XDP_XMIT: xdp_return_frame(tx_buf->xdpf); @@ -506,61 +508,67 @@ err: return -ENOMEM; } +void ice_rxq_pp_destroy(struct ice_rx_ring *rq) +{ + struct libeth_fq fq = { + .fqes = rq->rx_fqes, + .pp = rq->pp, + }; + + libeth_rx_fq_destroy(&fq); + rq->rx_fqes = NULL; + rq->pp = NULL; + + if (!rq->hdr_pp) + return; + + fq.fqes = rq->hdr_fqes; + fq.pp = rq->hdr_pp; + + libeth_rx_fq_destroy(&fq); + rq->hdr_fqes = NULL; + rq->hdr_pp = NULL; +} + /** * 
ice_clean_rx_ring - Free Rx buffers * @rx_ring: ring to be cleaned */ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring) { - struct xdp_buff *xdp = &rx_ring->xdp; - struct device *dev = rx_ring->dev; u32 size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_buf) - return; if (rx_ring->xsk_pool) { ice_xsk_clean_rx_ring(rx_ring); goto rx_skip_free; } - if (xdp->data) { - xdp_return_buff(xdp); - xdp->data = NULL; - } + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_fqes) + return; + + libeth_xdp_return_stash(&rx_ring->xdp); /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; + for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) { + libeth_rx_recycle_slow(rx_ring->rx_fqes[i].netmem); - if (!rx_buf->page) - continue; + if (rx_ring->hdr_pp) + libeth_rx_recycle_slow(rx_ring->hdr_fqes[i].netmem); - /* Invalidate cache lines that may have been written to by - * device so that we avoid corrupting memory. - */ - dma_sync_single_range_for_cpu(dev, rx_buf->dma, - rx_buf->page_offset, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - - /* free resources associated with mapping */ - dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); - __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); - - rx_buf->page = NULL; - rx_buf->page_offset = 0; + if (unlikely(++i == rx_ring->count)) + i = 0; } -rx_skip_free: - if (rx_ring->xsk_pool) - memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf))); - else - memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf))); + if (rx_ring->vsi->type == ICE_VSI_PF && + xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) { + xdp_rxq_info_detach_mem_model(&rx_ring->xdp_rxq); + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + } + ice_rxq_pp_destroy(rx_ring); + +rx_skip_free: /* Zero out the descriptor ring */ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), PAGE_SIZE); @@ -568,7 +576,6 @@ rx_skip_free: rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; - rx_ring->first_desc = 0; rx_ring->next_to_use = 0; } @@ -580,26 +587,20 @@ rx_skip_free: */ void ice_free_rx_ring(struct ice_rx_ring *rx_ring) { + struct device *dev = ice_pf_to_dev(rx_ring->vsi->back); u32 size; ice_clean_rx_ring(rx_ring); - if (rx_ring->vsi->type == ICE_VSI_PF) - if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) - xdp_rxq_info_unreg(&rx_ring->xdp_rxq); WRITE_ONCE(rx_ring->xdp_prog, NULL); if (rx_ring->xsk_pool) { kfree(rx_ring->xdp_buf); rx_ring->xdp_buf = NULL; - } else { - kfree(rx_ring->rx_buf); - rx_ring->rx_buf = NULL; } if (rx_ring->desc) { size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), PAGE_SIZE); - dmam_free_coherent(rx_ring->dev, size, - rx_ring->desc, rx_ring->dma); + dmam_free_coherent(dev, size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } } @@ -612,19 +613,9 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring) */ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) { - struct device *dev = rx_ring->dev; + struct device *dev = ice_pf_to_dev(rx_ring->vsi->back); u32 size; - if (!dev) - return -ENOMEM; - - /* warn if we are about to overwrite the pointer */ - WARN_ON(rx_ring->rx_buf); - rx_ring->rx_buf = - kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); - if (!rx_ring->rx_buf) - return -ENOMEM; - /* round up to nearest page */ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), PAGE_SIZE); @@ -633,22 +624,16 @@ int 
ice_setup_rx_ring(struct ice_rx_ring *rx_ring) if (!rx_ring->desc) { dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", size); - goto err; + return -ENOMEM; } rx_ring->next_to_use = 0; rx_ring->next_to_clean = 0; - rx_ring->first_desc = 0; if (ice_is_xdp_ena_vsi(rx_ring->vsi)) WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); return 0; - -err: - kfree(rx_ring->rx_buf); - rx_ring->rx_buf = NULL; - return -ENOMEM; } /** @@ -662,7 +647,7 @@ err: * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ static u32 -ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, +ice_run_xdp(struct ice_rx_ring *rx_ring, struct libeth_xdp_buff *xdp, struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, union ice_32b_rx_flex_desc *eop_desc) { @@ -672,23 +657,23 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, if (!xdp_prog) goto exit; - ice_xdp_meta_set_desc(xdp, eop_desc); + xdp->desc = eop_desc; - act = bpf_prog_run_xdp(xdp_prog, xdp); + act = bpf_prog_run_xdp(xdp_prog, &xdp->base); switch (act) { case XDP_PASS: break; case XDP_TX: if (static_branch_unlikely(&ice_xdp_locking_key)) spin_lock(&xdp_ring->tx_lock); - ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false); + ret = __ice_xmit_xdp_ring(&xdp->base, xdp_ring, false); if (static_branch_unlikely(&ice_xdp_locking_key)) spin_unlock(&xdp_ring->tx_lock); if (ret == ICE_XDP_CONSUMED) goto out_failure; break; case XDP_REDIRECT: - if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog)) + if (xdp_do_redirect(rx_ring->netdev, &xdp->base, xdp_prog)) goto out_failure; ret = ICE_XDP_REDIR; break; @@ -700,8 +685,10 @@ out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_DROP: + libeth_xdp_return_buff(xdp); ret = ICE_XDP_CONSUMED; } + exit: return ret; } @@ -790,53 +777,6 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, } /** - * ice_alloc_mapped_page - recycle or make a new page - * @rx_ring: ring to use - * @bi: rx_buf struct to modify - * - * Returns true if the page was successfully allocated or - * reused. - */ -static bool -ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi) -{ - struct page *page = bi->page; - dma_addr_t dma; - - /* since we are recycling buffers we should seldom need to alloc */ - if (likely(page)) - return true; - - /* alloc new page for storage */ - page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); - if (unlikely(!page)) { - rx_ring->ring_stats->rx_stats.alloc_page_failed++; - return false; - } - - /* map page for use */ - dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); - - /* if mapping failed free memory back to system since - * there isn't much point in holding memory we can't use - */ - if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, ice_rx_pg_order(rx_ring)); - rx_ring->ring_stats->rx_stats.alloc_page_failed++; - return false; - } - - bi->dma = dma; - bi->page = page; - bi->page_offset = rx_ring->rx_offset; - page_ref_add(page, USHRT_MAX - 1); - bi->pagecnt_bias = USHRT_MAX; - - return true; -} - -/** * ice_init_ctrl_rx_descs - Initialize Rx descriptors for control vsi. 
* @rx_ring: ring to init descriptors on * @count: number of descriptors to initialize @@ -882,9 +822,20 @@ void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count) */ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) { + const struct libeth_fq_fp hdr_fq = { + .pp = rx_ring->hdr_pp, + .fqes = rx_ring->hdr_fqes, + .truesize = rx_ring->hdr_truesize, + .count = rx_ring->count, + }; + const struct libeth_fq_fp fq = { + .pp = rx_ring->pp, + .fqes = rx_ring->rx_fqes, + .truesize = rx_ring->truesize, + .count = rx_ring->count, + }; union ice_32b_rx_flex_desc *rx_desc; u16 ntu = rx_ring->next_to_use; - struct ice_rx_buf *bi; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) @@ -892,30 +843,39 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) /* get the Rx descriptor and buffer based on next_to_use */ rx_desc = ICE_RX_DESC(rx_ring, ntu); - bi = &rx_ring->rx_buf[ntu]; do { - /* if we fail here, we have work remaining */ - if (!ice_alloc_mapped_page(rx_ring, bi)) - break; + dma_addr_t addr; - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, bi->dma, - bi->page_offset, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); + addr = libeth_rx_alloc(&fq, ntu); + if (addr == DMA_MAPPING_ERROR) { + rx_ring->ring_stats->rx_stats.alloc_page_failed++; + break; + } /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->read.pkt_addr = cpu_to_le64(addr); + + if (!hdr_fq.pp) + goto next; + + addr = libeth_rx_alloc(&hdr_fq, ntu); + if (addr == DMA_MAPPING_ERROR) { + rx_ring->ring_stats->rx_stats.alloc_page_failed++; + + libeth_rx_recycle_slow(fq.fqes[ntu].netmem); + break; + } + + rx_desc->read.hdr_addr = cpu_to_le64(addr); +next: rx_desc++; - bi++; ntu++; if (unlikely(ntu == rx_ring->count)) { rx_desc = ICE_RX_DESC(rx_ring, 0); - bi = rx_ring->rx_buf; ntu = 0; } @@ -932,402 +892,6 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) } /** - * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse - * @rx_buf: Rx buffer to adjust - * @size: Size of adjustment - * - * Update the offset within page so that Rx buf will be ready to be reused. 
- * For systems with PAGE_SIZE < 8192 this function will flip the page offset - * so the second half of the page assigned to the Rx buffer will be used; otherwise - * the offset is moved by "size" bytes - */ -static void -ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) -{ -#if (PAGE_SIZE < 8192) - /* flip page offset to other buffer */ - rx_buf->page_offset ^= size; -#else - /* move offset up to the next cache line */ - rx_buf->page_offset += size; -#endif -} - -/** - * ice_can_reuse_rx_page - Determine if page can be reused for another Rx - * @rx_buf: buffer containing the page - * - * If page is reusable, we have a green light for calling ice_reuse_rx_page, - * which will assign the current buffer to the buffer that next_to_alloc is - * pointing to; otherwise, the DMA mapping needs to be destroyed and - * page freed - */ -static bool -ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) -{ - unsigned int pagecnt_bias = rx_buf->pagecnt_bias; - struct page *page = rx_buf->page; - - /* avoid re-using remote and pfmemalloc pages */ - if (!dev_page_is_reusable(page)) - return false; - - /* if we are the only owner of the page we can reuse it */ - if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1)) - return false; -#if (PAGE_SIZE >= 8192) -#define ICE_LAST_OFFSET \ - (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072) - if (rx_buf->page_offset > ICE_LAST_OFFSET) - return false; -#endif /* PAGE_SIZE >= 8192 */ - - /* If we have drained the page fragment pool we need to update - * the pagecnt_bias and page count so that we fully restock the - * number of references the driver holds. - */ - if (unlikely(pagecnt_bias == 1)) { - page_ref_add(page, USHRT_MAX - 1); - rx_buf->pagecnt_bias = USHRT_MAX; - } - - return true; -} - -/** - * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag - * @rx_ring: Rx descriptor ring to transact packets on - * @xdp: xdp buff to place the data into - * @rx_buf: buffer containing page to add - * @size: packet length from rx_desc - * - * This function will add the data contained in rx_buf->page to the xdp buf. - * It will just attach the page as a frag. - */ -static int -ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, - struct ice_rx_buf *rx_buf, const unsigned int size) -{ - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); - - if (!size) - return 0; - - if (!xdp_buff_has_frags(xdp)) { - sinfo->nr_frags = 0; - sinfo->xdp_frags_size = 0; - xdp_buff_set_frags_flag(xdp); - } - - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) - return -ENOMEM; - - __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, - rx_buf->page_offset, size); - sinfo->xdp_frags_size += size; - - if (page_is_pfmemalloc(rx_buf->page)) - xdp_buff_set_frag_pfmemalloc(xdp); - - return 0; -} - -/** - * ice_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: Rx descriptor ring to store buffers on - * @old_buf: donor buffer to have page reused - * - * Synchronizes page for reuse by the adapter - */ -static void -ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf) -{ - u16 nta = rx_ring->next_to_alloc; - struct ice_rx_buf *new_buf; - - new_buf = &rx_ring->rx_buf[nta]; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - /* Transfer page from old buffer to new buffer. - * Move each member individually to avoid possible store - * forwarding stalls and unnecessary copy of skb.
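A minimal standalone sketch of the half-page flip that the removed ice_rx_buf_adjust_pg_offset() performed (illustrative name, not driver API): with 4K pages and 2K buffers, XOR-ing the offset with the buffer size bounces between the two halves of the page, so one page can back two in-flight buffers before the reuse check runs.

	/* hypothetical demo: offset alternates 0 <-> 2048 when size == 2048 */
	static unsigned int demo_flip_offset(unsigned int offset, unsigned int size)
	{
		return offset ^ size;
	}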
- */ - new_buf->dma = old_buf->dma; - new_buf->page = old_buf->page; - new_buf->page_offset = old_buf->page_offset; - new_buf->pagecnt_bias = old_buf->pagecnt_bias; -} - -/** - * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use - * @rx_ring: Rx descriptor ring to transact packets on - * @size: size of buffer to add to skb - * @ntc: index of next to clean element - * - * This function will pull an Rx buffer from the ring and synchronize it - * for use by the CPU. - */ -static struct ice_rx_buf * -ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, - const unsigned int ntc) -{ - struct ice_rx_buf *rx_buf; - - rx_buf = &rx_ring->rx_buf[ntc]; - prefetchw(rx_buf->page); - - if (!size) - return rx_buf; - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, - rx_buf->page_offset, size, - DMA_FROM_DEVICE); - - /* We have pulled a buffer for use, so decrement pagecnt_bias */ - rx_buf->pagecnt_bias--; - - return rx_buf; -} - -/** - * ice_get_pgcnts - grab page_count() for gathered fragments - * @rx_ring: Rx descriptor ring to store the page counts on - * @ntc: the next to clean element (not included in this frame!) - * - * This function is intended to be called right before running the XDP - * program so that the page recycling mechanism can take a correct - * decision regarding the underlying pages; it has to be done this way - * because the XDP program can change the refcount of a page - */ -static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc) -{ - u32 idx = rx_ring->first_desc; - struct ice_rx_buf *rx_buf; - u32 cnt = rx_ring->count; - - while (idx != ntc) { - rx_buf = &rx_ring->rx_buf[idx]; - rx_buf->pgcnt = page_count(rx_buf->page); - - if (++idx == cnt) - idx = 0; - } -} - -/** - * ice_build_skb - Build skb around an existing buffer - * @rx_ring: Rx descriptor ring to transact packets on - * @xdp: xdp_buff pointing to the data - * - * This function builds an skb around an existing XDP buffer, taking care - * to set up the skb correctly and avoid any memcpy overhead. The driver has - * already combined frags (if any) into skb_shared_info. - */ -static struct sk_buff * -ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) -{ - u8 metasize = xdp->data - xdp->data_meta; - struct skb_shared_info *sinfo = NULL; - unsigned int nr_frags; - struct sk_buff *skb; - - if (unlikely(xdp_buff_has_frags(xdp))) { - sinfo = xdp_get_shared_info_from_buff(xdp); - nr_frags = sinfo->nr_frags; - } - - /* Prefetch first cache line of first page. If xdp->data_meta - * is unused, it points exactly at xdp->data, otherwise we - * likely have a consumer accessing the first few bytes of meta - * data, and then the actual data.
- */ - net_prefetch(xdp->data_meta); - /* build an skb around the page buffer */ - skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); - if (unlikely(!skb)) - return NULL; - - /* must record Rx queue, otherwise OS features such as - * symmetric queue won't work - */ - skb_record_rx_queue(skb, rx_ring->q_index); - - /* update pointers within the skb to store the data */ - skb_reserve(skb, xdp->data - xdp->data_hard_start); - __skb_put(skb, xdp->data_end - xdp->data); - if (metasize) - skb_metadata_set(skb, metasize); - - if (unlikely(xdp_buff_has_frags(xdp))) - xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size, - nr_frags * xdp->frame_sz, - xdp_buff_get_skb_flags(xdp)); - - return skb; -} - -/** - * ice_construct_skb - Allocate skb and populate it - * @rx_ring: Rx descriptor ring to transact packets on - * @xdp: xdp_buff pointing to the data - * - * This function allocates an skb. It then populates it with the page - * data from the current receive descriptor, taking care to set up the - * skb correctly. - */ -static struct sk_buff * -ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) -{ - unsigned int size = xdp->data_end - xdp->data; - struct skb_shared_info *sinfo = NULL; - struct ice_rx_buf *rx_buf; - unsigned int nr_frags = 0; - unsigned int headlen; - struct sk_buff *skb; - - /* prefetch first cache line of first page */ - net_prefetch(xdp->data); - - if (unlikely(xdp_buff_has_frags(xdp))) { - sinfo = xdp_get_shared_info_from_buff(xdp); - nr_frags = sinfo->nr_frags; - } - - /* allocate an skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE); - if (unlikely(!skb)) - return NULL; - - rx_buf = &rx_ring->rx_buf[rx_ring->first_desc]; - skb_record_rx_queue(skb, rx_ring->q_index); - /* Determine available headroom for copy */ - headlen = size; - if (headlen > ICE_RX_HDR_SIZE) - headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, - sizeof(long))); - - /* if we exhaust the linear part then add what is left as a frag */ - size -= headlen; - if (size) { - /* besides adding a partial frag here, we are going to add - * frags from xdp_buff; make sure there is enough space for - * them - */ - if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) { - dev_kfree_skb(skb); - return NULL; - } - skb_add_rx_frag(skb, 0, rx_buf->page, - rx_buf->page_offset + headlen, size, - xdp->frame_sz); - } else { - /* buffer is unused, restore biased page count in Rx buffer; - * data was copied onto skb's linear part so there's no - * need for adjusting page offset and we can reuse this buffer - * as-is - */ - rx_buf->pagecnt_bias++; - } - - if (unlikely(xdp_buff_has_frags(xdp))) { - struct skb_shared_info *skinfo = skb_shinfo(skb); - - memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], - sizeof(skb_frag_t) * nr_frags); - - xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags, - sinfo->xdp_frags_size, - nr_frags * xdp->frame_sz, - xdp_buff_get_skb_flags(xdp)); - } - - return skb; -} - -/** - * ice_put_rx_buf - Clean up used buffer and either recycle or free - * @rx_ring: Rx descriptor ring to transact packets on - * @rx_buf: Rx buffer to pull data from - * - * This function will clean up the contents of the rx_buf. It will either - * recycle the buffer or unmap it and free the associated resources.
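The "align pull length" comment above is easy to verify with a worked value: ALIGN() rounds its first argument up to the next multiple of the second, so a 14-byte Ethernet header is copied as 16 bytes on a 64-bit system where sizeof(long) is 8. A hedged sketch (demo name, not driver code):

	/* ALIGN(14, 8) == 16, ALIGN(64, 8) == 64 */
	static unsigned int demo_aligned_pull(unsigned int headlen)
	{
		return ALIGN(headlen, sizeof(long));
	}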
- */ -static void -ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf) -{ - if (!rx_buf) - return; - - if (ice_can_reuse_rx_page(rx_buf)) { - /* hand second half of page back to the ring */ - ice_reuse_rx_page(rx_ring, rx_buf); - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, - ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, - ICE_RX_DMA_ATTR); - __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); - } - - /* clear contents of buffer_info */ - rx_buf->page = NULL; -} - -/** - * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame - * @rx_ring: Rx ring with all the auxiliary data - * @xdp: XDP buffer carrying linear + frags part - * @ntc: the next to clean element (not included in this frame!) - * @verdict: return code from XDP program execution - * - * Called after the XDP program has completed, or on error with verdict set to - * ICE_XDP_CONSUMED. - * - * Walk through buffers from first_desc to the end of the frame, releasing - * buffers and satisfying the internal page recycling mechanism. The action - * depends on the verdict from the XDP program. - */ -static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, - u32 ntc, u32 verdict) -{ - u32 idx = rx_ring->first_desc; - u32 cnt = rx_ring->count; - struct ice_rx_buf *buf; - u32 xdp_frags = 0; - int i = 0; - - if (unlikely(xdp_buff_has_frags(xdp))) - xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; - - while (idx != ntc) { - buf = &rx_ring->rx_buf[idx]; - if (++idx == cnt) - idx = 0; - - /* An XDP program could release fragments from the end of the - * buffer. For these, we need to keep the pagecnt_bias as-is. - * To do this, only adjust pagecnt_bias for fragments up to - * the total remaining after the XDP program has run.
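The first_desc-to-ntc walk described above is the standard wraparound iteration over a descriptor ring; a minimal sketch with hypothetical names:

	static void demo_walk_frame(u32 first, u32 ntc, u32 cnt)
	{
		u32 idx = first;

		while (idx != ntc) {
			/* release or recycle the buffer at idx here */
			if (++idx == cnt)
				idx = 0;
		}
	}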
- */ - if (verdict != ICE_XDP_CONSUMED) - ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); - else if (i++ <= xdp_frags) - buf->pagecnt_bias++; - - ice_put_rx_buf(rx_ring, buf); - } - - xdp->data = NULL; - rx_ring->first_desc = ntc; -} - -/** * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on * @@ -1361,9 +925,8 @@ void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring) total_rx_pkts++; } - rx_ring->first_desc = ntc; rx_ring->next_to_clean = ntc; - ice_init_ctrl_rx_descs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); + ice_init_ctrl_rx_descs(rx_ring, ICE_DESC_UNUSED(rx_ring)); } /** @@ -1381,16 +944,17 @@ void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring) static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; - unsigned int offset = rx_ring->rx_offset; - struct xdp_buff *xdp = &rx_ring->xdp; struct ice_tx_ring *xdp_ring = NULL; struct bpf_prog *xdp_prog = NULL; u32 ntc = rx_ring->next_to_clean; + LIBETH_XDP_ONSTACK_BUFF(xdp); u32 cached_ntu, xdp_verdict; u32 cnt = rx_ring->count; u32 xdp_xmit = 0; bool failure; + libeth_xdp_init_buff(xdp, &rx_ring->xdp, &rx_ring->xdp_rxq); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); if (xdp_prog) { xdp_ring = rx_ring->xdp_ring; @@ -1400,19 +964,21 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) /* start the loop to process Rx packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; - struct ice_rx_buf *rx_buf; + struct libeth_fqe *rx_buf; struct sk_buff *skb; unsigned int size; u16 stat_err_bits; u16 vlan_tci; + bool rxe; /* get the Rx desc from Rx ring based on 'next_to_clean' */ rx_desc = ICE_RX_DESC(rx_ring, ntc); - /* status_error_len will always be zero for unused descriptors - * because it's cleared in cleanup, and overlaps with hdr_addr - * which is always zero because packet split isn't used, if the - * hardware wrote DD then it will be non-zero + /* + * The DD bit will always be zero for unused descriptors + * because it's cleared in cleanup or when setting the DMA + * address of the header buffer, which never uses the DD bit. + * If the hardware wrote the descriptor, it will be non-zero. 
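The ownership handshake the new comment describes is the usual descriptor DD-bit pattern: software treats a descriptor as unwritten until hardware sets DD, and the DD read must be ordered before any other field of the write-back descriptor is consumed. A schematic sketch (hypothetical helper; the driver's real check is the ice_test_staterr() call below):

	static bool demo_desc_done(__le16 status_error0)
	{
		/* test the DD bit; a read barrier must follow before the
		 * rest of the write-back descriptor is read
		 */
		return le16_to_cpu(status_error0) &
		       BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
	}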
*/ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits)) @@ -1426,71 +992,65 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) ice_trace(clean_rx_irq, rx_ring, rx_desc); + stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_HBO_S) | + BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); + rxe = ice_test_staterr(rx_desc->wb.status_error0, + stat_err_bits); + + if (!rx_ring->hdr_pp) + goto payload; + + size = le16_get_bits(rx_desc->wb.hdr_len_sph_flex_flags1, + ICE_RX_FLEX_DESC_HDR_LEN_M); + if (unlikely(rxe)) + size = 0; + + rx_buf = &rx_ring->hdr_fqes[ntc]; + libeth_xdp_process_buff(xdp, rx_buf, size); + rx_buf->netmem = 0; + +payload: size = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M; + if (unlikely(rxe)) + size = 0; /* retrieve a buffer from the ring */ - rx_buf = ice_get_rx_buf(rx_ring, size, ntc); + rx_buf = &rx_ring->rx_fqes[ntc]; + libeth_xdp_process_buff(xdp, rx_buf, size); - /* Increment ntc before calls to ice_put_rx_mbuf() */ if (++ntc == cnt) ntc = 0; - if (!xdp->data) { - void *hard_start; - - hard_start = page_address(rx_buf->page) + rx_buf->page_offset - - offset; - xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); - xdp_buff_clear_frags_flag(xdp); - } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { - ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED); - break; - } - /* skip if it is NOP desc */ - if (ice_is_non_eop(rx_ring, rx_desc)) + if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!xdp->data)) continue; - ice_get_pgcnts(rx_ring, ntc); xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc); if (xdp_verdict == ICE_XDP_PASS) goto construct_skb; - total_rx_bytes += xdp_get_buff_len(xdp); - total_rx_pkts++; - ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict); - xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR); + if (xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) + xdp_xmit |= xdp_verdict; + total_rx_bytes += xdp_get_buff_len(&xdp->base); + total_rx_pkts++; + xdp->data = NULL; continue; + construct_skb: - if (likely(ice_ring_uses_build_skb(rx_ring))) - skb = ice_build_skb(rx_ring, xdp); - else - skb = ice_construct_skb(rx_ring, xdp); + skb = xdp_build_skb_from_buff(&xdp->base); + xdp->data = NULL; + /* exit if we failed to retrieve a buffer */ if (!skb) { + libeth_xdp_return_buff_slow(xdp); rx_ring->ring_stats->rx_stats.alloc_buf_failed++; - xdp_verdict = ICE_XDP_CONSUMED; - } - ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict); - - if (!skb) - break; - - stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); - if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, - stat_err_bits))) { - dev_kfree_skb_any(skb); continue; } vlan_tci = ice_get_vlan_tci(rx_desc); - /* pad the skb if needed, to make a valid ethernet frame */ - if (eth_skb_pad(skb)) - continue; - /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -1507,11 +1067,13 @@ construct_skb: rx_ring->next_to_clean = ntc; /* return up to cleaned_count buffers to hardware */ - failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); + failure = ice_alloc_rx_bufs(rx_ring, ICE_DESC_UNUSED(rx_ring)); if (xdp_xmit) ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu); + libeth_xdp_save_buff(&rx_ring->xdp, xdp); + if (rx_ring->ring_stats) ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 841a07bfba54..e440c55d9e9f 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -4,6 +4,8 @@ #ifndef _ICE_TXRX_H_ #define _ICE_TXRX_H_ +#include <net/libeth/types.h> + #include "ice_type.h" #define ICE_DFLT_IRQ_WORK 256 @@ -27,72 +29,6 @@ #define ICE_MAX_TXQ_PER_TXQG 128 -/* Attempt to maximize the headroom available for incoming frames. We use a 2K - * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame. - * This leaves us with 512 bytes of room. From that we need to deduct the - * space needed for the shared info and the padding needed to IP align the - * frame. - * - * Note: For cache line sizes 256 or larger this value is going to end - * up negative. In these cases we should fall back to the legacy - * receive path. - */ -#if (PAGE_SIZE < 8192) -#define ICE_2K_TOO_SMALL_WITH_PADDING \ - ((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \ - SKB_WITH_OVERHEAD(ICE_RXBUF_2048)) - -/** - * ice_compute_pad - compute the padding - * @rx_buf_len: buffer length - * - * Figure out the half-page size based on the given buffer length, then - * subtract the skb_shared_info overhead and finally the buffer length - * itself; what remains is the space that is left for padding usage - */ -static inline int ice_compute_pad(int rx_buf_len) -{ - int half_page_size; - - half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); - return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len; -} - -/** - * ice_skb_pad - determine the padding that we can supply - * - * Figure out the right Rx buffer size and, based on that, calculate the - * padding - */ -static inline int ice_skb_pad(void) -{ - int rx_buf_len; - - /* If a 2K buffer cannot handle a standard Ethernet frame then - * optimize padding for a 3K buffer instead of a 1.5K buffer. - * - * For a 3K buffer we need to add enough padding to allow for - * tailroom due to NET_IP_ALIGN possibly shifting us out of - * cache-line alignment. - */ - if (ICE_2K_TOO_SMALL_WITH_PADDING) - rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); - else - rx_buf_len = ICE_RXBUF_1536; - - /* if needed make room for NET_IP_ALIGN */ - rx_buf_len -= NET_IP_ALIGN; - - return ice_compute_pad(rx_buf_len); -} - -#define ICE_SKB_PAD ice_skb_pad() -#else -#define ICE_2K_TOO_SMALL_WITH_PADDING false -#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) -#endif - /* We are assuming that the cache line is always 64 Bytes here for ice. * In order to make sure that is a correct assumption there is a check in probe * to print a warning if the read from GLPCI_CNF2 tells us that the cache line @@ -112,10 +48,6 @@ static inline int ice_skb_pad(void) (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1) -#define ICE_RX_DESC_UNUSED(R) \ ((((R)->first_desc > (R)->next_to_use) ?
0 : (R)->count) + \ - (R)->first_desc - (R)->next_to_use - 1) - #define ICE_RING_QUARTER(R) ((R)->count >> 2) #define ICE_TX_FLAGS_TSO BIT(0) @@ -197,14 +129,6 @@ struct ice_tx_offload_params { u8 header_len; }; -struct ice_rx_buf { - dma_addr_t dma; - struct page *page; - unsigned int page_offset; - unsigned int pgcnt; - unsigned int pagecnt_bias; -}; - struct ice_q_stats { u64 pkts; u64 bytes; @@ -262,15 +186,6 @@ struct ice_pkt_ctx { __be16 vlan_proto; }; -struct ice_xdp_buff { - struct xdp_buff xdp_buff; - const union ice_32b_rx_flex_desc *eop_desc; - const struct ice_pkt_ctx *pkt_ctx; -}; - -/* Required for compatibility with xdp_buffs from xsk_pool */ -static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0); - /* indices into GLINT_ITR registers */ #define ICE_RX_ITR ICE_IDX_ITR0 #define ICE_TX_ITR ICE_IDX_ITR1 @@ -323,7 +238,7 @@ struct ice_tstamp_ring { struct ice_rx_ring { /* CL1 - 1st cacheline starts here */ void *desc; /* Descriptor ring memory */ - struct device *dev; /* Used for DMA mapping */ + struct page_pool *pp; struct net_device *netdev; /* netdev ring maps to */ struct ice_vsi *vsi; /* Backreference to associated VSI */ struct ice_q_vector *q_vector; /* Backreference to associated vector */ @@ -335,14 +250,19 @@ struct ice_rx_ring { u16 next_to_alloc; union { - struct ice_rx_buf *rx_buf; + struct libeth_fqe *rx_fqes; struct xdp_buff **xdp_buf; }; + /* CL2 - 2nd cacheline starts here */ + struct libeth_fqe *hdr_fqes; + struct page_pool *hdr_pp; + union { - struct ice_xdp_buff xdp_ext; - struct xdp_buff xdp; + struct libeth_xdp_buff_stash xdp; + struct libeth_xdp_buff *xsk; }; + /* CL3 - 3rd cacheline starts here */ union { struct ice_pkt_ctx pkt_ctx; @@ -352,12 +272,13 @@ struct ice_rx_ring { }; }; struct bpf_prog *xdp_prog; - u16 rx_offset; /* used in interrupt processing */ u16 next_to_use; u16 next_to_clean; - u16 first_desc; + + u32 hdr_truesize; + u32 truesize; /* stats structs */ struct ice_ring_stats *ring_stats; @@ -368,12 +289,11 @@ struct ice_rx_ring { struct ice_tx_ring *xdp_ring; struct ice_rx_ring *next; /* pointer to next ring in q_vector */ struct xsk_buff_pool *xsk_pool; - u16 max_frame; + u16 rx_hdr_len; u16 rx_buf_len; dma_addr_t dma; /* physical address of ring */ u8 dcb_tc; /* Traffic class of ring */ u8 ptp_rx; -#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) #define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2) #define ICE_RX_FLAGS_MULTIDEV BIT(3) #define ICE_RX_FLAGS_RING_GCS BIT(4) @@ -422,21 +342,6 @@ struct ice_tx_ring { u16 quanta_prof_id; } ____cacheline_internodealigned_in_smp; -static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring) -{ - return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB); -} - -static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring) -{ - ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB; -} - -static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring) -{ - ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB; -} - static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring) { return !!ring->ch; @@ -491,18 +396,13 @@ struct ice_coalesce_stored { static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring) { -#if (PAGE_SIZE < 8192) - if (ring->rx_buf_len > (PAGE_SIZE / 2)) - return 1; -#endif return 0; } -#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring)) - union ice_32b_rx_flex_desc; void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs); +void ice_rxq_pp_destroy(struct ice_rx_ring *rq); bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count); 
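Both Rx paths now size their refills with ICE_DESC_UNUSED(); a quick worked example of its arithmetic (values are illustrative):

	static u16 demo_desc_unused(u16 ntc, u16 ntu, u16 count)
	{
		return (u16)(((ntc > ntu) ? 0 : count) + ntc - ntu - 1);
	}
	/* count = 512, ntc = 10, ntu = 500 -> 21 free slots;
	 * ntc = 200, ntu = 100 -> 99 free slots
	 */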
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); u16 diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 45cfaabc41cb..956da38d63b0 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -3,6 +3,7 @@ #include <linux/filter.h> #include <linux/net/intel/libie/rx.h> +#include <net/libeth/xdp.h> #include "ice_txrx_lib.h" #include "ice_eswitch.h" @@ -230,9 +231,12 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, if (ice_is_port_repr_netdev(netdev)) ice_repr_inc_rx_stats(netdev, skb->len); + + /* __skb_push() is needed because xdp_build_skb_from_buff() + * calls eth_type_trans() + */ + __skb_push(skb, ETH_HLEN); skb->protocol = eth_type_trans(skb, netdev); - } else { - skb->protocol = eth_type_trans(skb, rx_ring->netdev); } ice_rx_csum(rx_ring, skb, rx_desc, ptype); @@ -270,19 +274,18 @@ static void ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf, struct xdp_frame_bulk *bq) { - dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); - dma_unmap_len_set(tx_buf, len, 0); - switch (tx_buf->type) { case ICE_TX_BUF_XDP_TX: - page_frag_free(tx_buf->raw_buf); + libeth_xdp_return_va(tx_buf->raw_buf, true); break; case ICE_TX_BUF_XDP_XMIT: + dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); xdp_return_frame_bulk(tx_buf->xdpf, bq); break; } + dma_unmap_len_set(tx_buf, len, 0); tx_buf->type = ICE_TX_BUF_EMPTY; } @@ -377,9 +380,11 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf; u32 cnt = xdp_ring->count; void *data = xdp->data; + struct page *page; u32 nr_frags = 0; u32 free_space; u32 frag = 0; + u32 offset; free_space = ICE_DESC_UNUSED(xdp_ring); if (free_space < ICE_RING_QUARTER(xdp_ring)) @@ -399,24 +404,28 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, tx_head = &xdp_ring->tx_buf[ntu]; tx_buf = tx_head; + page = virt_to_page(data); + offset = offset_in_page(xdp->data); + for (;;) { dma_addr_t dma; - dma = dma_map_single(dev, data, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) - goto dma_unmap; - - /* record length, and DMA address */ - dma_unmap_len_set(tx_buf, len, size); - dma_unmap_addr_set(tx_buf, dma, dma); - if (frame) { + dma = dma_map_single(dev, data, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) + goto dma_unmap; tx_buf->type = ICE_TX_BUF_FRAG; } else { + dma = page_pool_get_dma_addr(page) + offset; + dma_sync_single_for_device(dev, dma, size, DMA_BIDIRECTIONAL); tx_buf->type = ICE_TX_BUF_XDP_TX; tx_buf->raw_buf = data; } + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, len, size); + dma_unmap_addr_set(tx_buf, dma, dma); + tx_desc->buf_addr = cpu_to_le64(dma); tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0); @@ -430,6 +439,8 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, tx_desc = ICE_TX_DESC(xdp_ring, ntu); tx_buf = &xdp_ring->tx_buf[ntu]; + page = skb_frag_page(&sinfo->frags[frag]); + offset = skb_frag_off(&sinfo->frags[frag]); data = skb_frag_address(&sinfo->frags[frag]); size = skb_frag_size(&sinfo->frags[frag]); frag++; @@ -514,10 +525,13 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, */ static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns) { - const struct ice_xdp_buff *xdp_ext = (void *)ctx; + const struct 
libeth_xdp_buff *xdp_ext = (void *)ctx; + struct ice_rx_ring *rx_ring; - *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc, - xdp_ext->pkt_ctx); + rx_ring = libeth_xdp_buff_to_rq(xdp_ext, typeof(*rx_ring), xdp_rxq); + + *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->desc, + &rx_ring->pkt_ctx); if (!*ts_ns) return -ENODATA; @@ -545,10 +559,10 @@ ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc) static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, enum xdp_rss_hash_type *rss_type) { - const struct ice_xdp_buff *xdp_ext = (void *)ctx; + const struct libeth_xdp_buff *xdp_ext = (void *)ctx; - *hash = ice_get_rx_hash(xdp_ext->eop_desc); - *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc); + *hash = ice_get_rx_hash(xdp_ext->desc); + *rss_type = ice_xdp_rx_hash_type(xdp_ext->desc); if (!likely(*hash)) return -ENODATA; @@ -567,13 +581,16 @@ static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto, u16 *vlan_tci) { - const struct ice_xdp_buff *xdp_ext = (void *)ctx; + const struct libeth_xdp_buff *xdp_ext = (void *)ctx; + struct ice_rx_ring *rx_ring; + + rx_ring = libeth_xdp_buff_to_rq(xdp_ext, typeof(*rx_ring), xdp_rxq); - *vlan_proto = xdp_ext->pkt_ctx->vlan_proto; + *vlan_proto = rx_ring->pkt_ctx.vlan_proto; if (!*vlan_proto) return -ENODATA; - *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc); + *vlan_tci = ice_get_vlan_tci(xdp_ext->desc); if (!*vlan_tci) return -ENODATA; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index 99717730f21a..6a3f10f7a53f 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -135,13 +135,4 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, void ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci); -static inline void -ice_xdp_meta_set_desc(struct xdp_buff *xdp, - union ice_32b_rx_flex_desc *eop_desc) -{ - struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff, - xdp_buff); - - xdp_ext->eop_desc = eop_desc; -} #endif /* !_ICE_TXRX_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index b0a1b67071c5..6a2ec8389a8f 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -1063,6 +1063,7 @@ struct ice_hw_port_stats { u64 error_bytes; /* errbc */ u64 mac_local_faults; /* mlfc */ u64 mac_remote_faults; /* mrfc */ + u64 rx_len_errors; /* rlec */ u64 link_xon_rx; /* lxonrxc */ u64 link_xoff_rx; /* lxoffrxc */ u64 link_xon_tx; /* lxontxc */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index b00708907176..7a9c75d1d07c 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -53,6 +53,46 @@ struct ice_mdd_vf_events { u16 last_printed; }; +enum ice_hash_ip_ctx_type { + ICE_HASH_IP_CTX_IP = 0, + ICE_HASH_IP_CTX_IP_ESP, + ICE_HASH_IP_CTX_IP_UDP_ESP, + ICE_HASH_IP_CTX_IP_AH, + ICE_HASH_IP_CTX_IP_PFCP, + ICE_HASH_IP_CTX_IP_UDP, + ICE_HASH_IP_CTX_IP_TCP, + ICE_HASH_IP_CTX_IP_SCTP, + ICE_HASH_IP_CTX_MAX, +}; + +struct ice_vf_hash_ip_ctx { + struct ice_rss_hash_cfg ctx[ICE_HASH_IP_CTX_MAX]; +}; + +enum ice_hash_gtpu_ctx_type { + ICE_HASH_GTPU_CTX_EH_IP = 0, + ICE_HASH_GTPU_CTX_EH_IP_UDP, + ICE_HASH_GTPU_CTX_EH_IP_TCP, + ICE_HASH_GTPU_CTX_UP_IP, + ICE_HASH_GTPU_CTX_UP_IP_UDP, + ICE_HASH_GTPU_CTX_UP_IP_TCP, + 
ICE_HASH_GTPU_CTX_DW_IP, + ICE_HASH_GTPU_CTX_DW_IP_UDP, + ICE_HASH_GTPU_CTX_DW_IP_TCP, + ICE_HASH_GTPU_CTX_MAX, +}; + +struct ice_vf_hash_gtpu_ctx { + struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX]; +}; + +struct ice_vf_hash_ctx { + struct ice_vf_hash_ip_ctx v4; + struct ice_vf_hash_ip_ctx v6; + struct ice_vf_hash_gtpu_ctx ipv4; + struct ice_vf_hash_gtpu_ctx ipv6; +}; + /* Structure to store fdir fv entry */ struct ice_fdir_prof_info { struct ice_parser_profile prof; @@ -66,6 +106,12 @@ struct ice_vf_qs_bw { u8 tc; }; +/* Structure to store RSS field vector entry */ +struct ice_rss_prof_info { + struct ice_parser_profile prof; + bool symm; +}; + /* VF operations */ struct ice_vf_ops { enum ice_disq_rst_src reset_type; @@ -106,6 +152,8 @@ struct ice_vf { u16 ctrl_vsi_idx; struct ice_vf_fdir fdir; struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS]; + struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS]; + struct ice_vf_hash_ctx hash_ctx; u64 rss_hashcfg; /* RSS hash configuration */ struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ struct virtchnl_version_info vf_ver; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 575fd48f485f..989ff1fd9110 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -3,6 +3,7 @@ #include <linux/bpf_trace.h> #include <linux/unroll.h> +#include <net/libeth/xdp.h> #include <net/xdp_sock_drv.h> #include <net/xdp.h> #include "ice.h" @@ -169,50 +170,18 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) * If allocation was successful, substitute buffer with allocated one. * Returns 0 on success, negative on failure */ -static int +int ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present) { - size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) : - sizeof(*rx_ring->rx_buf); - void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL); - - if (!sw_ring) - return -ENOMEM; - if (pool_present) { - kfree(rx_ring->rx_buf); - rx_ring->rx_buf = NULL; - rx_ring->xdp_buf = sw_ring; + rx_ring->xdp_buf = kcalloc(rx_ring->count, + sizeof(*rx_ring->xdp_buf), + GFP_KERNEL); + if (!rx_ring->xdp_buf) + return -ENOMEM; } else { kfree(rx_ring->xdp_buf); rx_ring->xdp_buf = NULL; - rx_ring->rx_buf = sw_ring; - } - - return 0; -} - -/** - * ice_realloc_zc_buf - reallocate XDP ZC queue pairs - * @vsi: Current VSI - * @zc: is zero copy set - * - * Reallocate buffer for rx_rings that might be used by XSK. - * XDP requires more memory than rx_buf provides.
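With ice_realloc_zc_buf() removed, callers now toggle the XSK buffer array per ring. A hedged sketch of the caller-side pattern, mirroring the loop the removed VSI-wide helper contained (demo name is illustrative):

	/* hypothetical caller: resize each XSK-backed ring around a pool change */
	static int demo_realloc_all(struct ice_vsi *vsi, bool zc)
	{
		uint i;

		ice_for_each_rxq(vsi, i) {
			if (!vsi->rx_rings[i]->xsk_pool)
				continue;
			if (ice_realloc_rx_xdp_bufs(vsi->rx_rings[i], zc))
				return -ENOMEM;
		}
		return 0;
	}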
- * Returns 0 on success, negative on failure - */ -int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc) -{ - struct ice_rx_ring *rx_ring; - uint i; - - ice_for_each_rxq(vsi, i) { - rx_ring = vsi->rx_rings[i]; - if (!rx_ring->xsk_pool) - continue; - - if (ice_realloc_rx_xdp_bufs(rx_ring, zc)) - return -ENOMEM; } return 0; @@ -228,6 +197,7 @@ int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc) */ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) { + struct ice_rx_ring *rx_ring = vsi->rx_rings[qid]; bool if_running, pool_present = !!pool; int ret = 0, pool_failure = 0; @@ -241,8 +211,6 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) ice_is_xdp_ena_vsi(vsi); if (if_running) { - struct ice_rx_ring *rx_ring = vsi->rx_rings[qid]; - ret = ice_qp_dis(vsi, qid); if (ret) { netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret); @@ -303,11 +271,6 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, rx_desc->read.pkt_addr = cpu_to_le64(dma); rx_desc->wb.status_error0 = 0; - /* Put private info that changes on a per-packet basis - * into xdp_buff_xsk->cb. - */ - ice_xdp_meta_set_desc(*xdp, rx_desc); - rx_desc++; xdp++; } @@ -393,69 +356,6 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, } /** - * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer - * @rx_ring: Rx ring - * @xdp: Pointer to XDP buffer - * - * This function allocates a new skb from a zero-copy Rx buffer. - * - * Returns the skb on success, NULL on failure. - */ -static struct sk_buff * -ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) -{ - unsigned int totalsize = xdp->data_end - xdp->data_meta; - unsigned int metasize = xdp->data - xdp->data_meta; - struct skb_shared_info *sinfo = NULL; - struct sk_buff *skb; - u32 nr_frags = 0; - - if (unlikely(xdp_buff_has_frags(xdp))) { - sinfo = xdp_get_shared_info_from_buff(xdp); - nr_frags = sinfo->nr_frags; - } - net_prefetch(xdp->data_meta); - - skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); - if (unlikely(!skb)) - return NULL; - - memcpy(__skb_put(skb, totalsize), xdp->data_meta, - ALIGN(totalsize, sizeof(long))); - - if (metasize) { - skb_metadata_set(skb, metasize); - __skb_pull(skb, metasize); - } - - if (likely(!xdp_buff_has_frags(xdp))) - goto out; - - for (int i = 0; i < nr_frags; i++) { - struct skb_shared_info *skinfo = skb_shinfo(skb); - skb_frag_t *frag = &sinfo->frags[i]; - struct page *page; - void *addr; - - page = dev_alloc_page(); - if (!page) { - dev_kfree_skb(skb); - return NULL; - } - addr = page_to_virt(page); - - memcpy(addr, skb_frag_page(frag), skb_frag_size(frag)); - - __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++, - addr, 0, skb_frag_size(frag)); - } - -out: - xsk_buff_free(xdp); - return skb; -} - -/** * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ * @xdp_ring: XDP Tx ring * @xsk_pool: AF_XDP buffer pool pointer @@ -669,10 +569,10 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, struct xsk_buff_pool *xsk_pool, int budget) { + struct xdp_buff *first = (struct xdp_buff *)rx_ring->xsk; unsigned int total_rx_bytes = 0, total_rx_packets = 0; u32 ntc = rx_ring->next_to_clean; u32 ntu = rx_ring->next_to_use; - struct xdp_buff *first = NULL; struct ice_tx_ring *xdp_ring; unsigned int xdp_xmit = 0; struct bpf_prog *xdp_prog; @@ -686,9 +586,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, xdp_prog = READ_ONCE(rx_ring->xdp_prog); xdp_ring = rx_ring->xdp_ring; - if (ntc != rx_ring->first_desc) - 
first = *ice_xdp_buf(rx_ring, rx_ring->first_desc); - while (likely(total_rx_packets < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; unsigned int size, xdp_res = 0; @@ -724,15 +621,17 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, first = xdp; } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) { xsk_buff_free(first); - break; + first = NULL; } if (++ntc == cnt) ntc = 0; - if (ice_is_non_eop(rx_ring, rx_desc)) + if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!first)) continue; + ((struct libeth_xdp_buff *)first)->desc = rx_desc; + xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring, xsk_pool); if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) { @@ -740,7 +639,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, } else if (xdp_res == ICE_XDP_EXIT) { failure = true; first = NULL; - rx_ring->first_desc = ntc; break; } else if (xdp_res == ICE_XDP_CONSUMED) { xsk_buff_free(first); @@ -752,24 +650,20 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, total_rx_packets++; first = NULL; - rx_ring->first_desc = ntc; continue; construct_skb: /* XDP_PASS path */ - skb = ice_construct_skb_zc(rx_ring, first); + skb = xdp_build_skb_from_zc(first); if (!skb) { + xsk_buff_free(first); + first = NULL; + rx_ring->ring_stats->rx_stats.alloc_buf_failed++; - break; + continue; } first = NULL; - rx_ring->first_desc = ntc; - - if (eth_skb_pad(skb)) { - skb = NULL; - continue; - } total_rx_bytes += skb->len; total_rx_packets++; @@ -781,7 +675,9 @@ construct_skb: } rx_ring->next_to_clean = ntc; - entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring); + rx_ring->xsk = (struct libeth_xdp_buff *)first; + + entries_to_alloc = ICE_DESC_UNUSED(rx_ring); if (entries_to_alloc > ICE_RING_QUARTER(rx_ring)) failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, entries_to_alloc); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h index 600cbeeaa203..5275fcedc9e1 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.h +++ b/drivers/net/ethernet/intel/ice/ice_xsk.h @@ -22,7 +22,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi); void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring); void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring); bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool); -int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc); +int ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present); void ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid); void ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector, @@ -77,8 +77,8 @@ static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { } static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { } static inline int -ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi, - bool __always_unused zc) +ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, + bool __always_unused pool_present) { return 0; } diff --git a/drivers/net/ethernet/intel/ice/virt/queues.c b/drivers/net/ethernet/intel/ice/virt/queues.c index 370f6ec2a374..f73d5a3e83d4 100644 --- a/drivers/net/ethernet/intel/ice/virt/queues.c +++ b/drivers/net/ethernet/intel/ice/virt/queues.c @@ -842,18 +842,20 @@ int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || qpi->rxq.databuffer_size < 1024)) goto error_param; + ring->rx_buf_len = qpi->rxq.databuffer_size; + if (qpi->rxq.max_pkt_size > max_frame_size || qpi->rxq.max_pkt_size < 64) goto error_param; - 
ring->max_frame = qpi->rxq.max_pkt_size; + vsi->max_frame = qpi->rxq.max_pkt_size; /* add space for the port VLAN since the VF driver is * not expected to account for it in the MTU * calculation */ if (ice_vf_is_port_vlan_ena(vf)) - ring->max_frame += VLAN_HLEN; + vsi->max_frame += VLAN_HLEN; if (ice_vsi_cfg_single_rxq(vsi, q_idx)) { dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n", diff --git a/drivers/net/ethernet/intel/ice/virt/rss.c b/drivers/net/ethernet/intel/ice/virt/rss.c index cbdbb32d512b..085e69ec0cfc 100644 --- a/drivers/net/ethernet/intel/ice/virt/rss.c +++ b/drivers/net/ethernet/intel/ice/virt/rss.c @@ -36,6 +36,11 @@ static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = { {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP}, {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH}, {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION}, + {VIRTCHNL_PROTO_HDR_GTPC, ICE_FLOW_SEG_HDR_GTPC}, + {VIRTCHNL_PROTO_HDR_L2TPV2, ICE_FLOW_SEG_HDR_L2TPV2}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG}, + {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG}, + {VIRTCHNL_PROTO_HDR_GRE, ICE_FLOW_SEG_HDR_GRE}, }; struct ice_vc_hash_field_match_type { @@ -87,8 +92,125 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, - {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + {VIRTCHNL_PROTO_HDR_IPV4, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)}, + {VIRTCHNL_PROTO_HDR_IPV4, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + 
{VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), + ICE_FLOW_HASH_IPV4}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_IPV4_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)}, {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC), BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)}, {VIRTCHNL_PROTO_HDR_IPV6, 
FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), @@ -110,6 +232,35 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID)}, + {VIRTCHNL_PROTO_HDR_IPV6, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST), + ICE_FLOW_HASH_IPV6_PRE64}, + {VIRTCHNL_PROTO_HDR_IPV6, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA)}, + {VIRTCHNL_PROTO_HDR_IPV6, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA)}, + {VIRTCHNL_PROTO_HDR_IPV6, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + ICE_FLOW_HASH_IPV6_PRE64 | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, + {VIRTCHNL_PROTO_HDR_IPV6, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA) | + BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)}, {VIRTCHNL_PROTO_HDR_TCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT), BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)}, @@ -120,6 +271,25 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT), ICE_FLOW_HASH_TCP_PORT}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_TCP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM), + ICE_FLOW_HASH_TCP_PORT | + BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)}, {VIRTCHNL_PROTO_HDR_UDP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT), BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)}, @@ -130,6 +300,25 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT), ICE_FLOW_HASH_UDP_PORT}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, 
+ {VIRTCHNL_PROTO_HDR_UDP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM), + ICE_FLOW_HASH_UDP_PORT | + BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)}, {VIRTCHNL_PROTO_HDR_SCTP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT), BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)}, @@ -140,6 +329,25 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT), ICE_FLOW_HASH_SCTP_PORT}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) | + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, + {VIRTCHNL_PROTO_HDR_SCTP, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) | + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM), + ICE_FLOW_HASH_SCTP_PORT | + BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)}, {VIRTCHNL_PROTO_HDR_PPPOE, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID), BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)}, @@ -155,8 +363,54 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list[] = { BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)}, {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID), BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)}, + {VIRTCHNL_PROTO_HDR_GTPC, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPC_TEID), + BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)}, + {VIRTCHNL_PROTO_HDR_L2TPV2, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID), + BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID)}, + {VIRTCHNL_PROTO_HDR_L2TPV2, + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID), + BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID)}, }; +static int +ice_vc_rss_hash_update(struct ice_hw *hw, struct ice_vsi *vsi, u8 hash_type) +{ + struct ice_vsi_ctx *ctx; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + /* clear previous hash_type */ + ctx->info.q_opt_rss = vsi->info.q_opt_rss & + ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M; + /* hash_type is passed in as ICE_AQ_VSI_Q_OPT_RSS_<XOR|TPLZ|SYM_TPLZ> */ + ctx->info.q_opt_rss |= FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, + hash_type); + + /* Preserve existing queueing option setting */ + ctx->info.q_opt_tc = vsi->info.q_opt_tc; + ctx->info.q_opt_flags = vsi->info.q_opt_flags; + + ctx->info.valid_sections = + cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); + + ret = ice_update_vsi(hw, vsi->idx, ctx, NULL); + if (ret) { + dev_err(ice_hw_to_dev(hw), "update VSI for RSS failed, err %d aq_err %s\n", + ret, libie_aq_str(hw->adminq.sq_last_status)); + } else { + vsi->info.q_opt_rss = ctx->info.q_opt_rss; + } + + kfree(ctx); + + return ret; +} + /** * ice_vc_validate_pattern * @vf: pointer to the VF info @@ -271,6 +525,11 @@ static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, const struct ice_vc_hash_field_match_type *hf_list; const struct ice_vc_hdr_match_type *hdr_list; int i, hf_list_len, hdr_list_len; + bool outer_ipv4 = false; + bool outer_ipv6 = false; + bool inner_hdr = false; + bool has_gre = false; + u32 *addl_hdrs = &hash_cfg->addl_hdrs; u64 *hash_flds =
&hash_cfg->hash_flds; @@ -290,17 +549,17 @@ static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, for (i = 0; i < rss_cfg->proto_hdrs.count; i++) { struct virtchnl_proto_hdr *proto_hdr = &rss_cfg->proto_hdrs.proto_hdr[i]; - bool hdr_found = false; + u32 hdr_found = 0; int j; - /* Find matched ice headers according to virtchnl headers. */ + /* Find matched ice headers according to virtchnl headers. + * Also figure out the outer type of GTPU headers. + */ for (j = 0; j < hdr_list_len; j++) { struct ice_vc_hdr_match_type hdr_map = hdr_list[j]; - if (proto_hdr->type == hdr_map.vc_hdr) { - *addl_hdrs |= hdr_map.ice_hdr; - hdr_found = true; - } + if (proto_hdr->type == hdr_map.vc_hdr) + hdr_found = hdr_map.ice_hdr; } if (!hdr_found) @@ -318,8 +577,98 @@ static bool ice_vc_parse_rss_cfg(struct ice_hw *hw, break; } } + + if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4 && !inner_hdr) + outer_ipv4 = true; + else if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6 && + !inner_hdr) + outer_ipv6 = true; + /* for GRE and L2TPv2, take the inner header as the input set if + * no field is selected from the outer headers. + * for GTPU, take the inner header and the GTPU TEID as the input set. + */ + else if ((proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_IP || + proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH || + proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN || + proto_hdr->type == + VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP) || + ((proto_hdr->type == VIRTCHNL_PROTO_HDR_L2TPV2 || + proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE) && + *hash_flds == 0)) { + /* set inner_hdr flag, and clean up outer header */ + inner_hdr = true; + + /* clear outer headers */ + *addl_hdrs = 0; + + if (outer_ipv4 && outer_ipv6) + return false; + + if (outer_ipv4) + hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4; + else if (outer_ipv6) + hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6; + else + hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS; + + if (has_gre && outer_ipv4) + hash_cfg->hdr_type = + ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE; + if (has_gre && outer_ipv6) + hash_cfg->hdr_type = + ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE; + + if (proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE) + has_gre = true; + } + + *addl_hdrs |= hdr_found; + + /* refine hash hdrs and fields for IP fragment */ + if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr, + VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID) && + proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4_FRAG) { + *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG; + *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER); + *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID); + VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr, + VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID); + } + if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr, + VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID) && + proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG) { + *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG; + *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER); + *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID); + VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr, + VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID); + } + } + + /* refine the gtpu header if we take the outer headers as the input + * set for a gtpu flow with no inner ip. + */ + if (hash_cfg->hdr_type == ICE_RSS_OUTER_HEADERS && + *addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) { + *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_GTPU_IP); + *addl_hdrs |= ICE_FLOW_SEG_HDR_GTPU_NON_IP; } + /* refine hash field for esp and nat-t-esp.
*/ + if ((*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) && + (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP)) { + *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_UDP); + *addl_hdrs |= ICE_FLOW_SEG_HDR_NAT_T_ESP; + *hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)); + *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI); + } + + /* refine hash hdrs for L4 udp/tcp/sctp. */ + if (*addl_hdrs & (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_SCTP) && + *addl_hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER) + *addl_hdrs &= ~ICE_FLOW_SEG_HDR_IPV_OTHER; + return true; } @@ -337,6 +686,874 @@ static bool ice_vf_adv_rss_offload_ena(u32 caps) } /** + * ice_is_hash_cfg_valid - Check whether an RSS hash context is valid + * @cfg: RSS hash configuration to test + * + * Return: true if both @cfg->hash_flds and @cfg->addl_hdrs are non-zero; false otherwise. + */ +static bool ice_is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg) +{ + return cfg->hash_flds && cfg->addl_hdrs; +} + +/** + * ice_hash_cfg_reset - Reset an RSS hash context + * @cfg: RSS hash configuration to reset + * + * Reset fields of @cfg that store the active rule information. + */ +static void ice_hash_cfg_reset(struct ice_rss_hash_cfg *cfg) +{ + cfg->hash_flds = 0; + cfg->addl_hdrs = 0; + cfg->hdr_type = ICE_RSS_OUTER_HEADERS; + cfg->symm = 0; +} + +/** + * ice_hash_cfg_record - Record an RSS hash context + * @ctx: destination (global) RSS hash configuration + * @cfg: source RSS hash configuration to record + * + * Copy the active rule information from @cfg into @ctx. + */ +static void ice_hash_cfg_record(struct ice_rss_hash_cfg *ctx, + struct ice_rss_hash_cfg *cfg) +{ + ctx->hash_flds = cfg->hash_flds; + ctx->addl_hdrs = cfg->addl_hdrs; + ctx->hdr_type = cfg->hdr_type; + ctx->symm = cfg->symm; +} + +/** + * ice_hash_moveout - Delete an RSS configuration (keep context) + * @vf: VF pointer + * @cfg: RSS hash configuration + * + * Return: 0 on success (including when already absent); -ENOENT if @cfg is + * invalid or VSI is missing; -EBUSY on hardware removal failure. + */ +static int +ice_hash_moveout(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + struct ice_hw *hw = &vf->pf->hw; + int ret; + + if (!ice_is_hash_cfg_valid(cfg) || !vsi) + return -ENOENT; + + ret = ice_rem_rss_cfg(hw, vsi->idx, cfg); + if (ret && ret != -ENOENT) { + dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n", + vf->vf_id, vf->lan_vsi_idx, ret); + return -EBUSY; + } + + return 0; +} + +/** + * ice_hash_moveback - Add an RSS hash configuration for a VF + * @vf: VF pointer + * @cfg: RSS hash configuration to apply + * + * Add @cfg to @vf if the context is valid and VSI exists; programs HW. 
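+ *
+ * A minimal ordering sketch ('conflict' is a hypothetical index of a
+ * context that would shadow the new rule; hw, vsi and new_cfg as in
+ * the callers below):
+ *
+ *	ice_hash_moveout(vf, &ctx->ctx[conflict]);  // unprogram, keep ctx
+ *	ice_add_rss_cfg(hw, vsi, new_cfg);          // program the new rule
+ *	ice_hash_moveback(vf, &ctx->ctx[conflict]); // restore behind it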
+ * + * Return: + * * 0 on success + * * -ENOENT if @cfg is invalid or VSI is missing + * * -EBUSY if hardware programming fails + */ +static int +ice_hash_moveback(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + struct ice_hw *hw = &vf->pf->hw; + int ret; + + if (!ice_is_hash_cfg_valid(cfg) || !vsi) + return -ENOENT; + + ret = ice_add_rss_cfg(hw, vsi, cfg); + if (ret) { + dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n", + vf->vf_id, vf->lan_vsi_idx, ret); + return -EBUSY; + } + + return 0; +} + +/** + * ice_hash_remove - remove a RSS configuration + * @vf: pointer to the VF info + * @cfg: pointer to the RSS hash configuration + * + * This function will delete a RSS hash configuration and also delete the + * hash context which stores the rule info. + * + * Return: 0 on success, or a negative error code on failure. + */ +static int +ice_hash_remove(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) +{ + int ret; + + ret = ice_hash_moveout(vf, cfg); + if (ret && ret != -ENOENT) + return ret; + + ice_hash_cfg_reset(cfg); + + return 0; +} + +struct ice_gtpu_ctx_action { + u32 ctx_idx; + const u32 *remove_list; + int remove_count; + const u32 *moveout_list; + int moveout_count; +}; + +/** + * ice_add_rss_cfg_pre_gtpu - Pre-process the GTPU RSS configuration + * @vf: pointer to the VF info + * @ctx: pointer to the context of the GTPU hash + * @ctx_idx: index of the hash context + * + * Pre-processes the GTPU hash configuration before adding a new + * hash context. It removes or reorders existing hash configurations that may + * conflict with the new one. For example, if a GTPU_UP or GTPU_DWN rule is + * configured after a GTPU_EH rule, the GTPU_EH hash will be matched first due + * to TCAM write and match order (top-down). In such cases, the GTPU_EH rule + * must be moved after the GTPU_UP/DWN rule. Conversely, if a GTPU_EH rule is + * configured after a GTPU_UP/DWN rule, the UP/DWN rules should be removed to + * avoid conflict. 
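+ *
+ * For example, when ICE_HASH_GTPU_CTX_EH_IP_UDP is added, the recorded
+ * UP/DWN IP-UDP contexts are removed outright, while the UP/DWN IP and
+ * IP-TCP contexts are only moved out and re-added once the new rule has
+ * been programmed (see the actions[] table below).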
+ * + * Return: 0 on success or a negative error code on failure + */ +static int ice_add_rss_cfg_pre_gtpu(struct ice_vf *vf, + struct ice_vf_hash_gtpu_ctx *ctx, + u32 ctx_idx) +{ + int ret, i; + + static const u32 remove_eh_ip[] = { + ICE_HASH_GTPU_CTX_EH_IP_UDP, ICE_HASH_GTPU_CTX_EH_IP_TCP, + ICE_HASH_GTPU_CTX_UP_IP, ICE_HASH_GTPU_CTX_UP_IP_UDP, + ICE_HASH_GTPU_CTX_UP_IP_TCP, ICE_HASH_GTPU_CTX_DW_IP, + ICE_HASH_GTPU_CTX_DW_IP_UDP, ICE_HASH_GTPU_CTX_DW_IP_TCP, + }; + + static const u32 remove_eh_ip_udp[] = { + ICE_HASH_GTPU_CTX_UP_IP_UDP, + ICE_HASH_GTPU_CTX_DW_IP_UDP, + }; + static const u32 moveout_eh_ip_udp[] = { + ICE_HASH_GTPU_CTX_UP_IP, + ICE_HASH_GTPU_CTX_UP_IP_TCP, + ICE_HASH_GTPU_CTX_DW_IP, + ICE_HASH_GTPU_CTX_DW_IP_TCP, + }; + + static const u32 remove_eh_ip_tcp[] = { + ICE_HASH_GTPU_CTX_UP_IP_TCP, + ICE_HASH_GTPU_CTX_DW_IP_TCP, + }; + static const u32 moveout_eh_ip_tcp[] = { + ICE_HASH_GTPU_CTX_UP_IP, + ICE_HASH_GTPU_CTX_UP_IP_UDP, + ICE_HASH_GTPU_CTX_DW_IP, + ICE_HASH_GTPU_CTX_DW_IP_UDP, + }; + + static const u32 remove_up_ip[] = { + ICE_HASH_GTPU_CTX_UP_IP_UDP, + ICE_HASH_GTPU_CTX_UP_IP_TCP, + }; + static const u32 moveout_up_ip[] = { + ICE_HASH_GTPU_CTX_EH_IP, + ICE_HASH_GTPU_CTX_EH_IP_UDP, + ICE_HASH_GTPU_CTX_EH_IP_TCP, + }; + + static const u32 moveout_up_ip_udp_tcp[] = { + ICE_HASH_GTPU_CTX_EH_IP, + ICE_HASH_GTPU_CTX_EH_IP_UDP, + ICE_HASH_GTPU_CTX_EH_IP_TCP, + }; + + static const u32 remove_dw_ip[] = { + ICE_HASH_GTPU_CTX_DW_IP_UDP, + ICE_HASH_GTPU_CTX_DW_IP_TCP, + }; + static const u32 moveout_dw_ip[] = { + ICE_HASH_GTPU_CTX_EH_IP, + ICE_HASH_GTPU_CTX_EH_IP_UDP, + ICE_HASH_GTPU_CTX_EH_IP_TCP, + }; + + static const struct ice_gtpu_ctx_action actions[] = { + { ICE_HASH_GTPU_CTX_EH_IP, remove_eh_ip, + ARRAY_SIZE(remove_eh_ip), NULL, 0 }, + { ICE_HASH_GTPU_CTX_EH_IP_UDP, remove_eh_ip_udp, + ARRAY_SIZE(remove_eh_ip_udp), moveout_eh_ip_udp, + ARRAY_SIZE(moveout_eh_ip_udp) }, + { ICE_HASH_GTPU_CTX_EH_IP_TCP, remove_eh_ip_tcp, + ARRAY_SIZE(remove_eh_ip_tcp), moveout_eh_ip_tcp, + ARRAY_SIZE(moveout_eh_ip_tcp) }, + { ICE_HASH_GTPU_CTX_UP_IP, remove_up_ip, + ARRAY_SIZE(remove_up_ip), moveout_up_ip, + ARRAY_SIZE(moveout_up_ip) }, + { ICE_HASH_GTPU_CTX_UP_IP_UDP, NULL, 0, moveout_up_ip_udp_tcp, + ARRAY_SIZE(moveout_up_ip_udp_tcp) }, + { ICE_HASH_GTPU_CTX_UP_IP_TCP, NULL, 0, moveout_up_ip_udp_tcp, + ARRAY_SIZE(moveout_up_ip_udp_tcp) }, + { ICE_HASH_GTPU_CTX_DW_IP, remove_dw_ip, + ARRAY_SIZE(remove_dw_ip), moveout_dw_ip, + ARRAY_SIZE(moveout_dw_ip) }, + { ICE_HASH_GTPU_CTX_DW_IP_UDP, NULL, 0, moveout_dw_ip, + ARRAY_SIZE(moveout_dw_ip) }, + { ICE_HASH_GTPU_CTX_DW_IP_TCP, NULL, 0, moveout_dw_ip, + ARRAY_SIZE(moveout_dw_ip) }, + }; + + for (i = 0; i < ARRAY_SIZE(actions); i++) { + if (actions[i].ctx_idx != ctx_idx) + continue; + + if (actions[i].remove_list) { + for (int j = 0; j < actions[i].remove_count; j++) { + u16 rm = actions[i].remove_list[j]; + + ret = ice_hash_remove(vf, &ctx->ctx[rm]); + if (ret && ret != -ENOENT) + return ret; + } + } + + if (actions[i].moveout_list) { + for (int j = 0; j < actions[i].moveout_count; j++) { + u16 mv = actions[i].moveout_list[j]; + + ret = ice_hash_moveout(vf, &ctx->ctx[mv]); + if (ret && ret != -ENOENT) + return ret; + } + } + break; + } + + return 0; +} + +/** + * ice_add_rss_cfg_pre_ip - Pre-process IP-layer RSS configuration + * @vf: VF pointer + * @ctx: IP L4 hash context (ESP/UDP-ESP/AH/PFCP and UDP/TCP/SCTP) + * + * Remove covered/recorded IP RSS configurations prior to adding a new one. 
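+ *
+ * Note the walk below starts at index 1: assuming ICE_HASH_IP_CTX_IP is
+ * slot 0 of the context table, the plain-IP slot itself is skipped and
+ * every more specific recorded context (ESP, UDP-ESP, AH, PFCP, UDP,
+ * TCP, SCTP) is removed before the new plain-IP rule is added.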
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_add_rss_cfg_pre_ip(struct ice_vf *vf, struct ice_vf_hash_ip_ctx *ctx)
+{
+	int i, ret;
+
+	for (i = 1; i < ICE_HASH_IP_CTX_MAX; i++)
+		if (ice_is_hash_cfg_valid(&ctx->ctx[i])) {
+			ret = ice_hash_remove(vf, &ctx->ctx[i]);
+			if (ret)
+				return ret;
+		}
+
+	return 0;
+}
+
+/**
+ * ice_calc_gtpu_ctx_idx - Calculate GTPU hash context index
+ * @hdrs: Bitmask of protocol headers prefixed with ICE_FLOW_SEG_HDR_*
+ *
+ * Determine the GTPU hash context index based on the combination of
+ * encapsulation headers (GTPU_EH, GTPU_UP, GTPU_DWN) and transport
+ * protocols (UDP, TCP) within IPv4 or IPv6 flows.
+ *
+ * Return: A valid context index (0-8) if the header combination is supported,
+ * or ICE_HASH_GTPU_CTX_MAX if the combination is invalid.
+ */
+static enum ice_hash_gtpu_ctx_type ice_calc_gtpu_ctx_idx(u32 hdrs)
+{
+	u32 eh_idx, ip_idx;
+
+	if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH)
+		eh_idx = 0;
+	else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP)
+		eh_idx = 1;
+	else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
+		eh_idx = 2;
+	else
+		return ICE_HASH_GTPU_CTX_MAX;
+
+	ip_idx = 0;
+	if (hdrs & ICE_FLOW_SEG_HDR_UDP)
+		ip_idx = 1;
+	else if (hdrs & ICE_FLOW_SEG_HDR_TCP)
+		ip_idx = 2;
+
+	if (hdrs & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
+		return eh_idx * 3 + ip_idx;
+	else
+		return ICE_HASH_GTPU_CTX_MAX;
+}
+
+/**
+ * ice_map_ip_ctx_idx - map the index of the IP L4 hash context
+ * @hdrs: protocol headers prefixed with ICE_FLOW_SEG_HDR_XXX.
+ *
+ * The IP L4 hash context uses the index to classify IPv4/IPv6 flows with
+ * ESP/UDP_ESP/AH/PFCP and non-tunnel UDP/TCP/SCTP;
+ * this function maps the index based on the protocol headers.
+ *
+ * Return: The mapped IP context index on success, or ICE_HASH_IP_CTX_MAX
+ * if no matching context is found.
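+ *
+ * For example, both of the following return ICE_HASH_IP_CTX_IP_UDP, the
+ * second one via the default-RSS entries at the end of the table:
+ *
+ *	ice_map_ip_ctx_idx(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ *			   ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP);
+ *	ice_map_ip_ctx_idx(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP);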
+ */ +static u8 ice_map_ip_ctx_idx(u32 hdrs) +{ + u8 i; + + static struct { + u32 hdrs; + u8 ctx_idx; + } ip_ctx_idx_map[] = { + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_ESP, + ICE_HASH_IP_CTX_IP_ESP }, + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_NAT_T_ESP, + ICE_HASH_IP_CTX_IP_UDP_ESP }, + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_AH, + ICE_HASH_IP_CTX_IP_AH }, + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_PFCP_SESSION, + ICE_HASH_IP_CTX_IP_PFCP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP, + ICE_HASH_IP_CTX_IP_UDP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP, + ICE_HASH_IP_CTX_IP_TCP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP, + ICE_HASH_IP_CTX_IP_SCTP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, + ICE_HASH_IP_CTX_IP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_ESP, + ICE_HASH_IP_CTX_IP_ESP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_NAT_T_ESP, + ICE_HASH_IP_CTX_IP_UDP_ESP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_AH, + ICE_HASH_IP_CTX_IP_AH }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER | + ICE_FLOW_SEG_HDR_PFCP_SESSION, + ICE_HASH_IP_CTX_IP_PFCP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP, + ICE_HASH_IP_CTX_IP_UDP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP, + ICE_HASH_IP_CTX_IP_TCP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP, + ICE_HASH_IP_CTX_IP_SCTP }, + { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN | + ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, + ICE_HASH_IP_CTX_IP }, + /* the remaining mappings are used for default RSS */ + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP, + ICE_HASH_IP_CTX_IP_UDP }, + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP, + ICE_HASH_IP_CTX_IP_TCP }, + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP, + ICE_HASH_IP_CTX_IP_SCTP }, + { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, + ICE_HASH_IP_CTX_IP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP, + ICE_HASH_IP_CTX_IP_UDP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP, + ICE_HASH_IP_CTX_IP_TCP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP, + ICE_HASH_IP_CTX_IP_SCTP }, + { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, + ICE_HASH_IP_CTX_IP }, + }; + + for (i = 0; i < ARRAY_SIZE(ip_ctx_idx_map); i++) { + if (hdrs == ip_ctx_idx_map[i].hdrs) + return ip_ctx_idx_map[i].ctx_idx; + } + + return ICE_HASH_IP_CTX_MAX; +} + +/** + * ice_add_rss_cfg_pre - Prepare RSS configuration context for a VF + * @vf: pointer to the VF structure + * @cfg: pointer to the RSS hash configuration + * + * Prepare the RSS hash context for a given VF based on the additional + * protocol headers specified in @cfg. This includes pre-configuration + * for IP and GTPU-based flows. + * + * If the configuration matches a known IP context, the function sets up + * the appropriate IP hash context. If the configuration includes GTPU + * headers, it prepares the GTPU-specific context accordingly. + * + * Return: 0 on success, or a negative error code on failure. 
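+ *
+ * A dispatch sketch, assuming the context ordering implied by
+ * ice_calc_gtpu_ctx_idx() (eh_idx * 3 + ip_idx):
+ *
+ *	cfg->addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP |
+ *			 ICE_FLOW_SEG_HDR_GTPU_EH;
+ *	ice_add_rss_cfg_pre(vf, cfg);
+ *		// -> ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv4,
+ *		//			       ICE_HASH_GTPU_CTX_EH_IP_UDP)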
+ */ +static int +ice_add_rss_cfg_pre(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) +{ + u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs); + u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs); + + if (ip_ctx_idx == ICE_HASH_IP_CTX_IP) { + int ret = 0; + + if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) + ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v4); + else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) + ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v6); + + if (ret) + return ret; + } + + if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) { + return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv4, + ice_gtpu_ctx_idx); + } else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) { + return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv6, + ice_gtpu_ctx_idx); + } + + return 0; +} + +/** + * ice_add_rss_cfg_post_gtpu - Post-process GTPU RSS configuration + * @vf: pointer to the VF info + * @ctx: pointer to the context of the GTPU hash + * @cfg: pointer to the RSS hash configuration + * @ctx_idx: index of the hash context + * + * Post-processes the GTPU hash configuration after a new hash + * context has been successfully added. It updates the context with the new + * configuration and restores any previously removed hash contexts that need + * to be re-applied. This ensures proper TCAM rule ordering and avoids + * conflicts between overlapping GTPU rules. + * + * Return: 0 on success or a negative error code on failure + */ +static int ice_add_rss_cfg_post_gtpu(struct ice_vf *vf, + struct ice_vf_hash_gtpu_ctx *ctx, + struct ice_rss_hash_cfg *cfg, u32 ctx_idx) +{ + /* GTPU hash moveback lookup table indexed by context ID. + * Each entry is a bitmap indicating which contexts need moveback + * operations when the corresponding context index is processed. 
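+	 *
+	 * E.g. ice_gtpu_moveback_tbl[ICE_HASH_GTPU_CTX_EH_IP_UDP] restores
+	 * exactly the UP/DWN IP and IP-TCP contexts that
+	 * ice_add_rss_cfg_pre_gtpu() moved out before this rule was written.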
+ */ + static const unsigned long + ice_gtpu_moveback_tbl[ICE_HASH_GTPU_CTX_MAX] = { + [ICE_HASH_GTPU_CTX_EH_IP] = 0, + [ICE_HASH_GTPU_CTX_EH_IP_UDP] = + BIT(ICE_HASH_GTPU_CTX_UP_IP) | + BIT(ICE_HASH_GTPU_CTX_UP_IP_TCP) | + BIT(ICE_HASH_GTPU_CTX_DW_IP) | + BIT(ICE_HASH_GTPU_CTX_DW_IP_TCP), + [ICE_HASH_GTPU_CTX_EH_IP_TCP] = + BIT(ICE_HASH_GTPU_CTX_UP_IP) | + BIT(ICE_HASH_GTPU_CTX_UP_IP_UDP) | + BIT(ICE_HASH_GTPU_CTX_DW_IP) | + BIT(ICE_HASH_GTPU_CTX_DW_IP_UDP), + [ICE_HASH_GTPU_CTX_UP_IP] = + BIT(ICE_HASH_GTPU_CTX_EH_IP) | + BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) | + BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP), + [ICE_HASH_GTPU_CTX_UP_IP_UDP] = + BIT(ICE_HASH_GTPU_CTX_EH_IP) | + BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) | + BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP), + [ICE_HASH_GTPU_CTX_UP_IP_TCP] = + BIT(ICE_HASH_GTPU_CTX_EH_IP) | + BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) | + BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP), + [ICE_HASH_GTPU_CTX_DW_IP] = + BIT(ICE_HASH_GTPU_CTX_EH_IP) | + BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) | + BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP), + [ICE_HASH_GTPU_CTX_DW_IP_UDP] = + BIT(ICE_HASH_GTPU_CTX_EH_IP) | + BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) | + BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP), + [ICE_HASH_GTPU_CTX_DW_IP_TCP] = + BIT(ICE_HASH_GTPU_CTX_EH_IP) | + BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) | + BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP), + }; + unsigned long moveback_mask; + int ret; + int i; + + if (unlikely(ctx_idx >= ICE_HASH_GTPU_CTX_MAX)) + return 0; + + ctx->ctx[ctx_idx].addl_hdrs = cfg->addl_hdrs; + ctx->ctx[ctx_idx].hash_flds = cfg->hash_flds; + ctx->ctx[ctx_idx].hdr_type = cfg->hdr_type; + ctx->ctx[ctx_idx].symm = cfg->symm; + + moveback_mask = ice_gtpu_moveback_tbl[ctx_idx]; + for_each_set_bit(i, &moveback_mask, ICE_HASH_GTPU_CTX_MAX) { + ret = ice_hash_moveback(vf, &ctx->ctx[i]); + if (ret && ret != -ENOENT) + return ret; + } + + return 0; +} + +static int +ice_add_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg) +{ + u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs); + u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs); + + if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) { + if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) + ice_hash_cfg_record(&vf->hash_ctx.v4.ctx[ip_ctx_idx], cfg); + else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) + ice_hash_cfg_record(&vf->hash_ctx.v6.ctx[ip_ctx_idx], cfg); + } + + if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) { + return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv4, + cfg, ice_gtpu_ctx_idx); + } else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) { + return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv6, + cfg, ice_gtpu_ctx_idx); + } + + return 0; +} + +/** + * ice_rem_rss_cfg_post - post-process the RSS configuration + * @vf: pointer to the VF info + * @cfg: pointer to the RSS hash configuration + * + * Post process the RSS hash configuration after deleting a hash + * config. Such as, it will reset the hash context for the GTPU hash. 
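+ *
+ * E.g. after an IPv4 GTPU_EH rule is deleted, the matching slot in
+ * vf->hash_ctx.ipv4 is cleared, so a later ice_hash_moveback() sees an
+ * invalid context and will not re-program the removed rule.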
+ */
+static void
+ice_rem_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+	u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
+	u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
+
+	if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) {
+		if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+			ice_hash_cfg_reset(&vf->hash_ctx.v4.ctx[ip_ctx_idx]);
+		else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+			ice_hash_cfg_reset(&vf->hash_ctx.v6.ctx[ip_ctx_idx]);
+	}
+
+	if (ice_gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
+		return;
+
+	if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+		ice_hash_cfg_reset(&vf->hash_ctx.ipv4.ctx[ice_gtpu_ctx_idx]);
+	else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+		ice_hash_cfg_reset(&vf->hash_ctx.ipv6.ctx[ice_gtpu_ctx_idx]);
+}
+
+/**
+ * ice_rem_rss_cfg_wrap - Wrapper for deleting an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Wrapper function to delete a flow profile based on an RSS configuration,
+ * and also post-process the hash context based on the rollback mechanism
+ * with which ice_add_rss_cfg_wrap() handles rule conflicts.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_rem_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+	struct device *dev = ice_pf_to_dev(vf->pf);
+	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+	struct ice_hw *hw = &vf->pf->hw;
+	int ret;
+
+	ret = ice_rem_rss_cfg(hw, vsi->idx, cfg);
+	/* We just ignore -ENOENT, because if two configurations share the same
+	 * profile, removing one of them actually removes both, since the
+	 * profile is deleted.
+	 */
+	if (ret && ret != -ENOENT) {
+		dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+			vf->vf_id, vf->lan_vsi_idx, ret);
+		return ret;
+	}
+
+	ice_rem_rss_cfg_post(vf, cfg);
+
+	return 0;
+}
+
+/**
+ * ice_add_rss_cfg_wrap - Wrapper for adding an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Add a flow profile based on an RSS configuration. Use a rollback
+ * mechanism to handle rule conflicts due to the top-down TCAM
+ * write sequence.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_add_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+	struct device *dev = ice_pf_to_dev(vf->pf);
+	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+	struct ice_hw *hw = &vf->pf->hw;
+	int ret;
+
+	if (ice_add_rss_cfg_pre(vf, cfg))
+		return -EINVAL;
+
+	ret = ice_add_rss_cfg(hw, vsi, cfg);
+	if (ret) {
+		dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+			vf->vf_id, vf->lan_vsi_idx, ret);
+		return ret;
+	}
+
+	if (ice_add_rss_cfg_post(vf, cfg))
+		ret = -EINVAL;
+
+	return ret;
+}
+
+/**
+ * ice_parse_raw_rss_pattern - Parse raw pattern spec and mask for RSS
+ * @vf: pointer to the VF info
+ * @proto: pointer to the virtchnl protocol header
+ * @raw_cfg: pointer to the RSS raw pattern configuration
+ *
+ * Parser function that takes the spec and mask from the virtchnl message
+ * and parses them into the corresponding profile and offsets. The profile
+ * is used to add the RSS configuration.
+ *
+ * Return: 0 on success; negative error code on failure.
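+ *
+ * For example, to hash only on the IPv4 destination address, a VF would
+ * pass a template packet in @proto->raw.spec and a same-length mask in
+ * @proto->raw.mask whose bytes are 0xff across the destination address
+ * and zero elsewhere.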
+ */ +static int +ice_parse_raw_rss_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto, + struct ice_rss_raw_cfg *raw_cfg) +{ + struct ice_parser_result pkt_parsed; + struct ice_hw *hw = &vf->pf->hw; + struct ice_parser_profile prof; + struct ice_parser *psr; + u8 *pkt_buf, *msk_buf; + u16 pkt_len; + int ret = 0; + + pkt_len = proto->raw.pkt_len; + if (!pkt_len) + return -EINVAL; + if (pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET) + pkt_len = VIRTCHNL_MAX_SIZE_RAW_PACKET; + + pkt_buf = kzalloc(pkt_len, GFP_KERNEL); + msk_buf = kzalloc(pkt_len, GFP_KERNEL); + if (!pkt_buf || !msk_buf) { + ret = -ENOMEM; + goto free_alloc; + } + + memcpy(pkt_buf, proto->raw.spec, pkt_len); + memcpy(msk_buf, proto->raw.mask, pkt_len); + + psr = ice_parser_create(hw); + if (IS_ERR(psr)) { + ret = PTR_ERR(psr); + goto free_alloc; + } + + ret = ice_parser_run(psr, pkt_buf, pkt_len, &pkt_parsed); + if (ret) + goto parser_destroy; + + ret = ice_parser_profile_init(&pkt_parsed, pkt_buf, msk_buf, + pkt_len, ICE_BLK_RSS, &prof); + if (ret) + goto parser_destroy; + + memcpy(&raw_cfg->prof, &prof, sizeof(prof)); + +parser_destroy: + ice_parser_destroy(psr); +free_alloc: + kfree(pkt_buf); + kfree(msk_buf); + return ret; +} + +/** + * ice_add_raw_rss_cfg - add RSS configuration for raw pattern + * @vf: pointer to the VF info + * @cfg: pointer to the RSS raw pattern configuration + * + * This function adds the RSS configuration for raw pattern. + * Check if current profile is matched. If not, remove the old + * one and add the new profile to HW directly. Update the symmetric + * hash configuration as well. + * + * Return: 0 on success; negative error code on failure. + */ +static int +ice_add_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg) +{ + struct ice_parser_profile *prof = &cfg->prof; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_rss_prof_info *rss_prof; + struct ice_hw *hw = &vf->pf->hw; + int i, ptg, ret = 0; + u16 vsi_handle; + u64 id; + + vsi_handle = vf->lan_vsi_idx; + id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX); + + ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id]; + rss_prof = &vf->rss_prof_info[ptg]; + + /* check if ptg already has a profile */ + if (rss_prof->prof.fv_num) { + for (i = 0; i < ICE_MAX_FV_WORDS; i++) { + if (rss_prof->prof.fv[i].proto_id != + prof->fv[i].proto_id || + rss_prof->prof.fv[i].offset != + prof->fv[i].offset) + break; + } + + /* current profile is matched, check symmetric hash */ + if (i == ICE_MAX_FV_WORDS) { + if (rss_prof->symm != cfg->symm) + goto update_symm; + return ret; + } + + /* current profile is not matched, remove it */ + ret = + ice_rem_prof_id_flow(hw, ICE_BLK_RSS, + ice_get_hw_vsi_num(hw, vsi_handle), + id); + if (ret) { + dev_err(dev, "remove RSS flow failed\n"); + return ret; + } + + ret = ice_rem_prof(hw, ICE_BLK_RSS, id); + if (ret) { + dev_err(dev, "remove RSS profile failed\n"); + return ret; + } + } + + /* add new profile */ + ret = ice_flow_set_parser_prof(hw, vsi_handle, 0, prof, ICE_BLK_RSS); + if (ret) { + dev_err(dev, "HW profile add failed\n"); + return ret; + } + + memcpy(&rss_prof->prof, prof, sizeof(struct ice_parser_profile)); + +update_symm: + rss_prof->symm = cfg->symm; + ice_rss_update_raw_symm(hw, cfg, id); + return ret; +} + +/** + * ice_rem_raw_rss_cfg - remove RSS configuration for raw pattern + * @vf: pointer to the VF info + * @cfg: pointer to the RSS raw pattern configuration + * + * This function removes the RSS configuration for raw pattern. + * Check if vsi group is already removed first. 
If not, remove the + * profile. + * + * Return: 0 on success; negative error code on failure. + */ +static int +ice_rem_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg) +{ + struct ice_parser_profile *prof = &cfg->prof; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_hw *hw = &vf->pf->hw; + int ptg, ret = 0; + u16 vsig, vsi; + u64 id; + + id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX); + + ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id]; + + memset(&vf->rss_prof_info[ptg], 0, + sizeof(struct ice_rss_prof_info)); + + /* check if vsig is already removed */ + vsi = ice_get_hw_vsi_num(hw, vf->lan_vsi_idx); + if (vsi >= ICE_MAX_VSI) { + ret = -EINVAL; + goto err; + } + + vsig = hw->blk[ICE_BLK_RSS].xlt2.vsis[vsi].vsig; + if (vsig) { + ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, id); + if (ret) + goto err; + + ret = ice_rem_prof(hw, ICE_BLK_RSS, id); + if (ret) + goto err; + } + + return ret; + +err: + dev_err(dev, "HW profile remove failed\n"); + return ret; +} + +/** * ice_vc_handle_rss_cfg * @vf: pointer to the VF info * @msg: pointer to the message buffer @@ -352,6 +1569,9 @@ int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) struct device *dev = ice_pf_to_dev(vf->pf); struct ice_hw *hw = &vf->pf->hw; struct ice_vsi *vsi; + u8 hash_type; + bool symm; + int ret; if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n", @@ -387,49 +1607,44 @@ int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) goto error_param; } - if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { + hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR : + ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + + ret = ice_vc_rss_hash_update(hw, vsi, hash_type); + if (ret) + v_ret = ice_err_to_virt_err(ret); goto error_param; } - if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) { - struct ice_vsi_ctx *ctx; - u8 lut_type, hash_type; - int status; + hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ : + ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + ret = ice_vc_rss_hash_update(hw, vsi, hash_type); + if (ret) { + v_ret = ice_err_to_virt_err(ret); + goto error_param; + } - lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; - hash_type = add ? 
ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR : - ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ; + symm = rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; + /* Configure RSS hash for raw pattern */ + if (rss_cfg->proto_hdrs.tunnel_level == 0 && + rss_cfg->proto_hdrs.count == 0) { + struct ice_rss_raw_cfg raw_cfg; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) { - v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + if (ice_parse_raw_rss_pattern(vf, &rss_cfg->proto_hdrs, + &raw_cfg)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - ctx->info.q_opt_rss = - FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) | - FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type); - - /* Preserve existing queueing option setting */ - ctx->info.q_opt_rss |= (vsi->info.q_opt_rss & - ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M); - ctx->info.q_opt_tc = vsi->info.q_opt_tc; - ctx->info.q_opt_flags = vsi->info.q_opt_rss; - - ctx->info.valid_sections = - cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); - - status = ice_update_vsi(hw, vsi->idx, ctx, NULL); - if (status) { - dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n", - status, libie_aq_str(hw->adminq.sq_last_status)); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (add) { + raw_cfg.symm = symm; + if (ice_add_raw_rss_cfg(vf, &raw_cfg)) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; } else { - vsi->info.q_opt_rss = ctx->info.q_opt_rss; + if (ice_rem_raw_rss_cfg(vf, &raw_cfg)) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; } - - kfree(ctx); } else { struct ice_rss_hash_cfg cfg; @@ -448,24 +1663,12 @@ int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add) } if (add) { - if (ice_add_rss_cfg(hw, vsi, &cfg)) { + cfg.symm = symm; + if (ice_add_rss_cfg_wrap(vf, &cfg)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n", - vsi->vsi_num, v_ret); - } } else { - int status; - - status = ice_rem_rss_cfg(hw, vsi->idx, &cfg); - /* We just ignore -ENOENT, because if two configurations - * share the same profile remove one of them actually - * removes both, since the profile is deleted. 
- */ - if (status && status != -ENOENT) { + if (ice_rem_rss_cfg_wrap(vf, &cfg)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n", - vf->vf_id, status); - } } } diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index ca4da0c89979..8cfc68cbfa06 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -131,14 +131,12 @@ enum idpf_cap_field { /** * enum idpf_vport_state - Current vport state - * @__IDPF_VPORT_DOWN: Vport is down - * @__IDPF_VPORT_UP: Vport is up - * @__IDPF_VPORT_STATE_LAST: Must be last, number of states + * @IDPF_VPORT_UP: Vport is up + * @IDPF_VPORT_STATE_NBITS: Must be last, number of states */ enum idpf_vport_state { - __IDPF_VPORT_DOWN, - __IDPF_VPORT_UP, - __IDPF_VPORT_STATE_LAST, + IDPF_VPORT_UP, + IDPF_VPORT_STATE_NBITS }; /** @@ -162,7 +160,7 @@ struct idpf_netdev_priv { u16 vport_idx; u16 max_tx_hdr_size; u16 tx_max_bufs; - enum idpf_vport_state state; + DECLARE_BITMAP(state, IDPF_VPORT_STATE_NBITS); struct rtnl_link_stats64 netstats; spinlock_t stats_lock; }; @@ -735,12 +733,10 @@ static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter) #define IDPF_CAP_RSS (\ VIRTCHNL2_FLOW_IPV4_TCP |\ - VIRTCHNL2_FLOW_IPV4_TCP |\ VIRTCHNL2_FLOW_IPV4_UDP |\ VIRTCHNL2_FLOW_IPV4_SCTP |\ VIRTCHNL2_FLOW_IPV4_OTHER |\ VIRTCHNL2_FLOW_IPV6_TCP |\ - VIRTCHNL2_FLOW_IPV6_TCP |\ VIRTCHNL2_FLOW_IPV6_UDP |\ VIRTCHNL2_FLOW_IPV6_SCTP |\ VIRTCHNL2_FLOW_IPV6_OTHER) diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index a5a1eec9ade8..2589e124e41c 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -6,6 +6,25 @@ #include "idpf_virtchnl.h" /** + * idpf_get_rx_ring_count - get RX ring count + * @netdev: network interface device structure + * + * Return: number of RX rings. 
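+ *
+ * ETHTOOL_GRXRINGS is now answered through this dedicated ethtool op
+ * rather than the driver's get_rxnfc handler, which is why the
+ * GRXRINGS case disappears from idpf_get_rxnfc() below.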
+ */ +static u32 idpf_get_rx_ring_count(struct net_device *netdev) +{ + struct idpf_vport *vport; + u32 num_rxq; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + num_rxq = vport->num_rxq; + idpf_vport_ctrl_unlock(netdev); + + return num_rxq; +} + +/** * idpf_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -28,9 +47,6 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, user_config = &np->adapter->vport_config[np->vport_idx]->user_config; switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = vport->num_rxq; - break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = user_config->num_fsteer_fltrs; cmd->data = idpf_fsteer_max_rules(vport); @@ -386,7 +402,7 @@ static int idpf_get_rxfh(struct net_device *netdev, } rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data; - if (np->state != __IDPF_VPORT_UP) + if (!test_bit(IDPF_VPORT_UP, np->state)) goto unlock_mutex; rxfh->hfunc = ETH_RSS_HASH_TOP; @@ -436,7 +452,7 @@ static int idpf_set_rxfh(struct net_device *netdev, } rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; - if (np->state != __IDPF_VPORT_UP) + if (!test_bit(IDPF_VPORT_UP, np->state)) goto unlock_mutex; if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && @@ -1167,7 +1183,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); - if (np->state != __IDPF_VPORT_UP) { + if (!test_bit(IDPF_VPORT_UP, np->state)) { idpf_vport_ctrl_unlock(netdev); return; @@ -1319,7 +1335,7 @@ static int idpf_get_q_coalesce(struct net_device *netdev, idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); - if (np->state != __IDPF_VPORT_UP) + if (!test_bit(IDPF_VPORT_UP, np->state)) goto unlock_mutex; if (q_num >= vport->num_rxq && q_num >= vport->num_txq) { @@ -1507,7 +1523,7 @@ static int idpf_set_coalesce(struct net_device *netdev, idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); - if (np->state != __IDPF_VPORT_UP) + if (!test_bit(IDPF_VPORT_UP, np->state)) goto unlock_mutex; for (i = 0; i < vport->num_txq; i++) { @@ -1710,7 +1726,7 @@ static void idpf_get_ts_stats(struct net_device *netdev, ts_stats->err = u64_stats_read(&vport->tstamp_stats.discarded); } while (u64_stats_fetch_retry(&vport->tstamp_stats.stats_sync, start)); - if (np->state != __IDPF_VPORT_UP) + if (!test_bit(IDPF_VPORT_UP, np->state)) goto exit; for (u16 i = 0; i < vport->num_txq_grp; i++) { @@ -1757,6 +1773,7 @@ static const struct ethtool_ops idpf_ethtool_ops = { .get_channels = idpf_get_channels, .get_rxnfc = idpf_get_rxnfc, .set_rxnfc = idpf_set_rxnfc, + .get_rx_ring_count = idpf_get_rx_ring_count, .get_rxfh_key_size = idpf_get_rxfh_key_size, .get_rxfh_indir_size = idpf_get_rxfh_indir_size, .get_rxfh = idpf_get_rxfh, diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 8a941f0fb048..7a7e101afeb6 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -519,7 +519,7 @@ static int idpf_del_mac_filter(struct idpf_vport *vport, } spin_unlock_bh(&vport_config->mac_filter_list_lock); - if (np->state == __IDPF_VPORT_UP) { + if (test_bit(IDPF_VPORT_UP, np->state)) { int err; err = idpf_add_del_mac_filters(vport, np, false, async); @@ -590,7 +590,7 @@ static int idpf_add_mac_filter(struct idpf_vport *vport, if (err) return err; - if (np->state == __IDPF_VPORT_UP) + 
if (test_bit(IDPF_VPORT_UP, np->state)) err = idpf_add_del_mac_filters(vport, np, true, async); return err; @@ -894,7 +894,7 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl) { struct idpf_netdev_priv *np = netdev_priv(vport->netdev); - if (np->state <= __IDPF_VPORT_DOWN) + if (!test_bit(IDPF_VPORT_UP, np->state)) return; if (rtnl) @@ -921,7 +921,7 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl) idpf_xdp_rxq_info_deinit_all(vport); idpf_vport_queues_rel(vport); idpf_vport_intr_rel(vport); - np->state = __IDPF_VPORT_DOWN; + clear_bit(IDPF_VPORT_UP, np->state); if (rtnl) rtnl_unlock(); @@ -1345,7 +1345,7 @@ static int idpf_up_complete(struct idpf_vport *vport) netif_tx_start_all_queues(vport->netdev); } - np->state = __IDPF_VPORT_UP; + set_bit(IDPF_VPORT_UP, np->state); return 0; } @@ -1391,7 +1391,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl) struct idpf_vport_config *vport_config; int err; - if (np->state != __IDPF_VPORT_DOWN) + if (test_bit(IDPF_VPORT_UP, np->state)) return -EBUSY; if (rtnl) @@ -1602,7 +1602,7 @@ void idpf_init_task(struct work_struct *work) /* Once state is put into DOWN, driver is ready for dev_open */ np = netdev_priv(vport->netdev); - np->state = __IDPF_VPORT_DOWN; + clear_bit(IDPF_VPORT_UP, np->state); if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags)) idpf_vport_open(vport, true); @@ -1801,7 +1801,7 @@ static void idpf_set_vport_state(struct idpf_adapter *adapter) continue; np = netdev_priv(adapter->netdevs[i]); - if (np->state == __IDPF_VPORT_UP) + if (test_bit(IDPF_VPORT_UP, np->state)) set_bit(IDPF_VPORT_UP_REQUESTED, adapter->vport_config[i]->flags); } @@ -1939,7 +1939,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, enum idpf_vport_reset_cause reset_cause) { struct idpf_netdev_priv *np = netdev_priv(vport->netdev); - enum idpf_vport_state current_state = np->state; + bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state); struct idpf_adapter *adapter = vport->adapter; struct idpf_vport *new_vport; int err; @@ -1990,7 +1990,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, goto free_vport; } - if (current_state <= __IDPF_VPORT_DOWN) { + if (!vport_is_up) { idpf_send_delete_queues_msg(vport); } else { set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags); @@ -2023,7 +2023,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, if (err) goto err_open; - if (current_state == __IDPF_VPORT_UP) + if (vport_is_up) err = idpf_vport_open(vport, false); goto free_vport; @@ -2033,7 +2033,7 @@ err_reset: vport->num_rxq, vport->num_bufq); err_open: - if (current_state == __IDPF_VPORT_UP) + if (vport_is_up) idpf_vport_open(vport, false); free_vport: diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index 8cf4ff697572..de5d722cc21d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -3,16 +3,94 @@ #include "idpf.h" #include "idpf_devids.h" +#include "idpf_lan_vf_regs.h" #include "idpf_virtchnl.h" #define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver" +#define IDPF_NETWORK_ETHERNET_PROGIF 0x01 +#define IDPF_CLASS_NETWORK_ETHERNET_PROGIF \ + (PCI_CLASS_NETWORK_ETHERNET << 8 | IDPF_NETWORK_ETHERNET_PROGIF) +#define IDPF_VF_TEST_VAL 0xfeed0000u + MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_IMPORT_NS("LIBETH"); MODULE_IMPORT_NS("LIBETH_XDP"); MODULE_LICENSE("GPL"); /** + * idpf_get_device_type - Helper to find if it is a VF or PF device + * @pdev: PCI device 
information struct + * + * Return: PF/VF device ID or -%errno on failure. + */ +static int idpf_get_device_type(struct pci_dev *pdev) +{ + void __iomem *addr; + int ret; + + addr = ioremap(pci_resource_start(pdev, 0) + VF_ARQBAL, 4); + if (!addr) { + pci_err(pdev, "Failed to allocate BAR0 mbx region\n"); + return -EIO; + } + + writel(IDPF_VF_TEST_VAL, addr); + if (readl(addr) == IDPF_VF_TEST_VAL) + ret = IDPF_DEV_ID_VF; + else + ret = IDPF_DEV_ID_PF; + + iounmap(addr); + + return ret; +} + +/** + * idpf_dev_init - Initialize device specific parameters + * @adapter: adapter to initialize + * @ent: entry in idpf_pci_tbl + * + * Return: %0 on success, -%errno on failure. + */ +static int idpf_dev_init(struct idpf_adapter *adapter, + const struct pci_device_id *ent) +{ + int ret; + + if (ent->class == IDPF_CLASS_NETWORK_ETHERNET_PROGIF) { + ret = idpf_get_device_type(adapter->pdev); + switch (ret) { + case IDPF_DEV_ID_VF: + idpf_vf_dev_ops_init(adapter); + adapter->crc_enable = true; + break; + case IDPF_DEV_ID_PF: + idpf_dev_ops_init(adapter); + break; + default: + return ret; + } + + return 0; + } + + switch (ent->device) { + case IDPF_DEV_ID_PF: + idpf_dev_ops_init(adapter); + break; + case IDPF_DEV_ID_VF: + idpf_vf_dev_ops_init(adapter); + adapter->crc_enable = true; + break; + default: + return -ENODEV; + } + + return 0; +} + +/** * idpf_remove - Device removal routine * @pdev: PCI device information struct */ @@ -167,21 +245,6 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->req_tx_splitq = true; adapter->req_rx_splitq = true; - switch (ent->device) { - case IDPF_DEV_ID_PF: - idpf_dev_ops_init(adapter); - break; - case IDPF_DEV_ID_VF: - idpf_vf_dev_ops_init(adapter); - adapter->crc_enable = true; - break; - default: - err = -ENODEV; - dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n", - ent->device); - goto err_free; - } - adapter->pdev = pdev; err = pcim_enable_device(pdev); if (err) @@ -261,11 +324,18 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* setup msglvl */ adapter->msg_enable = netif_msg_init(-1, IDPF_AVAIL_NETIF_M); + err = idpf_dev_init(adapter, ent); + if (err) { + dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n", + ent->device); + goto destroy_vc_event_wq; + } + err = idpf_cfg_hw(adapter); if (err) { dev_err(dev, "Failed to configure HW structure for adapter: %d\n", err); - goto err_cfg_hw; + goto destroy_vc_event_wq; } mutex_init(&adapter->vport_ctrl_lock); @@ -286,7 +356,7 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; -err_cfg_hw: +destroy_vc_event_wq: destroy_workqueue(adapter->vc_event_wq); err_vc_event_wq_alloc: destroy_workqueue(adapter->stats_wq); @@ -306,6 +376,7 @@ err_free: static const struct pci_device_id idpf_pci_tbl[] = { { PCI_VDEVICE(INTEL, IDPF_DEV_ID_PF)}, { PCI_VDEVICE(INTEL, IDPF_DEV_ID_VF)}, + { PCI_DEVICE_CLASS(IDPF_CLASS_NETWORK_ETHERNET_PROGIF, ~0)}, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(pci, idpf_pci_tbl); diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index 61e613066140..e3ddf18dcbf5 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -570,7 +570,7 @@ fetch_next_txq_desc: np = netdev_priv(tx_q->netdev); nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); - dont_wake = np->state != __IDPF_VPORT_UP || + dont_wake = !test_bit(IDPF_VPORT_UP, np->state) || 
!netif_carrier_ok(tx_q->netdev); __netif_txq_completed_wake(nq, ss.packets, ss.bytes, IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 828f7c444d30..1d91c56f7469 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -134,7 +134,7 @@ static void idpf_compl_desc_rel(struct idpf_compl_queue *complq) { idpf_xsk_clear_queue(complq, VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); - if (!complq->comp) + if (!complq->desc_ring) return; dma_free_coherent(complq->netdev->dev.parent, complq->size, @@ -922,8 +922,8 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) err = idpf_rx_desc_alloc(vport, q); if (err) { pci_err(vport->adapter->pdev, - "Memory allocation for Rx Queue %u failed\n", - i); + "Memory allocation for Rx queue %u from queue group %u failed\n", + j, i); goto err_out; } } @@ -939,8 +939,8 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) err = idpf_bufq_desc_alloc(vport, q); if (err) { pci_err(vport->adapter->pdev, - "Memory allocation for Rx Buffer Queue %u failed\n", - i); + "Memory allocation for Rx Buffer Queue %u from queue group %u failed\n", + j, i); goto err_out; } } @@ -2275,7 +2275,7 @@ fetch_next_desc: /* Update BQL */ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); - dont_wake = !complq_ok || np->state != __IDPF_VPORT_UP || + dont_wake = !complq_ok || !test_bit(IDPF_VPORT_UP, np->state) || !netif_carrier_ok(tx_q->netdev); /* Check if the TXQ needs to and can be restarted */ __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index cbb5fa30f5a0..44cd4b466c48 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -68,7 +68,7 @@ static void idpf_handle_event_link(struct idpf_adapter *adapter, vport->link_up = v2e->link_status; - if (np->state != __IDPF_VPORT_UP) + if (!test_bit(IDPF_VPORT_UP, np->state)) return; if (vport->link_up) { @@ -2755,7 +2755,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport) /* Don't send get_stats message if the link is down */ - if (np->state <= __IDPF_VPORT_DOWN) + if (!test_bit(IDPF_VPORT_UP, np->state)) return 0; stats_msg.vport_id = cpu_to_le32(vport->vport_id); diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c index 21ce25b0567f..958d16f87424 100644 --- a/drivers/net/ethernet/intel/idpf/xdp.c +++ b/drivers/net/ethernet/intel/idpf/xdp.c @@ -418,7 +418,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport, if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) || !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) || !!vport->xdp_prog == !!prog) { - if (np->state == __IDPF_VPORT_UP) + if (test_bit(IDPF_VPORT_UP, np->state)) idpf_xdp_copy_prog_to_rqs(vport, prog); old = xchg(&vport->xdp_prog, prog); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 10e2445e0ded..b507576b28b2 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2541,6 +2541,13 @@ static int igb_get_rxfh_fields(struct net_device *dev, return 0; } +static u32 igb_get_rx_ring_count(struct net_device *dev) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + return adapter->num_rx_queues; +} + static int igb_get_rxnfc(struct net_device *dev, struct 
ethtool_rxnfc *cmd, u32 *rule_locs) { @@ -2548,10 +2555,6 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, int ret = -EOPNOTSUPP; switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = adapter->num_rx_queues; - ret = 0; - break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = adapter->nfc_filter_count; ret = 0; @@ -3473,6 +3476,7 @@ static const struct ethtool_ops igb_ethtool_ops = { .get_ts_info = igb_get_ts_info, .get_rxnfc = igb_get_rxnfc, .set_rxnfc = igb_set_rxnfc, + .get_rx_ring_count = igb_get_rx_ring_count, .get_eee = igb_get_eee, .set_eee = igb_set_eee, .get_module_info = igb_get_module_info, diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 61dfcd8cb370..ac57212ab02b 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1235,7 +1235,7 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, spin_lock_bh(&hw->mbx_lock); if (hw->mac.ops.set_vfta(hw, vid, true)) { - dev_warn(&adapter->pdev->dev, "Vlan id %d\n is not added", vid); + dev_warn(&adapter->pdev->dev, "Vlan id %d is not added\n", vid); spin_unlock_bh(&hw->mbx_lock); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index bb783042d1af..e94c1922b97a 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1091,15 +1091,19 @@ static int igc_ethtool_get_rxfh_fields(struct net_device *dev, return 0; } +static u32 igc_ethtool_get_rx_ring_count(struct net_device *dev) +{ + struct igc_adapter *adapter = netdev_priv(dev); + + return adapter->num_rx_queues; +} + static int igc_ethtool_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct igc_adapter *adapter = netdev_priv(dev); switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = adapter->num_rx_queues; - return 0; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = adapter->nfc_rule_count; return 0; @@ -2170,6 +2174,7 @@ static const struct ethtool_ops igc_ethtool_ops = { .set_coalesce = igc_ethtool_set_coalesce, .get_rxnfc = igc_ethtool_get_rxnfc, .set_rxnfc = igc_ethtool_set_rxnfc, + .get_rx_ring_count = igc_ethtool_get_rx_ring_count, .get_rxfh_indir_size = igc_ethtool_get_rxfh_indir_size, .get_rxfh = igc_ethtool_get_rxfh, .set_rxfh = igc_ethtool_set_rxfh, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index d5b1b974b4a3..3069b583fd81 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -198,7 +198,7 @@ static int prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, * @hw: pointer to hardware structure * @autoc: value to write to AUTOC * @locked: bool to indicate whether the SW/FW lock was already taken by - * previous proc_autoc_read_82599. + * previous prot_autoc_read_82599. * * This part (82599) may need to hold a the SW/FW lock around all writes to * AUTOC. Likewise after a write we need to do a pipeline reset. 
@@ -1622,7 +1622,7 @@ int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, break; } - /* store source and destination IP masks (big-enian) */ + /* store source and destination IP masks (big-endian) */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, ~input_mask->formatted.src_ip[0]); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 2d660e9edb80..2ad81f687a84 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2805,6 +2805,14 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) return 64; } +static u32 ixgbe_get_rx_ring_count(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev); + + return min_t(u32, adapter->num_rx_queues, + ixgbe_rss_indir_tbl_max(adapter)); +} + static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { @@ -2812,11 +2820,6 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, int ret = -EOPNOTSUPP; switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = min_t(int, adapter->num_rx_queues, - ixgbe_rss_indir_tbl_max(adapter)); - ret = 0; - break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = adapter->fdir_filter_count; ret = 0; @@ -3743,6 +3746,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .get_ethtool_stats = ixgbe_get_ethtool_stats, .get_coalesce = ixgbe_get_coalesce, .set_coalesce = ixgbe_set_coalesce, + .get_rx_ring_count = ixgbe_get_rx_ring_count, .get_rxnfc = ixgbe_get_rxnfc, .set_rxnfc = ixgbe_set_rxnfc, .get_rxfh_indir_size = ixgbe_rss_indir_size, @@ -3791,6 +3795,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops_e610 = { .get_ethtool_stats = ixgbe_get_ethtool_stats, .get_coalesce = ixgbe_get_coalesce, .set_coalesce = ixgbe_set_coalesce, + .get_rx_ring_count = ixgbe_get_rx_ring_count, .get_rxnfc = ixgbe_get_rxnfc, .set_rxnfc = ixgbe_set_rxnfc, .get_rxfh_indir_size = ixgbe_rss_indir_size, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 170a29d162c6..a1d04914fbbc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -318,7 +318,7 @@ static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter) * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB * @adapter: board private structure to initialize * - * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues * and VM pools where appropriate. Also assign queues based on DCB * priorities and map accordingly.. * @@ -492,7 +492,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices * @adapter: board private structure to initialize * - * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues * and VM pools where appropriate. If RSS is available, then also try and * enable RSS and map accordingly. 
* diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3190ce7e44c7..4af3b3e71ff1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7449,7 +7449,7 @@ int ixgbe_open(struct net_device *netdev) adapter->hw.link.link_info.link_cfg_err); err = ixgbe_non_sfp_link_config(&adapter->hw); - if (ixgbe_non_sfp_link_config(&adapter->hw)) + if (err) e_dev_err("Link setup failed, err %d.\n", err); } @@ -12046,7 +12046,7 @@ err_dma: * @pdev: PCI device information struct * * ixgbe_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a + * that it should release a PCI device. This could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index bebad564188e..537a60d5276f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -867,19 +867,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, return 0; } -static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - u32 *rules __always_unused) +static u32 ixgbevf_get_rx_ring_count(struct net_device *dev) { struct ixgbevf_adapter *adapter = netdev_priv(dev); - switch (info->cmd) { - case ETHTOOL_GRXRINGS: - info->data = adapter->num_rx_queues; - return 0; - default: - hw_dbg(&adapter->hw, "Command parameters not supported\n"); - return -EOPNOTSUPP; - } + return adapter->num_rx_queues; } static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev) @@ -987,7 +979,7 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = { .get_ethtool_stats = ixgbevf_get_ethtool_stats, .get_coalesce = ixgbevf_get_coalesce, .set_coalesce = ixgbevf_set_coalesce, - .get_rxnfc = ixgbevf_get_rxnfc, + .get_rx_ring_count = ixgbevf_get_rx_ring_count, .get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size, .get_rxfh_key_size = ixgbevf_get_rxfh_key_size, .get_rxfh = ixgbevf_get_rxfh, diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 039187607e98..516a6fdd23d0 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -241,23 +241,7 @@ struct ixgbevf_q_vector { char name[IFNAMSIZ + 9]; /* for dynamic allocation of rings associated with this q_vector */ - struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp; -#ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int state; -#define IXGBEVF_QV_STATE_IDLE 0 -#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */ -#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */ -#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */ -#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) -#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) -#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ -#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ -#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \ - IXGBEVF_QV_STATE_POLL_YIELD) -#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \ - IXGBEVF_QV_STATE_POLL_YIELD) - spinlock_t lock; -#endif /* CONFIG_NET_RX_BUSY_POLL */ + struct ixgbevf_ring ring[] ____cacheline_internodealigned_in_smp; }; /* microsecond values for various ITR rates shifted by 2 to fit itr register diff --git 
a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 89ccb8eb82c7..7af44f858fa3 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -5012,17 +5012,9 @@ static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) return MVNETA_RSS_LU_TABLE_SIZE; } -static int mvneta_ethtool_get_rxnfc(struct net_device *dev, - struct ethtool_rxnfc *info, - u32 *rules __always_unused) +static u32 mvneta_ethtool_get_rx_ring_count(struct net_device *dev) { - switch (info->cmd) { - case ETHTOOL_GRXRINGS: - info->data = rxq_number; - return 0; - default: - return -EOPNOTSUPP; - } + return rxq_number; } static int mvneta_config_rss(struct mvneta_port *pp) @@ -5356,7 +5348,7 @@ static const struct ethtool_ops mvneta_eth_tool_ops = { .get_ethtool_stats = mvneta_ethtool_get_stats, .get_sset_count = mvneta_ethtool_get_sset_count, .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size, - .get_rxnfc = mvneta_ethtool_get_rxnfc, + .get_rx_ring_count = mvneta_ethtool_get_rx_ring_count, .get_rxfh = mvneta_ethtool_get_rxfh, .set_rxfh = mvneta_ethtool_set_rxfh, .get_link_ksettings = mvneta_ethtool_get_link_ksettings, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index ab0c99aa9f9a..33426fded919 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5580,6 +5580,13 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, return phylink_ethtool_ksettings_set(port->phylink, cmd); } +static u32 mvpp2_ethtool_get_rx_ring_count(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + + return port->nrxqs; +} + static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules) { @@ -5590,9 +5597,6 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, return -EOPNOTSUPP; switch (info->cmd) { - case ETHTOOL_GRXRINGS: - info->data = port->nrxqs; - break; case ETHTOOL_GRXCLSRLCNT: info->rule_cnt = port->n_rfs_rules; break; @@ -5827,6 +5831,7 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .set_pauseparam = mvpp2_ethtool_set_pause_param, .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, + .get_rx_ring_count = mvpp2_ethtool_get_rx_ring_count, .get_rxnfc = mvpp2_ethtool_get_rxnfc, .set_rxnfc = mvpp2_ethtool_set_rxnfc, .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 532813d8d028..244de500963e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -12,4 +12,5 @@ rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \ - rvu_rep.o cn20k/mbox_init.o + rvu_rep.o cn20k/mbox_init.o cn20k/nix.o cn20k/debugfs.o \ + cn20k/npa.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index ec0e11c77cbf..42044cd810b1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -1994,7 +1994,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) nvec = pci_msix_vec_count(cgx->pdev); err = 
pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); - if (err < 0 || err != nvec) { + if (err < 0) { dev_err(dev, "Request for %d msix vectors failed, err %d\n", nvec, err); goto err_release_regions; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c new file mode 100644 index 000000000000..498968bf4cf5 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "struct.h" +#include "debugfs.h" + +void print_nix_cn20k_sq_ctx(struct seq_file *m, + struct nix_cn20k_sq_ctx_s *sq_ctx) +{ + seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n", + sq_ctx->ena, sq_ctx->qint_idx); + seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n", + sq_ctx->substream, sq_ctx->sdp_mcast); + seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n", + sq_ctx->cq, sq_ctx->sqe_way_mask); + + seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n", + sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff); + seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n", + sq_ctx->sso_ena, sq_ctx->smq_rr_weight); + seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n", + sq_ctx->default_chan, sq_ctx->sqb_count); + + seq_printf(m, "W1: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb); + seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub); + seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n", + sq_ctx->sqb_aura, sq_ctx->sq_int); + seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n", + sq_ctx->sq_int_ena, sq_ctx->sqe_stype); + + seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n", + sq_ctx->max_sqe_size, sq_ctx->cq_limit); + seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n", + sq_ctx->lmt_dis, sq_ctx->mnq_dis); + seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n", + sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum); + seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n", + sq_ctx->tail_offset, sq_ctx->smenq_offset); + seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n", + sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld); + + seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n", + sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend); + seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb); + seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb); + seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb); + seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n", + sq_ctx->smenq_next_sqb); + + seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb); + + seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total); + seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n", + sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb); + seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n", + sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena); + seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n", + sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld); + + seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n", + (u64)sq_ctx->scm_lso_rem); + seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs); + seq_printf(m, "W12: pkts \t\t\t%llu\n\n", 
(u64)sq_ctx->pkts); + seq_printf(m, "W13: aged_drop_octs \t\t\t%llu\n\n", + (u64)sq_ctx->aged_drop_octs); + seq_printf(m, "W13: aged_drop_pkts \t\t\t%llu\n\n", + (u64)sq_ctx->aged_drop_pkts); + seq_printf(m, "W14: dropped_octs \t\t%llu\n\n", + (u64)sq_ctx->dropped_octs); + seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n", + (u64)sq_ctx->dropped_pkts); +} + +void print_nix_cn20k_cq_ctx(struct seq_file *m, + struct nix_cn20k_aq_enq_rsp *rsp) +{ + struct nix_cn20k_cq_ctx_s *cq_ctx = &rsp->cq; + + seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base); + + seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr); + seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n", + cq_ctx->avg_con, cq_ctx->cint_idx); + seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n", + cq_ctx->cq_err, cq_ctx->qint_idx); + seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n", + cq_ctx->bpid, cq_ctx->bp_ena); + + seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high); + seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med); + seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low); + seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n", + cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 | + cq_ctx->lbpid_low); + seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena); + + seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n", + cq_ctx->update_time, cq_ctx->avg_level); + seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n", + cq_ctx->head, cq_ctx->tail); + + seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n", + cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int); + seq_printf(m, "W3: qsize \t\t\t%d\nW3:stashing \t\t\t%d\n", + cq_ctx->qsize, cq_ctx->stashing); + + seq_printf(m, "W3: caching \t\t\t%d\n", cq_ctx->caching); + seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac); + seq_printf(m, "W3: stash_thresh \t\t\t%d\n", + cq_ctx->stash_thresh); + + seq_printf(m, "W3: msh_valid \t\t\t%d\nW3:msh_dst \t\t\t%d\n", + cq_ctx->msh_valid, cq_ctx->msh_dst); + + seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n", + cq_ctx->cpt_drop_err_en); + seq_printf(m, "W3: ena \t\t\t%d\n", + cq_ctx->ena); + seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n", + cq_ctx->drop_ena, cq_ctx->drop); + seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp); + + seq_printf(m, "W4: lbpid_ext \t\t\t\t%d\n\n", cq_ctx->lbpid_ext); + seq_printf(m, "W4: bpid_ext \t\t\t\t%d\n\n", cq_ctx->bpid_ext); +} + +void print_npa_cn20k_aura_ctx(struct seq_file *m, + struct npa_cn20k_aq_enq_rsp *rsp) +{ + struct npa_cn20k_aura_s *aura = &rsp->aura; + + seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); + + seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n", + aura->ena, aura->pool_caching); + seq_printf(m, "W1: avg con\t\t%d\n", aura->avg_con); + seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n", + aura->pool_drop_ena, aura->aura_drop_ena); + seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n", + aura->bp_ena, aura->aura_drop); + seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n", + aura->shift, aura->avg_level); + + seq_printf(m, "W2: count\t\t%llu\nW2: nix_bpid\t\t%d\n", + (u64)aura->count, aura->bpid); + + seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n", + (u64)aura->limit, aura->bp, aura->fc_ena); + + seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n", + aura->fc_up_crossing, aura->fc_stype); + seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits); + + seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr); + + 
seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n", + aura->pool_drop, aura->update_time); + seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n", + aura->err_int, aura->err_int_ena); + seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n", + aura->thresh_int, aura->thresh_int_ena); + seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n", + aura->thresh_up, aura->thresh_qint_idx); + seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx); + + seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh); + seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst); +} + +void print_npa_cn20k_pool_ctx(struct seq_file *m, + struct npa_cn20k_aq_enq_rsp *rsp) +{ + struct npa_cn20k_pool_s *pool = &rsp->pool; + + seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); + + seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n", + pool->ena, pool->nat_align); + seq_printf(m, "W1: stack_caching\t%d\n", + pool->stack_caching); + seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n", + pool->buf_offset, pool->buf_size); + + seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n", + pool->stack_max_pages, pool->stack_pages); + + seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n", + pool->stack_offset, pool->shift, pool->avg_level); + seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n", + pool->avg_con, pool->fc_ena, pool->fc_stype); + seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n", + pool->fc_hyst_bits, pool->fc_up_crossing); + seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time); + + seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr); + + seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start); + + seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end); + + seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n", + pool->err_int, pool->err_int_ena); + seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int); + seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n", + pool->thresh_int_ena, pool->thresh_up); + seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n", + pool->thresh_qint_idx, pool->err_qint_idx); + seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h new file mode 100644 index 000000000000..a2e3a2cd6edb --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#ifndef DEBUGFS_H +#define DEBUGFS_H + +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "struct.h" +#include "../mbox.h" + +void print_nix_cn20k_sq_ctx(struct seq_file *m, + struct nix_cn20k_sq_ctx_s *sq_ctx); +void print_nix_cn20k_cq_ctx(struct seq_file *m, + struct nix_cn20k_aq_enq_rsp *rsp); +void print_npa_cn20k_aura_ctx(struct seq_file *m, + struct npa_cn20k_aq_enq_rsp *rsp); +void print_npa_cn20k_pool_ctx(struct seq_file *m, + struct npa_cn20k_aq_enq_rsp *rsp); + +#endif diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c new file mode 100644 index 000000000000..aa2016fd1bba --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "struct.h" +#include "../rvu.h" + +int rvu_mbox_handler_nix_cn20k_aq_enq(struct rvu *rvu, + struct nix_cn20k_aq_enq_req *req, + struct nix_cn20k_aq_enq_rsp *rsp) +{ + return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, + (struct nix_aq_enq_rsp *)rsp); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c new file mode 100644 index 000000000000..fe8f926c8b75 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include <linux/module.h> +#include <linux/pci.h> + +#include "struct.h" +#include "../rvu.h" + +int rvu_mbox_handler_npa_cn20k_aq_enq(struct rvu *rvu, + struct npa_cn20k_aq_enq_req *req, + struct npa_cn20k_aq_enq_rsp *rsp) +{ + return rvu_npa_aq_enq_inst(rvu, (struct npa_aq_enq_req *)req, + (struct npa_aq_enq_rsp *)rsp); +} +EXPORT_SYMBOL(rvu_mbox_handler_npa_cn20k_aq_enq); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h index 76ce3ec6da9c..763f6cabd7c2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h @@ -8,6 +8,8 @@ #ifndef STRUCT_H #define STRUCT_H +#define NIX_MAX_CTX_SIZE 128 + /* * CN20k RVU PF MBOX Interrupt Vector Enumeration * @@ -37,4 +39,342 @@ enum rvu_af_cn20k_int_vec_e { RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1 = 0x9, RVU_AF_CN20K_INT_VEC_CNT = 0xa, }; + +struct nix_cn20k_sq_ctx_s { + u64 ena : 1; /* W0 */ + u64 qint_idx : 6; + u64 substream : 20; + u64 sdp_mcast : 1; + u64 cq : 20; + u64 sqe_way_mask : 16; + u64 smq : 11; /* W1 */ + u64 cq_ena : 1; + u64 xoff : 1; + u64 sso_ena : 1; + u64 smq_rr_weight : 14; + u64 default_chan : 12; + u64 sqb_count : 16; + u64 reserved_120_120 : 1; + u64 smq_rr_count_lb : 7; + u64 smq_rr_count_ub : 25; /* W2 */ + u64 sqb_aura : 20; + u64 sq_int : 8; + u64 sq_int_ena : 8; + u64 sqe_stype : 2; + u64 reserved_191_191 : 1; + u64 max_sqe_size : 2; /* W3 */ + u64 cq_limit : 8; + u64 lmt_dis : 1; + u64 mnq_dis : 1; + u64 smq_next_sq : 20; + u64 smq_lso_segnum : 8; + u64 tail_offset : 6; + u64 smenq_offset : 6; + u64 head_offset : 6; + u64 smenq_next_sqb_vld : 1; + u64 smq_pend : 1; + u64 smq_next_sq_vld : 1; + u64 reserved_253_255 : 3; + u64 next_sqb : 64; /* W4 */ + u64 tail_sqb : 64; /* W5 */ + u64 smenq_sqb : 64; /* W6 */ + u64 smenq_next_sqb : 64; /* W7 */ 
+ u64 head_sqb : 64; /* W8 */ + u64 reserved_576_583 : 8; /* W9 */ + u64 vfi_lso_total : 18; + u64 vfi_lso_sizem1 : 3; + u64 vfi_lso_sb : 8; + u64 vfi_lso_mps : 14; + u64 vfi_lso_vlan0_ins_ena : 1; + u64 vfi_lso_vlan1_ins_ena : 1; + u64 vfi_lso_vld : 1; + u64 reserved_630_639 : 10; + u64 scm_lso_rem : 18; /* W10 */ + u64 reserved_658_703 : 46; + u64 octs : 48; /* W11 */ + u64 reserved_752_767 : 16; + u64 pkts : 48; /* W12 */ + u64 reserved_816_831 : 16; + u64 aged_drop_octs : 32; /* W13 */ + u64 aged_drop_pkts : 32; + u64 dropped_octs : 48; /* W14 */ + u64 reserved_944_959 : 16; + u64 dropped_pkts : 48; /* W15 */ + u64 reserved_1008_1023 : 16; +}; + +static_assert(sizeof(struct nix_cn20k_sq_ctx_s) == NIX_MAX_CTX_SIZE); + +struct nix_cn20k_cq_ctx_s { + u64 base : 64; /* W0 */ + u64 lbp_ena : 1; /* W1 */ + u64 lbpid_low : 3; + u64 bp_ena : 1; + u64 lbpid_med : 3; + u64 bpid : 9; + u64 lbpid_high : 3; + u64 qint_idx : 7; + u64 cq_err : 1; + u64 cint_idx : 7; + u64 avg_con : 9; + u64 wrptr : 20; + u64 tail : 20; /* W2 */ + u64 head : 20; + u64 avg_level : 8; + u64 update_time : 16; + u64 bp : 8; /* W3 */ + u64 drop : 8; + u64 drop_ena : 1; + u64 ena : 1; + u64 cpt_drop_err_en : 1; + u64 reserved_211_211 : 1; + u64 msh_dst : 11; + u64 msh_valid : 1; + u64 stash_thresh : 4; + u64 lbp_frac : 4; + u64 caching : 1; + u64 stashing : 1; + u64 reserved_234_235 : 2; + u64 qsize : 4; + u64 cq_err_int : 8; + u64 cq_err_int_ena : 8; + u64 bpid_ext : 2; /* W4 */ + u64 reserved_258_259 : 2; + u64 lbpid_ext : 2; + u64 reserved_262_319 : 58; + u64 reserved_320_383 : 64; /* W5 */ + u64 reserved_384_447 : 64; /* W6 */ + u64 reserved_448_511 : 64; /* W7 */ + u64 padding[8]; +}; + +static_assert(sizeof(struct nix_cn20k_cq_ctx_s) == NIX_MAX_CTX_SIZE); + +struct nix_cn20k_rq_ctx_s { + u64 ena : 1; + u64 sso_ena : 1; + u64 ipsech_ena : 1; + u64 ena_wqwd : 1; + u64 cq : 20; + u64 reserved_24_34 : 11; + u64 port_il4_dis : 1; + u64 port_ol4_dis : 1; + u64 lenerr_dis : 1; + u64 csum_il4_dis : 1; + u64 csum_ol4_dis : 1; + u64 len_il4_dis : 1; + u64 len_il3_dis : 1; + u64 len_ol4_dis : 1; + u64 len_ol3_dis : 1; + u64 wqe_aura : 20; + u64 spb_aura : 20; + u64 lpb_aura : 20; + u64 sso_grp : 10; + u64 sso_tt : 2; + u64 pb_caching : 2; + u64 wqe_caching : 1; + u64 xqe_drop_ena : 1; + u64 spb_drop_ena : 1; + u64 lpb_drop_ena : 1; + u64 pb_stashing : 1; + u64 ipsecd_drop_en : 1; + u64 chi_ena : 1; + u64 reserved_125_127 : 3; + u64 band_prof_id_l : 10; + u64 sso_fc_ena : 1; + u64 policer_ena : 1; + u64 spb_sizem1 : 6; + u64 wqe_skip : 2; + u64 spb_high_sizem1 : 3; + u64 spb_ena : 1; + u64 lpb_sizem1 : 12; + u64 first_skip : 7; + u64 reserved_171_171 : 1; + u64 later_skip : 6; + u64 xqe_imm_size : 6; + u64 band_prof_id_h : 4; + u64 reserved_188_189 : 2; + u64 xqe_imm_copy : 1; + u64 xqe_hdr_split : 1; + u64 xqe_drop : 8; + u64 xqe_pass : 8; + u64 wqe_pool_drop : 8; + u64 wqe_pool_pass : 8; + u64 spb_aura_drop : 8; + u64 spb_aura_pass : 8; + u64 spb_pool_drop : 8; + u64 spb_pool_pass : 8; + u64 lpb_aura_drop : 8; + u64 lpb_aura_pass : 8; + u64 lpb_pool_drop : 8; + u64 lpb_pool_pass : 8; + u64 reserved_288_291 : 4; + u64 rq_int : 8; + u64 rq_int_ena : 8; + u64 qint_idx : 7; + u64 reserved_315_319 : 5; + u64 ltag : 24; + u64 good_utag : 8; + u64 bad_utag : 8; + u64 flow_tagw : 6; + u64 ipsec_vwqe : 1; + u64 vwqe_ena : 1; + u64 vtime_wait : 8; + u64 max_vsize_exp : 4; + u64 vwqe_skip : 2; + u64 reserved_382_383 : 2; + u64 octs : 48; + u64 reserved_432_447 : 16; + u64 pkts : 48; + u64 reserved_496_511 : 16; + u64 drop_octs : 48; + u64 
reserved_560_575 : 16; + u64 drop_pkts : 48; + u64 reserved_624_639 : 16; + u64 re_pkts : 48; + u64 reserved_688_703 : 16; + u64 reserved_704_767 : 64; + u64 reserved_768_831 : 64; + u64 reserved_832_895 : 64; + u64 reserved_896_959 : 64; + u64 reserved_960_1023 : 64; +}; + +static_assert(sizeof(struct nix_cn20k_rq_ctx_s) == NIX_MAX_CTX_SIZE); + +struct npa_cn20k_aura_s { + u64 pool_addr; /* W0 */ + u64 ena : 1; /* W1 */ + u64 reserved_65 : 2; + u64 pool_caching : 1; + u64 reserved_68 : 16; + u64 avg_con : 9; + u64 reserved_93 : 1; + u64 pool_drop_ena : 1; + u64 aura_drop_ena : 1; + u64 bp_ena : 1; + u64 reserved_97_103 : 7; + u64 aura_drop : 8; + u64 shift : 6; + u64 reserved_118_119 : 2; + u64 avg_level : 8; + u64 count : 36; /* W2 */ + u64 reserved_164_167 : 4; + u64 bpid : 12; + u64 reserved_180_191 : 12; + u64 limit : 36; /* W3 */ + u64 reserved_228_231 : 4; + u64 bp : 7; + u64 reserved_239_243 : 5; + u64 fc_ena : 1; + u64 fc_up_crossing : 1; + u64 fc_stype : 2; + u64 fc_hyst_bits : 4; + u64 reserved_252_255 : 4; + u64 fc_addr; /* W4 */ + u64 pool_drop : 8; /* W5 */ + u64 update_time : 16; + u64 err_int : 8; + u64 err_int_ena : 8; + u64 thresh_int : 1; + u64 thresh_int_ena : 1; + u64 thresh_up : 1; + u64 reserved_363 : 1; + u64 thresh_qint_idx : 7; + u64 reserved_371 : 1; + u64 err_qint_idx : 7; + u64 reserved_379_383 : 5; + u64 thresh : 36; /* W6*/ + u64 rsvd_423_420 : 4; + u64 fc_msh_dst : 11; + u64 reserved_435_438 : 4; + u64 op_dpc_ena : 1; + u64 op_dpc_set : 5; + u64 reserved_445_445 : 1; + u64 stream_ctx : 1; + u64 unified_ctx : 1; + u64 reserved_448_511; /* W7 */ + u64 padding[8]; +}; + +static_assert(sizeof(struct npa_cn20k_aura_s) == NIX_MAX_CTX_SIZE); + +struct npa_cn20k_pool_s { + u64 stack_base; /* W0 */ + u64 ena : 1; + u64 nat_align : 1; + u64 reserved_66_67 : 2; + u64 stack_caching : 1; + u64 reserved_69_87 : 19; + u64 buf_offset : 12; + u64 reserved_100_103 : 4; + u64 buf_size : 12; + u64 reserved_116_119 : 4; + u64 ref_cnt_prof : 3; + u64 reserved_123_127 : 5; + u64 stack_max_pages : 32; + u64 stack_pages : 32; + uint64_t bp_0 : 7; + uint64_t bp_1 : 7; + uint64_t bp_2 : 7; + uint64_t bp_3 : 7; + uint64_t bp_4 : 7; + uint64_t bp_5 : 7; + uint64_t bp_6 : 7; + uint64_t bp_7 : 7; + uint64_t bp_ena_0 : 1; + uint64_t bp_ena_1 : 1; + uint64_t bp_ena_2 : 1; + uint64_t bp_ena_3 : 1; + uint64_t bp_ena_4 : 1; + uint64_t bp_ena_5 : 1; + uint64_t bp_ena_6 : 1; + uint64_t bp_ena_7 : 1; + u64 stack_offset : 4; + u64 reserved_260_263 : 4; + u64 shift : 6; + u64 reserved_270_271 : 2; + u64 avg_level : 8; + u64 avg_con : 9; + u64 fc_ena : 1; + u64 fc_stype : 2; + u64 fc_hyst_bits : 4; + u64 fc_up_crossing : 1; + u64 reserved_297_299 : 3; + u64 update_time : 16; + u64 reserved_316_319 : 4; + u64 fc_addr; /* W5 */ + u64 ptr_start; /* W6 */ + u64 ptr_end; /* W7 */ + u64 bpid_0 : 12; + u64 reserved_524_535 : 12; + u64 err_int : 8; + u64 err_int_ena : 8; + u64 thresh_int : 1; + u64 thresh_int_ena : 1; + u64 thresh_up : 1; + u64 reserved_555 : 1; + u64 thresh_qint_idx : 7; + u64 reserved_563 : 1; + u64 err_qint_idx : 7; + u64 reserved_571_575 : 5; + u64 thresh : 36; + u64 rsvd_612_615 : 4; + u64 fc_msh_dst : 11; + u64 reserved_627_630 : 4; + u64 op_dpc_ena : 1; + u64 op_dpc_set : 5; + u64 reserved_637_637 : 1; + u64 stream_ctx : 1; + u64 reserved_639 : 1; + u64 reserved_640_703; /* W10 */ + u64 reserved_704_767; /* W11 */ + u64 reserved_768_831; /* W12 */ + u64 reserved_832_895; /* W13 */ + u64 reserved_896_959; /* W14 */ + u64 reserved_960_1023; /* W15 */ +}; + +static_assert(sizeof(struct 
npa_cn20k_pool_s) == NIX_MAX_CTX_SIZE); + #endif diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 933073cd2280..a3e273126e4e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -203,6 +203,8 @@ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \ M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \ M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\ +M(NPA_CN20K_AQ_ENQ, 0x404, npa_cn20k_aq_enq, npa_cn20k_aq_enq_req, \ + npa_cn20k_aq_enq_rsp) \ /* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \ /* TIM mbox IDs (range 0x800 - 0x9FF) */ \ /* CPT mbox IDs (range 0xA00 - 0xBFF) */ \ @@ -336,6 +338,8 @@ M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \ nix_mcast_grp_update_req, \ nix_mcast_grp_update_rsp) \ M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \ +M(NIX_CN20K_AQ_ENQ, 0x802f, nix_cn20k_aq_enq, nix_cn20k_aq_enq_req, \ + nix_cn20k_aq_enq_rsp) \ /* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \ M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \ mcs_alloc_rsrc_rsp) \ @@ -832,6 +836,39 @@ struct npa_aq_enq_rsp { }; }; +struct npa_cn20k_aq_enq_req { + struct mbox_msghdr hdr; + u32 aura_id; + u8 ctype; + u8 op; + union { + /* Valid when op == WRITE/INIT and ctype == AURA. + * LF fills the pool_id in aura.pool_addr. AF will translate + * the pool_id to pool context pointer. + */ + struct npa_cn20k_aura_s aura; + /* Valid when op == WRITE/INIT and ctype == POOL */ + struct npa_cn20k_pool_s pool; + }; + /* Mask data when op == WRITE (1=write, 0=don't write) */ + union { + /* Valid when op == WRITE and ctype == AURA */ + struct npa_cn20k_aura_s aura_mask; + /* Valid when op == WRITE and ctype == POOL */ + struct npa_cn20k_pool_s pool_mask; + }; +}; + +struct npa_cn20k_aq_enq_rsp { + struct mbox_msghdr hdr; + union { + /* Valid when op == READ and ctype == AURA */ + struct npa_cn20k_aura_s aura; + /* Valid when op == READ and ctype == POOL */ + struct npa_cn20k_pool_s pool; + }; +}; + /* Disable all contexts of type 'ctype' */ struct hwctx_disable_req { struct mbox_msghdr hdr; @@ -940,6 +977,42 @@ struct nix_lf_free_req { u64 flags; }; +/* CN20K NIX AQ enqueue msg */ +struct nix_cn20k_aq_enq_req { + struct mbox_msghdr hdr; + u32 qidx; + u8 ctype; + u8 op; + union { + struct nix_cn20k_rq_ctx_s rq; + struct nix_cn20k_sq_ctx_s sq; + struct nix_cn20k_cq_ctx_s cq; + struct nix_rsse_s rss; + struct nix_rx_mce_s mce; + struct nix_bandprof_s prof; + }; + union { + struct nix_cn20k_rq_ctx_s rq_mask; + struct nix_cn20k_sq_ctx_s sq_mask; + struct nix_cn20k_cq_ctx_s cq_mask; + struct nix_rsse_s rss_mask; + struct nix_rx_mce_s mce_mask; + struct nix_bandprof_s prof_mask; + }; +}; + +struct nix_cn20k_aq_enq_rsp { + struct mbox_msghdr hdr; + union { + struct nix_cn20k_rq_ctx_s rq; + struct nix_cn20k_sq_ctx_s sq; + struct nix_cn20k_cq_ctx_s cq; + struct nix_rsse_s rss; + struct nix_rx_mce_s mce; + struct nix_bandprof_s prof; + }; +}; + /* CN10K NIX AQ enqueue msg */ struct nix_cn10k_aq_enq_req { struct mbox_msghdr hdr; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index b58283341923..e85dac2c806d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -498,6 +498,14 @@ struct channel_fwdata { u8 reserved[RVU_CHANL_INFO_RESERVED]; }; 
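Every context structure added or touched in struct.h above is padded out to the common 128-byte NIX_MAX_CTX_SIZE and pinned with static_assert; this is the invariant that lets the rvu_nix.c hunks further down replace per-type sizeof() arguments with one fixed copy size. A condensed illustration of the idiom (the bitfield layout is abbreviated, not a real hardware context):

#define NIX_MAX_CTX_SIZE 128

struct example_ctx_s {
	u64 rq : 20;
	u64 reserved_20_63 : 44;
	/* Pad so that every context type is exactly NIX_MAX_CTX_SIZE bytes */
	u64 padding[15];
};

static_assert(sizeof(struct example_ctx_s) == NIX_MAX_CTX_SIZE);

With all context types the same size, memcpy(ctx, &req->rq, NIX_MAX_CTX_SIZE) is safe regardless of which union member the request actually carries.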
+struct altaf_intr_notify { + unsigned long flr_pf_bmap[2]; + unsigned long flr_vf_bmap[2]; + unsigned long gint_paddr; + unsigned long gint_iova_addr; + unsigned long reserved[6]; +}; + struct rvu_fwdata { #define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/ #define RVU_FWDATA_VERSION 0x0001 @@ -517,7 +525,8 @@ struct rvu_fwdata { u32 ptp_ext_clk_rate; u32 ptp_ext_tstamp; struct channel_fwdata channel_data; -#define FWDATA_RESERVED_MEM 958 + struct altaf_intr_notify altaf_intr_info; +#define FWDATA_RESERVED_MEM 946 u64 reserved[FWDATA_RESERVED_MEM]; #define CGX_MAX 9 #define CGX_LMACS_MAX 4 @@ -648,6 +657,7 @@ struct rvu { struct mutex mbox_lock; /* Serialize mbox up and down msgs */ u16 rep_pcifunc; + bool altaf_ready; int rep_cnt; u16 *rep2pfvf_map; u8 rep_mode; @@ -1032,6 +1042,9 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc); int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf, int blkaddr, int nixlf); void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr); +int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp); + /* NPC APIs */ void rvu_npc_freemem(struct rvu *rvu); int rvu_npc_get_pkind(struct rvu *rvu, u16 pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 8375f18c8e07..15d3cb0b9da6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -21,6 +21,8 @@ #include "rvu_npc_hash.h" #include "mcs.h" +#include "cn20k/debugfs.h" + #define DEBUGFS_DIR_NAME "octeontx2" enum { @@ -1101,6 +1103,11 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) struct npa_aura_s *aura = &rsp->aura; struct rvu *rvu = m->private; + if (is_cn20k(rvu->pdev)) { + print_npa_cn20k_aura_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp); + return; + } + seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n", @@ -1149,6 +1156,11 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) struct npa_pool_s *pool = &rsp->pool; struct rvu *rvu = m->private; + if (is_cn20k(rvu->pdev)) { + print_npa_cn20k_pool_ctx(m, (struct npa_cn20k_aq_enq_rsp *)rsp); + return; + } + seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n", @@ -1651,6 +1663,9 @@ static void print_tm_tree(struct seq_file *m, int blkaddr; u64 cfg; + if (!sq_ctx->ena) + return; + blkaddr = nix_hw->blkaddr; schq = sq_ctx->smq; @@ -2009,10 +2024,16 @@ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) struct nix_hw *nix_hw = m->private; struct rvu *rvu = nix_hw->rvu; + if (is_cn20k(rvu->pdev)) { + print_nix_cn20k_sq_ctx(m, (struct nix_cn20k_sq_ctx_s *)sq_ctx); + return; + } + if (!is_rvu_otx2(rvu)) { print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx); return; } + seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n", sq_ctx->sqe_way_mask, sq_ctx->cq); seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n", @@ -2103,7 +2124,9 @@ static void print_nix_cn10k_rq_ctx(struct seq_file *m, seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n", rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena); - seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id); + seq_printf(m, "W2: band_prof_id \t\t%d\n", + (u16)rq_ctx->band_prof_id_h << 10 | rq_ctx->band_prof_id); + seq_printf(m, "W2: 
policer_ena \t\t%d\n", rq_ctx->policer_ena); seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1); seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n", @@ -2225,6 +2248,11 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) struct nix_hw *nix_hw = m->private; struct rvu *rvu = nix_hw->rvu; + if (is_cn20k(rvu->pdev)) { + print_nix_cn20k_cq_ctx(m, (struct nix_cn20k_aq_enq_rsp *)rsp); + return; + } + seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base); seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr); @@ -2254,6 +2282,7 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int); seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n", cq_ctx->qsize, cq_ctx->caching); + seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n", cq_ctx->substream, cq_ctx->ena); if (!is_rvu_otx2(rvu)) { @@ -2615,7 +2644,10 @@ static void print_band_prof_ctx(struct seq_file *m, (prof->rc_action == 1) ? "DROP" : "RED"; seq_printf(m, "W1: rc_action\t\t%s\n", str); seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo); - seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id); + + seq_printf(m, "W1: band_prof_id\t%d\n", + (u16)prof->band_prof_id_h << 7 | prof->band_prof_id); + seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en); seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts); @@ -2784,6 +2816,9 @@ static void rvu_dbg_npa_init(struct rvu *rvu) &rvu_dbg_npa_aura_ctx_fops); debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu, &rvu_dbg_npa_pool_ctx_fops); + + if (is_cn20k(rvu->pdev)) /* NDC not applicable for cn20k */ + return; debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu, &rvu_dbg_npa_ndc_cache_fops); debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu, @@ -3950,6 +3985,9 @@ static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr) static const char *rvu_get_dbg_dir_name(struct rvu *rvu) { + if (is_cn20k(rvu->pdev)) + return "cn20k"; + if (!is_rvu_otx2(rvu)) return "cn10k"; else diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 3735372539bd..0f9953eaf1b0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1233,7 +1233,8 @@ static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id, } static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; @@ -1259,7 +1260,8 @@ enum rvu_af_dl_param_id { }; static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; @@ -1314,7 +1316,8 @@ static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id, } static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; @@ -1376,7 +1379,8 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink } static int 
rvu_af_dl_npc_def_rule_cntr_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; @@ -1402,7 +1406,8 @@ static int rvu_af_dl_npc_def_rule_cntr_set(struct devlink *devlink, u32 id, } static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 828316211b24..2f485a930edd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -1019,6 +1019,12 @@ static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req, { struct nix_cn10k_aq_enq_req *aq_req; + if (is_cn20k(rvu->pdev)) { + *smq = ((struct nix_cn20k_aq_enq_req *)req)->sq.smq; + *smq_mask = ((struct nix_cn20k_aq_enq_req *)req)->sq_mask.smq; + return; + } + if (!is_rvu_otx2(rvu)) { aq_req = (struct nix_cn10k_aq_enq_req *)req; *smq = aq_req->sq.smq; @@ -1149,36 +1155,36 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, case NIX_AQ_INSTOP_WRITE: if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(mask, &req->rq_mask, - sizeof(struct nix_rq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_SQ) memcpy(mask, &req->sq_mask, - sizeof(struct nix_sq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_CQ) memcpy(mask, &req->cq_mask, - sizeof(struct nix_cq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_RSS) memcpy(mask, &req->rss_mask, - sizeof(struct nix_rsse_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(mask, &req->mce_mask, - sizeof(struct nix_rx_mce_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) memcpy(mask, &req->prof_mask, - sizeof(struct nix_bandprof_s)); + NIX_MAX_CTX_SIZE); fallthrough; case NIX_AQ_INSTOP_INIT: if (req->ctype == NIX_AQ_CTYPE_RQ) - memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); + memcpy(ctx, &req->rq, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_SQ) - memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); + memcpy(ctx, &req->sq, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_CQ) - memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); + memcpy(ctx, &req->cq, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_RSS) - memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); + memcpy(ctx, &req->rss, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_MCE) - memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); + memcpy(ctx, &req->mce, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) - memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s)); + memcpy(ctx, &req->prof, NIX_MAX_CTX_SIZE); break; case NIX_AQ_INSTOP_NOP: case NIX_AQ_INSTOP_READ: @@ -1243,22 +1249,22 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, if (req->op == NIX_AQ_INSTOP_READ) { if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(&rsp->rq, ctx, - sizeof(struct nix_rq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_SQ) memcpy(&rsp->sq, ctx, - sizeof(struct nix_sq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_CQ) memcpy(&rsp->cq, ctx, - sizeof(struct nix_cq_ctx_s)); + NIX_MAX_CTX_SIZE); 
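The rvu_devlink.c changes above (and the otx2_devlink.c ones later in this series) track a devlink core API change: every parameter .get callback now also receives a struct netlink_ext_ack pointer, so failures can carry a message back to userspace. A hedged sketch of the updated callback shape, for a hypothetical "foo" driver:

static int foo_dl_param_get(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = devlink_priv(devlink);	/* assumed priv type */

	if (!priv->param_ready) {
		NL_SET_ERR_MSG_MOD(extack, "parameter not initialised yet");
		return -EOPNOTSUPP;
	}
	ctx->val.vu32 = priv->param_val;
	return 0;
}

The RVU callbacks shown here do not use the new argument yet; the signature change alone keeps them building against the updated core.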
else if (req->ctype == NIX_AQ_CTYPE_RSS) memcpy(&rsp->rss, ctx, - sizeof(struct nix_rsse_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(&rsp->mce, ctx, - sizeof(struct nix_rx_mce_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) memcpy(&rsp->prof, ctx, - sizeof(struct nix_bandprof_s)); + NIX_MAX_CTX_SIZE); } } @@ -1289,8 +1295,8 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, /* Make copy of original context & mask which are required * for resubmission */ - memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); - memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); + memcpy(&aq_req.cq_mask, &req->cq_mask, NIX_MAX_CTX_SIZE); + memcpy(&aq_req.cq, &req->cq, NIX_MAX_CTX_SIZE); /* exclude fields which HW can update */ aq_req.cq_mask.cq_err = 0; @@ -1309,7 +1315,7 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, * updated fields are masked out for request and response * comparison */ - for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); + for (word = 0; word < NIX_MAX_CTX_SIZE / sizeof(u64); word++) { *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); @@ -1317,14 +1323,14 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); } - if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) + if (memcmp(&aq_req.cq, &aq_rsp.cq, NIX_MAX_CTX_SIZE)) return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; return 0; } -static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, - struct nix_aq_enq_rsp *rsp) +int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) { struct nix_hw *nix_hw; int err, retries = 5; @@ -5812,6 +5818,8 @@ static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) } } +#define NIX_BW_PROF_HI_MASK GENMASK(10, 7) + static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, struct nix_hw *nix_hw, u16 pcifunc) { @@ -5850,7 +5858,8 @@ static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, return -EINVAL; ipolicer = &nix_hw->ipolicer[hi_layer]; - prof_idx = req->prof.band_prof_id; + prof_idx = FIELD_PREP(NIX_BW_PROF_HI_MASK, req->prof.band_prof_id_h); + prof_idx |= req->prof.band_prof_id; if (prof_idx >= ipolicer->band_prof.max || ipolicer->pfvf_map[prof_idx] != pcifunc) return -EINVAL; @@ -6015,8 +6024,10 @@ static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, aq_req->op = NIX_AQ_INSTOP_WRITE; aq_req->qidx = leaf_prof; - aq_req->prof.band_prof_id = mid_prof; + aq_req->prof.band_prof_id = mid_prof & 0x7F; aq_req->prof_mask.band_prof_id = GENMASK(6, 0); + aq_req->prof.band_prof_id_h = FIELD_GET(NIX_BW_PROF_HI_MASK, mid_prof); + aq_req->prof_mask.band_prof_id_h = GENMASK(3, 0); aq_req->prof.hl_en = 1; aq_req->prof_mask.hl_en = 1; @@ -6025,6 +6036,8 @@ static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, (struct nix_aq_enq_rsp *)aq_rsp); } +#define NIX_RQ_PROF_HI_MASK GENMASK(13, 10) + int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, u16 rq_idx, u16 match_id) { @@ -6056,7 +6069,8 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, return 0; /* Get the bandwidth profile ID mapped to this RQ */ - leaf_prof = aq_rsp.rq.band_prof_id; + leaf_prof = FIELD_PREP(NIX_RQ_PROF_HI_MASK, aq_rsp.rq.band_prof_id_h); + leaf_prof |= aq_rsp.rq.band_prof_id; ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; ipolicer->match_id[leaf_prof] = match_id; @@ 
-6094,7 +6108,10 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, * to different RQs and marked with same match_id * are rate limited in a aggregate fashion */ - mid_prof = aq_rsp.prof.band_prof_id; + mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, + aq_rsp.prof.band_prof_id_h); + mid_prof |= aq_rsp.prof.band_prof_id; + rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, &aq_req, &aq_rsp, leaf_prof, mid_prof); @@ -6216,7 +6233,8 @@ static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, if (!aq_rsp.prof.hl_en) return; - mid_prof = aq_rsp.prof.band_prof_id; + mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, aq_rsp.prof.band_prof_id_h); + mid_prof |= aq_rsp.prof.band_prof_id; ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; ipolicer->ref_count[mid_prof]--; /* If ref_count is zero, free mid layer profile */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c index 4f5ca5ab13a4..e2a33e46b48a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -464,6 +464,23 @@ int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req, return 0; } +static void npa_aq_ndc_config(struct rvu *rvu, struct rvu_block *block) +{ + u64 cfg; + + if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */ + return; + + /* Do not bypass NDC cache */ + cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG); + cfg &= ~0x03DULL; +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING + /* Disable caching of stack pages */ + cfg |= 0x10ULL; +#endif + rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); +} + static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) { u64 cfg; @@ -479,14 +496,7 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg); #endif - /* Do not bypass NDC cache */ - cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG); - cfg &= ~0x03DULL; -#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING - /* Disable caching of stack pages */ - cfg |= 0x10ULL; -#endif - rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); + npa_aq_ndc_config(rvu, block); /* For CN10K NPA BATCH DMA set 35 cache lines */ if (!is_rvu_otx2(rvu)) { @@ -567,6 +577,9 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr) int bank, max_bank, line, max_line, err; u64 reg, ndc_af_const; + if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */ + return 0; + /* Set the ENABLE bit(63) to '0' */ reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL); rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0)); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 0596a3ac4c12..8e868f815de1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -13,6 +13,8 @@ #define RVU_MULTI_BLK_VER 0x7ULL +#define NIX_MAX_CTX_SIZE 128 + /* RVU Block Address Enumeration */ enum rvu_block_addr_e { BLKADDR_RVUM = 0x0ULL, @@ -370,8 +372,12 @@ struct nix_cq_ctx_s { u64 qsize : 4; u64 cq_err_int : 8; u64 cq_err_int_ena : 8; + /* Ensure all context sizes are 128 bytes */ + u64 padding[12]; }; +static_assert(sizeof(struct nix_cq_ctx_s) == NIX_MAX_CTX_SIZE); + /* CN10K NIX Receive queue context structure */ struct nix_cn10k_rq_ctx_s { u64 ena : 1; @@ -413,7 +419,8 @@ struct nix_cn10k_rq_ctx_s { u64 rsvd_171 : 1; u64 later_skip : 6; u64 xqe_imm_size : 6; - u64 rsvd_189_184 : 6; + u64 
band_prof_id_h : 4; + u64 rsvd_189_188 : 2; u64 xqe_imm_copy : 1; u64 xqe_hdr_split : 1; u64 xqe_drop : 8; /* W3 */ @@ -460,6 +467,8 @@ struct nix_cn10k_rq_ctx_s { u64 rsvd_1023_960; /* W15 */ }; +static_assert(sizeof(struct nix_cn10k_rq_ctx_s) == NIX_MAX_CTX_SIZE); + /* CN10K NIX Send queue context structure */ struct nix_cn10k_sq_ctx_s { u64 ena : 1; @@ -523,6 +532,8 @@ struct nix_cn10k_sq_ctx_s { u64 rsvd_1023_1008 : 16; }; +static_assert(sizeof(struct nix_cn10k_sq_ctx_s) == NIX_MAX_CTX_SIZE); + /* NIX Receive queue context structure */ struct nix_rq_ctx_s { u64 ena : 1; @@ -594,6 +605,8 @@ struct nix_rq_ctx_s { u64 rsvd_1023_960; /* W15 */ }; +static_assert(sizeof(struct nix_rq_ctx_s) == NIX_MAX_CTX_SIZE); + /* NIX sqe sizes */ enum nix_maxsqesz { NIX_MAXSQESZ_W16 = 0x0, @@ -668,13 +681,18 @@ struct nix_sq_ctx_s { u64 rsvd_1023_1008 : 16; }; +static_assert(sizeof(struct nix_sq_ctx_s) == NIX_MAX_CTX_SIZE); + /* NIX Receive side scaling entry structure*/ struct nix_rsse_s { uint32_t rq : 20; uint32_t reserved_20_31 : 12; - + /* Ensure all context sizes are minimum 128 bytes */ + u64 padding[15]; }; +static_assert(sizeof(struct nix_rsse_s) == NIX_MAX_CTX_SIZE); + /* NIX receive multicast/mirror entry structure */ struct nix_rx_mce_s { uint64_t op : 2; @@ -684,8 +702,12 @@ struct nix_rx_mce_s { uint64_t rsvd_31_24 : 8; uint64_t pf_func : 16; uint64_t next : 16; + /* Ensure all context sizes are minimum 128 bytes */ + u64 padding[15]; }; +static_assert(sizeof(struct nix_rx_mce_s) == NIX_MAX_CTX_SIZE); + enum nix_band_prof_layers { BAND_PROF_LEAF_LAYER = 0, BAND_PROF_INVAL_LAYER = 1, @@ -736,7 +758,8 @@ struct nix_bandprof_s { uint64_t rc_action : 2; uint64_t meter_algo : 2; uint64_t band_prof_id : 7; - uint64_t reserved_111_118 : 8; + uint64_t band_prof_id_h : 4; + uint64_t reserved_115_118 : 4; uint64_t hl_en : 1; uint64_t reserved_120_127 : 8; uint64_t ts : 48; /* W2 */ @@ -769,6 +792,8 @@ struct nix_bandprof_s { uint64_t reserved_1008_1023 : 16; }; +static_assert(sizeof(struct nix_bandprof_s) == NIX_MAX_CTX_SIZE); + enum nix_lsoalg { NIX_LSOALG_NOP, NIX_LSOALG_ADD_SEGNUM, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index bec7d5b4d7cc..3e1bf22cba69 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -15,6 +15,8 @@ static struct dev_hw_ops otx2_hw_ops = { .aura_freeptr = otx2_aura_freeptr, .refill_pool_ptrs = otx2_refill_pool_ptrs, .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, + .aura_aq_init = otx2_aura_aq_init, + .pool_aq_init = otx2_pool_aq_init, }; static struct dev_hw_ops cn10k_hw_ops = { @@ -23,6 +25,8 @@ static struct dev_hw_ops cn10k_hw_ops = { .aura_freeptr = cn10k_aura_freeptr, .refill_pool_ptrs = cn10k_refill_pool_ptrs, .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, + .aura_aq_init = otx2_aura_aq_init, + .pool_aq_init = otx2_pool_aq_init, }; void otx2_init_hw_ops(struct otx2_nic *pfvf) @@ -337,6 +341,12 @@ int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx, aq->rq.band_prof_id = policer; aq->rq_mask.band_prof_id = GENMASK(9, 0); + /* If policer id is greater than 1023 then it implies hardware supports + * more leaf profiles. In that case use band_prof_id_h for 4 MSBs. 
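Across rvu_nix.c, rvu_struct.h and cn10k.c the bandwidth-profile ID is now a split field: the original low bits (7 in nix_bandprof_s, 10 in the RQ context) stay in band_prof_id, and a new 4-bit band_prof_id_h carries the MSBs, recombined with the FIELD_PREP()/FIELD_GET() masks defined above. A small self-contained sketch of the round-trip for the 11-bit case (helper names are illustrative only):

#include <linux/bitfield.h>

#define BW_PROF_HI_MASK	GENMASK(10, 7)	/* 4 MSBs above the 7-bit low field */

static void prof_id_split(u16 prof_idx, u8 *lo, u8 *hi)
{
	*lo = prof_idx & 0x7F;				/* -> band_prof_id   */
	*hi = FIELD_GET(BW_PROF_HI_MASK, prof_idx);	/* -> band_prof_id_h */
}

static u16 prof_id_join(u8 lo, u8 hi)
{
	return FIELD_PREP(BW_PROF_HI_MASK, hi) | lo;
}

The RQ-context variant is identical except that the low field is 10 bits wide, hence NIX_RQ_PROF_HI_MASK being GENMASK(13, 10) in the hunk above.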
+ */ + aq->rq.band_prof_id_h = policer >> 10; + aq->rq_mask.band_prof_id_h = GENMASK(3, 0); + /* Fill AQ info */ aq->qidx = rq_idx; aq->ctype = NIX_AQ_CTYPE_RQ; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c index ec8cde98076d..a60f8cf53feb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c @@ -10,17 +10,6 @@ #include "otx2_struct.h" #include "cn10k.h" -static struct dev_hw_ops cn20k_hw_ops = { - .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler, - .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler, - .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler, -}; - -void cn20k_init(struct otx2_nic *pfvf) -{ - pfvf->hw_ops = &cn20k_hw_ops; -} -EXPORT_SYMBOL(cn20k_init); /* CN20K mbox AF => PFx irq handler */ irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq) { @@ -250,3 +239,212 @@ int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) return 0; } + +#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */ + +static u8 cn20k_aura_bpid_idx(struct otx2_nic *pfvf, int aura_id) +{ +#ifdef CONFIG_DCB + return pfvf->queue_to_pfc_map[aura_id]; +#else + return 0; +#endif +} + +static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs) +{ + struct npa_cn20k_aq_enq_req *aq; + struct otx2_pool *pool; + u8 bpid_idx; + int err; + + pool = &pfvf->qset.pool[pool_id]; + + /* Allocate memory for HW to update Aura count. + * Alloc one cache line, so that it fits all FC_STYPE modes. + */ + if (!pool->fc_addr) { + err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN); + if (err) + return err; + } + + /* Initialize this aura's context via AF */ + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); + if (!aq) { + /* Shared mbox memory buffer is full, flush it and retry */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + return err; + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + } + + aq->aura_id = aura_id; + + /* Will be filled by AF with correct pool context address */ + aq->aura.pool_addr = pool_id; + aq->aura.pool_caching = 1; + aq->aura.shift = ilog2(numptrs) - 8; + aq->aura.count = numptrs; + aq->aura.limit = numptrs; + aq->aura.avg_level = 255; + aq->aura.ena = 1; + aq->aura.fc_ena = 1; + aq->aura.fc_addr = pool->fc_addr->iova; + aq->aura.fc_hyst_bits = 0; /* Store count on all updates */ + + /* Enable backpressure for RQ aura */ + if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) { + aq->aura.bp_ena = 0; + /* If NIX1 LF is attached then specify NIX1_RX. + * + * Below NPA_AURA_S[BP_ENA] is set according to the + * NPA_BPINTF_E enumeration given as: + * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so + * NIX0_RX is 0x0 + 0*0x1 = 0 + * NIX1_RX is 0x0 + 1*0x1 = 1 + * But in HRM it is given that + * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to + * NIX-RX based on [BP] level. One bit per NIX-RX; index + * enumerated by NPA_BPINTF_E." 
+ */ + if (pfvf->nix_blkaddr == BLKADDR_NIX1) + aq->aura.bp_ena = 1; + + bpid_idx = cn20k_aura_bpid_idx(pfvf, aura_id); + aq->aura.bpid = pfvf->bpid[bpid_idx]; + + /* Set backpressure level for RQ's Aura */ + aq->aura.bp = RQ_BP_LVL_AURA; + } + + /* Fill AQ info */ + aq->ctype = NPA_AQ_CTYPE_AURA; + aq->op = NPA_AQ_INSTOP_INIT; + + return 0; +} + +static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id, + int stack_pages, int numptrs, int buf_size, + int type) +{ + struct page_pool_params pp_params = { 0 }; + struct npa_cn20k_aq_enq_req *aq; + struct otx2_pool *pool; + int err, sz; + + pool = &pfvf->qset.pool[pool_id]; + /* Alloc memory for stack which is used to store buffer pointers */ + err = qmem_alloc(pfvf->dev, &pool->stack, + stack_pages, pfvf->hw.stack_pg_bytes); + if (err) + return err; + + pool->rbsize = buf_size; + + /* Initialize this pool's context via AF */ + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); + if (!aq) { + /* Shared mbox memory buffer is full, flush it and retry */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + qmem_free(pfvf->dev, pool->stack); + return err; + } + aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox); + if (!aq) { + qmem_free(pfvf->dev, pool->stack); + return -ENOMEM; + } + } + + aq->aura_id = pool_id; + aq->pool.stack_base = pool->stack->iova; + aq->pool.stack_caching = 1; + aq->pool.ena = 1; + aq->pool.buf_size = buf_size / 128; + aq->pool.stack_max_pages = stack_pages; + aq->pool.shift = ilog2(numptrs) - 8; + aq->pool.ptr_start = 0; + aq->pool.ptr_end = ~0ULL; + + /* Fill AQ info */ + aq->ctype = NPA_AQ_CTYPE_POOL; + aq->op = NPA_AQ_INSTOP_INIT; + + if (type != AURA_NIX_RQ) { + pool->page_pool = NULL; + return 0; + } + + sz = ALIGN(ALIGN(SKB_DATA_ALIGN(buf_size), OTX2_ALIGN), PAGE_SIZE); + pp_params.order = get_order(sz); + pp_params.flags = PP_FLAG_DMA_MAP; + pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); + pp_params.nid = NUMA_NO_NODE; + pp_params.dev = pfvf->dev; + pp_params.dma_dir = DMA_FROM_DEVICE; + pool->page_pool = page_pool_create(&pp_params); + if (IS_ERR(pool->page_pool)) { + netdev_err(pfvf->netdev, "Creation of page pool failed\n"); + return PTR_ERR(pool->page_pool); + } + + return 0; +} + +static int cn20k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura) +{ + struct nix_cn20k_aq_enq_req *aq; + struct otx2_nic *pfvf = dev; + + /* Get memory to put this msg */ + aq = otx2_mbox_alloc_msg_nix_cn20k_aq_enq(&pfvf->mbox); + if (!aq) + return -ENOMEM; + + aq->sq.cq = pfvf->hw.rx_queues + qidx; + aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ + aq->sq.cq_ena = 1; + aq->sq.ena = 1; + aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); + aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); + aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset; + aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ + aq->sq.sqb_aura = sqb_aura; + aq->sq.sq_int_ena = NIX_SQINT_BITS; + aq->sq.qint_idx = 0; + /* Due to pipelining impact, a minimum of 2000 unused SQ CQEs + * need to be maintained to avoid CQ overflow. 
+ */ + aq->sq.cq_limit = (SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt); + + /* Fill AQ info */ + aq->qidx = qidx; + aq->ctype = NIX_AQ_CTYPE_SQ; + aq->op = NIX_AQ_INSTOP_INIT; + + return otx2_sync_mbox_msg(&pfvf->mbox); +} + +static struct dev_hw_ops cn20k_hw_ops = { + .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler, + .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler, + .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler, + .sq_aq_init = cn20k_sq_aq_init, + .sqe_flush = cn10k_sqe_flush, + .aura_freeptr = cn10k_aura_freeptr, + .refill_pool_ptrs = cn10k_refill_pool_ptrs, + .aura_aq_init = cn20k_aura_aq_init, + .pool_aq_init = cn20k_pool_aq_init, +}; + +void cn20k_init(struct otx2_nic *pfvf) +{ + pfvf->hw_ops = &cn20k_hw_ops; +} +EXPORT_SYMBOL(cn20k_init); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 902d6abaa3ec..75ebb17419c4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -1369,6 +1369,13 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf) int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, int pool_id, int numptrs) { + return pfvf->hw_ops->aura_aq_init(pfvf, aura_id, pool_id, + numptrs); +} + +int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs) +{ struct npa_aq_enq_req *aq; struct otx2_pool *pool; int err; @@ -1446,6 +1453,13 @@ int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, int stack_pages, int numptrs, int buf_size, int type) { + return pfvf->hw_ops->pool_aq_init(pfvf, pool_id, stack_pages, numptrs, + buf_size, type); +} + +int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id, + int stack_pages, int numptrs, int buf_size, int type) +{ struct page_pool_params pp_params = { 0 }; struct xsk_buff_pool *xsk_pool; struct npa_aq_enq_req *aq; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 1c8a3c078a64..e616a727a3a9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -14,6 +14,7 @@ #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> +#include <linux/soc/marvell/silicons.h> #include <linux/soc/marvell/octeontx2/asm.h> #include <net/macsec.h> #include <net/pkt_cls.h> @@ -375,6 +376,11 @@ struct dev_hw_ops { irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq); irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq); irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq); + int (*aura_aq_init)(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs); + int (*pool_aq_init)(struct otx2_nic *pfvf, u16 pool_id, + int stack_pages, int numptrs, int buf_size, + int type); }; #define CN10K_MCS_SA_PER_SC 4 @@ -527,7 +533,7 @@ struct otx2_nic { u32 nix_lmt_size; struct otx2_ptp *ptp; - struct hwtstamp_config tstamp; + struct kernel_hwtstamp_config tstamp; unsigned long rq_bmap; @@ -1059,6 +1065,10 @@ irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq); int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura); int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx); int otx2_set_hw_capabilities(struct otx2_nic *pfvf); +int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id, + int pool_id, int numptrs); +int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id, + int 
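The otx2_common.c hunk above turns otx2_aura_init() and otx2_pool_init() into thin wrappers around the new aura_aq_init/pool_aq_init members of dev_hw_ops, so each silicon generation can build its own AQ context format: otx2 and cn10k keep the legacy otx2_*_aq_init helpers, while cn20k plugs in the cn20k_*_aq_init variants defined earlier. Condensed, the dispatch looks like:

struct dev_hw_ops {
	/* ... existing mbox/queue callbacks ... */
	int (*aura_aq_init)(struct otx2_nic *pfvf, int aura_id,
			    int pool_id, int numptrs);
	int (*pool_aq_init)(struct otx2_nic *pfvf, u16 pool_id,
			    int stack_pages, int numptrs, int buf_size,
			    int type);
};

int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs)
{
	/* otx2/cn10k ops point at otx2_aura_aq_init(); cn20k at
	 * cn20k_aura_aq_init(), which fills npa_cn20k_aq_enq_req instead.
	 */
	return pfvf->hw_ops->aura_aq_init(pfvf, aura_id, pool_id, numptrs);
}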
stack_pages, int numptrs, int buf_size, int type); /* RSS configuration APIs*/ int otx2_rss_init(struct otx2_nic *pfvf); @@ -1098,8 +1108,11 @@ int otx2_open(struct net_device *netdev); int otx2_stop(struct net_device *netdev); int otx2_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues); -int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd); -int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr); +int otx2_config_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config); +int otx2_config_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); /* MCAM filter related APIs */ int otx2_mcam_flow_init(struct otx2_nic *pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index e13ae5484c19..a72694219df4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -48,7 +48,8 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id, } static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct otx2_devlink *otx2_dl = devlink_priv(devlink); struct otx2_nic *pfvf = otx2_dl->pfvf; @@ -84,7 +85,8 @@ static int otx2_dl_ucast_flt_cnt_set(struct devlink *devlink, u32 id, } static int otx2_dl_ucast_flt_cnt_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct otx2_devlink *otx2_dl = devlink_priv(devlink); struct otx2_nic *pfvf = otx2_dl->pfvf; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index e808995703cf..a7feb4c392b3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -2445,18 +2445,26 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable) return 0; } -int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) +int otx2_config_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + struct otx2_nic *pfvf = netdev_priv(netdev); + + *config = pfvf->tstamp; + return 0; +} +EXPORT_SYMBOL(otx2_config_hwtstamp_get); + +int otx2_config_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); - struct hwtstamp_config config; if (!pfvf->ptp) return -ENODEV; - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC) pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC; @@ -2465,8 +2473,11 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) otx2_config_hw_tx_tstamp(pfvf, false); break; case HWTSTAMP_TX_ONESTEP_SYNC: - if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) + if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag)) { + NL_SET_ERR_MSG_MOD(extack, + "One-step time stamping is not supported"); return -ERANGE; + } pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC; schedule_delayed_work(&pfvf->ptp->synctstamp_work, msecs_to_jiffies(500)); @@ -2478,7 +2489,7 @@ int otx2_config_hwtstamp(struct net_device *netdev, 
struct ifreq *ifr) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: otx2_config_hw_rx_tstamp(pfvf, false); break; @@ -2497,35 +2508,17 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: otx2_config_hw_rx_tstamp(pfvf, true); - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } - memcpy(&pfvf->tstamp, &config, sizeof(config)); + pfvf->tstamp = *config; - return copy_to_user(ifr->ifr_data, &config, - sizeof(config)) ? -EFAULT : 0; -} -EXPORT_SYMBOL(otx2_config_hwtstamp); - -int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) -{ - struct otx2_nic *pfvf = netdev_priv(netdev); - struct hwtstamp_config *cfg = &pfvf->tstamp; - - switch (cmd) { - case SIOCSHWTSTAMP: - return otx2_config_hwtstamp(netdev, req); - case SIOCGHWTSTAMP: - return copy_to_user(req->ifr_data, cfg, - sizeof(*cfg)) ? -EFAULT : 0; - default: - return -EOPNOTSUPP; - } + return 0; } -EXPORT_SYMBOL(otx2_ioctl); +EXPORT_SYMBOL(otx2_config_hwtstamp_set); static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac) { @@ -2942,7 +2935,6 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_features = otx2_set_features, .ndo_tx_timeout = otx2_tx_timeout, .ndo_get_stats64 = otx2_get_stats64, - .ndo_eth_ioctl = otx2_ioctl, .ndo_set_vf_mac = otx2_set_vf_mac, .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, @@ -2951,6 +2943,8 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_xdp_xmit = otx2_xdp_xmit, .ndo_setup_tc = otx2_setup_tc, .ndo_set_vf_trust = otx2_ndo_set_vf_trust, + .ndo_hwtstamp_get = otx2_config_hwtstamp_get, + .ndo_hwtstamp_set = otx2_config_hwtstamp_set, }; int otx2_wq_init(struct otx2_nic *pf) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 25381f079b97..f4fdbfba8667 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -534,8 +534,9 @@ static const struct net_device_ops otx2vf_netdev_ops = { .ndo_set_features = otx2vf_set_features, .ndo_get_stats64 = otx2_get_stats64, .ndo_tx_timeout = otx2_tx_timeout, - .ndo_eth_ioctl = otx2_ioctl, .ndo_setup_tc = otx2_setup_tc, + .ndo_hwtstamp_get = otx2_config_hwtstamp_get, + .ndo_hwtstamp_set = otx2_config_hwtstamp_set, }; static int otx2_vf_wq_init(struct otx2_nic *vf) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index a68cd3f0304c..ad6298456639 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1727,6 +1727,13 @@ static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv) } +static u32 mlx4_en_get_rx_ring_count(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + return priv->rx_ring_num; +} + static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { @@ -1743,9 +1750,6 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return -EINVAL; switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = priv->rx_ring_num; - break; case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = mlx4_en_get_num_flows(priv); break; @@ -2154,6 +2158,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { .set_ringparam = mlx4_en_set_ringparam, 
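The otx2 hunks above migrate hardware timestamping from the ioctl-based SIOCSHWTSTAMP/SIOCGHWTSTAMP path to the dedicated ndo_hwtstamp_get/ndo_hwtstamp_set callbacks, and the mlx4 hunks below make the same conversion: the core now performs the user copies and hands the driver a struct kernel_hwtstamp_config plus a netlink extack for error reporting. A minimal sketch of the resulting driver pattern, for a hypothetical "foo" driver (names and supported modes are illustrative, not taken from this series):

#include <linux/netdevice.h>
#include <linux/net_tstamp.h>

struct foo_priv {
	struct kernel_hwtstamp_config tstamp;	/* last accepted config */
};

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* no copy_to_user() here; the core copies the result back */
	*config = priv->tstamp;
	return 0;
}

static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (config->rx_filter != HWTSTAMP_FILTER_NONE &&
	    config->rx_filter != HWTSTAMP_FILTER_ALL) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported RX filter");
		return -ERANGE;
	}

	priv->tstamp = *config;
	return 0;
}

Wired up through .ndo_hwtstamp_get/.ndo_hwtstamp_set in net_device_ops, this also lets a driver drop its .ndo_eth_ioctl handler when timestamping was the only thing it serviced, exactly as the otx2 and mlx4 conversions do.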
.get_rxnfc = mlx4_en_get_rxnfc, .set_rxnfc = mlx4_en_set_rxnfc, + .get_rx_ring_count = mlx4_en_get_rx_ring_count, .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, .get_rxfh_key_size = mlx4_en_get_rxfh_key_size, .get_rxfh = mlx4_en_get_rxfh, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 308b4458e0d4..81bf8908b897 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2420,21 +2420,22 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +static int mlx4_en_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - struct hwtstamp_config config; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; /* device doesn't support time stamping */ - if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) { + NL_SET_ERR_MSG_MOD(extack, + "device doesn't support time stamping"); return -EINVAL; + } /* TX HW timestamp */ - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; @@ -2443,7 +2444,7 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) } /* RX HW timestamp */ - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_ALL: @@ -2461,39 +2462,27 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } if (mlx4_en_reset_config(dev, config, dev->features)) { - config.tx_type = HWTSTAMP_TX_OFF; - config.rx_filter = HWTSTAMP_FILTER_NONE; + config->tx_type = HWTSTAMP_TX_OFF; + config->rx_filter = HWTSTAMP_FILTER_NONE; } - return copy_to_user(ifr->ifr_data, &config, - sizeof(config)) ? -EFAULT : 0; + return 0; } -static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +static int mlx4_en_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) { struct mlx4_en_priv *priv = netdev_priv(dev); - return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config, - sizeof(priv->hwtstamp_config)) ? 
-EFAULT : 0; -} - -static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCSHWTSTAMP: - return mlx4_en_hwtstamp_set(dev, ifr); - case SIOCGHWTSTAMP: - return mlx4_en_hwtstamp_get(dev, ifr); - default: - return -EOPNOTSUPP; - } + *config = priv->hwtstamp_config; + return 0; } static netdev_features_t mlx4_en_fix_features(struct net_device *netdev, @@ -2560,7 +2549,7 @@ static int mlx4_en_set_features(struct net_device *netdev, } if (reset) { - ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, + ret = mlx4_en_reset_config(netdev, &priv->hwtstamp_config, features); if (ret) return ret; @@ -2844,7 +2833,6 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_set_mac_address = mlx4_en_set_mac, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = mlx4_en_change_mtu, - .ndo_eth_ioctl = mlx4_en_ioctl, .ndo_tx_timeout = mlx4_en_tx_timeout, .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, @@ -2858,6 +2846,8 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_features_check = mlx4_en_features_check, .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, .ndo_bpf = mlx4_xdp, + .ndo_hwtstamp_get = mlx4_en_hwtstamp_get, + .ndo_hwtstamp_set = mlx4_en_hwtstamp_set, }; static const struct net_device_ops mlx4_netdev_ops_master = { @@ -3512,7 +3502,7 @@ out: } int mlx4_en_reset_config(struct net_device *dev, - struct hwtstamp_config ts_config, + struct kernel_hwtstamp_config *ts_config, netdev_features_t features) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -3522,8 +3512,8 @@ int mlx4_en_reset_config(struct net_device *dev, int port_up = 0; int err = 0; - if (priv->hwtstamp_config.tx_type == ts_config.tx_type && - priv->hwtstamp_config.rx_filter == ts_config.rx_filter && + if (priv->hwtstamp_config.tx_type == ts_config->tx_type && + priv->hwtstamp_config.rx_filter == ts_config->rx_filter && !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) return 0; /* Nothing to change */ @@ -3542,7 +3532,7 @@ int mlx4_en_reset_config(struct net_device *dev, mutex_lock(&mdev->state_lock); memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); - memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); + memcpy(&new_prof.hwtstamp_config, ts_config, sizeof(*ts_config)); err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); if (err) @@ -3560,7 +3550,7 @@ int mlx4_en_reset_config(struct net_device *dev, dev->features |= NETIF_F_HW_VLAN_CTAG_RX; else dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; - } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { + } else if (ts_config->rx_filter == HWTSTAMP_FILTER_NONE) { /* RX time-stamping is OFF, update the RX vlan offload * to the latest wanted state */ @@ -3581,7 +3571,7 @@ int mlx4_en_reset_config(struct net_device *dev, * Regardless of the caller's choice, * Turn Off RX vlan offload in case of time-stamping is ON */ - if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { + if (ts_config->rx_filter != HWTSTAMP_FILTER_NONE) { if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 03d2fc7d9b09..2de226951e19 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -174,7 +174,8 @@ 
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is defaul static atomic_t pf_loading = ATOMIC_INIT(0); static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { ctx->val.vbool = !!mlx4_internal_err_reset; return 0; @@ -189,7 +190,8 @@ static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id, } static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx4_priv *priv = devlink_priv(devlink); struct mlx4_dev *dev = &priv->dev; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index ad0d91a75184..aab97694f86b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -388,7 +388,7 @@ struct mlx4_en_port_profile { u8 num_up; int rss_rings; int inline_thold; - struct hwtstamp_config hwtstamp_config; + struct kernel_hwtstamp_config hwtstamp_config; }; struct mlx4_en_profile { @@ -612,7 +612,7 @@ struct mlx4_en_priv { bool wol; struct device *ddev; struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE]; - struct hwtstamp_config hwtstamp_config; + struct kernel_hwtstamp_config hwtstamp_config; u32 counter_index; #ifdef CONFIG_MLX4_EN_DCB @@ -780,7 +780,7 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); int mlx4_en_moderation_update(struct mlx4_en_priv *priv); int mlx4_en_reset_config(struct net_device *dev, - struct hwtstamp_config ts_config, + struct kernel_hwtstamp_config *ts_config, netdev_features_t new_features); void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, struct mlx4_en_stats_bitmap *stats_bitmap, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 722282cebce9..5b08e5ffe0e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -181,6 +181,7 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent) static void cmd_free_index(struct mlx5_cmd *cmd, int idx) { lockdep_assert_held(&cmd->alloc_lock); + cmd->ent_arr[idx] = NULL; set_bit(idx, &cmd->vars.bitmask); } @@ -1200,6 +1201,44 @@ out_err: return err; } +/* Check if all command slots are stalled (timed out and not recovered). + * Returns true if all slots timed out on a recent command and have not been + * completed by FW yet (stalled state); + * false otherwise (at least one slot is not stalled). + * + * In such an odd "all_stalled" situation, this serves as a protection mechanism + * to avoid blocking the kernel for long periods of time in case FW is not + * responding to commands. + */ +static bool mlx5_cmd_all_stalled(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + bool all_stalled = true; + unsigned long flags; + int i; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + + /* at least one command slot is free */ + if (bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds) > 0) { + all_stalled = false; + goto out; + } + + for_each_clear_bit(i, &cmd->vars.bitmask, cmd->vars.max_reg_cmds) { + struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i]; + + if (!test_bit(MLX5_CMD_ENT_STATE_TIMEDOUT, &ent->state)) { + all_stalled = false; + break; + } + } +out: + spin_unlock_irqrestore(&cmd->alloc_lock, flags); + + return all_stalled; +} + /* Notes: * 1.
Callback functions may not sleep * 2. page queue commands do not support asynchronous completion @@ -1230,6 +1269,15 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, if (callback && page_queue) return -EINVAL; + if (!page_queue && mlx5_cmd_all_stalled(dev)) { + mlx5_core_err_rl(dev, + "All CMD slots are stalled, aborting command\n"); + /* there's no reason to wait and block the whole kernel if FW + * isn't currently responding to all slots; fail immediately + */ + return -EAGAIN; + } + ent = cmd_alloc_ent(cmd, in, out, uout, uout_size, callback, context, page_queue); if (IS_ERR(ent)) @@ -1700,6 +1748,13 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force if (test_bit(i, &vector)) { ent = cmd->ent_arr[i]; + if (forced && ent->ret == -ETIMEDOUT) + set_bit(MLX5_CMD_ENT_STATE_TIMEDOUT, + &ent->state); + else if (!forced) /* real FW completion */ + clear_bit(MLX5_CMD_ENT_STATE_TIMEDOUT, + &ent->state); + /* if we already completed the command, ignore it */ if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 891bbbbfbbf1..64c04f52990f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -564,10 +564,14 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev) bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev) { - u64 fsystem_guid, psystem_guid; + u8 fsystem_guid[MLX5_SW_IMAGE_GUID_MAX_BYTES]; + u8 psystem_guid[MLX5_SW_IMAGE_GUID_MAX_BYTES]; + u8 flen; + u8 plen; - fsystem_guid = mlx5_query_nic_system_image_guid(dev); - psystem_guid = mlx5_query_nic_system_image_guid(peer_dev); + mlx5_query_nic_sw_system_image_guid(dev, fsystem_guid, &flen); + mlx5_query_nic_sw_system_image_guid(peer_dev, psystem_guid, &plen); - return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid); + return plen && flen && flen == plen && + !memcmp(fsystem_guid, psystem_guid, flen); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h index c9555119a661..43b9bf8829cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h @@ -26,7 +26,8 @@ enum mlx5_devlink_param_id { MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH, MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW, MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH, - MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE + MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE, + MLX5_DEVLINK_PARAM_ID_SWP_L4_CSUM_MODE, }; struct mlx5_trap_ctx { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 080e7eab52c7..7bcf822a89f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -54,7 +54,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer) if (!MLX5_GET(mtrc_cap, out, trace_to_memory)) { mlx5_core_dbg(dev, "FWTracer: Device does not support logging traces to memory\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } tracer->trc_ver = MLX5_GET(mtrc_cap, out, trc_ver); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index a163f81f07c1..811178d8976c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -699,7 +699,7 @@
struct mlx5e_rq { struct mlx5e_rq_stats *stats; struct mlx5e_cq cq; struct mlx5e_cq_decomp cqd; - struct hwtstamp_config *tstamp; + struct kernel_hwtstamp_config *hwtstamp_config; struct mlx5_clock *clock; struct mlx5e_icosq *icosq; struct mlx5e_priv *priv; @@ -787,7 +787,6 @@ struct mlx5e_channel { /* control */ struct mlx5e_priv *priv; struct mlx5_core_dev *mdev; - struct hwtstamp_config *tstamp; DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES); int ix; int vec_ix; @@ -921,7 +920,7 @@ struct mlx5e_priv { u8 max_opened_tc; bool tx_ptp_opened; bool rx_ptp_opened; - struct hwtstamp_config tstamp; + struct kernel_hwtstamp_config hwtstamp_config; u16 q_counter[MLX5_SD_MAX_GROUP_SZ]; u16 drop_rq_q_counter; struct notifier_block events_nb; @@ -1030,8 +1029,11 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf); void mlx5e_set_rx_mode_work(struct work_struct *work); -int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr); -int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr); +int mlx5e_hwtstamp_set(struct mlx5e_priv *priv, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); +int mlx5e_hwtstamp_get(struct mlx5e_priv *priv, + struct kernel_hwtstamp_config *config); int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, @@ -1157,7 +1159,9 @@ extern const struct ethtool_ops mlx5e_ethtool_ops; int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey); int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises); void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); -int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, +int mlx5e_modify_tirs_lb(struct mlx5_core_dev *mdev, bool enable_uc_lb, + bool enable_mc_lb); +int mlx5e_refresh_tirs(struct mlx5_core_dev *mdev, bool enable_uc_lb, bool enable_mc_lb); void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc); @@ -1245,7 +1249,7 @@ void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv); void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu); -void mlx5e_set_xdp_feature(struct net_device *netdev); +void mlx5e_set_xdp_feature(struct mlx5e_priv *priv); netdev_features_t mlx5e_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c index 0b1ac6e5c890..8818f65d1fbc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c @@ -40,11 +40,8 @@ void mlx5e_destroy_devlink(struct mlx5e_dev *mlx5e_dev) static void mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid) { - u64 parent_id; - - parent_id = mlx5_query_nic_system_image_guid(dev); - ppid->id_len = sizeof(parent_id); - memcpy(ppid->id, &parent_id, sizeof(parent_id)); + BUILD_BUG_ON(MLX5_SW_IMAGE_GUID_MAX_BYTES > MAX_PHYS_ITEM_ID_LEN); + mlx5_query_nic_sw_system_image_guid(dev, ppid->id, &ppid->id_len); } int mlx5e_devlink_port_register(struct mlx5e_dev *mlx5e_dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c index 4e72ca8070e2..1de18c7e96ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c 
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c @@ -6,6 +6,7 @@ #include <linux/xarray.h> #include <linux/hashtable.h> #include <linux/refcount.h> +#include <linux/mlx5/driver.h> #include "mapping.h" @@ -24,7 +25,8 @@ struct mapping_ctx { struct delayed_work dwork; struct list_head pending_list; spinlock_t pending_list_lock; /* Guards pending list */ - u64 id; + u8 id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; + u8 id_len; u8 type; struct list_head list; refcount_t refcount; @@ -220,13 +222,15 @@ mapping_create(size_t data_size, u32 max_id, bool delayed_removal) } struct mapping_ctx * -mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal) +mapping_create_for_id(u8 *id, u8 id_len, u8 type, size_t data_size, u32 max_id, + bool delayed_removal) { struct mapping_ctx *ctx; mutex_lock(&shared_ctx_lock); list_for_each_entry(ctx, &shared_ctx_list, list) { - if (ctx->id == id && ctx->type == type) { + if (ctx->type == type && ctx->id_len == id_len && + !memcmp(id, ctx->id, id_len)) { if (refcount_inc_not_zero(&ctx->refcount)) goto unlock; break; @@ -237,7 +241,8 @@ mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delaye if (IS_ERR(ctx)) goto unlock; - ctx->id = id; + memcpy(ctx->id, id, id_len); + ctx->id_len = id_len; ctx->type = type; list_add(&ctx->list, &shared_ctx_list); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h index 4e2119f0f4c1..e86a103d58b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h @@ -27,6 +27,7 @@ void mapping_destroy(struct mapping_ctx *ctx); /* adds mapping with an id or get an existing mapping with the same id */ struct mapping_ctx * -mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal); +mapping_create_for_id(u8 *id, u8 id_len, u8 type, size_t data_size, u32 max_id, + bool delayed_removal); #endif /* __MLX5_MAPPING_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index c93ee969ea64..424f8a2728a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -82,7 +82,7 @@ static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb) } static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb, - struct mlx5e_ptp_cq_stats *cq_stats) + struct mlx5e_ptpsq *ptpsq) { struct skb_shared_hwtstamps hwts = {}; ktime_t diff; @@ -92,8 +92,17 @@ static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb, /* Maximal allowed diff is 1 / 128 second */ if (diff > (NSEC_PER_SEC >> 7)) { - cq_stats->abort++; - cq_stats->abort_abs_diff_ns += diff; + struct mlx5e_txqsq *sq = &ptpsq->txqsq; + + ptpsq->cq_stats->abort++; + ptpsq->cq_stats->abort_abs_diff_ns += diff; + if (diff > (NSEC_PER_SEC >> 1) && + !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { + netdev_warn(sq->channel->netdev, + "PTP TX timestamp difference between CQE and port exceeds threshold: %lld ns, recovering SQ %u\n", + (s64)diff, sq->sqn); + queue_work(sq->priv->wq, &ptpsq->report_unhealthy_work); + } return; } @@ -103,7 +112,7 @@ static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb, void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type, ktime_t hwtstamp, - struct mlx5e_ptp_cq_stats *cq_stats) + struct mlx5e_ptpsq *ptpsq) { switch (hwtstamp_type) { case (MLX5E_SKB_CB_CQE_HWTSTAMP): @@ -121,7 +130,7 @@ void 
mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type, !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp) return; - mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats); + mlx5e_skb_cb_hwtstamp_tx(skb, ptpsq); memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp)); } @@ -209,7 +218,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq, hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe)); mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP, - hwtstamp, ptpsq->cq_stats); + hwtstamp, ptpsq); ptpsq->cq_stats->cqe++; mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp); @@ -713,7 +722,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params, rq->netdev = priv->netdev; rq->priv = priv; rq->clock = mdev->clock; - rq->tstamp = &priv->tstamp; + rq->hwtstamp_config = &priv->hwtstamp_config; rq->mdev = mdev; rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->stats = &c->priv->ptp_stats.rq; @@ -896,7 +905,6 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, c->priv = priv; c->mdev = priv->mdev; - c->tstamp = &priv->tstamp; c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h index 1b3c9648220b..2a457a2ed707 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h @@ -64,7 +64,6 @@ struct mlx5e_ptp { /* control */ struct mlx5e_priv *priv; struct mlx5_core_dev *mdev; - struct hwtstamp_config *tstamp; DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES); struct mlx5_sq_bfreg *bfreg; }; @@ -148,7 +147,7 @@ enum { void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type, ktime_t hwtstamp, - struct mlx5e_ptp_cq_stats *cq_stats); + struct mlx5e_ptpsq *ptpsq); void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb); #endif /* __MLX5_EN_PTP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index 9d1c677814e0..87a2ad69526d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -30,15 +30,11 @@ static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswi { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev, *esw_mdev; - u64 system_guid, esw_system_guid; mdev = priv->mdev; esw_mdev = esw->dev; - system_guid = mlx5_query_nic_system_image_guid(mdev); - esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev); - - return system_guid == esw_system_guid; + return mlx5_same_hw_devs(mdev, esw_mdev); } static struct net_device * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index b1415992ffa2..0686fbdd5a05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -318,7 +318,8 @@ mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx struct devlink_fmsg *fmsg) { mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); - devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter); + devlink_fmsg_u32_pair_put(fmsg, "filter_type", + priv->hwtstamp_config.rx_filter); mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg); mlx5e_health_fmsg_named_obj_nest_end(fmsg); } diff 
--git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c index c96cbc4b0dbf..88b0e1050d1a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c @@ -231,6 +231,8 @@ mlx5e_rss_create_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, rqtn, rss_inner); mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param); rss_tt = mlx5e_rss_get_tt_config(rss, tt); + mlx5e_tir_builder_build_self_lb_block(builder, rss->params.self_lb_blk, + rss->params.self_lb_blk); mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner); err = mlx5e_tir_init(tir, builder, rss->mdev, true); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h index 5fb03cd0a411..17664757a561 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h @@ -23,6 +23,7 @@ struct mlx5e_rss_init_params { struct mlx5e_rss_params { bool inner_ft_support; u32 drop_rqn; + bool self_lb_blk; }; struct mlx5e_rss_params_traffic_type diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index ac26a32845d0..55c117b7d8c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -71,6 +71,8 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res, rss_params = (struct mlx5e_rss_params) { .inner_ft_support = inner_ft_support, .drop_rqn = res->drop_rqn, + .self_lb_blk = + res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK, }; rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params); @@ -104,6 +106,8 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int in rss_params = (struct mlx5e_rss_params) { .inner_ft_support = inner_ft_support, .drop_rqn = res->drop_rqn, + .self_lb_blk = + res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK, }; rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params); @@ -346,6 +350,7 @@ static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsig static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res) { bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + bool self_lb_blk = res->features & MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK; struct mlx5e_tir_builder *builder; int err = 0; int ix; @@ -376,6 +381,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res) mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), inner_ft_support); mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param); + mlx5e_tir_builder_build_self_lb_block(builder, self_lb_blk, + self_lb_blk); mlx5e_tir_builder_build_direct(builder); err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h index 65a857c215e1..675780120a20 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -21,6 +21,7 @@ enum mlx5e_rx_res_features { MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0), MLX5E_RX_RES_FEATURE_PTP = BIT(1), MLX5E_RX_RES_FEATURE_MULTI_VHCA = BIT(2), + MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK = BIT(3), }; /* Setup */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c index 896f718483c3..991f47050643 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c @@ -307,7 +307,8 @@ mlx5e_tc_int_port_init(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_int_port_priv *int_port_priv; - u64 mapping_id; + u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; + u8 id_len; if (!mlx5e_tc_int_port_supported(esw)) return NULL; @@ -316,9 +317,10 @@ mlx5e_tc_int_port_init(struct mlx5e_priv *priv) if (!int_port_priv) return NULL; - mapping_id = mlx5_query_nic_system_image_guid(priv->mdev); + mlx5_query_nic_sw_system_image_guid(priv->mdev, mapping_id, &id_len); - int_port_priv->metadata_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_INT_PORT, + int_port_priv->metadata_mapping = mapping_create_for_id(mapping_id, id_len, + MAPPING_TYPE_INT_PORT, sizeof(u32) * 2, (1 << ESW_VPORT_BITS) - 1, true); if (IS_ERR(int_port_priv->metadata_mapping)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 870d12364f99..fc0e57403d25 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -2287,9 +2287,10 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, enum mlx5_flow_namespace_type ns_type, struct mlx5e_post_act *post_act) { + u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; struct mlx5_tc_ct_priv *ct_priv; struct mlx5_core_dev *dev; - u64 mapping_id; + u8 id_len; int err; dev = priv->mdev; @@ -2301,16 +2302,18 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, if (!ct_priv) goto err_alloc; - mapping_id = mlx5_query_nic_system_image_guid(dev); + mlx5_query_nic_sw_system_image_guid(dev, mapping_id, &id_len); - ct_priv->zone_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_ZONE, + ct_priv->zone_mapping = mapping_create_for_id(mapping_id, id_len, + MAPPING_TYPE_ZONE, sizeof(u16), 0, true); if (IS_ERR(ct_priv->zone_mapping)) { err = PTR_ERR(ct_priv->zone_mapping); goto err_mapping_zone; } - ct_priv->labels_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_LABELS, + ct_priv->labels_mapping = mapping_create_for_id(mapping_id, id_len, + MAPPING_TYPE_LABELS, sizeof(u32) * 4, 0, true); if (IS_ERR(ct_priv->labels_mapping)) { err = PTR_ERR(ct_priv->labels_mapping); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c index 19499072f67f..0b55e77f19c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c @@ -146,6 +146,31 @@ void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder) MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); } +static void mlx5e_tir_context_self_lb_block(void *tirc, bool enable_uc_lb, + bool enable_mc_lb) +{ + u8 lb_flags = 0; + + if (enable_uc_lb) + lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; + if (enable_mc_lb) + lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST; + + MLX5_SET(tirc, tirc, self_lb_block, lb_flags); +} + +void mlx5e_tir_builder_build_self_lb_block(struct mlx5e_tir_builder *builder, + bool enable_uc_lb, + bool enable_mc_lb) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + + if (builder->modify) + MLX5_SET(modify_tir_in, builder->in, bitmask.self_lb_en, 1); + + mlx5e_tir_context_self_lb_block(tirc, enable_uc_lb, enable_mc_lb); +} + void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder) { void *tirc = 
mlx5e_tir_builder_get_tirc(builder); @@ -153,9 +178,7 @@ void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder) WARN_ON(builder->modify); MLX5_SET(tirc, tirc, tls_en, 1); - MLX5_SET(tirc, tirc, self_lb_block, - MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST | - MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST); + mlx5e_tir_context_self_lb_block(tirc, true, true); } int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h index e8df3aaf6562..958eeb959a19 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h @@ -35,6 +35,9 @@ void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder, const struct mlx5e_rss_params_traffic_type *rss_tt, bool inner); void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder); +void mlx5e_tir_builder_build_self_lb_block(struct mlx5e_tir_builder *builder, + bool enable_uc_lb, + bool enable_mc_lb); void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder); struct mlx5_core_dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index 996fcdb5a29d..da8c44f46edb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -47,7 +47,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params rq->netdev = priv->netdev; rq->priv = priv; rq->clock = mdev->clock; - rq->tstamp = &priv->tstamp; + rq->hwtstamp_config = &priv->hwtstamp_config; rq->mdev = mdev; rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); rq->stats = &priv->trap_stats.rq; @@ -144,7 +144,6 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) t->priv = priv; t->mdev = priv->mdev; - t->tstamp = &priv->tstamp; t->pdev = mlx5_core_dma_dev(priv->mdev); t->netdev = priv->netdev; t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h index aa3f17658c6d..394e917ea2b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h @@ -22,7 +22,6 @@ struct mlx5e_trap { /* control */ struct mlx5e_priv *priv; struct mlx5_core_dev *mdev; - struct hwtstamp_config *tstamp; DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES); struct mlx5e_params params; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 6760bb0336df..7e191e1569e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -92,7 +92,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); void mlx5e_free_rx_descs(struct mlx5e_rq *rq); void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq); -static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) +static inline bool mlx5e_rx_hw_stamp(struct kernel_hwtstamp_config *config) { return config->rx_filter == HWTSTAMP_FILTER_ALL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 5d51600935a6..80f9fc10877a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -179,7 +179,7 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) { const struct mlx5e_xdp_buff 
*_ctx = (void *)ctx; - if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp))) + if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->hwtstamp_config))) return -ENODATA; *timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index dbd88eb5c082..5981c71cae2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -71,7 +71,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c, rq->pdev = c->pdev; rq->netdev = c->netdev; rq->priv = c->priv; - rq->tstamp = c->tstamp; + rq->hwtstamp_config = &c->priv->hwtstamp_config; rq->clock = mdev->clock; rq->icosq = &c->icosq; rq->ix = c->ix; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c index 8565cfe8d7dc..38e7c77cc851 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c @@ -28,12 +28,15 @@ struct mlx5e_psp_tx { struct mlx5_flow_handle *rule; struct mutex mutex; /* Protect PSP TX steering */ u32 refcnt; + struct mlx5_fc *tx_counter; }; struct mlx5e_psp_rx_err { struct mlx5_flow_table *ft; struct mlx5_flow_handle *rule; - struct mlx5_flow_handle *drop_rule; + struct mlx5_flow_handle *auth_fail_rule; + struct mlx5_flow_handle *err_rule; + struct mlx5_flow_handle *bad_rule; struct mlx5_modify_hdr *copy_modify_hdr; }; @@ -50,6 +53,10 @@ struct mlx5e_accel_fs_psp_prot { struct mlx5e_accel_fs_psp { struct mlx5e_accel_fs_psp_prot fs_prot[ACCEL_FS_PSP_NUM_TYPES]; + struct mlx5_fc *rx_counter; + struct mlx5_fc *rx_auth_fail_counter; + struct mlx5_fc *rx_err_counter; + struct mlx5_fc *rx_bad_counter; }; struct mlx5e_psp_fs { @@ -72,9 +79,19 @@ static enum mlx5_traffic_types fs_psp2tt(enum accel_fs_psp_type i) static void accel_psp_fs_rx_err_del_rules(struct mlx5e_psp_fs *fs, struct mlx5e_psp_rx_err *rx_err) { - if (rx_err->drop_rule) { - mlx5_del_flow_rules(rx_err->drop_rule); - rx_err->drop_rule = NULL; + if (rx_err->bad_rule) { + mlx5_del_flow_rules(rx_err->bad_rule); + rx_err->bad_rule = NULL; + } + + if (rx_err->err_rule) { + mlx5_del_flow_rules(rx_err->err_rule); + rx_err->err_rule = NULL; + } + + if (rx_err->auth_fail_rule) { + mlx5_del_flow_rules(rx_err->auth_fail_rule); + rx_err->auth_fail_rule = NULL; } if (rx_err->rule) { @@ -117,6 +134,7 @@ static int accel_psp_fs_rx_err_add_rule(struct mlx5e_psp_fs *fs, { u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; struct mlx5_core_dev *mdev = fs->mdev; + struct mlx5_flow_destination dest[2]; struct mlx5_flow_act flow_act = {}; struct mlx5_modify_hdr *modify_hdr; struct mlx5_flow_handle *fte; @@ -147,10 +165,14 @@ static int accel_psp_fs_rx_err_add_rule(struct mlx5e_psp_fs *fs, accel_psp_setup_syndrome_match(spec, PSP_OK); /* create fte */ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; flow_act.modify_hdr = modify_hdr; - fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, - &fs_prot->default_dest, 1); + dest[0].type = fs_prot->default_dest.type; + dest[0].ft = fs_prot->default_dest.ft; + dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[1].counter = fs->rx_fs->rx_counter; + fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 2); if (IS_ERR(fte)) { err = PTR_ERR(fte); mlx5_core_err(mdev, "fail to add psp rx err copy rule 
err=%d\n", err); @@ -158,22 +180,69 @@ static int accel_psp_fs_rx_err_add_rule(struct mlx5e_psp_fs *fs, } rx_err->rule = fte; - /* add default drop rule */ + /* add auth fail drop rule */ memset(spec, 0, sizeof(*spec)); memset(&flow_act, 0, sizeof(flow_act)); + accel_psp_setup_syndrome_match(spec, PSP_ICV_FAIL); /* create fte */ - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; - fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, NULL, 0); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[0].counter = fs->rx_fs->rx_auth_fail_counter; + fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1); if (IS_ERR(fte)) { err = PTR_ERR(fte); - mlx5_core_err(mdev, "fail to add psp rx err drop rule err=%d\n", err); + mlx5_core_err(mdev, "fail to add psp rx auth fail drop rule err=%d\n", + err); goto out_drop_rule; } - rx_err->drop_rule = fte; + rx_err->auth_fail_rule = fte; + + /* add framing drop rule */ + memset(spec, 0, sizeof(*spec)); + memset(&flow_act, 0, sizeof(flow_act)); + accel_psp_setup_syndrome_match(spec, PSP_BAD_TRAILER); + /* create fte */ + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[0].counter = fs->rx_fs->rx_err_counter; + fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1); + if (IS_ERR(fte)) { + err = PTR_ERR(fte); + mlx5_core_err(mdev, "fail to add psp rx framing err drop rule err=%d\n", + err); + goto out_drop_auth_fail_rule; + } + rx_err->err_rule = fte; + + /* add misc. errors drop rule */ + memset(spec, 0, sizeof(*spec)); + memset(&flow_act, 0, sizeof(flow_act)); + /* create fte */ + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[0].counter = fs->rx_fs->rx_bad_counter; + fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, dest, 1); + if (IS_ERR(fte)) { + err = PTR_ERR(fte); + mlx5_core_err(mdev, "fail to add psp rx misc. 
err drop rule err=%d\n", + err); + goto out_drop_error_rule; + } + rx_err->bad_rule = fte; + rx_err->copy_modify_hdr = modify_hdr; goto out_spec; +out_drop_error_rule: + mlx5_del_flow_rules(rx_err->err_rule); + rx_err->err_rule = NULL; +out_drop_auth_fail_rule: + mlx5_del_flow_rules(rx_err->auth_fail_rule); + rx_err->auth_fail_rule = NULL; out_drop_rule: mlx5_del_flow_rules(rx_err->rule); rx_err->rule = NULL; @@ -461,6 +530,10 @@ static void accel_psp_fs_cleanup_rx(struct mlx5e_psp_fs *fs) return; accel_psp = fs->rx_fs; + mlx5_fc_destroy(fs->mdev, accel_psp->rx_bad_counter); + mlx5_fc_destroy(fs->mdev, accel_psp->rx_err_counter); + mlx5_fc_destroy(fs->mdev, accel_psp->rx_auth_fail_counter); + mlx5_fc_destroy(fs->mdev, accel_psp->rx_counter); for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) { fs_prot = &accel_psp->fs_prot[i]; mutex_destroy(&fs_prot->prot_mutex); @@ -474,7 +547,10 @@ static int accel_psp_fs_init_rx(struct mlx5e_psp_fs *fs) { struct mlx5e_accel_fs_psp_prot *fs_prot; struct mlx5e_accel_fs_psp *accel_psp; + struct mlx5_core_dev *mdev = fs->mdev; + struct mlx5_fc *flow_counter; enum accel_fs_psp_type i; + int err; accel_psp = kzalloc(sizeof(*accel_psp), GFP_KERNEL); if (!accel_psp) @@ -485,9 +561,68 @@ static int accel_psp_fs_init_rx(struct mlx5e_psp_fs *fs) mutex_init(&fs_prot->prot_mutex); } + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + mlx5_core_warn(mdev, + "fail to create psp rx flow counter err=%pe\n", + flow_counter); + err = PTR_ERR(flow_counter); + goto out_err; + } + accel_psp->rx_counter = flow_counter; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + mlx5_core_warn(mdev, + "fail to create psp rx auth fail flow counter err=%pe\n", + flow_counter); + err = PTR_ERR(flow_counter); + goto out_counter_err; + } + accel_psp->rx_auth_fail_counter = flow_counter; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + mlx5_core_warn(mdev, + "fail to create psp rx error flow counter err=%pe\n", + flow_counter); + err = PTR_ERR(flow_counter); + goto out_auth_fail_counter_err; + } + accel_psp->rx_err_counter = flow_counter; + + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + mlx5_core_warn(mdev, + "fail to create psp rx bad flow counter err=%pe\n", + flow_counter); + err = PTR_ERR(flow_counter); + goto out_err_counter_err; + } + accel_psp->rx_bad_counter = flow_counter; + fs->rx_fs = accel_psp; return 0; + +out_err_counter_err: + mlx5_fc_destroy(mdev, accel_psp->rx_err_counter); + accel_psp->rx_err_counter = NULL; +out_auth_fail_counter_err: + mlx5_fc_destroy(mdev, accel_psp->rx_auth_fail_counter); + accel_psp->rx_auth_fail_counter = NULL; +out_counter_err: + mlx5_fc_destroy(mdev, accel_psp->rx_counter); + accel_psp->rx_counter = NULL; +out_err: + for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) { + fs_prot = &accel_psp->fs_prot[i]; + mutex_destroy(&fs_prot->prot_mutex); + } + kfree(accel_psp); + fs->rx_fs = NULL; + + return err; } void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv) @@ -532,6 +667,7 @@ static int accel_psp_fs_tx_create_ft_table(struct mlx5e_psp_fs *fs) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_destination dest = {}; struct mlx5_core_dev *mdev = fs->mdev; struct mlx5_flow_act flow_act = {}; u32 *in, *mc, *outer_headers_c; @@ -580,8 +716,11 @@ static int accel_psp_fs_tx_create_ft_table(struct mlx5e_psp_fs *fs) flow_act.crypto.type = 
MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP; flow_act.flags |= FLOW_ACT_NO_APPEND; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | - MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT; - rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0); + MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter = tx_fs->tx_counter; + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); mlx5_core_err(mdev, "PSP: fail to add psp tx flow rule, err = %d\n", err); @@ -650,6 +789,7 @@ static void accel_psp_fs_cleanup_tx(struct mlx5e_psp_fs *fs) if (!tx_fs) return; + mlx5_fc_destroy(fs->mdev, tx_fs->tx_counter); mutex_destroy(&tx_fs->mutex); WARN_ON(tx_fs->refcnt); kfree(tx_fs); @@ -658,10 +798,12 @@ static void accel_psp_fs_cleanup_tx(struct mlx5e_psp_fs *fs) static int accel_psp_fs_init_tx(struct mlx5e_psp_fs *fs) { + struct mlx5_core_dev *mdev = fs->mdev; struct mlx5_flow_namespace *ns; + struct mlx5_fc *flow_counter; struct mlx5e_psp_tx *tx_fs; - ns = mlx5_get_flow_namespace(fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC); + ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC); if (!ns) return -EOPNOTSUPP; @@ -669,12 +811,55 @@ static int accel_psp_fs_init_tx(struct mlx5e_psp_fs *fs) if (!tx_fs) return -ENOMEM; + flow_counter = mlx5_fc_create(mdev, false); + if (IS_ERR(flow_counter)) { + mlx5_core_warn(mdev, + "fail to create psp tx flow counter err=%pe\n", + flow_counter); + kfree(tx_fs); + return PTR_ERR(flow_counter); + } + tx_fs->tx_counter = flow_counter; mutex_init(&tx_fs->mutex); tx_fs->ns = ns; fs->tx_fs = tx_fs; return 0; } +static void +mlx5e_accel_psp_fs_get_stats_fill(struct mlx5e_priv *priv, + struct mlx5e_psp_stats *stats) +{ + struct mlx5e_psp_tx *tx_fs = priv->psp->fs->tx_fs; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_accel_fs_psp *accel_psp; + + accel_psp = (struct mlx5e_accel_fs_psp *)priv->psp->fs->rx_fs; + + if (tx_fs->tx_counter) + mlx5_fc_query(mdev, tx_fs->tx_counter, &stats->psp_tx_pkts, + &stats->psp_tx_bytes); + + if (accel_psp->rx_counter) + mlx5_fc_query(mdev, accel_psp->rx_counter, &stats->psp_rx_pkts, + &stats->psp_rx_bytes); + + if (accel_psp->rx_auth_fail_counter) + mlx5_fc_query(mdev, accel_psp->rx_auth_fail_counter, + &stats->psp_rx_pkts_auth_fail, + &stats->psp_rx_bytes_auth_fail); + + if (accel_psp->rx_err_counter) + mlx5_fc_query(mdev, accel_psp->rx_err_counter, + &stats->psp_rx_pkts_frame_err, + &stats->psp_rx_bytes_frame_err); + + if (accel_psp->rx_bad_counter) + mlx5_fc_query(mdev, accel_psp->rx_bad_counter, + &stats->psp_rx_pkts_drop, + &stats->psp_rx_bytes_drop); +} + void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv) { if (!priv->psp) @@ -849,12 +1034,30 @@ mlx5e_psp_key_rotate(struct psp_dev *psd, struct netlink_ext_ack *exack) return mlx5e_psp_rotate_key(priv->mdev); } +static void +mlx5e_psp_get_stats(struct psp_dev *psd, struct psp_dev_stats *stats) +{ + struct mlx5e_priv *priv = netdev_priv(psd->main_netdev); + struct mlx5e_psp_stats nstats; + + mlx5e_accel_psp_fs_get_stats_fill(priv, &nstats); + stats->rx_packets = nstats.psp_rx_pkts; + stats->rx_bytes = nstats.psp_rx_bytes; + stats->rx_auth_fail = nstats.psp_rx_pkts_auth_fail; + stats->rx_error = nstats.psp_rx_pkts_frame_err; + stats->rx_bad = nstats.psp_rx_pkts_drop; + stats->tx_packets = nstats.psp_tx_pkts; + stats->tx_bytes = nstats.psp_tx_bytes; + stats->tx_error = atomic_read(&priv->psp->tx_drop); +} + static struct psp_dev_ops 
mlx5_psp_ops = { .set_config = mlx5e_psp_set_config, .rx_spi_alloc = mlx5e_psp_rx_spi_alloc, .tx_key_add = mlx5e_psp_assoc_add, .tx_key_del = mlx5e_psp_assoc_del, .key_rotate = mlx5e_psp_key_rotate, + .get_stats = mlx5e_psp_get_stats, }; void mlx5e_psp_unregister(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h index 42bb671fb2cb..6b62fef0d9a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h @@ -7,11 +7,27 @@ #include <net/psp/types.h> #include "en.h" +struct mlx5e_psp_stats { + u64 psp_rx_pkts; + u64 psp_rx_bytes; + u64 psp_rx_pkts_auth_fail; + u64 psp_rx_bytes_auth_fail; + u64 psp_rx_pkts_frame_err; + u64 psp_rx_bytes_frame_err; + u64 psp_rx_pkts_drop; + u64 psp_rx_bytes_drop; + u64 psp_tx_pkts; + u64 psp_tx_bytes; + u64 psp_tx_pkts_drop; + u64 psp_tx_bytes_drop; +}; + struct mlx5e_psp { struct psp_dev *psp; struct psp_dev_caps caps; struct mlx5e_psp_fs *fs; atomic_t tx_key_cnt; + atomic_t tx_drop; }; static inline bool mlx5_is_psp_device(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c index 828bff1137af..c17ea0fcd8ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c @@ -186,6 +186,7 @@ bool mlx5e_psp_handle_tx_skb(struct net_device *netdev, /* psp_encap of the packet */ if (!psp_dev_encapsulate(net, skb, psp_st->spi, psp_st->ver, 0)) { kfree_skb_reason(skb, SKB_DROP_REASON_PSP_OUTPUT); + atomic_inc(&priv->psp->tx_drop); return false; } if (skb_is_gso(skb)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 30424ccad584..5a2ac7b6f260 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -247,45 +247,43 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) memset(res, 0, sizeof(*res)); } -int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, - bool enable_mc_lb) +int mlx5e_modify_tirs_lb(struct mlx5_core_dev *mdev, bool enable_uc_lb, + bool enable_mc_lb) { - struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_tir_builder *builder; struct mlx5e_tir *tir; - u8 lb_flags = 0; - int err = 0; - u32 tirn = 0; - int inlen; - void *in; + int err = 0; - inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) + builder = mlx5e_tir_builder_alloc(true); + if (!builder) return -ENOMEM; - if (enable_uc_lb) - lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; - - if (enable_mc_lb) - lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST; - - if (lb_flags) - MLX5_SET(modify_tir_in, in, ctx.self_lb_block, lb_flags); - - MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); + mlx5e_tir_builder_build_self_lb_block(builder, enable_uc_lb, + enable_mc_lb); mutex_lock(&mdev->mlx5e_res.hw_objs.td.list_lock); list_for_each_entry(tir, &mdev->mlx5e_res.hw_objs.td.tirs_list, list) { - tirn = tir->tirn; - err = mlx5_core_modify_tir(mdev, tirn, in); - if (err) + err = mlx5e_tir_modify(tir, builder); + if (err) { + mlx5_core_err(mdev, + "modify tir(0x%x) enable_lb uc(%d) mc(%d) failed, %d\n", + mlx5e_tir_get_tirn(tir), + enable_uc_lb, enable_mc_lb, err); break; + } } mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock); - kvfree(in); - if (err) 
- netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); + mlx5e_tir_builder_free(builder); return err; } + +int mlx5e_refresh_tirs(struct mlx5_core_dev *mdev, bool enable_uc_lb, + bool enable_mc_lb) +{ + if (MLX5_CAP_GEN(mdev, tis_tir_td_order)) + return 0; /* refresh not needed */ + + return mlx5e_modify_tirs_lb(mdev, enable_uc_lb, enable_mc_lb); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index cf8f14ce4cd5..fddf7c207f8e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -31,14 +31,15 @@ */ #include <linux/device.h> #include <linux/netdevice.h> +#include <linux/units.h> #include "en.h" #include "en/port.h" #include "en/port_buffer.h" #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ -#define MLX5E_100MB (100000) -#define MLX5E_1GB (1000000) +#define MLX5E_100MB_TO_KB (100 * MEGA / KILO) +#define MLX5E_1GB_TO_KB (GIGA / KILO) #define MLX5E_CEE_STATE_UP 1 #define MLX5E_CEE_STATE_DOWN 0 @@ -572,10 +573,10 @@ static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev, for (i = 0; i <= mlx5_max_tc(mdev); i++) { switch (max_bw_unit[i]) { case MLX5_100_MBPS_UNIT: - maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB; + maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB_TO_KB; break; case MLX5_GBPS_UNIT: - maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB; + maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB_TO_KB; break; case MLX5_BW_NO_LIMIT: break; @@ -595,8 +596,8 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev, struct mlx5_core_dev *mdev = priv->mdev; u8 max_bw_value[IEEE_8021QAZ_MAX_TCS]; u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS]; - __u64 upper_limit_mbps; - __u64 upper_limit_gbps; + u64 upper_limit_100mbps; + u64 upper_limit_gbps; int i; struct { int scale; @@ -614,22 +615,22 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev, memset(max_bw_value, 0, sizeof(max_bw_value)); memset(max_bw_unit, 0, sizeof(max_bw_unit)); - upper_limit_mbps = 255 * MLX5E_100MB; - upper_limit_gbps = 255 * MLX5E_1GB; + upper_limit_100mbps = U8_MAX * MLX5E_100MB_TO_KB; + upper_limit_gbps = U8_MAX * MLX5E_1GB_TO_KB; for (i = 0; i <= mlx5_max_tc(mdev); i++) { if (!maxrate->tc_maxrate[i]) { max_bw_unit[i] = MLX5_BW_NO_LIMIT; continue; } - if (maxrate->tc_maxrate[i] <= upper_limit_mbps) { + if (maxrate->tc_maxrate[i] <= upper_limit_100mbps) { max_bw_value[i] = div_u64(maxrate->tc_maxrate[i], - MLX5E_100MB); + MLX5E_100MB_TO_KB); max_bw_value[i] = max_bw_value[i] ? 
max_bw_value[i] : 1; max_bw_unit[i] = MLX5_100_MBPS_UNIT; } else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) { max_bw_value[i] = div_u64(maxrate->tc_maxrate[i], - MLX5E_1GB); + MLX5E_1GB_TO_KB); max_bw_unit[i] = MLX5_GBPS_UNIT; } else { netdev_err(netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 893e1380a7c9..d3fef1e7e2f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -261,6 +261,11 @@ void mlx5e_build_ptys2ethtool_map(void) ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT, ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1600TAUI_8_1600TBASE_CR8_KR8, ext, + ETHTOOL_LINK_MODE_1600000baseCR8_Full_BIT, + ETHTOOL_LINK_MODE_1600000baseKR8_Full_BIT, + ETHTOOL_LINK_MODE_1600000baseDR8_Full_BIT, + ETHTOOL_LINK_MODE_1600000baseDR8_2_Full_BIT); } static void mlx5e_ethtool_get_speed_arr(bool ext, @@ -2027,7 +2032,7 @@ static int mlx5e_get_module_info(struct net_device *netdev, int size_read = 0; u8 data[4] = {0}; - size_read = mlx5_query_module_eeprom(dev, 0, 2, data); + size_read = mlx5_query_module_eeprom(dev, 0, 2, data, NULL); if (size_read < 2) return -EIO; @@ -2069,6 +2074,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, struct mlx5_core_dev *mdev = priv->mdev; int offset = ee->offset; int size_read; + u8 status = 0; int i = 0; if (!ee->len) @@ -2078,15 +2084,15 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, while (i < ee->len) { size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i, - data + i); - + data + i, &status); if (!size_read) /* Done reading */ return 0; if (size_read < 0) { - netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n", - __func__, size_read); + netdev_err(netdev, + "%s: mlx5_query_eeprom failed:0x%x, status %u\n", + __func__, size_read, status); return size_read; } @@ -2106,6 +2112,7 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev, struct mlx5_core_dev *mdev = priv->mdev; u8 *data = page_data->data; int size_read; + u8 status = 0; int i = 0; if (!page_data->length) @@ -2119,7 +2126,8 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev, query.page = page_data->page; while (i < page_data->length) { query.size = page_data->length - i; - size_read = mlx5_query_module_eeprom_by_page(mdev, &query, data + i); + size_read = mlx5_query_module_eeprom_by_page(mdev, &query, + data + i, &status); /* Done reading, return how many bytes was read */ if (!size_read) @@ -2128,8 +2136,8 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev, if (size_read < 0) { NL_SET_ERR_MSG_FMT_MOD( extack, - "Query module eeprom by page failed, read %u bytes, err %d", - i, size_read); + "Query module eeprom by page failed, read %u bytes, err %d, status %u", + i, size_read, status); return size_read; } @@ -2271,7 +2279,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; - rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE; + rx_filter = priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE; err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter); if (err) return err; @@ -2286,7 +2294,6 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; 
struct mlx5e_params new_params; - int err; if (enable) { /* Checking the regular RQ here; mlx5e_validate_xsk_param called @@ -2307,14 +2314,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable); mlx5e_set_rq_type(mdev, &new_params); - err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); - if (err) - return err; - - /* update XDP supported features */ - mlx5e_set_xdp_feature(netdev); - - return 0; + return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); } static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) @@ -2492,21 +2492,18 @@ static int mlx5e_set_rxfh_fields(struct net_device *dev, return mlx5e_ethtool_set_rxfh_fields(priv, cmd, extack); } +static u32 mlx5e_get_rx_ring_count(struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + return priv->channels.params.num_channels; +} + static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) { struct mlx5e_priv *priv = netdev_priv(dev); - /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part - * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc, - * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc - * is compiled out via CONFIG_MLX5_EN_RXNFC=n. - */ - if (info->cmd == ETHTOOL_GRXRINGS) { - info->data = priv->channels.params.num_channels; - return 0; - } - return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs); } @@ -2766,6 +2763,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .remove_rxfh_context = mlx5e_remove_rxfh_context, .get_rxnfc = mlx5e_get_rxnfc, .set_rxnfc = mlx5e_set_rxnfc, + .get_rx_ring_count = mlx5e_get_rx_ring_count, .get_tunable = mlx5e_get_tunable, .set_tunable = mlx5e_set_tunable, .get_pause_stats = mlx5e_get_pause_stats, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 79916f1abd14..63bdef5b4ba5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -704,7 +704,7 @@ static int validate_flow(struct mlx5e_priv *priv, num_tuples += ret; break; default: - return -ENOTSUPP; + return -EOPNOTSUPP; } if ((fs->flow_type & FLOW_EXT)) { ret = validate_vlan(fs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5e17eae81f4b..6168f0814414 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -735,7 +735,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param rq->pdev = c->pdev; rq->netdev = c->netdev; rq->priv = c->priv; - rq->tstamp = c->tstamp; + rq->hwtstamp_config = &c->priv->hwtstamp_config; rq->clock = mdev->clock; rq->icosq = &c->icosq; rq->ix = c->ix; @@ -2612,7 +2612,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, if (err) goto err_close_icosq_cq; - if (netdev_ops->ndo_xdp_xmit) { + if (netdev_ops->ndo_xdp_xmit && c->xdp) { c->xdpsq = mlx5e_open_xdpredirect_sq(c, params, cparam, &ccp); if (IS_ERR(c->xdpsq)) { err = PTR_ERR(c->xdpsq); @@ -2816,7 +2816,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->priv = priv; c->mdev = mdev; - c->tstamp = &priv->tstamp; c->ix = ix; c->vec_ix = vec_ix; c->sd_ix = mlx5_sd_ch_ix_get_dev_ix(mdev, ix); @@ -3366,16 +3365,17 @@ static int mlx5e_switch_priv_params(struct mlx5e_priv *priv, } } + 
mlx5e_set_xdp_feature(priv); return 0; } static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *old_chs, struct mlx5e_channels *new_chs, mlx5e_fp_preactivate preactivate, void *context) { struct net_device *netdev = priv->netdev; - struct mlx5e_channels old_chs; int carrier_ok; int err = 0; @@ -3384,7 +3384,6 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, mlx5e_deactivate_priv_channels(priv); - old_chs = priv->channels; priv->channels = *new_chs; /* New channels are ready to roll, call the preactivate hook if needed @@ -3393,12 +3392,14 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, if (preactivate) { err = preactivate(priv, context); if (err) { - priv->channels = old_chs; + priv->channels = *old_chs; goto out; } } - mlx5e_close_channels(&old_chs); + mlx5e_set_xdp_feature(priv); + if (!MLX5_CAP_GEN(priv->mdev, tis_tir_td_order)) + mlx5e_close_channels(old_chs); priv->profile->update_rx(priv); mlx5e_selq_apply(&priv->selq); @@ -3417,16 +3418,20 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv, mlx5e_fp_preactivate preactivate, void *context, bool reset) { - struct mlx5e_channels *new_chs; + struct mlx5e_channels *old_chs, *new_chs; int err; reset &= test_bit(MLX5E_STATE_OPENED, &priv->state); if (!reset) return mlx5e_switch_priv_params(priv, params, preactivate, context); + old_chs = kzalloc(sizeof(*old_chs), GFP_KERNEL); new_chs = kzalloc(sizeof(*new_chs), GFP_KERNEL); - if (!new_chs) - return -ENOMEM; + if (!old_chs || !new_chs) { + err = -ENOMEM; + goto err_free_chs; + } + new_chs->params = *params; mlx5e_selq_prepare_params(&priv->selq, &new_chs->params); @@ -3435,11 +3440,18 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv, if (err) goto err_cancel_selq; - err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context); + *old_chs = priv->channels; + + err = mlx5e_switch_priv_channels(priv, old_chs, new_chs, + preactivate, context); if (err) goto err_close; + if (MLX5_CAP_GEN(priv->mdev, tis_tir_td_order)) + mlx5e_close_channels(old_chs); + kfree(new_chs); + kfree(old_chs); return 0; err_close: @@ -3447,7 +3459,9 @@ err_close: err_cancel_selq: mlx5e_selq_cancel(&priv->selq); +err_free_chs: kfree(new_chs); + kfree(old_chs); return err; } @@ -3458,8 +3472,8 @@ int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) void mlx5e_timestamp_init(struct mlx5e_priv *priv) { - priv->tstamp.tx_type = HWTSTAMP_TX_OFF; - priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; + priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; + priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; } static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev, @@ -4012,6 +4026,11 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) s->rx_bytes += rq_stats->bytes; s->multicast += rq_stats->mcast_packets; } + +#ifdef CONFIG_MLX5_EN_PSP + if (priv->psp) + s->tx_dropped += atomic_read(&priv->psp->tx_drop); +#endif } void @@ -4392,23 +4411,22 @@ static int mlx5e_handle_feature(struct net_device *netdev, return 0; } -void mlx5e_set_xdp_feature(struct net_device *netdev) +void mlx5e_set_xdp_feature(struct mlx5e_priv *priv) { - struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_params *params = &priv->channels.params; - xdp_features_t val; + struct net_device *netdev = priv->netdev; + xdp_features_t val = 0; - if (!netdev->netdev_ops->ndo_bpf || - params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) { - xdp_set_features_flag_locked(netdev, 0); - return; - } + if (netdev->netdev_ops->ndo_bpf && + 
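/*
 * [Editor's note] Minimal sketch of the PSP drop accounting wired up above:
 * the TX path bumps an atomic counter on psp_dev_encapsulate() failure, and
 * mlx5e_fold_sw_stats64() rolls it into rtnl_link_stats64.tx_dropped. The
 * struct and function names here are illustrative, not the driver's.
 */
#include <linux/atomic.h>

struct example_psp {
	atomic_t tx_drop;
};

static void example_tx_encap_failed(struct example_psp *psp)
{
	atomic_inc(&psp->tx_drop);	/* mirrors priv->psp->tx_drop */
}

static void example_fold_stats(struct example_psp *psp, u64 *tx_dropped)
{
	*tx_dropped += atomic_read(&psp->tx_drop);
}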
params->packet_merge.type == MLX5E_PACKET_MERGE_NONE) + val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY | + NETDEV_XDP_ACT_RX_SG; + + if (netdev->netdev_ops->ndo_xdp_xmit && params->xdp_prog) + val |= NETDEV_XDP_ACT_NDO_XMIT | + NETDEV_XDP_ACT_NDO_XMIT_SG; - val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_XSK_ZEROCOPY | - NETDEV_XDP_ACT_RX_SG | - NETDEV_XDP_ACT_NDO_XMIT | - NETDEV_XDP_ACT_NDO_XMIT_SG; xdp_set_features_flag_locked(netdev, val); } @@ -4444,9 +4462,6 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) return -EINVAL; } - /* update XDP supported features */ - mlx5e_set_xdp_feature(netdev); - return 0; } @@ -4754,22 +4769,23 @@ static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx) &new_params.ptp_rx, true); } -int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) +int mlx5e_hwtstamp_set(struct mlx5e_priv *priv, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; bool rx_cqe_compress_def; bool ptp_rx; int err; if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) || - (mlx5_clock_get_ptp_index(priv->mdev) == -1)) + (mlx5_clock_get_ptp_index(priv->mdev) == -1)) { + NL_SET_ERR_MSG_MOD(extack, + "Timestamps are not supported on this device"); return -EOPNOTSUPP; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; + } /* TX HW timestamp */ - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; @@ -4781,7 +4797,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def; /* RX HW timestamp */ - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: ptp_rx = false; break; @@ -4800,7 +4816,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; /* ptp_rx is set if both HW TS is set and CQE * compression is set */ @@ -4813,47 +4829,50 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX)) err = mlx5e_hwstamp_config_no_ptp_rx(priv, - config.rx_filter != HWTSTAMP_FILTER_NONE); + config->rx_filter != HWTSTAMP_FILTER_NONE); else err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx); if (err) goto err_unlock; - memcpy(&priv->tstamp, &config, sizeof(config)); + priv->hwtstamp_config = *config; mutex_unlock(&priv->state_lock); /* might need to fix some features */ netdev_update_features(priv->netdev); - return copy_to_user(ifr->ifr_data, &config, - sizeof(config)) ? -EFAULT : 0; + return 0; err_unlock: mutex_unlock(&priv->state_lock); return err; } -int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr) +static int mlx5e_hwtstamp_set_ndo(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config *cfg = &priv->tstamp; + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_hwtstamp_set(priv, config, extack); +} +int mlx5e_hwtstamp_get(struct mlx5e_priv *priv, + struct kernel_hwtstamp_config *config) +{ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) return -EOPNOTSUPP; - return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? 
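/*
 * [Editor's note] The XDP feature mask built above, restated as a standalone
 * helper; a sketch assuming the netdev ops and params fields used in this
 * hunk. The change is that the NDO_XMIT bits are now advertised only while
 * an XDP program is actually attached.
 */
static xdp_features_t example_xdp_features(const struct net_device *netdev,
					   const struct mlx5e_params *params)
{
	xdp_features_t val = 0;

	if (netdev->netdev_ops->ndo_bpf &&
	    params->packet_merge.type == MLX5E_PACKET_MERGE_NONE)
		val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
		      NETDEV_XDP_ACT_XSK_ZEROCOPY | NETDEV_XDP_ACT_RX_SG;

	if (netdev->netdev_ops->ndo_xdp_xmit && params->xdp_prog)
		val |= NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG;

	return val;
}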
-EFAULT : 0; + *config = priv->hwtstamp_config; + + return 0; } -static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int mlx5e_hwtstamp_get_ndo(struct net_device *dev, + struct kernel_hwtstamp_config *config) { struct mlx5e_priv *priv = netdev_priv(dev); - switch (cmd) { - case SIOCSHWTSTAMP: - return mlx5e_hwstamp_set(priv, ifr); - case SIOCGHWTSTAMP: - return mlx5e_hwstamp_get(priv, ifr); - default: - return -EOPNOTSUPP; - } + return mlx5e_hwtstamp_get(priv, config); } #ifdef CONFIG_MLX5_ESWITCH @@ -5294,13 +5313,14 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_set_features = mlx5e_set_features, .ndo_fix_features = mlx5e_fix_features, .ndo_change_mtu = mlx5e_change_nic_mtu, - .ndo_eth_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_features_check = mlx5e_features_check, .ndo_tx_timeout = mlx5e_tx_timeout, .ndo_bpf = mlx5e_xdp, .ndo_xdp_xmit = mlx5e_xdp_xmit, .ndo_xsk_wakeup = mlx5e_xsk_wakeup, + .ndo_hwtstamp_get = mlx5e_hwtstamp_get_ndo, + .ndo_hwtstamp_set = mlx5e_hwtstamp_set_ndo, #ifdef CONFIG_MLX5_EN_ARFS .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@ -5837,7 +5857,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->netmem_tx = true; netif_set_tso_max_size(netdev, GSO_MAX_SIZE); - mlx5e_set_xdp_feature(netdev); + mlx5e_set_xdp_feature(priv); mlx5e_set_netdev_dev_addr(netdev); mlx5e_macsec_build_netdev(priv); mlx5e_ipsec_build_netdev(priv); @@ -5935,7 +5955,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, mlx5e_psp_register(priv); /* update XDP supported features */ - mlx5e_set_xdp_feature(netdev); + mlx5e_set_xdp_feature(priv); if (take_rtnl) rtnl_unlock(); @@ -6145,7 +6165,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) static int mlx5e_update_nic_rx(struct mlx5e_priv *priv) { - return mlx5e_refresh_tirs(priv, false, false); + return mlx5e_refresh_tirs(priv->mdev, false, false); } static const struct mlx5e_profile mlx5e_nic_profile = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 0335ca8277ef..ee9595109649 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -867,7 +867,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev) if (take_rtnl) rtnl_lock(); /* update XDP supported features */ - mlx5e_set_xdp_feature(netdev); + mlx5e_set_xdp_feature(priv); if (take_rtnl) rtnl_unlock(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 687cf123211d..1f6930c77437 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1604,7 +1604,7 @@ static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, stats->lro_bytes += cqe_bcnt; } - if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) + if (unlikely(mlx5e_rx_hw_stamp(rq->hwtstamp_config))) skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time, rq->clock, get_cqe_ts(cqe)); skb_record_rx_queue(skb, rq->ix); @@ -2656,7 +2656,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { - struct hwtstamp_config *tstamp; struct mlx5e_rq_stats *stats; struct net_device *netdev; struct mlx5e_priv *priv; @@ -2680,7 +2679,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, } priv = mlx5i_epriv(netdev); - tstamp = &priv->tstamp; stats = &priv->channel_stats[rq->ix]->rq; flags_rqpn = 
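/*
 * [Editor's note] Shape of the SIOC[GS]HWTSTAMP -> ndo_hwtstamp_{get,set}
 * migration performed above, as a generic sketch: the core now hands the
 * driver a parsed struct kernel_hwtstamp_config instead of a raw ifreq, so
 * the copy_from_user()/copy_to_user() boilerplate being deleted here is no
 * longer the driver's job.
 */
static int example_hwtstamp_get(struct net_device *dev,
				struct kernel_hwtstamp_config *cfg)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	*cfg = priv->hwtstamp_config;	/* no copy_to_user() needed */
	return 0;
}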
be32_to_cpu(cqe->flags_rqpn); @@ -2716,7 +2714,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, stats->csum_none++; } - if (unlikely(mlx5e_rx_hw_stamp(tstamp))) + if (unlikely(mlx5e_rx_hw_stamp(&priv->hwtstamp_config))) skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time, rq->clock, get_cqe_ts(cqe)); skb_record_rx_queue(skb, rq->ix); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 2f7a543feca6..fcad464bc4d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -214,7 +214,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv, return err; } - err = mlx5e_refresh_tirs(priv, true, false); + err = mlx5e_modify_tirs_lb(priv->mdev, true, false); if (err) goto out; @@ -243,7 +243,7 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, mlx5_nic_vport_update_local_lb(priv->mdev, false); dev_remove_pack(&lbtp->pt); - mlx5e_refresh_tirs(priv, false, false); + mlx5e_modify_tirs_lb(priv->mdev, false, false); } static int mlx5e_cond_loopback(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 00c2763e57ca..a8773b2342c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3614,15 +3614,11 @@ static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) { struct mlx5_core_dev *fmdev, *pmdev; - u64 fsystem_guid, psystem_guid; fmdev = priv->mdev; pmdev = peer_priv->mdev; - fsystem_guid = mlx5_query_nic_system_image_guid(fmdev); - psystem_guid = mlx5_query_nic_system_image_guid(pmdev); - - return (fsystem_guid == psystem_guid); + return mlx5_same_hw_devs(fmdev, pmdev); } static int @@ -5237,10 +5233,11 @@ static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv) int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs); + u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; struct mlx5_core_dev *dev = priv->mdev; struct mapping_ctx *chains_mapping; struct mlx5_chains_attr attr = {}; - u64 mapping_id; + u8 id_len; int err; mlx5e_mod_hdr_tbl_init(&tc->mod_hdr); @@ -5256,11 +5253,13 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key); lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0); - mapping_id = mlx5_query_nic_system_image_guid(dev); + mlx5_query_nic_sw_system_image_guid(dev, mapping_id, &id_len); - chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, + chains_mapping = mapping_create_for_id(mapping_id, id_len, + MAPPING_TYPE_CHAIN, sizeof(struct mlx5_mapped_obj), - MLX5E_TC_TABLE_CHAIN_TAG_MASK, true); + MLX5E_TC_TABLE_CHAIN_TAG_MASK, + true); if (IS_ERR(chains_mapping)) { err = PTR_ERR(chains_mapping); @@ -5391,14 +5390,15 @@ void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) { const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts); + u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; struct mlx5_devcom_match_attr attr = {}; struct netdev_phys_item_id ppid; struct mlx5e_rep_priv *rpriv; struct mapping_ctx *mapping; struct mlx5_eswitch *esw; struct mlx5e_priv *priv; - u64 mapping_id; int err = 0; + u8 id_len; rpriv = 
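/*
 * [Editor's note] The mapping-id change above, condensed: the id is now a
 * byte array plus length rather than a u64 system image guid. A sketch
 * assuming the helpers introduced in this series; the function name is
 * illustrative.
 */
static struct mapping_ctx *example_create_mapping(struct mlx5_core_dev *dev)
{
	u8 id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
	u8 id_len;

	mlx5_query_nic_sw_system_image_guid(dev, id, &id_len);
	return mapping_create_for_id(id, id_len, MAPPING_TYPE_CHAIN,
				     sizeof(struct mlx5_mapped_obj),
				     MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
}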
container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); priv = netdev_priv(rpriv->netdev); @@ -5416,9 +5416,9 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act); - mapping_id = mlx5_query_nic_system_image_guid(esw->dev); + mlx5_query_nic_sw_system_image_guid(esw->dev, mapping_id, &id_len); - mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL, + mapping = mapping_create_for_id(mapping_id, id_len, MAPPING_TYPE_TUNNEL, sizeof(struct tunnel_match_key), TUNNEL_INFO_BITS_MASK, true); @@ -5431,8 +5431,10 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) /* Two last values are reserved for stack devices slow path table mark * and bridge ingress push mark. */ - mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS, - sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true); + mapping = mapping_create_for_id(mapping_id, id_len, + MAPPING_TYPE_TUNNEL_ENC_OPTS, + sz_enc_opts, ENC_OPTS_BITS_MASK - 2, + true); if (IS_ERR(mapping)) { err = PTR_ERR(mapping); goto err_enc_opts_mapping; @@ -5453,7 +5455,7 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) err = netif_get_port_parent_id(priv->netdev, &ppid, false); if (!err) { - memcpy(&attr.key.val, &ppid.id, sizeof(attr.key.val)); + memcpy(&attr.key.buf, &ppid.id, ppid.id_len); attr.flags = MLX5_DEVCOM_MATCH_FLAGS_NS; attr.net = mlx5_core_net(esw->dev); mlx5_esw_offloads_devcom_init(esw, &attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 2702b3885f06..14884b9ea7f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -755,7 +755,7 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb, hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts); if (sq->ptpsq) { mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP, - hwts.hwtstamp, sq->ptpsq->cq_stats); + hwts.hwtstamp, sq->ptpsq); } else { skb_tstamp_tx(skb, &hwts); sq->stats->timestamps++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c index 0091ba697bae..250af09b5af2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c @@ -4,13 +4,8 @@ #include "fs_core.h" #include "eswitch.h" -enum { - MLX5_ADJ_VPORT_DISCONNECT = 0x0, - MLX5_ADJ_VPORT_CONNECT = 0x1, -}; - -static int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, - u16 vport, bool connect) +int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, u16 vport, + bool connect) { u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {}; @@ -24,7 +19,7 @@ static int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, MLX5_SET(modify_vport_state_in, in, egress_connect_valid, 1); MLX5_SET(modify_vport_state_in, in, ingress_connect, connect); MLX5_SET(modify_vport_state_in, in, egress_connect, connect); - + MLX5_SET(modify_vport_state_in, in, admin_state, connect); return mlx5_cmd_exec_in(dev, modify_vport_state, in); } @@ -96,7 +91,6 @@ static int mlx5_esw_adj_vport_create(struct mlx5_eswitch *esw, u16 vhca_id, if (err) goto acl_ns_remove; - mlx5_esw_adj_vport_modify(esw->dev, vport_num, MLX5_ADJ_VPORT_CONNECT); return 0; acl_ns_remove: @@ -117,8 +111,7 @@ static void mlx5_esw_adj_vport_destroy(struct mlx5_eswitch *esw, esw_debug(esw->dev, "Destroying adjacent vport %d for vhca_id 
0x%x\n", vport_num, vport->vhca_id); - mlx5_esw_adj_vport_modify(esw->dev, vport_num, - MLX5_ADJ_VPORT_DISCONNECT); + mlx5_esw_offloads_rep_remove(esw, vport); mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering, vport->index); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index cf88a106d80d..89a58dee50b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -7,11 +7,7 @@ static void mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid) { - u64 parent_id; - - parent_id = mlx5_query_nic_system_image_guid(dev); - ppid->id_len = sizeof(parent_id); - memcpy(ppid->id, &parent_id, sizeof(parent_id)); + mlx5_query_nic_sw_system_image_guid(dev, ppid->id, &ppid->id_len); } static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 56e6f54b1e2e..4278bcb04c72 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -341,13 +341,6 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw, if (max_guarantee) return max_t(u32, max_guarantee / fw_max_bw_share, 1); - /* If nodes max min_rate divider is 0 but their parent has bw_share - * configured, then set bw_share for nodes to minimal value. - */ - - if (parent && parent->bw_share) - return 1; - /* If the node nodes has min_rate configured, a divider of 0 sets all * nodes' bw_share to 0, effectively disabling min guarantees. */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index e2ffb87b94cb..4b7a1ce7f406 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -875,13 +875,10 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) vport_num, 1, vport->info.link_state); - /* Host PF has its own mac/guid. */ - if (vport_num) { - mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, - vport->info.mac); - mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, - vport->info.node_guid); - } + mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, + vport->info.mac); + mlx5_query_nic_vport_node_guid(esw->dev, vport_num, true, + &vport->info.node_guid); flags = (vport->info.vlan || vport->info.qos) ? SET_VLAN_STRIP | SET_VLAN_INSERT : 0; @@ -947,12 +944,6 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport, goto err_vhca_mapping; } - /* External controller host PF has factory programmed MAC. - * Read it from the device. 
- */ - if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) - mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac); - esw_vport_change_handle_locked(vport); esw->enabled_vports++; @@ -1483,7 +1474,7 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode) info.new_mode = mode; - blocking_notifier_call_chain(&esw->n_head, 0, &info); + blocking_notifier_call_chain(&esw->dev->priv.esw_n_head, 0, &info); } static int mlx5_esw_egress_acls_init(struct mlx5_core_dev *dev) @@ -1978,7 +1969,8 @@ static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id, } static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); @@ -2059,7 +2051,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; else esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; - BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); esw_info(dev, "Total vports %d, per vport: max uc(%d) max mc(%d)\n", @@ -2235,6 +2226,9 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, ivi->vf = vport - 1; mutex_lock(&esw->state_lock); + + mlx5_query_nic_vport_mac_address(esw->dev, vport, true, + evport->info.mac); ether_addr_copy(ivi->mac, evport->info.mac); ivi->linkstate = evport->info.link_state; ivi->vlan = evport->info.vlan; @@ -2385,14 +2379,16 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS); } -int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb) +int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev, + struct notifier_block *nb) { - return blocking_notifier_chain_register(&esw->n_head, nb); + return blocking_notifier_chain_register(&dev->priv.esw_n_head, nb); } -void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb) +void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev, + struct notifier_block *nb) { - blocking_notifier_chain_unregister(&esw->n_head, nb); + blocking_notifier_chain_unregister(&dev->priv.esw_n_head, nb); } /** diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 16eb99aba2a7..ad1073f7b79f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -264,6 +264,9 @@ struct mlx5_eswitch_fdb { struct offloads_fdb { struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *drop_root; + struct mlx5_flow_handle *drop_root_rule; + struct mlx5_fc *drop_root_fc; struct mlx5_flow_table *tc_miss_table; struct mlx5_flow_table *slow_fdb; struct mlx5_flow_group *send_to_vport_grp; @@ -392,6 +395,7 @@ struct mlx5_eswitch { struct mlx5_esw_offload offloads; u32 last_vport_idx; int mode; + bool offloads_inactive; u16 manager_vport; u16 first_host_vport; u8 num_peers; @@ -399,7 +403,6 @@ struct mlx5_eswitch { struct { u32 large_group_num; } params; - struct blocking_notifier_head n_head; struct xarray paired; struct mlx5_devcom_comp_dev *devcom; u16 enabled_ipsec_vf_count; @@ -634,6 +637,8 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev); void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw); void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw); +int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev, u16 vport, + bool 
connect); #define MLX5_DEBUG_ESWITCH_MASK BIT(3) @@ -858,8 +863,10 @@ struct mlx5_esw_event_info { u16 new_mode; }; -int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n); -void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n); +int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev, + struct notifier_block *n); +void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev, + struct notifier_block *n); bool mlx5_esw_hold(struct mlx5_core_dev *dev); void mlx5_esw_release(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 44a142a041b2..8de6c7f6c294 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1577,6 +1577,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb) attr.max_grp_num = esw->params.large_group_num; attr.default_ft = miss_fdb; attr.mapping = esw->offloads.reg_c0_obj_pool; + attr.fs_base_prio = FDB_BYPASS_PATH; chains = mlx5_chains_create(dev, &attr); if (IS_ERR(chains)) { @@ -2355,6 +2356,131 @@ static void esw_mode_change(struct mlx5_eswitch *esw, u16 mode) mlx5_devcom_comp_unlock(esw->dev->priv.hca_devcom_comp); } +static void mlx5_esw_fdb_drop_destroy(struct mlx5_eswitch *esw) +{ + if (!esw->fdb_table.offloads.drop_root) + return; + + esw_debug(esw->dev, "Destroying FDB drop root table %#x fc %#x\n", + esw->fdb_table.offloads.drop_root->id, + esw->fdb_table.offloads.drop_root_fc->id); + mlx5_del_flow_rules(esw->fdb_table.offloads.drop_root_rule); + /* Don't free flow counter here, can be reused on a later activation */ + mlx5_destroy_flow_table(esw->fdb_table.offloads.drop_root); + esw->fdb_table.offloads.drop_root_rule = NULL; + esw->fdb_table.offloads.drop_root = NULL; +} + +static int mlx5_esw_fdb_drop_create(struct mlx5_eswitch *esw) +{ + struct mlx5_flow_destination drop_fc_dst = {}; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_destination *dst = NULL; + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *flow_rule; + struct mlx5_flow_table *table; + int err = 0, dst_num = 0; + + if (esw->fdb_table.offloads.drop_root) + return 0; + + root_ns = esw->fdb_table.offloads.ns; + + ft_attr.prio = FDB_DROP_ROOT; + ft_attr.max_fte = 1; + ft_attr.autogroup.max_num_groups = 1; + table = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); + if (IS_ERR(table)) { + esw_warn(dev, "Failed to create fdb drop root table, err %pe\n", + table); + return PTR_ERR(table); + } + + /* Drop FC reusable, create once on first deactivation of FDB */ + if (!esw->fdb_table.offloads.drop_root_fc) { + struct mlx5_fc *counter = mlx5_fc_create(dev, 0); + + err = PTR_ERR_OR_ZERO(counter); + if (err) + esw_warn(esw->dev, "create fdb drop fc err %d\n", err); + else + esw->fdb_table.offloads.drop_root_fc = counter; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + + if (esw->fdb_table.offloads.drop_root_fc) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + drop_fc_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + drop_fc_dst.counter = esw->fdb_table.offloads.drop_root_fc; + dst = &drop_fc_dst; + dst_num++; + } + + flow_rule = mlx5_add_flow_rules(table, NULL, &flow_act, dst, dst_num); + err = PTR_ERR_OR_ZERO(flow_rule); + if (err) { + esw_warn(esw->dev, + "fs offloads: 
Failed to add vport rx drop rule err %d\n", + err); + goto err_flow_rule; + } + + esw->fdb_table.offloads.drop_root = table; + esw->fdb_table.offloads.drop_root_rule = flow_rule; + esw_debug(esw->dev, "Created FDB drop root table %#x fc %#x\n", + table->id, dst ? dst->counter->id : 0); + return 0; + +err_flow_rule: + /* no need to free drop fc, esw_offloads_steering_cleanup will do it */ + mlx5_destroy_flow_table(table); + return err; +} + +static void mlx5_esw_fdb_active(struct mlx5_eswitch *esw) +{ + struct mlx5_vport *vport; + unsigned long i; + + mlx5_esw_fdb_drop_destroy(esw); + mlx5_mpfs_enable(esw->dev); + + mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) { + if (!vport->adjacent) + continue; + esw_debug(esw->dev, "Connecting vport %d to eswitch\n", + vport->vport); + mlx5_esw_adj_vport_modify(esw->dev, vport->vport, true); + } + + esw->offloads_inactive = false; + esw_warn(esw->dev, "MPFS/FDB active\n"); +} + +static void mlx5_esw_fdb_inactive(struct mlx5_eswitch *esw) +{ + struct mlx5_vport *vport; + unsigned long i; + + mlx5_mpfs_disable(esw->dev); + mlx5_esw_fdb_drop_create(esw); + + mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) { + if (!vport->adjacent) + continue; + esw_debug(esw->dev, "Disconnecting vport %u from eswitch\n", + vport->vport); + + mlx5_esw_adj_vport_modify(esw->dev, vport->vport, false); + } + + esw->offloads_inactive = true; + esw_warn(esw->dev, "MPFS/FDB inactive\n"); +} + static int esw_offloads_start(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { @@ -2492,7 +2618,8 @@ done: } static int esw_port_metadata_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); @@ -3438,6 +3565,10 @@ create_indir_err: static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) { + mlx5_esw_fdb_drop_destroy(esw); + if (esw->fdb_table.offloads.drop_root_fc) + mlx5_fc_destroy(esw->dev, esw->fdb_table.offloads.drop_root_fc); + esw->fdb_table.offloads.drop_root_fc = NULL; esw_destroy_vport_rx_drop_rule(esw); esw_destroy_vport_rx_drop_group(esw); esw_destroy_vport_rx_group(esw); @@ -3556,10 +3687,11 @@ bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 cont int esw_offloads_enable(struct mlx5_eswitch *esw) { + u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES]; struct mapping_ctx *reg_c0_obj_pool; struct mlx5_vport *vport; unsigned long i; - u64 mapping_id; + u8 id_len; int err; mutex_init(&esw->offloads.termtbl_mutex); @@ -3581,9 +3713,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_vport_metadata; - mapping_id = mlx5_query_nic_system_image_guid(esw->dev); + mlx5_query_nic_sw_system_image_guid(esw->dev, mapping_id, &id_len); - reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, + reg_c0_obj_pool = mapping_create_for_id(mapping_id, id_len, + MAPPING_TYPE_CHAIN, sizeof(struct mlx5_mapped_obj), ESW_REG_C0_USER_DATA_METADATA_MASK, true); @@ -3598,6 +3731,11 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_steering_init; + if (esw->offloads_inactive) + mlx5_esw_fdb_inactive(esw); + else + mlx5_esw_fdb_active(esw); + /* Representor will control the vport link state */ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; @@ -3664,6 +3802,9 @@ void esw_offloads_disable(struct mlx5_eswitch *esw) esw_offloads_metadata_uninit(esw); mlx5_rdma_disable_roce(esw->dev); 
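/*
 * [Editor's note] Condensed view of the FDB active/inactive toggle wired up
 * above; a sketch, not the full implementation: inactive mode disables MPFS
 * and installs a catch-all DROP (+COUNT) rule at the new FDB_DROP_ROOT
 * priority, active mode tears the drop table down, re-enables MPFS, and
 * reconnects adjacent vports.
 */
static void example_set_fdb_state(struct mlx5_eswitch *esw, bool active)
{
	if (active)
		mlx5_esw_fdb_active(esw);	/* drop table gone, MPFS on */
	else
		mlx5_esw_fdb_inactive(esw);	/* MPFS off, drop table in */
}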
mlx5_esw_adjacent_vhcas_cleanup(esw); + /* must be done after vhcas cleanup to avoid adjacent vports connect */ + if (esw->offloads_inactive) + mlx5_esw_fdb_active(esw); /* legacy mode always active */ mutex_destroy(&esw->offloads.termtbl_mutex); } @@ -3674,6 +3815,7 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) *mlx5_mode = MLX5_ESWITCH_LEGACY; break; case DEVLINK_ESWITCH_MODE_SWITCHDEV: + case DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE: *mlx5_mode = MLX5_ESWITCH_OFFLOADS; break; default: @@ -3683,14 +3825,17 @@ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) return 0; } -static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) +static int esw_mode_to_devlink(struct mlx5_eswitch *esw, u16 *mode) { - switch (mlx5_mode) { + switch (esw->mode) { case MLX5_ESWITCH_LEGACY: *mode = DEVLINK_ESWITCH_MODE_LEGACY; break; case MLX5_ESWITCH_OFFLOADS: - *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + if (esw->offloads_inactive) + *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE; + else + *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; break; default: return -EINVAL; @@ -3796,6 +3941,45 @@ static bool mlx5_devlink_netdev_netns_immutable_set(struct devlink *devlink, return ret; } +/* Returns true when only changing between active and inactive switchdev mode */ +static bool mlx5_devlink_switchdev_active_mode_change(struct mlx5_eswitch *esw, + u16 devlink_mode) +{ + /* current mode is not switchdev */ + if (esw->mode != MLX5_ESWITCH_OFFLOADS) + return false; + + /* new mode is not switchdev */ + if (devlink_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV && + devlink_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE) + return false; + + /* already inactive: no change in current state */ + if (devlink_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE && + esw->offloads_inactive) + return false; + + /* already active: no change in current state */ + if (devlink_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && + !esw->offloads_inactive) + return false; + + down_write(&esw->mode_lock); + esw->offloads_inactive = !esw->offloads_inactive; + esw->eswitch_operation_in_progress = true; + up_write(&esw->mode_lock); + + if (esw->offloads_inactive) + mlx5_esw_fdb_inactive(esw); + else + mlx5_esw_fdb_active(esw); + + down_write(&esw->mode_lock); + esw->eswitch_operation_in_progress = false; + up_write(&esw->mode_lock); + return true; +} + int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack) { @@ -3810,12 +3994,16 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; - if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && mlx5_get_sd(esw->dev)) { + if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && mlx5_get_sd(esw->dev)) { NL_SET_ERR_MSG_MOD(extack, "Can't change E-Switch mode to switchdev when multi-PF netdev (Socket Direct) is configured."); return -EPERM; } + /* Avoid try_lock, active/inactive mode change is not restricted */ + if (mlx5_devlink_switchdev_active_mode_change(esw, mode)) + return 0; + mlx5_lag_disable_change(esw->dev); err = mlx5_esw_try_lock(esw); if (err < 0) { @@ -3838,7 +4026,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, esw->eswitch_operation_in_progress = true; up_write(&esw->mode_lock); - if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && + if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && !mlx5_devlink_netdev_netns_immutable_set(devlink, true)) { NL_SET_ERR_MSG_MOD(extack, "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's."); @@ 
-3846,25 +4034,27 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, goto skip; } - if (mode == DEVLINK_ESWITCH_MODE_LEGACY) + if (mlx5_mode == MLX5_ESWITCH_LEGACY) esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY; mlx5_eswitch_disable_locked(esw); - if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) { + if (mlx5_mode == MLX5_ESWITCH_OFFLOADS) { if (mlx5_devlink_trap_get_num_active(esw->dev)) { NL_SET_ERR_MSG_MOD(extack, "Can't change mode while devlink traps are active"); err = -EOPNOTSUPP; goto skip; } + esw->offloads_inactive = + (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV_INACTIVE); err = esw_offloads_start(esw, extack); - } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) { + } else if (mlx5_mode == MLX5_ESWITCH_LEGACY) { err = esw_offloads_stop(esw, extack); } else { err = -EINVAL; } skip: - if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && err) + if (mlx5_mode == MLX5_ESWITCH_OFFLOADS && err) mlx5_devlink_netdev_netns_immutable_set(devlink, false); down_write(&esw->mode_lock); esw->eswitch_operation_in_progress = false; @@ -3883,7 +4073,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) if (IS_ERR(esw)) return PTR_ERR(esw); - return esw_mode_to_devlink(esw->mode, mode); + return esw_mode_to_devlink(esw, mode); } static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode, @@ -4302,6 +4492,9 @@ int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port, struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); mutex_lock(&esw->state_lock); + + mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, true, + vport->info.mac); ether_addr_copy(hw_addr, vport->info.mac); *hw_addr_len = ETH_ALEN; mutex_unlock(&esw->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index e5c1012921d2..1ec61164e6b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -211,7 +211,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps); if (!max_num_qps) { mlx5_fpga_err(fdev, "FPGA reports 0 QPs in SHELL_CAPS\n"); - err = -ENOTSUPP; + err = -EOPNOTSUPP; goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 1af76da8b132..ced747bef641 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -239,6 +239,10 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); MLX5_SET(set_flow_table_root_in, in, other_vport, !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); + MLX5_SET(set_flow_table_root_in, in, eswitch_owner_vhca_id, + ft->esw_owner_vhca_id); + MLX5_SET(set_flow_table_root_in, in, other_eswitch, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH)); err = mlx5_cmd_exec_in(dev, set_flow_table_root, in); if (!err && @@ -302,6 +306,10 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, MLX5_SET(create_flow_table_in, in, vport_number, ft->vport); MLX5_SET(create_flow_table_in, in, other_vport, !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); + MLX5_SET(create_flow_table_in, in, eswitch_owner_vhca_id, + ft->esw_owner_vhca_id); + MLX5_SET(create_flow_table_in, in, other_eswitch, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH)); MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en, en_decap); @@ -360,6 
+368,10 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns, MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport); MLX5_SET(destroy_flow_table_in, in, other_vport, !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); + MLX5_SET(destroy_flow_table_in, in, eswitch_owner_vhca_id, + ft->esw_owner_vhca_id); + MLX5_SET(destroy_flow_table_in, in, other_eswitch, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH)); err = mlx5_cmd_exec_in(dev, destroy_flow_table, in); if (!err) @@ -394,6 +406,10 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns, MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport); MLX5_SET(modify_flow_table_in, in, other_vport, !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); + MLX5_SET(modify_flow_table_in, in, eswitch_owner_vhca_id, + ft->esw_owner_vhca_id); + MLX5_SET(modify_flow_table_in, in, other_eswitch, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH)); MLX5_SET(modify_flow_table_in, in, modify_field_select, MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); if (next_ft) { @@ -429,6 +445,10 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns, MLX5_SET(create_flow_group_in, in, vport_number, ft->vport); MLX5_SET(create_flow_group_in, in, other_vport, !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); + MLX5_SET(create_flow_group_in, in, eswitch_owner_vhca_id, + ft->esw_owner_vhca_id); + MLX5_SET(create_flow_group_in, in, other_eswitch, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH)); err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out); if (!err) fg->id = MLX5_GET(create_flow_group_out, out, @@ -451,6 +471,10 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns, MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport); MLX5_SET(destroy_flow_group_in, in, other_vport, !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); + MLX5_SET(destroy_flow_group_in, in, eswitch_owner_vhca_id, + ft->esw_owner_vhca_id); + MLX5_SET(destroy_flow_group_in, in, other_eswitch, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH)); return mlx5_cmd_exec_in(dev, destroy_flow_group, in); } @@ -559,6 +583,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(set_fte_in, in, vport_number, ft->vport); MLX5_SET(set_fte_in, in, other_vport, !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); + MLX5_SET(set_fte_in, in, eswitch_owner_vhca_id, ft->esw_owner_vhca_id); + MLX5_SET(set_fte_in, in, other_eswitch, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH)); in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); MLX5_SET(flow_context, in_flow_context, group_id, group_id); @@ -788,6 +815,10 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns, MLX5_SET(delete_fte_in, in, vport_number, ft->vport); MLX5_SET(delete_fte_in, in, other_vport, !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); + MLX5_SET(delete_fte_in, in, eswitch_owner_vhca_id, + ft->esw_owner_vhca_id); + MLX5_SET(delete_fte_in, in, other_eswitch, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH)); return mlx5_cmd_exec_in(dev, delete_fte, in); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 2db3ffb0a2b2..0a6031a64c6f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -939,10 +939,10 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f return fg; } -static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, - enum fs_flow_table_type table_type, - enum 
fs_flow_table_op_mod op_mod, - u32 flags) +static struct mlx5_flow_table * +alloc_flow_table(struct mlx5_flow_table_attr *ft_attr, u16 vport, + enum fs_flow_table_type table_type, + enum fs_flow_table_op_mod op_mod) { struct mlx5_flow_table *ft; int ret; @@ -957,12 +957,13 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, return ERR_PTR(ret); } - ft->level = level; + ft->level = ft_attr->level; ft->node.type = FS_TYPE_FLOW_TABLE; ft->op_mod = op_mod; ft->type = table_type; ft->vport = vport; - ft->flags = flags; + ft->esw_owner_vhca_id = ft_attr->esw_owner_vhca_id; + ft->flags = ft_attr->flags; INIT_LIST_HEAD(&ft->fwd_rules); mutex_init(&ft->lock); @@ -1370,10 +1371,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa /* The level is related to the * priority level range. */ - ft = alloc_flow_table(ft_attr->level, - vport, - root->table_type, - op_mod, ft_attr->flags); + ft = alloc_flow_table(ft_attr, vport, root->table_type, op_mod); if (IS_ERR(ft)) { err = PTR_ERR(ft); goto unlock_root; @@ -3310,6 +3308,62 @@ err: return ret; } +static bool mlx5_fs_ns_is_empty(struct mlx5_flow_namespace *ns) +{ + struct fs_prio *iter_prio; + + fs_for_each_prio(iter_prio, ns) { + if (iter_prio->num_ft) + return false; + } + + return true; +} + +int mlx5_fs_set_root_dev(struct mlx5_core_dev *dev, + struct mlx5_core_dev *new_dev, + enum fs_flow_table_type table_type) +{ + struct mlx5_flow_root_namespace **root; + int total_vports; + int i; + + switch (table_type) { + case FS_FT_RDMA_TRANSPORT_TX: + root = dev->priv.steering->rdma_transport_tx_root_ns; + total_vports = dev->priv.steering->rdma_transport_tx_vports; + break; + case FS_FT_RDMA_TRANSPORT_RX: + root = dev->priv.steering->rdma_transport_rx_root_ns; + total_vports = dev->priv.steering->rdma_transport_rx_vports; + break; + default: + WARN_ON_ONCE(true); + return -EINVAL; + } + + for (i = 0; i < total_vports; i++) { + mutex_lock(&root[i]->chain_lock); + if (!mlx5_fs_ns_is_empty(&root[i]->ns)) { + mutex_unlock(&root[i]->chain_lock); + goto err; + } + root[i]->dev = new_dev; + mutex_unlock(&root[i]->chain_lock); + } + return 0; +err: + while (i--) { + mutex_lock(&root[i]->chain_lock); + root[i]->dev = dev; + mutex_unlock(&root[i]->chain_lock); + } + /* If you hit this error try destroying all flow tables and try again */ + mlx5_core_err(dev, "Failed to set root device for RDMA TRANSPORT\n"); + return -EINVAL; +} +EXPORT_SYMBOL(mlx5_fs_set_root_dev); + static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering) { struct mlx5_core_dev *dev = steering->dev; @@ -3520,6 +3574,11 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) if (!steering->fdb_root_ns) return -ENOMEM; + maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_DROP_ROOT, 1); + err = PTR_ERR_OR_ZERO(maj_prio); + if (err) + goto out_err; + err = create_fdb_bypass(steering); if (err) goto out_err; @@ -3774,7 +3833,8 @@ static int mlx5_fs_mode_set(struct devlink *devlink, u32 id, } static int mlx5_fs_mode_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 8458ce203dac..1c6591425260 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -103,24 +103,6 @@ enum 
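/*
 * [Editor's note] Usage sketch for the newly exported mlx5_fs_set_root_dev()
 * above: a caller (e.g. an RDMA driver) may retarget the RDMA_TRANSPORT root
 * namespaces at a peer device, but only while every priority in them is
 * empty; on any non-empty namespace the helper rolls back and returns
 * -EINVAL. The wrapper name is illustrative.
 */
static int example_retarget_rdma_tx(struct mlx5_core_dev *dev,
				    struct mlx5_core_dev *peer)
{
	return mlx5_fs_set_root_dev(dev, peer, FS_FT_RDMA_TRANSPORT_TX);
}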
fs_node_type { FS_TYPE_FLOW_DEST }; -enum fs_flow_table_type { - FS_FT_NIC_RX = 0x0, - FS_FT_NIC_TX = 0x1, - FS_FT_ESW_EGRESS_ACL = 0x2, - FS_FT_ESW_INGRESS_ACL = 0x3, - FS_FT_FDB = 0X4, - FS_FT_SNIFFER_RX = 0X5, - FS_FT_SNIFFER_TX = 0X6, - FS_FT_RDMA_RX = 0X7, - FS_FT_RDMA_TX = 0X8, - FS_FT_PORT_SEL = 0X9, - FS_FT_FDB_RX = 0xa, - FS_FT_FDB_TX = 0xb, - FS_FT_RDMA_TRANSPORT_RX = 0xd, - FS_FT_RDMA_TRANSPORT_TX = 0xe, - FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX, -}; - enum fs_flow_table_op_mod { FS_FT_OP_MOD_NORMAL, FS_FT_OP_MOD_LAG_DEMUX, @@ -205,6 +187,7 @@ struct mlx5_flow_table { }; u32 id; u16 vport; + u16 esw_owner_vhca_id; unsigned int max_fte; unsigned int level; enum fs_flow_table_type type; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 89e399606877..2bceb42c98cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -73,7 +73,8 @@ static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u3 } static int mlx5_fw_reset_enable_remote_dev_reset_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_fw_reset *fw_reset; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 4b3430ac3905..3b2f54ca30a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -266,21 +266,18 @@ static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return mlx5e_ethtool_set_rxnfc(priv, cmd); } +static u32 mlx5i_get_rx_ring_count(struct net_device *dev) +{ + struct mlx5e_priv *priv = mlx5i_epriv(dev); + + return priv->channels.params.num_channels; +} + static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) { struct mlx5e_priv *priv = mlx5i_epriv(dev); - /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part - * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc, - * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc - * is compiled out via CONFIG_MLX5_EN_RXNFC=n. 
- */ - if (info->cmd == ETHTOOL_GRXRINGS) { - info->data = priv->channels.params.num_channels; - return 0; - } - return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs); } @@ -304,6 +301,7 @@ const struct ethtool_ops mlx5i_ethtool_ops = { .set_rxfh_fields = mlx5i_set_rxfh_fields, .get_rxnfc = mlx5i_get_rxnfc, .set_rxnfc = mlx5i_set_rxnfc, + .get_rx_ring_count = mlx5i_get_rx_ring_count, .get_link_ksettings = mlx5i_get_link_ksettings, .get_link = ethtool_op_get_link, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 79ae3a51a4b3..0a6003fe60e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -45,6 +45,23 @@ static int mlx5i_open(struct net_device *netdev); static int mlx5i_close(struct net_device *netdev); static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu); +int mlx5i_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct mlx5e_priv *epriv = mlx5i_epriv(dev); + + return mlx5e_hwtstamp_set(epriv, config, extack); +} + +int mlx5i_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) +{ + struct mlx5e_priv *epriv = mlx5i_epriv(dev); + + return mlx5e_hwtstamp_get(epriv, config); +} + static const struct net_device_ops mlx5i_netdev_ops = { .ndo_open = mlx5i_open, .ndo_stop = mlx5i_close, @@ -52,7 +69,8 @@ static const struct net_device_ops mlx5i_netdev_ops = { .ndo_init = mlx5i_dev_init, .ndo_uninit = mlx5i_dev_cleanup, .ndo_change_mtu = mlx5i_change_mtu, - .ndo_eth_ioctl = mlx5i_ioctl, + .ndo_hwtstamp_get = mlx5i_hwtstamp_get, + .ndo_hwtstamp_set = mlx5i_hwtstamp_set, }; /* IPoIB mlx5 netdev profile */ @@ -316,7 +334,7 @@ void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn) int mlx5i_update_nic_rx(struct mlx5e_priv *priv) { - return mlx5e_refresh_tirs(priv, true, true); + return mlx5e_refresh_tirs(priv->mdev, true, true); } int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn) @@ -409,6 +427,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) static int mlx5i_init_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; + enum mlx5e_rx_res_features features; int err; priv->fs = mlx5e_fs_init(priv->profile, mdev, @@ -427,7 +446,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) goto err_destroy_q_counters; } - priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn, + features = MLX5E_RX_RES_FEATURE_SELF_LB_BLOCK; + priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, + priv->drop_rq.rqn, &priv->channels.params.packet_merge, priv->channels.params.num_channels); if (IS_ERR(priv->rx_res)) { @@ -557,20 +578,6 @@ int mlx5i_dev_init(struct net_device *dev) return 0; } -int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct mlx5e_priv *priv = mlx5i_epriv(dev); - - switch (cmd) { - case SIOCSHWTSTAMP: - return mlx5e_hwstamp_set(priv, ifr); - case SIOCGHWTSTAMP: - return mlx5e_hwstamp_get(priv, ifr); - default: - return -EOPNOTSUPP; - } -} - void mlx5i_dev_cleanup(struct net_device *dev) { struct mlx5e_priv *priv = mlx5i_epriv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 2ab6437a1c49..d67d5a72bb41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -88,7 +88,11 @@ struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn); /* Shared ndo functions */ int mlx5i_dev_init(struct net_device *dev); void mlx5i_dev_cleanup(struct net_device *dev); -int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +int mlx5i_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); +int mlx5i_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config); /* Parent profile functions */ int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 028a76944d82..04444dad3a0d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -140,7 +140,6 @@ static int mlx5i_pkey_close(struct net_device *netdev); static int mlx5i_pkey_dev_init(struct net_device *dev); static void mlx5i_pkey_dev_cleanup(struct net_device *netdev); static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu); -static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_open = mlx5i_pkey_open, @@ -149,7 +148,8 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_get_stats64 = mlx5i_get_stats, .ndo_uninit = mlx5i_pkey_dev_cleanup, .ndo_change_mtu = mlx5i_pkey_change_mtu, - .ndo_eth_ioctl = mlx5i_pkey_ioctl, + .ndo_hwtstamp_get = mlx5i_hwtstamp_get, + .ndo_hwtstamp_set = mlx5i_hwtstamp_set, }; /* Child NDOs */ @@ -184,11 +184,6 @@ static int mlx5i_pkey_dev_init(struct net_device *dev) return mlx5i_dev_init(dev); } -static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - return mlx5i_ioctl(dev, ifr, cmd); -} - static void mlx5i_pkey_dev_cleanup(struct net_device *netdev) { mlx5i_parent_put(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 3db0387bf6dc..1ac933cd8f02 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -1418,10 +1418,12 @@ static void mlx5_lag_unregister_hca_devcom_comp(struct mlx5_core_dev *dev) static int mlx5_lag_register_hca_devcom_comp(struct mlx5_core_dev *dev) { struct mlx5_devcom_match_attr attr = { - .key.val = mlx5_query_nic_system_image_guid(dev), .flags = MLX5_DEVCOM_MATCH_FLAGS_NS, .net = mlx5_core_net(dev), }; + u8 len __always_unused; + + mlx5_query_nic_sw_system_image_guid(dev, attr.key.buf, &len); /* This component is use to sync adding core_dev to lag_dev and to sync * changes of mlx5_adev_devices between LAG layer and other layers. 
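
The ipoib conversion above replaces the SIOCSHWTSTAMP/SIOCGHWTSTAMP demux in ndo_eth_ioctl with the dedicated ndo_hwtstamp_get/ndo_hwtstamp_set callbacks, which receive a kernel-space kernel_hwtstamp_config plus an extack rather than a raw ifreq; the core translates the legacy ioctls for drivers that implement the new ops. A minimal sketch of the same conversion for a hypothetical driver — struct foo_priv and the foo_* names are placeholders, not mlx5 code:

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/net_tstamp.h>

struct foo_priv {
	struct kernel_hwtstamp_config tstamp;	/* last accepted config */
};

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_priv *priv = netdev_priv(dev);

	*config = priv->tstamp;	/* plain struct copy, no copy_from_user() */
	return 0;
}

static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON) {
		/* extack gives userspace a readable reason, not just -ERANGE */
		NL_SET_ERR_MSG_MOD(extack, "unsupported tx_type");
		return -ERANGE;
	}

	priv->tstamp = *config;	/* a real driver would program HW here */
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_hwtstamp_get = foo_hwtstamp_get,
	.ndo_hwtstamp_set = foo_hwtstamp_set,
};

One upshot visible in the ipoib_vlan.c hunk: the child device can point at the parent's handlers directly instead of wrapping them in a one-line ioctl forwarder.
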
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 29e7fa09c32c..0ba0ef8bae42 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -1432,15 +1432,17 @@ static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared) return 0; } -static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key) +static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, + u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE]) { struct mlx5_core_dev *peer_dev, *next = NULL; - struct mlx5_devcom_match_attr attr = { - .key.val = key, - }; + struct mlx5_devcom_match_attr attr = {}; struct mlx5_devcom_comp_dev *compd; struct mlx5_devcom_comp_dev *pos; + BUILD_BUG_ON(MLX5_RT_CLOCK_IDENTITY_SIZE > MLX5_DEVCOM_MATCH_KEY_MAX); + memcpy(attr.key.buf, identity, MLX5_RT_CLOCK_IDENTITY_SIZE); + compd = mlx5_devcom_register_component(mdev->priv.devc, MLX5_DEVCOM_SHARED_CLOCK, &attr, NULL, mdev); @@ -1594,7 +1596,6 @@ int mlx5_init_clock(struct mlx5_core_dev *mdev) { u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE]; struct mlx5_clock_dev_state *clock_state; - u64 key; int err; if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) { @@ -1610,12 +1611,10 @@ int mlx5_init_clock(struct mlx5_core_dev *mdev) mdev->clock_state = clock_state; if (MLX5_CAP_MCAM_REG3(mdev, mrtcq) && mlx5_real_time_mode(mdev)) { - if (mlx5_clock_identity_get(mdev, identity)) { + if (mlx5_clock_identity_get(mdev, identity)) mlx5_core_warn(mdev, "failed to get rt clock identity, create ptp dev per function\n"); - } else { - memcpy(&key, &identity, sizeof(key)); - mlx5_shared_clock_register(mdev, key); - } + else + mlx5_shared_clock_register(mdev, identity); } if (!mdev->clock) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h index c18a652c0faa..aff3aed62c74 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h @@ -54,7 +54,6 @@ struct mlx5_timer { struct mlx5_clock { seqlock_t lock; - struct hwtstamp_config hwtstamp_config; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; struct mlx5_pps pps_info; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h index 609c85f47917..91e5ae529d5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -10,8 +10,10 @@ enum mlx5_devom_match_flags { MLX5_DEVCOM_MATCH_FLAGS_NS = BIT(0), }; +#define MLX5_DEVCOM_MATCH_KEY_MAX 32 union mlx5_devcom_match_key { u64 val; + u8 buf[MLX5_DEVCOM_MATCH_KEY_MAX]; }; struct mlx5_devcom_match_attr { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 4450091e181a..4a88a42ae4f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -65,13 +65,14 @@ static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index) /* UC L2 table hash node */ struct l2table_node { struct l2addr_node node; - u32 index; /* index in HW l2 table */ + int index; /* index in HW l2 table */ int ref_count; }; struct mlx5_mpfs { struct hlist_head hash[MLX5_L2_ADDR_HASH_SIZE]; struct mutex lock; /* Synchronize l2 table access */ + bool enabled; u32 size; unsigned long *bitmap; }; @@ -114,6 +115,8 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev) 
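
In the devcom hunk above, the match key grows from a single u64 into a union with a 32-byte buffer (MLX5_DEVCOM_MATCH_KEY_MAX), so the shared-clock code can match on the full MLX5_RT_CLOCK_IDENTITY_SIZE identity instead of truncating it into a u64, and the BUILD_BUG_ON keeps the memcpy in bounds at compile time. A stripped-down sketch of the pattern; the foo_* names and the 16-byte identity width are illustrative assumptions, not the mlx5 values:

#include <linux/build_bug.h>
#include <linux/string.h>
#include <linux/types.h>

#define FOO_MATCH_KEY_MAX	32
#define FOO_IDENTITY_SIZE	16	/* assumed width of the identity */

union foo_match_key {
	u64 val;			/* legacy single-value matches */
	u8 buf[FOO_MATCH_KEY_MAX];	/* wider identity matches */
};

struct foo_match_attr {
	union foo_match_key key;
};

static void foo_match_attr_set_identity(struct foo_match_attr *attr,
					const u8 identity[FOO_IDENTITY_SIZE])
{
	/* Build fails if the identity ever outgrows the key buffer. */
	BUILD_BUG_ON(FOO_IDENTITY_SIZE > FOO_MATCH_KEY_MAX);
	memcpy(attr->key.buf, identity, FOO_IDENTITY_SIZE);
}

Since matching presumably compares the whole buffer, callers zero-initialize the attr (as the clock.c hunk does with "= {}") so the unused tail bytes of shorter keys still compare equal.
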
return -ENOMEM; } + mpfs->enabled = true; + dev->priv.mpfs = mpfs; return 0; } @@ -135,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) struct mlx5_mpfs *mpfs = dev->priv.mpfs; struct l2table_node *l2addr; int err = 0; - u32 index; + int index; if (!mpfs) return 0; @@ -148,30 +151,34 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) goto out; } - err = alloc_l2table_index(mpfs, &index); - if (err) - goto out; - l2addr = l2addr_hash_add(mpfs->hash, mac, struct l2table_node, GFP_KERNEL); if (!l2addr) { err = -ENOMEM; - goto hash_add_err; + goto out; } - err = set_l2table_entry_cmd(dev, index, mac); - if (err) - goto set_table_entry_err; + index = -1; + + if (mpfs->enabled) { + err = alloc_l2table_index(mpfs, &index); + if (err) + goto hash_del; + err = set_l2table_entry_cmd(dev, index, mac); + if (err) + goto free_l2table_index; + mlx5_core_dbg(dev, "MPFS entry %pM, set @index (%d)\n", + l2addr->node.addr, index); + } l2addr->index = index; l2addr->ref_count = 1; mlx5_core_dbg(dev, "MPFS mac added %pM, index (%d)\n", mac, index); goto out; - -set_table_entry_err: - l2addr_hash_del(l2addr); -hash_add_err: +free_l2table_index: free_l2table_index(mpfs, index); +hash_del: + l2addr_hash_del(l2addr); out: mutex_unlock(&mpfs->lock); return err; @@ -183,7 +190,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) struct mlx5_mpfs *mpfs = dev->priv.mpfs; struct l2table_node *l2addr; int err = 0; - u32 index; + int index; if (!mpfs) return 0; @@ -200,12 +207,87 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) goto unlock; index = l2addr->index; - del_l2table_entry_cmd(dev, index); + if (index >= 0) { + del_l2table_entry_cmd(dev, index); + free_l2table_index(mpfs, index); + mlx5_core_dbg(dev, "MPFS entry %pM, deleted @index (%d)\n", + mac, index); + } l2addr_hash_del(l2addr); - free_l2table_index(mpfs, index); mlx5_core_dbg(dev, "MPFS mac deleted %pM, index (%d)\n", mac, index); unlock: mutex_unlock(&mpfs->lock); return err; } EXPORT_SYMBOL(mlx5_mpfs_del_mac); + +int mlx5_mpfs_enable(struct mlx5_core_dev *dev) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + struct l2table_node *l2addr; + struct hlist_node *n; + int err = 0, i; + + if (!mpfs) + return -ENODEV; + + mutex_lock(&mpfs->lock); + if (mpfs->enabled) + goto out; + mpfs->enabled = true; + mlx5_core_dbg(dev, "MPFS enabling mpfs\n"); + + mlx5_mpfs_foreach(l2addr, n, mpfs, i) { + u32 index; + + err = alloc_l2table_index(mpfs, &index); + if (err) { + mlx5_core_err(dev, "Failed to allocated MPFS index for %pM, err(%d)\n", + l2addr->node.addr, err); + goto out; + } + + err = set_l2table_entry_cmd(dev, index, l2addr->node.addr); + if (err) { + mlx5_core_err(dev, "Failed to set MPFS l2table entry for %pM index=%d, err(%d)\n", + l2addr->node.addr, index, err); + free_l2table_index(mpfs, index); + goto out; + } + + l2addr->index = index; + mlx5_core_dbg(dev, "MPFS entry %pM, set @index (%d)\n", + l2addr->node.addr, l2addr->index); + } +out: + mutex_unlock(&mpfs->lock); + return err; +} + +void mlx5_mpfs_disable(struct mlx5_core_dev *dev) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + struct l2table_node *l2addr; + struct hlist_node *n; + int i; + + if (!mpfs) + return; + + mutex_lock(&mpfs->lock); + if (!mpfs->enabled) + goto unlock; + mlx5_mpfs_foreach(l2addr, n, mpfs, i) { + if (l2addr->index < 0) + continue; + del_l2table_entry_cmd(dev, l2addr->index); + free_l2table_index(mpfs, l2addr->index); + mlx5_core_dbg(dev, "MPFS entry %pM, deleted @index (%d)\n", + l2addr->node.addr, l2addr->index); + 
l2addr->index = -1; + } + mpfs->enabled = false; + mlx5_core_dbg(dev, "MPFS disabled\n"); +unlock: + mutex_unlock(&mpfs->lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h index 4a293542a7aa..9c63838ce1f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h @@ -45,6 +45,10 @@ struct l2addr_node { u8 addr[ETH_ALEN]; }; +#define mlx5_mpfs_foreach(hs, tmp, mpfs, i) \ + for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \ + hlist_for_each_entry_safe(hs, tmp, &(mpfs)->hash[i], node.hlist) + #define for_each_l2hash_node(hn, tmp, hash, i) \ for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \ hlist_for_each_entry_safe(hn, tmp, &(hash)[i], hlist) @@ -82,11 +86,16 @@ struct l2addr_node { }) #ifdef CONFIG_MLX5_MPFS +struct mlx5_core_dev; int mlx5_mpfs_init(struct mlx5_core_dev *dev); void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_mpfs_enable(struct mlx5_core_dev *dev); +void mlx5_mpfs_disable(struct mlx5_core_dev *dev); #else /* #ifndef CONFIG_MLX5_MPFS */ static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; } static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {} +static inline int mlx5_mpfs_enable(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_mpfs_disable(struct mlx5_core_dev *dev) {} #endif #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c index 459a0b4d08e6..19bb620b7436 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c @@ -8,6 +8,8 @@ enum { MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF = 0x80, MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP = 0x81, MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG = 0x10a, + MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CAP = 0x10b, + MLX5_CLASS_0_CTRL_ID_NV_SW_ACCELERATE_CONF = 0x11d, MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF = 0x80, }; @@ -32,6 +34,12 @@ union mlx5_ifc_config_item_type_auto_bits { u8 reserved_at_0[0x20]; }; +enum { + MLX5_ACCESS_MODE_NEXT = 0, + MLX5_ACCESS_MODE_CURRENT, + MLX5_ACCESS_MODE_DEFAULT, +}; + struct mlx5_ifc_config_item_bits { u8 valid[0x2]; u8 priority[0x2]; @@ -123,6 +131,17 @@ struct mlx5_ifc_nv_sw_offload_conf_bits { u8 lro_log_timeout0[0x4]; }; +struct mlx5_ifc_nv_sw_offload_cap_bits { + u8 reserved_at_0[0x19]; + u8 swp_l4_csum_mode_l4_only[0x1]; + u8 reserved_at_1a[0x6]; +}; + +struct mlx5_ifc_nv_sw_accelerate_conf_bits { + u8 swp_l4_csum_mode[0x2]; + u8 reserved_at_2[0x3e]; +}; + #define MNVDA_HDR_SZ \ (MLX5_ST_SZ_BYTES(mnvda_reg) - \ MLX5_BYTE_OFF(mnvda_reg, configuration_item_data)) @@ -195,12 +214,39 @@ mlx5_nv_param_read_sw_offload_conf(struct mlx5_core_dev *dev, void *mnvda, return mlx5_nv_param_read(dev, mnvda, len); } +static int +mlx5_nv_param_read_sw_offload_cap(struct mlx5_core_dev *dev, void *mnvda, + size_t len) +{ + MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0); + MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index, + MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CAP); + MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_offload_cap); + + return mlx5_nv_param_read(dev, mnvda, len); +} + +static int +mlx5_nv_param_read_sw_accelerate_conf(struct mlx5_core_dev *dev, void *mnvda, + size_t len, int access_mode) +{ + MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0); + MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index, + MLX5_CLASS_0_CTRL_ID_NV_SW_ACCELERATE_CONF); + MLX5_SET_CFG_HDR_LEN(mnvda, 
nv_sw_accelerate_conf); + MLX5_SET(mnvda_reg, mnvda, configuration_item_header.access_mode, + access_mode); + + return mlx5_nv_param_read(dev, mnvda, len); +} + static const char *const cqe_compress_str[] = { "balanced", "aggressive" }; static int mlx5_nv_param_devlink_cqe_compress_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {}; @@ -268,6 +314,182 @@ mlx5_nv_param_devlink_cqe_compress_set(struct devlink *devlink, u32 id, return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda)); } +enum swp_l4_csum_mode { + SWP_L4_CSUM_MODE_DEFAULT = 0, + SWP_L4_CSUM_MODE_FULL_CSUM = 1, + SWP_L4_CSUM_MODE_L4_ONLY = 2, +}; + +static const char *const + swp_l4_csum_mode_str[] = { "default", "full_csum", "l4_only" }; + +static int +mlx5_swp_l4_csum_mode_get(struct devlink *devlink, u32 id, + int access_mode, u8 *value, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {}; + void *data; + int err; + + err = mlx5_nv_param_read_sw_accelerate_conf(dev, mnvda, sizeof(mnvda), + access_mode); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to read sw_accelerate_conf mnvda reg"); + return err; + } + + data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data); + *value = MLX5_GET(nv_sw_accelerate_conf, data, swp_l4_csum_mode); + + if (*value >= ARRAY_SIZE(swp_l4_csum_mode_str)) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Invalid swp_l4_csum_mode value %u read from device", + *value); + return -EINVAL; + } + + return 0; +} + +static int +mlx5_devlink_swp_l4_csum_mode_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + u8 value; + int err; + + err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_NEXT, + &value, extack); + if (err) + return err; + + strscpy(ctx->val.vstr, swp_l4_csum_mode_str[value], + sizeof(ctx->val.vstr)); + return 0; +} + +static int +mlx5_devlink_swp_l4_csum_mode_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + u32 cap[MLX5_ST_SZ_DW(mnvda_reg)] = {}; + void *data; + int err, i; + + for (i = 0; i < ARRAY_SIZE(swp_l4_csum_mode_str); i++) { + if (!strcmp(val.vstr, swp_l4_csum_mode_str[i])) + break; + } + + if (i >= ARRAY_SIZE(swp_l4_csum_mode_str) || + i == SWP_L4_CSUM_MODE_DEFAULT) { + NL_SET_ERR_MSG_MOD(extack, + "Invalid value, supported values are full_csum/l4_only"); + return -EINVAL; + } + + if (i == SWP_L4_CSUM_MODE_L4_ONLY) { + err = mlx5_nv_param_read_sw_offload_cap(dev, cap, sizeof(cap)); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to read sw_offload_cap"); + return err; + } + + data = MLX5_ADDR_OF(mnvda_reg, cap, configuration_item_data); + if (!MLX5_GET(nv_sw_offload_cap, data, swp_l4_csum_mode_l4_only)) { + NL_SET_ERR_MSG_MOD(extack, + "l4_only mode is not supported on this device"); + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int +mlx5_swp_l4_csum_mode_set(struct devlink *devlink, u32 id, u8 value, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {}; + void *data; + int err; + + err = mlx5_nv_param_read_sw_accelerate_conf(dev, mnvda, sizeof(mnvda), + MLX5_ACCESS_MODE_NEXT); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to read 
sw_accelerate_conf mnvda reg"); + return err; + } + + data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data); + MLX5_SET(nv_sw_accelerate_conf, data, swp_l4_csum_mode, value); + + err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda)); + if (err) + NL_SET_ERR_MSG_MOD(extack, + "Failed to write sw_accelerate_conf mnvda reg"); + + return err; +} + +static int +mlx5_devlink_swp_l4_csum_mode_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + u8 value; + + if (!strcmp(ctx->val.vstr, "full_csum")) + value = SWP_L4_CSUM_MODE_FULL_CSUM; + else + value = SWP_L4_CSUM_MODE_L4_ONLY; + + return mlx5_swp_l4_csum_mode_set(devlink, id, value, extack); +} + +static int +mlx5_devlink_swp_l4_csum_mode_get_default(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + u8 value; + int err; + + err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_DEFAULT, + &value, extack); + if (err) + return err; + + strscpy(ctx->val.vstr, swp_l4_csum_mode_str[value], + sizeof(ctx->val.vstr)); + return 0; +} + +static int +mlx5_devlink_swp_l4_csum_mode_set_default(struct devlink *devlink, u32 id, + enum devlink_param_cmode cmode, + struct netlink_ext_ack *extack) +{ + u8 value; + int err; + + err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_DEFAULT, + &value, extack); + if (err) + return err; + + return mlx5_swp_l4_csum_mode_set(devlink, id, value, extack); +} + static int mlx5_nv_param_read_global_pci_conf(struct mlx5_core_dev *dev, void *mnvda, size_t len) { @@ -302,7 +524,8 @@ static int mlx5_nv_param_read_per_host_pf_conf(struct mlx5_core_dev *dev, } static int mlx5_devlink_enable_sriov_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {}; @@ -413,7 +636,8 @@ static int mlx5_devlink_enable_sriov_set(struct devlink *devlink, u32 id, } static int mlx5_devlink_total_vfs_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {}; @@ -545,6 +769,14 @@ static const struct devlink_param mlx5_nv_param_devlink_params[] = { mlx5_nv_param_devlink_cqe_compress_get, mlx5_nv_param_devlink_cqe_compress_set, mlx5_nv_param_devlink_cqe_compress_validate), + DEVLINK_PARAM_DRIVER_WITH_DEFAULTS(MLX5_DEVLINK_PARAM_ID_SWP_L4_CSUM_MODE, + "swp_l4_csum_mode", DEVLINK_PARAM_TYPE_STRING, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + mlx5_devlink_swp_l4_csum_mode_get, + mlx5_devlink_swp_l4_csum_mode_set, + mlx5_devlink_swp_l4_csum_mode_validate, + mlx5_devlink_swp_l4_csum_mode_get_default, + mlx5_devlink_swp_l4_csum_mode_set_default), }; int mlx5_nv_param_register_dl_params(struct devlink *devlink) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c index 47fe215f66bf..ef06fe6cbb51 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c @@ -19,13 +19,16 @@ struct mlx5_st { struct mutex lock; struct xa_limit index_limit; struct xarray idx_xa; /* key == index, value == struct mlx5_st_idx_data */ + u8 direct_mode : 1; }; struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev) { struct pci_dev *pdev = dev->pdev; struct mlx5_st *st; 
+ u8 direct_mode = 0; u16 num_entries; + u32 tbl_loc; int ret; if (!MLX5_CAP_GEN(dev, mkey_pcie_tph)) @@ -40,10 +43,16 @@ struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev) if (!pdev->tph_cap) return NULL; - num_entries = pcie_tph_get_st_table_size(pdev); - /* We need a reserved entry for non TPH cases */ - if (num_entries < 2) - return NULL; + tbl_loc = pcie_tph_get_st_table_loc(pdev); + if (tbl_loc == PCI_TPH_LOC_NONE) + direct_mode = 1; + + if (!direct_mode) { + num_entries = pcie_tph_get_st_table_size(pdev); + /* We need a reserved entry for non TPH cases */ + if (num_entries < 2) + return NULL; + } /* The OS doesn't support ST */ ret = pcie_enable_tph(pdev, PCI_TPH_ST_DS_MODE); @@ -56,6 +65,10 @@ struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev) mutex_init(&st->lock); xa_init_flags(&st->idx_xa, XA_FLAGS_ALLOC); + st->direct_mode = direct_mode; + if (st->direct_mode) + return st; + /* entry 0 is reserved for non TPH cases */ st->index_limit.min = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX + 1; st->index_limit.max = num_entries - 1; @@ -96,6 +109,11 @@ int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type, if (ret) return ret; + if (st->direct_mode) { + *st_index = tag; + return 0; + } + mutex_lock(&st->lock); xa_for_each(&st->idx_xa, index, idx_data) { @@ -145,6 +163,9 @@ int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index) if (!st) return -EOPNOTSUPP; + if (st->direct_mode) + return 0; + mutex_lock(&st->lock); idx_data = xa_load(&st->idx_xa, st_index); if (WARN_ON_ONCE(!idx_data)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c index d55e15c1f380..304912637c35 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c @@ -149,7 +149,7 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev) struct mlx5_vxlan *vxlan; if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev)) - return ERR_PTR(-ENOTSUPP); + return ERR_PTR(-EOPNOTSUPP); vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); if (!vxlan) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 70c156591b0b..024339ce41f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -553,6 +553,7 @@ EXPORT_SYMBOL(mlx5_is_roce_on); static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx) { + bool do_set = false; void *set_hca_cap; int err; @@ -563,17 +564,27 @@ static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx) if (err) return err; - if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) || - !(dev->priv.sw_vhca_id > 0)) - return 0; - set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur, MLX5_ST_SZ_BYTES(cmd_hca_cap_2)); - MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1); - return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2); + if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) && + dev->priv.sw_vhca_id > 0) { + MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1); + do_set = true; + } + + if (MLX5_CAP_GEN_2_MAX(dev, lag_per_mp_group)) { + MLX5_SET(cmd_hca_cap_2, set_hca_cap, lag_per_mp_group, 1); + do_set = true; + } + + /* some FW versions that support querying MLX5_CAP_GENERAL_2 + * capabilities but don't support setting them. 
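
The st.c change above teaches the steering-tag allocator a direct mode: when the device reports no steering-tag table location (PCI_TPH_LOC_NONE in the hunk), there is no table to index, so the computed tag is handed back verbatim and the xarray bookkeeping is skipped on both alloc and dealloc. A minimal sketch of that split, with foo_* placeholders standing in for the mlx5 types and the table-backed path:

#include <linux/types.h>

struct foo_st {
	u8 direct_mode:1;	/* no ST table: HW consumes raw tags */
};

/* Placeholder for the xarray-backed, reference-counted index path. */
int foo_xa_alloc_index(struct foo_st *st, u16 tag, u16 *st_index);

static int foo_st_alloc_index(struct foo_st *st, u16 tag, u16 *st_index)
{
	if (st->direct_mode) {
		*st_index = tag;	/* tag is the index; nothing to track */
		return 0;
	}
	return foo_xa_alloc_index(st, tag, st_index);
}

Dealloc mirrors this: in direct mode nothing was allocated, so the free path returns immediately, exactly as mlx5_st_dealloc_index now does.
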
+ * Skip unnecessary update to hca_cap_2 when no changes were introduced + */ + return do_set ? set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2) : 0; } static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) @@ -999,16 +1010,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) goto err_irq_cleanup; } - err = mlx5_events_init(dev); - if (err) { - mlx5_core_err(dev, "failed to initialize events\n"); - goto err_eq_cleanup; - } - err = mlx5_fw_reset_init(dev); if (err) { mlx5_core_err(dev, "failed to initialize fw reset events\n"); - goto err_events_cleanup; + goto err_eq_cleanup; } mlx5_cq_debugfs_init(dev); @@ -1110,8 +1115,6 @@ err_tables_cleanup: mlx5_cleanup_reserved_gids(dev); mlx5_cq_debugfs_cleanup(dev); mlx5_fw_reset_cleanup(dev); -err_events_cleanup: - mlx5_events_cleanup(dev); err_eq_cleanup: mlx5_eq_table_cleanup(dev); err_irq_cleanup: @@ -1144,7 +1147,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_cleanup_reserved_gids(dev); mlx5_cq_debugfs_cleanup(dev); mlx5_fw_reset_cleanup(dev); - mlx5_events_cleanup(dev); mlx5_eq_table_cleanup(dev); mlx5_irq_table_cleanup(dev); mlx5_devcom_unregister_device(dev->priv.devc); @@ -1375,12 +1377,6 @@ static int mlx5_load(struct mlx5_core_dev *dev) mlx5_vhca_event_start(dev); - err = mlx5_sf_hw_table_create(dev); - if (err) { - mlx5_core_err(dev, "sf table create failed %d\n", err); - goto err_vhca; - } - err = mlx5_ec_init(dev); if (err) { mlx5_core_err(dev, "Failed to init embedded CPU\n"); @@ -1409,8 +1405,6 @@ err_sriov: mlx5_lag_remove_mdev(dev); mlx5_ec_cleanup(dev); err_ec: - mlx5_sf_hw_table_destroy(dev); -err_vhca: mlx5_vhca_event_stop(dev); err_set_hca: mlx5_fs_core_cleanup(dev); @@ -1436,12 +1430,12 @@ static void mlx5_unload(struct mlx5_core_dev *dev) { mlx5_eswitch_disable(dev->priv.eswitch); mlx5_devlink_traps_unregister(priv_to_devlink(dev)); + mlx5_vhca_event_stop(dev); mlx5_sf_dev_table_destroy(dev); mlx5_sriov_detach(dev); mlx5_lag_remove_mdev(dev); mlx5_ec_cleanup(dev); mlx5_sf_hw_table_destroy(dev); - mlx5_vhca_event_stop(dev); mlx5_fs_core_cleanup(dev); mlx5_fpga_device_stop(dev); mlx5_rsc_dump_cleanup(dev); @@ -1822,6 +1816,50 @@ static int vhca_id_show(struct seq_file *file, void *priv) DEFINE_SHOW_ATTRIBUTE(vhca_id); +static int mlx5_notifiers_init(struct mlx5_core_dev *dev) +{ + int err; + + err = mlx5_events_init(dev); + if (err) { + mlx5_core_err(dev, "failed to initialize events\n"); + return err; + } + + BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.esw_n_head); + mlx5_vhca_state_notifier_init(dev); + + err = mlx5_sf_hw_notifier_init(dev); + if (err) + goto err_sf_hw_notifier; + + err = mlx5_sf_notifiers_init(dev); + if (err) + goto err_sf_notifiers; + + err = mlx5_sf_dev_notifier_init(dev); + if (err) + goto err_sf_dev_notifier; + + return 0; + +err_sf_dev_notifier: + mlx5_sf_notifiers_cleanup(dev); +err_sf_notifiers: + mlx5_sf_hw_notifier_cleanup(dev); +err_sf_hw_notifier: + mlx5_events_cleanup(dev); + return err; +} + +static void mlx5_notifiers_cleanup(struct mlx5_core_dev *dev) +{ + mlx5_sf_dev_notifier_cleanup(dev); + mlx5_sf_notifiers_cleanup(dev); + mlx5_sf_hw_notifier_cleanup(dev); + mlx5_events_cleanup(dev); +} + int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) { struct mlx5_priv *priv = &dev->priv; @@ -1877,6 +1915,10 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) if (err) goto err_hca_caps; + err = mlx5_notifiers_init(dev); + if (err) + goto err_hca_caps; + /* The conjunction of sw_vhca_id with sw_owner_id will be a global * unique id per function 
which uses mlx5_core. * Those values are supplied to FW as part of the init HCA command to @@ -1919,6 +1961,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) if (priv->sw_vhca_id > 0) ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id); + mlx5_notifiers_cleanup(dev); mlx5_hca_caps_free(dev); mlx5_adev_cleanup(dev); mlx5_pagealloc_cleanup(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 082259b56816..cfebc110c02f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -357,11 +357,11 @@ int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable); void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, bool *enabled); int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, - u16 offset, u16 size, u8 *data); + u16 offset, u16 size, u8 *data, u8 *status); int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev, struct mlx5_module_eeprom_query_params *params, - u8 *data); + u8 *data, u8 *status); int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out); int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); @@ -444,6 +444,8 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev); void mlx5_uninit_one_light(struct mlx5_core_dev *dev); void mlx5_unload_one_light(struct mlx5_core_dev *dev); +void mlx5_query_nic_sw_system_image_guid(struct mlx5_core_dev *mdev, u8 *buf, + u8 *len); int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport, u16 opmod); #define mlx5_vport_get_other_func_general_cap(dev, vport, out) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index aa9f2b0a77d3..85a9e534f442 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -289,11 +289,11 @@ int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) } static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num, - u8 *module_id) + u8 *module_id, u8 *status) { u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; u32 out[MLX5_ST_SZ_DW(mcia_reg)]; - int err, status; + int err; u8 *ptr; MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW); @@ -308,12 +308,12 @@ static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num, if (err) return err; - status = MLX5_GET(mcia_reg, out, status); - if (status) { - mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", - status); + if (MLX5_GET(mcia_reg, out, status)) { + if (status) + *status = MLX5_GET(mcia_reg, out, status); return -EIO; } + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); *module_id = ptr[0]; @@ -370,13 +370,14 @@ static int mlx5_mcia_max_bytes(struct mlx5_core_dev *dev) } static int mlx5_query_mcia(struct mlx5_core_dev *dev, - struct mlx5_module_eeprom_query_params *params, u8 *data) + struct mlx5_module_eeprom_query_params *params, + u8 *data, u8 *status) { u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; u32 out[MLX5_ST_SZ_DW(mcia_reg)]; - int status, err; void *ptr; u16 size; + int err; size = min_t(int, params->size, mlx5_mcia_max_bytes(dev)); @@ -392,12 +393,9 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev, if (err) return err; - status = MLX5_GET(mcia_reg, out, status); - if (status) { - mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", - status); + *status = MLX5_GET(mcia_reg, out, status); + if (*status) return -EIO; - } ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); memcpy(data, 
ptr, size); @@ -406,7 +404,7 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev, } int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, - u16 offset, u16 size, u8 *data) + u16 offset, u16 size, u8 *data, u8 *status) { struct mlx5_module_eeprom_query_params query = {0}; u8 module_id; @@ -416,7 +414,8 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, if (err) return err; - err = mlx5_query_module_id(dev, query.module_number, &module_id); + err = mlx5_query_module_id(dev, query.module_number, &module_id, + status); if (err) return err; @@ -441,12 +440,12 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, query.size = size; query.offset = offset; - return mlx5_query_mcia(dev, &query, data); + return mlx5_query_mcia(dev, &query, data, status); } int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev, struct mlx5_module_eeprom_query_params *params, - u8 *data) + u8 *data, u8 *status) { int err; @@ -460,7 +459,7 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev, return -EINVAL; } - return mlx5_query_mcia(dev, params, data); + return mlx5_query_mcia(dev, params, data, status); } static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, @@ -1109,6 +1108,7 @@ mlx5e_ext_link_info[MLX5E_EXT_LINK_MODES_NUMBER] = { [MLX5E_200GAUI_1_200GBASE_CR1_KR1] = {.speed = 200000, .lanes = 1}, [MLX5E_400GAUI_2_400GBASE_CR2_KR2] = {.speed = 400000, .lanes = 2}, [MLX5E_800GAUI_4_800GBASE_CR4_KR4] = {.speed = 800000, .lanes = 4}, + [MLX5E_1600TAUI_8_1600TBASE_CR8_KR8] = {.speed = 1600000, .lanes = 8}, }; int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index 99219ea52c4b..f310bde3d11f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -16,7 +16,6 @@ struct mlx5_sf_dev_table { struct xarray devices; phys_addr_t base_address; u64 sf_bar_length; - struct notifier_block nb; struct workqueue_struct *active_wq; struct work_struct work; u8 stop_active_wq:1; @@ -156,18 +155,23 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de static int mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_code, void *data) { - struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb); + struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev, + priv.sf_dev_nb); + struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; const struct mlx5_vhca_state_event *event = data; struct mlx5_sf_dev *sf_dev; u16 max_functions; u16 sf_index; u16 base_id; - max_functions = mlx5_sf_max_functions(table->dev); + if (!table) + return 0; + + max_functions = mlx5_sf_max_functions(dev); if (!max_functions) return 0; - base_id = mlx5_sf_start_function_id(table->dev); + base_id = mlx5_sf_start_function_id(dev); if (event->function_id < base_id || event->function_id >= (base_id + max_functions)) return 0; @@ -177,19 +181,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_ case MLX5_VHCA_STATE_INVALID: case MLX5_VHCA_STATE_ALLOCATED: if (sf_dev) - mlx5_sf_dev_del(table->dev, sf_dev, sf_index); + mlx5_sf_dev_del(dev, sf_dev, sf_index); break; case MLX5_VHCA_STATE_TEARDOWN_REQUEST: if (sf_dev) - mlx5_sf_dev_del(table->dev, sf_dev, sf_index); + mlx5_sf_dev_del(dev, sf_dev, sf_index); else - mlx5_core_err(table->dev, + mlx5_core_err(dev, "SF DEV: teardown state 
for invalid dev index=%d sfnum=0x%x\n", sf_index, event->sw_function_id); break; case MLX5_VHCA_STATE_ACTIVE: if (!sf_dev) - mlx5_sf_dev_add(table->dev, sf_index, event->function_id, + mlx5_sf_dev_add(dev, sf_index, event->function_id, event->sw_function_id); break; default: @@ -315,6 +319,15 @@ static void mlx5_sf_dev_destroy_active_works(struct mlx5_sf_dev_table *table) } } +int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev) +{ + if (mlx5_core_is_sf(dev)) + return 0; + + dev->priv.sf_dev_nb.notifier_call = mlx5_sf_dev_state_change_handler; + return mlx5_vhca_event_notifier_register(dev, &dev->priv.sf_dev_nb); +} + void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) { struct mlx5_sf_dev_table *table; @@ -329,17 +342,12 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) goto table_err; } - table->nb.notifier_call = mlx5_sf_dev_state_change_handler; table->dev = dev; table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12); table->base_address = pci_resource_start(dev->pdev, 2); xa_init(&table->devices); dev->priv.sf_dev_table = table; - err = mlx5_vhca_event_notifier_register(dev, &table->nb); - if (err) - goto vhca_err; - err = mlx5_sf_dev_create_active_works(table); if (err) goto add_active_err; @@ -351,10 +359,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) arm_err: mlx5_sf_dev_destroy_active_works(table); -add_active_err: - mlx5_vhca_event_notifier_unregister(dev, &table->nb); mlx5_vhca_event_work_queues_flush(dev); -vhca_err: +add_active_err: kfree(table); dev->priv.sf_dev_table = NULL; table_err: @@ -372,6 +378,14 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table) } } +void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev) +{ + if (mlx5_core_is_sf(dev)) + return; + + mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_dev_nb); +} + void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev) { struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; @@ -380,8 +394,6 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev) return; mlx5_sf_dev_destroy_active_works(table); - mlx5_vhca_event_notifier_unregister(dev, &table->nb); - mlx5_vhca_event_work_queues_flush(dev); /* Now that event handler is not running, it is safe to destroy * the sf device without race. 
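
A pattern running through the SF rework above: the notifier blocks move out of the short-lived tables and into mlx5_core_dev's priv area, get registered once for the device's lifetime, and the handlers recover the device with container_of() and simply ignore events that arrive while the table they feed does not exist. A minimal sketch, with foo_* as placeholder names:

#include <linux/container_of.h>
#include <linux/notifier.h>

struct foo_table;	/* created and destroyed independently */

struct foo_dev {
	struct notifier_block table_nb;	/* lives as long as the device */
	struct foo_table *table;	/* NULL outside create..destroy */
};

static int foo_table_event(struct notifier_block *nb,
			   unsigned long event, void *data)
{
	struct foo_dev *fdev = container_of(nb, struct foo_dev, table_nb);

	if (!fdev->table)	/* no table yet (or already gone): drop it */
		return 0;

	/* ... dispatch the event against fdev->table ... */
	return 0;
}

This takes the register/unregister ordering out of table create/destroy entirely; the only remaining rule is that the handler must tolerate a NULL table pointer, which each reworked handler above now checks first.
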
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h index b99131e95e37..3ab0449c770c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h @@ -25,7 +25,9 @@ struct mlx5_sf_peer_devlink_event_ctx { int err; }; +int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev); void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev); +void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev); void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev); int mlx5_sf_driver_register(void); @@ -35,10 +37,19 @@ bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev); #else +static inline int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev) +{ + return 0; +} + static inline void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev) { } +static inline void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev) +{ +} + static inline void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev) { } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 3304f25cc805..b82323b8449e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -31,9 +31,6 @@ struct mlx5_sf_table { struct mlx5_core_dev *dev; /* To refer from notifier context. */ struct xarray function_ids; /* function id based lookup. */ struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */ - struct notifier_block esw_nb; - struct notifier_block vhca_nb; - struct notifier_block mdev_nb; }; static struct mlx5_sf * @@ -391,11 +388,16 @@ static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state) static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data) { - struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb); + struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev, + priv.sf_table_vhca_nb); + struct mlx5_sf_table *table = dev->priv.sf_table; const struct mlx5_vhca_state_event *event = data; bool update = false; struct mlx5_sf *sf; + if (!table) + return 0; + mutex_lock(&table->sf_state_lock); sf = mlx5_sf_lookup_by_function_id(table, event->function_id); if (!sf) @@ -407,7 +409,7 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v update = mlx5_sf_state_update_check(sf, event->new_vhca_state); if (update) sf->hw_state = event->new_vhca_state; - trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller, + trace_mlx5_sf_update_state(dev, sf->port_index, sf->controller, sf->hw_fn_id, sf->hw_state); unlock: mutex_unlock(&table->sf_state_lock); @@ -425,12 +427,16 @@ static void mlx5_sf_del_all(struct mlx5_sf_table *table) static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data) { - struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb); + struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev, + priv.sf_table_esw_nb); const struct mlx5_esw_event_info *mode = data; + if (!dev->priv.sf_table) + return 0; + switch (mode->new_mode) { case MLX5_ESWITCH_LEGACY: - mlx5_sf_del_all(table); + mlx5_sf_del_all(dev->priv.sf_table); break; default: break; @@ -441,15 +447,16 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data) { - struct 
mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb); + struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev, + priv.sf_table_mdev_nb); struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data; + struct mlx5_sf_table *table = dev->priv.sf_table; int ret = NOTIFY_DONE; struct mlx5_sf *sf; - if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK) + if (!table || event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK) return NOTIFY_DONE; - mutex_lock(&table->sf_state_lock); sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id); if (!sf) @@ -464,10 +471,40 @@ out: return ret; } +int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev) +{ + int err; + + if (mlx5_core_is_sf(dev)) + return 0; + + dev->priv.sf_table_esw_nb.notifier_call = mlx5_sf_esw_event; + err = mlx5_esw_event_notifier_register(dev, &dev->priv.sf_table_esw_nb); + if (err) + return err; + + dev->priv.sf_table_vhca_nb.notifier_call = mlx5_sf_vhca_event; + err = mlx5_vhca_event_notifier_register(dev, + &dev->priv.sf_table_vhca_nb); + if (err) + goto vhca_err; + + dev->priv.sf_table_mdev_nb.notifier_call = mlx5_sf_mdev_event; + err = mlx5_blocking_notifier_register(dev, &dev->priv.sf_table_mdev_nb); + if (err) + goto mdev_err; + + return 0; +mdev_err: + mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb); +vhca_err: + mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb); + return err; +} + int mlx5_sf_table_init(struct mlx5_core_dev *dev) { struct mlx5_sf_table *table; - int err; if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev)) return 0; @@ -480,28 +517,18 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev) table->dev = dev; xa_init(&table->function_ids); dev->priv.sf_table = table; - table->esw_nb.notifier_call = mlx5_sf_esw_event; - err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb); - if (err) - goto reg_err; - - table->vhca_nb.notifier_call = mlx5_sf_vhca_event; - err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb); - if (err) - goto vhca_err; - - table->mdev_nb.notifier_call = mlx5_sf_mdev_event; - mlx5_blocking_notifier_register(dev, &table->mdev_nb); return 0; +} -vhca_err: - mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb); -reg_err: - mutex_destroy(&table->sf_state_lock); - kfree(table); - dev->priv.sf_table = NULL; - return err; +void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev) +{ + if (mlx5_core_is_sf(dev)) + return; + + mlx5_blocking_notifier_unregister(dev, &dev->priv.sf_table_mdev_nb); + mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb); + mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb); } void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) @@ -511,9 +538,6 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) if (!table) return; - mlx5_blocking_notifier_unregister(dev, &table->mdev_nb); - mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb); - mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb); mutex_destroy(&table->sf_state_lock); WARN_ON(!xa_empty(&table->function_ids)); kfree(table); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c index 1f613320fe07..bd968f3b3855 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c @@ -30,9 +30,7 @@ enum mlx5_sf_hwc_index { }; struct mlx5_sf_hw_table { - struct mlx5_core_dev *dev; struct mutex table_lock; /* Serializes sf 
deletion and vhca state change handler. */ - struct notifier_block vhca_nb; struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX]; }; @@ -71,14 +69,16 @@ mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id) return NULL; } -static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller, +static int mlx5_sf_hw_table_id_alloc(struct mlx5_core_dev *dev, + struct mlx5_sf_hw_table *table, + u32 controller, u32 usr_sfnum) { struct mlx5_sf_hwc_table *hwc; int free_idx = -1; int i; - hwc = mlx5_sf_controller_to_hwc(table->dev, controller); + hwc = mlx5_sf_controller_to_hwc(dev, controller); if (!hwc->sfs) return -ENOSPC; @@ -100,11 +100,13 @@ static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 control return free_idx; } -static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id) +static void mlx5_sf_hw_table_id_free(struct mlx5_core_dev *dev, + struct mlx5_sf_hw_table *table, + u32 controller, int id) { struct mlx5_sf_hwc_table *hwc; - hwc = mlx5_sf_controller_to_hwc(table->dev, controller); + hwc = mlx5_sf_controller_to_hwc(dev, controller); hwc->sfs[id].allocated = false; hwc->sfs[id].pending_delete = false; } @@ -120,7 +122,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr return -EOPNOTSUPP; mutex_lock(&table->table_lock); - sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum); + sw_id = mlx5_sf_hw_table_id_alloc(dev, table, controller, usr_sfnum); if (sw_id < 0) { err = sw_id; goto exist_err; @@ -151,7 +153,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr vhca_err: mlx5_cmd_dealloc_sf(dev, hw_fn_id); err: - mlx5_sf_hw_table_id_free(table, controller, sw_id); + mlx5_sf_hw_table_id_free(dev, table, controller, sw_id); exist_err: mutex_unlock(&table->table_lock); return err; @@ -165,7 +167,7 @@ void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id) mutex_lock(&table->table_lock); hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id); mlx5_cmd_dealloc_sf(dev, hw_fn_id); - mlx5_sf_hw_table_id_free(table, controller, id); + mlx5_sf_hw_table_id_free(dev, table, controller, id); mutex_unlock(&table->table_lock); } @@ -216,10 +218,12 @@ static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev, } } -static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table) +static void mlx5_sf_hw_table_dealloc_all(struct mlx5_core_dev *dev, + struct mlx5_sf_hw_table *table) { - mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]); - mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]); + mlx5_sf_hw_table_hwc_dealloc_all(dev, + &table->hwc[MLX5_SF_HWC_EXTERNAL]); + mlx5_sf_hw_table_hwc_dealloc_all(dev, &table->hwc[MLX5_SF_HWC_LOCAL]); } static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id) @@ -301,7 +305,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) } mutex_init(&table->table_lock); - table->dev = dev; dev->priv.sf_hw_table = table; base_id = mlx5_sf_start_function_id(dev); @@ -338,19 +341,22 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev) mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]); mutex_destroy(&table->table_lock); kfree(table); + dev->priv.sf_hw_table = NULL; res_unregister: mlx5_sf_hw_table_res_unregister(dev); } static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data) { - struct mlx5_sf_hw_table *table = container_of(nb, struct 
mlx5_sf_hw_table, vhca_nb); + struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev, + priv.sf_hw_table_vhca_nb); + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; const struct mlx5_vhca_state_event *event = data; struct mlx5_sf_hwc_table *hwc; struct mlx5_sf_hw *sf_hw; u16 sw_id; - if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED) + if (!table || event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED) return 0; hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id); @@ -365,20 +371,28 @@ static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode * Hence recycle the sf hardware id for reuse. */ if (sf_hw->allocated && sf_hw->pending_delete) - mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id); + mlx5_sf_hw_table_hwc_sf_free(dev, hwc, sw_id); mutex_unlock(&table->table_lock); return 0; } -int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev) +int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev) { - struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; - - if (!table) + if (mlx5_core_is_sf(dev)) return 0; - table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event; - return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb); + dev->priv.sf_hw_table_vhca_nb.notifier_call = mlx5_sf_hw_vhca_event; + return mlx5_vhca_event_notifier_register(dev, + &dev->priv.sf_hw_table_vhca_nb); +} + +void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev) +{ + if (mlx5_core_is_sf(dev)) + return; + + mlx5_vhca_event_notifier_unregister(dev, + &dev->priv.sf_hw_table_vhca_nb); } void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev) @@ -388,9 +402,8 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev) if (!table) return; - mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb); /* Dealloc SFs whose firmware event has been missed. 
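
The hw_table notifier above follows the same lifetime split, and both its init and cleanup are gated on mlx5_core_is_sf(): an SF never manages other SFs, so on SFs the pair collapses to a no-op. Keeping the check inside the helpers lets callers stay symmetric and unconditional. A short sketch of the shape, foo_* names being placeholders:

#include <linux/notifier.h>
#include <linux/types.h>

struct foo_dev {
	struct notifier_block hw_nb;
};

/* Placeholders for the device-type check and the notifier chain. */
bool foo_dev_is_sf(struct foo_dev *fdev);
int foo_event_notifier_register(struct foo_dev *fdev,
				struct notifier_block *nb);
void foo_event_notifier_unregister(struct foo_dev *fdev,
				   struct notifier_block *nb);
int foo_hw_event(struct notifier_block *nb, unsigned long event, void *data);

static int foo_hw_notifier_init(struct foo_dev *fdev)
{
	if (foo_dev_is_sf(fdev))
		return 0;	/* SFs: nothing to watch */

	fdev->hw_nb.notifier_call = foo_hw_event;
	return foo_event_notifier_register(fdev, &fdev->hw_nb);
}

static void foo_hw_notifier_cleanup(struct foo_dev *fdev)
{
	if (foo_dev_is_sf(fdev))
		return;

	foo_event_notifier_unregister(fdev, &fdev->hw_nb);
}
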
*/ - mlx5_sf_hw_table_dealloc_all(table); + mlx5_sf_hw_table_dealloc_all(dev, table); } bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h index 89559a37997a..d8a934a0e968 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h @@ -12,10 +12,13 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev); void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev); -int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev); +int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev); +void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev); void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev); +int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev); int mlx5_sf_table_init(struct mlx5_core_dev *dev); +void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev); void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev); bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev); @@ -44,20 +47,33 @@ static inline void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev) { } -static inline int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev) +static inline int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev) +{ +} + static inline void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev) { } +static inline int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev) +{ + return 0; +} + static inline int mlx5_sf_table_init(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev) +{ +} + static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) { } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c index cda01ba441ae..b04cf6cf8956 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c @@ -9,15 +9,9 @@ #define CREATE_TRACE_POINTS #include "diag/vhca_tracepoint.h" -struct mlx5_vhca_state_notifier { - struct mlx5_core_dev *dev; - struct mlx5_nb nb; - struct blocking_notifier_head n_head; -}; - struct mlx5_vhca_event_work { struct work_struct work; - struct mlx5_vhca_state_notifier *notifier; + struct mlx5_core_dev *dev; struct mlx5_vhca_state_event event; }; @@ -95,16 +89,14 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event * mlx5_vhca_event_arm(dev, event->function_id); trace_mlx5_sf_vhca_event(dev, event); - blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event); + blocking_notifier_call_chain(&dev->priv.vhca_state_n_head, 0, event); } static void mlx5_vhca_state_work_handler(struct work_struct *_work) { struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work); - struct mlx5_vhca_state_notifier *notifier = work->notifier; - struct mlx5_core_dev *dev = notifier->dev; - mlx5_vhca_event_notify(dev, &work->event); + mlx5_vhca_event_notify(work->dev, &work->event); kfree(work); } @@ -116,8 +108,8 @@ void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct wo static int mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data) { - struct mlx5_vhca_state_notifier *notifier = - mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb); + struct mlx5_core_dev *dev = mlx5_nb_cof(nb, struct mlx5_core_dev, + 
priv.vhca_state_nb); struct mlx5_vhca_event_work *work; struct mlx5_eqe *eqe = data; int wq_idx; @@ -126,10 +118,10 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v if (!work) return NOTIFY_DONE; INIT_WORK(&work->work, &mlx5_vhca_state_work_handler); - work->notifier = notifier; + work->dev = dev; work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id); wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS; - mlx5_vhca_events_work_enqueue(notifier->dev, wq_idx, &work->work); + mlx5_vhca_events_work_enqueue(dev, wq_idx, &work->work); return NOTIFY_OK; } @@ -145,9 +137,15 @@ void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap) MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1); } +void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev) +{ + BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.vhca_state_n_head); + MLX5_NB_INIT(&dev->priv.vhca_state_nb, mlx5_vhca_state_change_notifier, + VHCA_STATE_CHANGE); +} + int mlx5_vhca_event_init(struct mlx5_core_dev *dev) { - struct mlx5_vhca_state_notifier *notifier; char wq_name[MLX5_CMD_WQ_MAX_NAME]; struct mlx5_vhca_events *events; int err, i; @@ -160,7 +158,6 @@ int mlx5_vhca_event_init(struct mlx5_core_dev *dev) return -ENOMEM; events->dev = dev; - dev->priv.vhca_events = events; for (i = 0; i < MLX5_DEV_MAX_WQS; i++) { snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i); events->handler[i].wq = create_singlethread_workqueue(wq_name); @@ -169,20 +166,10 @@ int mlx5_vhca_event_init(struct mlx5_core_dev *dev) goto err_create_wq; } } + dev->priv.vhca_events = events; - notifier = kzalloc(sizeof(*notifier), GFP_KERNEL); - if (!notifier) { - err = -ENOMEM; - goto err_notifier; - } - - dev->priv.vhca_state_notifier = notifier; - notifier->dev = dev; - BLOCKING_INIT_NOTIFIER_HEAD(¬ifier->n_head); - MLX5_NB_INIT(¬ifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE); return 0; -err_notifier: err_create_wq: for (--i; i >= 0; i--) destroy_workqueue(events->handler[i].wq); @@ -211,8 +198,6 @@ void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev) if (!mlx5_vhca_event_supported(dev)) return; - kfree(dev->priv.vhca_state_notifier); - dev->priv.vhca_state_notifier = NULL; vhca_events = dev->priv.vhca_events; for (i = 0; i < MLX5_DEV_MAX_WQS; i++) destroy_workqueue(vhca_events->handler[i].wq); @@ -221,34 +206,30 @@ void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev) void mlx5_vhca_event_start(struct mlx5_core_dev *dev) { - struct mlx5_vhca_state_notifier *notifier; - - if (!dev->priv.vhca_state_notifier) + if (!mlx5_vhca_event_supported(dev)) return; - notifier = dev->priv.vhca_state_notifier; - mlx5_eq_notifier_register(dev, ¬ifier->nb); + mlx5_eq_notifier_register(dev, &dev->priv.vhca_state_nb); } void mlx5_vhca_event_stop(struct mlx5_core_dev *dev) { - struct mlx5_vhca_state_notifier *notifier; - - if (!dev->priv.vhca_state_notifier) + if (!mlx5_vhca_event_supported(dev)) return; - notifier = dev->priv.vhca_state_notifier; - mlx5_eq_notifier_unregister(dev, ¬ifier->nb); + mlx5_eq_notifier_unregister(dev, &dev->priv.vhca_state_nb); + + /* Flush workqueues of all pending events. 
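
The vhca event path above keeps its per-function ordering guarantee while still running events concurrently: each event becomes a work item carrying the device pointer, queued on one of MLX5_DEV_MAX_WQS single-threaded workqueues chosen by function_id modulo the pool size, so two events for the same function can never race while events for different functions proceed in parallel. A reduced sketch of the sharding, with foo_* placeholders and an arbitrary pool size of 4:

#include <linux/types.h>
#include <linux/workqueue.h>

#define FOO_MAX_WQS 4

struct foo_event_work {
	struct work_struct work;
	u16 function_id;
};

/* One single-threaded workqueue per shard, created at init time. */
static struct workqueue_struct *foo_wq[FOO_MAX_WQS];

static void foo_queue_event(struct foo_event_work *w)
{
	int idx = w->function_id % FOO_MAX_WQS;

	/* Same function id -> same single-threaded queue -> in order. */
	queue_work(foo_wq[idx], &w->work);
}

The flush added to mlx5_vhca_event_stop() then only has to drain those queues to know that no handler is still running before teardown proceeds.
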
*/ + mlx5_vhca_event_work_queues_flush(dev); } int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb) { - if (!dev->priv.vhca_state_notifier) - return -EOPNOTSUPP; - return blocking_notifier_chain_register(&dev->priv.vhca_state_notifier->n_head, nb); + return blocking_notifier_chain_register(&dev->priv.vhca_state_n_head, + nb); } void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb) { - blocking_notifier_chain_unregister(&dev->priv.vhca_state_notifier->n_head, nb); + blocking_notifier_chain_unregister(&dev->priv.vhca_state_n_head, nb); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h index 1725ba64f8af..52790423874c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h @@ -18,6 +18,7 @@ static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev) } void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap); +void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev); int mlx5_vhca_event_init(struct mlx5_core_dev *dev); void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev); void mlx5_vhca_event_start(struct mlx5_core_dev *dev); @@ -37,6 +38,10 @@ static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *s { } +static inline void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev) +{ +} + static inline int mlx5_vhca_event_init(struct mlx5_core_dev *dev) { return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c index 65740bb68b09..e8c67ed9f748 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c @@ -410,7 +410,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, switch (dmn->type) { case MLX5DR_DOMAIN_TYPE_NIC_RX: if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx)) - return -ENOTSUPP; + return -EOPNOTSUPP; dmn->info.supp_sw_steering = true; dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX; @@ -419,7 +419,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, break; case MLX5DR_DOMAIN_TYPE_NIC_TX: if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx)) - return -ENOTSUPP; + return -EOPNOTSUPP; dmn->info.supp_sw_steering = true; dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX; @@ -428,10 +428,10 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, break; case MLX5DR_DOMAIN_TYPE_FDB: if (!dmn->info.caps.eswitch_manager) - return -ENOTSUPP; + return -EOPNOTSUPP; if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb)) - return -ENOTSUPP; + return -EOPNOTSUPP; dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX; dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 2ed2e530b07d..306affbcfd3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -78,15 +78,14 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, } static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport, - u32 *out) + bool other_vport, u32 *out) { u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {}; MLX5_SET(query_nic_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); - 
if (vport) - MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); + MLX5_SET(query_nic_vport_context_in, in, other_vport, other_vport); return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out); } @@ -97,7 +96,7 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {}; int err; - err = mlx5_query_nic_vport_context(mdev, vport, out); + err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out); if (!err) *min_inline = MLX5_GET(query_nic_vport_context_out, out, nic_vport_context.min_wqe_inline_mode); @@ -219,7 +218,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu) if (!out) return -ENOMEM; - err = mlx5_query_nic_vport_context(mdev, 0, out); + err = mlx5_query_nic_vport_context(mdev, 0, false, out); if (!err) *mtu = MLX5_GET(query_nic_vport_context_out, out, nic_vport_context.mtu); @@ -429,7 +428,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, if (!out) return -ENOMEM; - err = mlx5_query_nic_vport_context(mdev, 0, out); + err = mlx5_query_nic_vport_context(mdev, 0, false, out); if (err) goto out; @@ -451,7 +450,7 @@ int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group) if (!out) return -ENOMEM; - err = mlx5_query_nic_vport_context(mdev, 0, out); + err = mlx5_query_nic_vport_context(mdev, 0, false, out); if (err) goto out; @@ -462,7 +461,8 @@ out: return err; } -int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) +int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, + u16 vport, bool other_vport, u64 *node_guid) { u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); @@ -472,7 +472,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) if (!out) return -ENOMEM; - err = mlx5_query_nic_vport_context(mdev, 0, out); + err = mlx5_query_nic_vport_context(mdev, vport, other_vport, out); if (err) goto out; @@ -529,7 +529,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, if (!out) return -ENOMEM; - err = mlx5_query_nic_vport_context(mdev, 0, out); + err = mlx5_query_nic_vport_context(mdev, 0, false, out); if (err) goto out; @@ -804,7 +804,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, if (!out) return -ENOMEM; - err = mlx5_query_nic_vport_context(mdev, vport, out); + err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out); if (err) goto out; @@ -908,7 +908,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status) if (!out) return -ENOMEM; - err = mlx5_query_nic_vport_context(mdev, 0, out); + err = mlx5_query_nic_vport_context(mdev, 0, false, out); if (err) goto out; @@ -1190,6 +1190,25 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev) } EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid); +void mlx5_query_nic_sw_system_image_guid(struct mlx5_core_dev *mdev, u8 *buf, + u8 *len) +{ + u64 fw_system_image_guid; + + *len = 0; + + fw_system_image_guid = mlx5_query_nic_system_image_guid(mdev); + if (!fw_system_image_guid) + return; + + memcpy(buf, &fw_system_image_guid, sizeof(fw_system_image_guid)); + *len += sizeof(fw_system_image_guid); + + if (MLX5_CAP_GEN_2(mdev, load_balance_id) && + MLX5_CAP_GEN_2(mdev, lag_per_mp_group)) + buf[(*len)++] = MLX5_CAP_GEN_2(mdev, load_balance_id); +} + static bool mlx5_vport_use_vhca_id_as_func_id(struct mlx5_core_dev *dev, u16 vport_num, u16 *vhca_id) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index b1d08e958bf9..69f9da9fb305 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -1489,7 +1489,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); struct mlxsw_sp_acl_tcam *tcam; diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig index 3ba527514f1e..ca5c7ac2a5bc 100644 --- a/drivers/net/ethernet/meta/Kconfig +++ b/drivers/net/ethernet/meta/Kconfig @@ -19,13 +19,14 @@ if NET_VENDOR_META config FBNIC tristate "Meta Platforms Host Network Interface" - depends on X86_64 || COMPILE_TEST + depends on 64BIT || COMPILE_TEST depends on !S390 depends on MAX_SKB_FRAGS < 22 depends on PCI_MSI depends on PTP_1588_CLOCK_OPTIONAL select NET_DEVLINK select PAGE_POOL + select PCS_XPCS select PHYLINK select PLDMFW help diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile index 15e8ff649615..72c41af65364 100644 --- a/drivers/net/ethernet/meta/fbnic/Makefile +++ b/drivers/net/ethernet/meta/fbnic/Makefile @@ -21,6 +21,7 @@ fbnic-y := fbnic_csr.o \ fbnic_pci.o \ fbnic_phylink.o \ fbnic_rpc.o \ + fbnic_mdio.o \ fbnic_time.o \ fbnic_tlv.o \ fbnic_txrx.o \ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h index b03e5a3d5144..779a083b9215 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -34,7 +34,7 @@ struct fbnic_dev { u32 __iomem *uc_addr4; const struct fbnic_mac *mac; unsigned int fw_msix_vector; - unsigned int pcs_msix_vector; + unsigned int mac_msix_vector; unsigned short num_irqs; struct { @@ -83,6 +83,10 @@ struct fbnic_dev { /* Last @time_high refresh time in jiffies (to catch stalls) */ unsigned long last_read; + /* PMD specific data */ + unsigned long end_of_pmd_training; + u8 pmd_state; + /* Local copy of hardware statistics */ struct fbnic_hw_stats hw_stats; @@ -91,6 +95,9 @@ struct fbnic_dev { u64 prev_firmware_time; struct fbnic_fw_log fw_log; + + /* MDIO bus for PHYs */ + struct mii_bus *mdio_bus; }; /* Reserve entry 0 in the MSI-X "others" array until we have filled all @@ -175,8 +182,8 @@ void fbnic_fw_free_mbx(struct fbnic_dev *fbd); void fbnic_hwmon_register(struct fbnic_dev *fbd); void fbnic_hwmon_unregister(struct fbnic_dev *fbd); -int fbnic_pcs_request_irq(struct fbnic_dev *fbd); -void fbnic_pcs_free_irq(struct fbnic_dev *fbd); +int fbnic_mac_request_irq(struct fbnic_dev *fbd); +void fbnic_mac_free_irq(struct fbnic_dev *fbd); void fbnic_napi_name_irqs(struct fbnic_dev *fbd); int fbnic_napi_request_irq(struct fbnic_dev *fbd, @@ -200,6 +207,8 @@ void fbnic_dbg_exit(void); void fbnic_rpc_reset_valid_entries(struct fbnic_dev *fbd); +int fbnic_mdiobus_create(struct fbnic_dev *fbd); + void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version); int fbnic_csr_regs_len(struct fbnic_dev *fbd); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h index d3a7ad921f18..422265dc7abd 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -787,6 +787,8 @@ enum { /* MAC PCS registers */ #define FBNIC_CSR_START_PCS 
0x10000 /* CSR section delimiter */ +#define FBNIC_PCS_PAGE(n) (0x10000 + 0x400 * (n)) /* 0x40000 + 4096*n */ +#define FBNIC_PCS(reg, n) ((reg) + FBNIC_PCS_PAGE(n)) #define FBNIC_CSR_END_PCS 0x10668 /* CSR section delimiter */ #define FBNIC_CSR_START_RSFEC 0x10800 /* CSR section delimiter */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c index 95fac020eb93..693ebdf38705 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c @@ -1863,6 +1863,14 @@ fbnic_get_rmon_stats(struct net_device *netdev, *ranges = fbnic_rmon_ranges; } +static void fbnic_get_link_ext_stats(struct net_device *netdev, + struct ethtool_link_ext_stats *stats) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + stats->link_down_events = fbn->link_down_events; +} + static const struct ethtool_ops fbnic_ethtool_ops = { .cap_link_lanes_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | @@ -1874,6 +1882,7 @@ static const struct ethtool_ops fbnic_ethtool_ops = { .get_regs_len = fbnic_get_regs_len, .get_regs = fbnic_get_regs, .get_link = ethtool_op_get_link, + .get_link_ext_stats = fbnic_get_link_ext_stats, .get_coalesce = fbnic_get_coalesce, .set_coalesce = fbnic_set_coalesce, .get_ringparam = fbnic_get_ringparam, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c index fcd9912e7ad3..d8d9b6cfde82 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -878,11 +878,11 @@ msg_err: * @fbd: FBNIC device structure * @cmpl_data: Completion struct to store coredump * @offset: Offset into coredump requested - * @length: Length of section of cordeump to fetch + * @length: Length of section of coredump to fetch * * Return: zero on success, negative errno on failure * - * Asks the firmware to provide a section of the cordeump back in a message. + * Asks the firmware to provide a section of the coredump back in a message. * The response will have an offset and size matching the values provided. 
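 * * A minimal usage sketch (hypothetical caller; cmpl_data setup and completion * waiting are elided): * *	err = fbnic_fw_xmit_coredump_read_msg(fbd, cmpl_data, offset, len); *	if (err) *		return err;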
*/ int fbnic_fw_xmit_coredump_read_msg(struct fbnic_dev *fbd, @@ -1868,7 +1868,7 @@ int fbnic_fw_xmit_rpc_macda_sync(struct fbnic_dev *fbd) if (err) goto free_message; - /* Send message of to FW notifying it of current RPC config */ + /* Send message off to FW notifying it of current RPC config */ err = fbnic_mbx_map_tlv_msg(fbd, msg); if (err) goto free_message; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c index 1c88a2bf3a7a..02e8b0b257fe 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c @@ -118,12 +118,12 @@ void fbnic_fw_free_mbx(struct fbnic_dev *fbd) fbd->fw_msix_vector = 0; } -static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) +static irqreturn_t fbnic_mac_msix_intr(int __always_unused irq, void *data) { struct fbnic_dev *fbd = data; struct fbnic_net *fbn; - if (fbd->mac->pcs_get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) { + if (fbd->mac->get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) { fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_PCS_MSIX_ENTRY); return IRQ_HANDLED; @@ -131,26 +131,28 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) fbn = netdev_priv(fbd->netdev); - phylink_pcs_change(&fbn->phylink_pcs, false); + /* Record link down events */ + if (!fbd->mac->get_link(fbd, fbn->aui, fbn->fec)) + phylink_pcs_change(fbn->pcs, false); return IRQ_HANDLED; } /** - * fbnic_pcs_request_irq - Configure the PCS to enable it to advertise link + * fbnic_mac_request_irq - Configure the MAC to enable it to advertise link * @fbd: Pointer to device to initialize * - * This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ + * This function provides basic bringup for the MAC/PHY IRQ. For now the IRQ * will remain disabled until we start the MAC/PCS/PHY logic via phylink. * * Return: non-zero on failure. **/ -int fbnic_pcs_request_irq(struct fbnic_dev *fbd) +int fbnic_mac_request_irq(struct fbnic_dev *fbd) { struct pci_dev *pdev = to_pci_dev(fbd->dev); int vector, err; - WARN_ON(fbd->pcs_msix_vector); + WARN_ON(fbd->mac_msix_vector); vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY); if (vector < 0) @@ -159,7 +161,7 @@ int fbnic_pcs_request_irq(struct fbnic_dev *fbd) /* Request the IRQ for PCS link vector. * Map PCS cause to it, and unmask it */ - err = request_irq(vector, &fbnic_pcs_msix_intr, 0, + err = request_irq(vector, &fbnic_mac_msix_intr, 0, fbd->netdev->name, fbd); if (err) return err; @@ -168,22 +170,22 @@ int fbnic_pcs_request_irq(struct fbnic_dev *fbd) fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX), FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE); - fbd->pcs_msix_vector = vector; + fbd->mac_msix_vector = vector; return 0; } /** - * fbnic_pcs_free_irq - Teardown the PCS IRQ to prepare for stopping + * fbnic_mac_free_irq - Teardown the MAC IRQ to prepare for stopping * @fbd: Pointer to device that is stopping * - * This function undoes the work done in fbnic_pcs_request_irq and prepares + * This function undoes the work done in fbnic_mac_request_irq and prepares * the device to no longer receive traffic on the host interface. 
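 * * The teardown order matters: the interrupt is disabled and synchronize_irq() * is called before the vector is masked and freed, so an in-flight handler * cannot re-unmask the vector mid-teardown.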
**/ -void fbnic_pcs_free_irq(struct fbnic_dev *fbd) +void fbnic_mac_free_irq(struct fbnic_dev *fbd) { /* Vector has already been freed */ - if (!fbd->pcs_msix_vector) + if (!fbd->mac_msix_vector) return; /* Disable interrupt */ @@ -192,14 +194,14 @@ void fbnic_pcs_free_irq(struct fbnic_dev *fbd) fbnic_wrfl(fbd); /* Synchronize IRQ to prevent race that would unmask vector */ - synchronize_irq(fbd->pcs_msix_vector); + synchronize_irq(fbd->mac_msix_vector); /* Mask the vector */ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY); /* Free the vector */ - free_irq(fbd->pcs_msix_vector, fbd); - fbd->pcs_msix_vector = 0; + free_irq(fbd->mac_msix_vector, fbd); + fbd->mac_msix_vector = 0; } void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c index 2a84bd1d7e26..fc7abea4ef5b 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -434,14 +434,14 @@ static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause) wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl); } -static int fbnic_pcs_get_link_event_asic(struct fbnic_dev *fbd) +static int fbnic_mac_get_link_event(struct fbnic_dev *fbd) { - u32 pcs_intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS); + u32 intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS); - if (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN) + if (intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN) return FBNIC_LINK_EVENT_DOWN; - return (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ? + return (intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ? FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE; } @@ -466,9 +466,8 @@ static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd, return command_config; } -static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd) +static bool fbnic_mac_get_link_status(struct fbnic_dev *fbd, u8 aui, u8 fec) { - struct fbnic_net *fbn = netdev_priv(fbd->netdev); u32 pcs_status, lane_mask = ~0; pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0); @@ -476,7 +475,7 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd) return false; /* Define the expected lane mask for the status bits we need to check */ - switch (fbn->aui) { + switch (aui) { case FBNIC_AUI_100GAUI2: lane_mask = 0xf; break; @@ -484,7 +483,7 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd) lane_mask = 3; break; case FBNIC_AUI_LAUI2: - switch (fbn->fec) { + switch (fec) { case FBNIC_FEC_OFF: lane_mask = 0x63; break; @@ -502,7 +501,7 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd) } /* Use an XOR to remove the bits we expect to see set */ - switch (fbn->fec) { + switch (fec) { case FBNIC_FEC_OFF: lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK, pcs_status); @@ -521,7 +520,46 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd) return !lane_mask; } -static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd) +static bool fbnic_pmd_update_state(struct fbnic_dev *fbd, bool signal_detect) +{ + /* Delay link up for 4 seconds to allow for link training. 
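+ * The 4 second window is tracked via the end_of_pmd_training jiffies + * timestamp, armed when we first enter FBNIC_PMD_TRAINING below. + *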
+ * The state transitions for this are as follows: + * + * All states have the following two transitions in common: + * Loss of signal -> FBNIC_PMD_INITIALIZE + * The condition handled below (!signal) + * Reconfiguration -> FBNIC_PMD_INITIALIZE + * Occurs when mac_prepare starts a PHY reconfig + * FBNIC_PMD_TRAINING: + * signal still detected && 4s have passed -> Report link up + * When link is brought up in link_up -> FBNIC_PMD_SEND_DATA + * FBNIC_PMD_INITIALIZE: + * signal detected -> FBNIC_PMD_TRAINING + */ + if (!signal_detect) { + fbd->pmd_state = FBNIC_PMD_INITIALIZE; + return false; + } + + switch (fbd->pmd_state) { + case FBNIC_PMD_TRAINING: + return time_before(fbd->end_of_pmd_training, jiffies); + case FBNIC_PMD_LINK_READY: + case FBNIC_PMD_SEND_DATA: + return true; + } + + fbd->end_of_pmd_training = jiffies + 4 * HZ; + + /* Ensure end_of_training is visible before the state change */ + smp_wmb(); + + fbd->pmd_state = FBNIC_PMD_TRAINING; + + return false; +} + +static bool fbnic_mac_get_link(struct fbnic_dev *fbd, u8 aui, u8 fec) { bool link; @@ -538,7 +576,8 @@ static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd) wr32(fbd, FBNIC_SIG_PCS_INTR_STS, FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP); - link = fbnic_mac_get_pcs_link_status(fbd); + link = fbnic_mac_get_link_status(fbd, aui, fec); + link = fbnic_pmd_update_state(fbd, link); /* Enable interrupt to only capture changes in link state */ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, @@ -586,20 +625,15 @@ void fbnic_mac_get_fw_settings(struct fbnic_dev *fbd, u8 *aui, u8 *fec) } } -static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd) +static void fbnic_mac_prepare(struct fbnic_dev *fbd, u8 aui, u8 fec) { /* Mask and clear the PCS interrupt, will be enabled by link handler */ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0); wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0); - return 0; -} - -static void fbnic_pcs_disable_asic(struct fbnic_dev *fbd) -{ - /* Mask and clear the PCS interrupt */ - wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0); - wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0); + /* If we don't have link tear it all down and start over */ + if (!fbnic_mac_get_link_status(fbd, aui, fec)) + fbd->pmd_state = FBNIC_PMD_INITIALIZE; } static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd) @@ -867,10 +901,9 @@ exit_free: static const struct fbnic_mac fbnic_mac_asic = { .init_regs = fbnic_mac_init_regs, - .pcs_enable = fbnic_pcs_enable_asic, - .pcs_disable = fbnic_pcs_disable_asic, - .pcs_get_link = fbnic_pcs_get_link_asic, - .pcs_get_link_event = fbnic_pcs_get_link_event_asic, + .get_link = fbnic_mac_get_link, + .get_link_event = fbnic_mac_get_link_event, + .prepare = fbnic_mac_prepare, .get_fec_stats = fbnic_mac_get_fec_stats, .get_pcs_stats = fbnic_mac_get_pcs_stats, .get_eth_mac_stats = fbnic_mac_get_eth_mac_stats, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h index ede5ff0dae22..f08fe8b7c497 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h @@ -10,6 +10,24 @@ struct fbnic_dev; #define FBNIC_MAX_JUMBO_FRAME_SIZE 9742 +/* States loosely based on section 136.8.11.7.5 of IEEE 802.3-2022 Ethernet + * Standard. These are needed to track the state of the PHY as it has a delay + * of several seconds from the time link comes up until it has completed + * training that we need to wait to report the link. + * + * Currently we treat training as a single block as this is managed by the + * firmware. 
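+ * + * A rough sketch of the intended flow (illustrative only): INITIALIZE -> + * (signal detected) TRAINING -> (4s elapsed) LINK_READY -> SEND_DATA, with + * loss of signal returning any state to INITIALIZE.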
+ * + * We have FBNIC_PMD_SEND_DATA set to 0 as the expected default at driver load + * and we initialize the structure containing it to zero at allocation. + */ +enum { + FBNIC_PMD_SEND_DATA = 0x0, + FBNIC_PMD_INITIALIZE = 0x1, + FBNIC_PMD_TRAINING = 0x2, + FBNIC_PMD_LINK_READY = 0x3, +}; + enum { FBNIC_LINK_EVENT_NONE = 0, FBNIC_LINK_EVENT_UP = 1, @@ -38,6 +56,7 @@ enum { FBNIC_AUI_50GAUI1 = 2, /* 53.125GBd 53.125 * 1 */ FBNIC_AUI_100GAUI2 = 3, /* 106.25GBd 53.125 * 2 */ FBNIC_AUI_UNKNOWN = 4, + __FBNIC_AUI_MAX__ }; #define FBNIC_AUI_MODE_R2 (FBNIC_AUI_LAUI2) @@ -55,15 +74,15 @@ enum fbnic_sensor_id { * void (*init_regs)(struct fbnic_dev *fbd); * Initialize MAC registers to enable Tx/Rx paths and FIFOs. * - * void (*pcs_enable)(struct fbnic_dev *fbd); - * Configure and enable PCS to enable link if not already enabled - * void (*pcs_disable)(struct fbnic_dev *fbd); - * Shutdown the link if we are the only consumer of it. - * bool (*pcs_get_link)(struct fbnic_dev *fbd); - * Check PCS link status - * int (*pcs_get_link_event)(struct fbnic_dev *fbd) + * int (*get_link_event)(struct fbnic_dev *fbd) * Get the current link event status, reports true if link has * changed to either FBNIC_LINK_EVENT_DOWN or FBNIC_LINK_EVENT_UP + * bool (*get_link)(struct fbnic_dev *fbd, u8 aui, u8 fec); + * Check link status + * + * void (*prepare)(struct fbnic_dev *fbd, u8 aui, u8 fec); + * Prepare PHY for init by fetching settings, disabling interrupts, + * and sending an updated PHY config to FW if needed. * * void (*link_down)(struct fbnic_dev *fbd); * Configure MAC for link down event @@ -74,10 +93,10 @@ enum fbnic_sensor_id { struct fbnic_mac { void (*init_regs)(struct fbnic_dev *fbd); - int (*pcs_enable)(struct fbnic_dev *fbd); - void (*pcs_disable)(struct fbnic_dev *fbd); - bool (*pcs_get_link)(struct fbnic_dev *fbd); - int (*pcs_get_link_event)(struct fbnic_dev *fbd); + int (*get_link_event)(struct fbnic_dev *fbd); + bool (*get_link)(struct fbnic_dev *fbd, u8 aui, u8 fec); + + void (*prepare)(struct fbnic_dev *fbd, u8 aui, u8 fec); void (*get_fec_stats)(struct fbnic_dev *fbd, bool reset, struct fbnic_fec_stats *fec_stats); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c b/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c new file mode 100644 index 000000000000..709041f7fc43 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#include <linux/mdio.h> +#include <linux/pcs/pcs-xpcs.h> + +#include "fbnic.h" +#include "fbnic_netdev.h" + +#define DW_VENDOR BIT(15) +#define FBNIC_PCS_VENDOR BIT(9) +#define FBNIC_PCS_ZERO_MASK (DW_VENDOR - FBNIC_PCS_VENDOR) + +static int +fbnic_mdio_read_pmd(struct fbnic_dev *fbd, int addr, int regnum) +{ + u8 aui = FBNIC_AUI_UNKNOWN; + struct fbnic_net *fbn; + int ret = 0; + + /* We don't need a second PMD, just one can handle both lanes */ + if (addr) + return 0; + + if (fbd->netdev) { + fbn = netdev_priv(fbd->netdev); + if (fbn->aui < FBNIC_AUI_UNKNOWN) + aui = fbn->aui; + } + + switch (regnum) { + case MDIO_DEVID1: + ret = MP_FBNIC_XPCS_PMA_100G_ID >> 16; + break; + case MDIO_DEVID2: + ret = MP_FBNIC_XPCS_PMA_100G_ID & 0xffff; + break; + case MDIO_DEVS1: + ret = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS; + break; + case MDIO_STAT2: + ret = MDIO_STAT2_DEVPRST_VAL; + break; + case MDIO_PMA_RXDET: + /* If training isn't complete default to 0 */ + if (fbd->pmd_state != FBNIC_PMD_SEND_DATA) + break; + /* Report either 1 or 2 lanes detected depending on config */ + ret = (MDIO_PMD_RXDET_GLOBAL | MDIO_PMD_RXDET_0) | + ((aui & FBNIC_AUI_MODE_R2) * + (MDIO_PMD_RXDET_1 / FBNIC_AUI_MODE_R2)); + break; + default: + break; + } + + dev_dbg(fbd->dev, + "SWMII PMD Rd: Addr: %d RegNum: %d Value: 0x%04x\n", + addr, regnum, ret); + + return ret; +} + +static int +fbnic_mdio_read_pcs(struct fbnic_dev *fbd, int addr, int regnum) +{ + int ret, offset = 0; + + /* We will need access to both PCS instances to get config info */ + if (addr >= 2) + return 0; + + /* Report 0 for reserved registers */ + if (regnum & FBNIC_PCS_ZERO_MASK) + return 0; + + /* Intercept and return correct ID for PCS */ + if (regnum == MDIO_DEVID1) + return DW_XPCS_ID >> 16; + if (regnum == MDIO_DEVID2) + return DW_XPCS_ID & 0xffff; + if (regnum == MDIO_DEVS1) + return MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS; + + /* Swap vendor page bit for FBNIC PCS vendor page bit */ + if (regnum & DW_VENDOR) + offset ^= DW_VENDOR | FBNIC_PCS_VENDOR; + + ret = fbnic_rd32(fbd, FBNIC_PCS_PAGE(addr) + (regnum ^ offset)); + + dev_dbg(fbd->dev, + "SWMII PCS Rd: Addr: %d RegNum: %d Value: 0x%04x\n", + addr, regnum, ret); + + return ret; +} + +static int +fbnic_mdio_read_c45(struct mii_bus *bus, int addr, int devnum, int regnum) +{ + struct fbnic_dev *fbd = bus->priv; + + if (devnum == MDIO_MMD_PMAPMD) + return fbnic_mdio_read_pmd(fbd, addr, regnum); + + if (devnum == MDIO_MMD_PCS) + return fbnic_mdio_read_pcs(fbd, addr, regnum); + + return 0; +} + +static void +fbnic_mdio_write_pmd(struct fbnic_dev *fbd, int addr, int regnum, u16 val) +{ + dev_dbg(fbd->dev, + "SWMII PMD Wr: Addr: %d RegNum: %d Value: 0x%04x\n", + addr, regnum, val); +} + +static void +fbnic_mdio_write_pcs(struct fbnic_dev *fbd, int addr, int regnum, u16 val) +{ + dev_dbg(fbd->dev, + "SWMII PCS Wr: Addr: %d RegNum: %d Value: 0x%04x\n", + addr, regnum, val); + + /* Allow access to both halves of PCS for 50R2 config */ + if (addr > 2) + return; + + /* Skip write for reserved registers */ + if (regnum & FBNIC_PCS_ZERO_MASK) + return; + + /* Swap vendor page bit for FBNIC PCS vendor page bit */ + if (regnum & DW_VENDOR) + regnum ^= DW_VENDOR | FBNIC_PCS_VENDOR; + + fbnic_wr32(fbd, FBNIC_PCS_PAGE(addr) + regnum, val); +} + +static int +fbnic_mdio_write_c45(struct mii_bus *bus, int addr, int devnum, + int regnum, u16 val) +{ + struct fbnic_dev *fbd = bus->priv; + + if (devnum == MDIO_MMD_PMAPMD) + fbnic_mdio_write_pmd(fbd, addr, regnum, val); + + if (devnum == MDIO_MMD_PCS) + fbnic_mdio_write_pcs(fbd, 
addr, regnum, val); + + return 0; +} + +/** + * fbnic_mdiobus_create - Create an MDIO bus to allow interfacing w/ PHYs + * @fbd: Pointer to FBNIC device structure to populate bus on + * + * Initialize an MDIO bus and place a pointer to it on the fbd struct. This bus + * will be used to interface with the PMA/PMD and PCS. + * + * Return: 0 on success, negative on failure + **/ +int fbnic_mdiobus_create(struct fbnic_dev *fbd) +{ + struct mii_bus *bus; + int err; + + bus = devm_mdiobus_alloc(fbd->dev); + if (!bus) + return -ENOMEM; + + bus->name = "fbnic_mii_bus"; + bus->read_c45 = &fbnic_mdio_read_c45; + bus->write_c45 = &fbnic_mdio_write_c45; + + /* Disable PHY auto probing. We will add PCS manually */ + bus->phy_mask = ~0; + + bus->parent = fbd->dev; + bus->priv = fbd; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(fbd->dev)); + + err = devm_mdiobus_register(fbd->dev, bus); + if (err) { + dev_err(fbd->dev, "Failed to create MDIO bus: %d\n", err); + return err; + } + + fbd->mdio_bus = bus; + + return 0; +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index e95be0e7bd9e..81c9d5c9a4b2 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -44,7 +44,7 @@ int __fbnic_open(struct fbnic_net *fbn) if (err) goto time_stop; - err = fbnic_pcs_request_irq(fbd); + err = fbnic_mac_request_irq(fbd); if (err) goto time_stop; @@ -86,10 +86,10 @@ static int fbnic_stop(struct net_device *netdev) { struct fbnic_net *fbn = netdev_priv(netdev); + fbnic_mac_free_irq(fbn->fbd); phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd)); fbnic_down(fbn); - fbnic_pcs_free_irq(fbn->fbd); fbnic_time_stop(fbn); fbnic_fw_xmit_ownership_msg(fbn->fbd, false); @@ -697,10 +697,7 @@ void fbnic_reset_queues(struct fbnic_net *fbn, **/ void fbnic_netdev_free(struct fbnic_dev *fbd) { - struct fbnic_net *fbn = netdev_priv(fbd->netdev); - - if (fbn->phylink) - phylink_destroy(fbn->phylink); + fbnic_phylink_destroy(fbd->netdev); free_netdev(fbd->netdev); fbd->netdev = NULL; @@ -802,7 +799,7 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd) netif_tx_stop_all_queues(netdev); - if (fbnic_phylink_init(netdev)) { + if (fbnic_phylink_create(netdev)) { fbnic_netdev_free(fbd); return NULL; } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h index b0a87c57910f..9129a658f8fa 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h @@ -44,7 +44,7 @@ struct fbnic_net { struct phylink *phylink; struct phylink_config phylink_config; - struct phylink_pcs phylink_pcs; + struct phylink_pcs *pcs; u8 aui; u8 fec; @@ -73,6 +73,8 @@ struct fbnic_net { /* Time stamping filter config */ struct kernel_hwtstamp_config hwtstamp_config; + + bool tx_pause; }; int __fbnic_open(struct fbnic_net *fbn); @@ -106,8 +108,10 @@ int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev, struct ethtool_link_ksettings *cmd); int fbnic_phylink_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam); +int fbnic_phylink_create(struct net_device *netdev); +void fbnic_phylink_destroy(struct net_device *netdev); int fbnic_phylink_init(struct net_device *netdev); - +void fbnic_phylink_pmd_training_complete_notify(struct net_device *netdev); bool fbnic_check_split_frames(struct bpf_prog *prog, unsigned int mtu, u32 hds_threshold); #endif /* _FBNIC_NETDEV_H_ */ diff --git 
a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c index a7a6b4db8016..861d98099c44 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c @@ -185,7 +185,7 @@ static void fbnic_health_check(struct fbnic_dev *fbd) { struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; - /* As long as the heart is beating the FW is healty */ + /* As long as the heart is beating the FW is healthy */ if (fbd->fw_heartbeat_enabled) return; @@ -196,7 +196,7 @@ static void fbnic_health_check(struct fbnic_dev *fbd) if (tx_mbx->head != tx_mbx->tail) return; - fbnic_devlink_fw_report(fbd, "Firmware crashed detected!"); + fbnic_devlink_fw_report(fbd, "Firmware crash detected!"); fbnic_devlink_otp_check(fbd, "error detected after firmware recovery"); if (fbnic_fw_config_after_crash(fbd)) @@ -207,6 +207,10 @@ static void fbnic_service_task(struct work_struct *work) { struct fbnic_dev *fbd = container_of(to_delayed_work(work), struct fbnic_dev, service_task); + struct net_device *netdev = fbd->netdev; + + if (netif_running(netdev)) + fbnic_phylink_pmd_training_complete_notify(netdev); rtnl_lock(); @@ -224,7 +228,7 @@ static void fbnic_service_task(struct work_struct *work) netdev_unlock(fbd->netdev); } - if (netif_running(fbd->netdev)) + if (netif_running(netdev)) schedule_delayed_work(&fbd->service_task, HZ); rtnl_unlock(); @@ -335,6 +339,9 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto init_failure_mode; } + if (fbnic_mdiobus_create(fbd)) + goto init_failure_mode; + netdev = fbnic_netdev_alloc(fbd); if (!netdev) { dev_err(&pdev->dev, "Netdev allocation failed\n"); @@ -378,7 +385,7 @@ free_fbd: * @pdev: PCI device information struct * * Called by the PCI subsystem to alert the driver that it should release - * a PCI device. The could be caused by a Hot-Plug event, or because the + * a PCI device. This could be caused by a Hot-Plug event, or because the * driver is going to be removed from memory. **/ static void fbnic_remove(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c index 7ce3fdd25282..09c5225111be 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ +#include <linux/pcs/pcs-xpcs.h> #include <linux/phy.h> #include <linux/phylink.h> @@ -101,88 +102,47 @@ int fbnic_phylink_get_fecparam(struct net_device *netdev, return 0; } -static struct fbnic_net * -fbnic_pcs_to_net(struct phylink_pcs *pcs) -{ - return container_of(pcs, struct fbnic_net, phylink_pcs); -} - -static void -fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, - struct phylink_link_state *state) +static struct phylink_pcs * +fbnic_phylink_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) { - struct fbnic_net *fbn = fbnic_pcs_to_net(pcs); - struct fbnic_dev *fbd = fbn->fbd; - - switch (fbn->aui) { - case FBNIC_AUI_25GAUI: - state->speed = SPEED_25000; - break; - case FBNIC_AUI_LAUI2: - case FBNIC_AUI_50GAUI1: - state->speed = SPEED_50000; - break; - case FBNIC_AUI_100GAUI2: - state->speed = SPEED_100000; - break; - default: - state->link = 0; - return; - } - - state->duplex = DUPLEX_FULL; + struct net_device *netdev = to_net_dev(config->dev); + struct fbnic_net *fbn = netdev_priv(netdev); - state->link = fbd->mac->pcs_get_link(fbd); + return fbn->pcs; } static int -fbnic_phylink_pcs_enable(struct phylink_pcs *pcs) +fbnic_phylink_mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t iface) { - struct fbnic_net *fbn = fbnic_pcs_to_net(pcs); + struct net_device *netdev = to_net_dev(config->dev); + struct fbnic_net *fbn = netdev_priv(netdev); struct fbnic_dev *fbd = fbn->fbd; - return fbd->mac->pcs_enable(fbd); + fbd->mac->prepare(fbd, fbn->aui, fbn->fec); + + return 0; } static void -fbnic_phylink_pcs_disable(struct phylink_pcs *pcs) +fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) { - struct fbnic_net *fbn = fbnic_pcs_to_net(pcs); - struct fbnic_dev *fbd = fbn->fbd; - - return fbd->mac->pcs_disable(fbd); } static int -fbnic_phylink_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, - phy_interface_t interface, - const unsigned long *advertising, - bool permit_pause_to_mac) -{ - return 0; -} - -static const struct phylink_pcs_ops fbnic_phylink_pcs_ops = { - .pcs_config = fbnic_phylink_pcs_config, - .pcs_enable = fbnic_phylink_pcs_enable, - .pcs_disable = fbnic_phylink_pcs_disable, - .pcs_get_state = fbnic_phylink_pcs_get_state, -}; - -static struct phylink_pcs * -fbnic_phylink_mac_select_pcs(struct phylink_config *config, - phy_interface_t interface) +fbnic_phylink_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t iface) { struct net_device *netdev = to_net_dev(config->dev); struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; - return &fbn->phylink_pcs; -} + /* Retest the link state and restart interrupts */ + fbd->mac->get_link(fbd, fbn->aui, fbn->fec); -static void -fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode, - const struct phylink_link_state *state) -{ + return 0; } static void @@ -208,23 +168,48 @@ fbnic_phylink_mac_link_up(struct phylink_config *config, struct fbnic_net *fbn = netdev_priv(netdev); struct fbnic_dev *fbd = fbn->fbd; + fbn->tx_pause = tx_pause; + fbnic_config_drop_mode(fbn, tx_pause); + fbd->mac->link_up(fbd, tx_pause, rx_pause); } static const struct phylink_mac_ops fbnic_phylink_mac_ops = { .mac_select_pcs = fbnic_phylink_mac_select_pcs, + .mac_prepare = fbnic_phylink_mac_prepare, .mac_config = fbnic_phylink_mac_config, + .mac_finish = fbnic_phylink_mac_finish, .mac_link_down = fbnic_phylink_mac_link_down, .mac_link_up 
= fbnic_phylink_mac_link_up, }; -int fbnic_phylink_init(struct net_device *netdev) +/** + * fbnic_phylink_create - Phylink device creation + * @netdev: Network Device struct to attach phylink device + * + * Initialize and attach a phylink instance to the device. The phylink + * device will make use of the netdev struct to track carrier and will + * eventually be used to expose the current state of the MAC and PCS + * setup. + * + * Return: 0 on success, negative on failure + **/ +int fbnic_phylink_create(struct net_device *netdev) { struct fbnic_net *fbn = netdev_priv(netdev); struct fbnic_dev *fbd = fbn->fbd; + struct phylink_pcs *pcs; struct phylink *phylink; + int err; + + pcs = xpcs_create_pcs_mdiodev(fbd->mdio_bus, 0); + if (IS_ERR(pcs)) { + err = PTR_ERR(pcs); + dev_err(fbd->dev, "Failed to create PCS device: %d\n", err); + return err; + } - fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops; + fbn->pcs = pcs; fbn->phylink_config.dev = &netdev->dev; fbn->phylink_config.type = PHYLINK_NETDEV; @@ -247,10 +232,80 @@ int fbnic_phylink_init(struct net_device *netdev) phylink = phylink_create(&fbn->phylink_config, NULL, fbnic_phylink_select_interface(fbn->aui), &fbnic_phylink_mac_ops); - if (IS_ERR(phylink)) - return PTR_ERR(phylink); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + dev_err(netdev->dev.parent, + "Failed to create Phylink interface, err: %d\n", err); + xpcs_destroy_pcs(pcs); + return err; + } fbn->phylink = phylink; return 0; } + +/** + * fbnic_phylink_destroy - Teardown phylink related interfaces + * @netdev: Network Device struct containing phylink device + * + * Detach and free resources related to phylink interface. + **/ +void fbnic_phylink_destroy(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + if (fbn->phylink) + phylink_destroy(fbn->phylink); + if (fbn->pcs) + xpcs_destroy_pcs(fbn->pcs); +} + +/** + * fbnic_phylink_pmd_training_complete_notify - PMD training complete notifier + * @netdev: Netdev struct phylink device attached to + * + * When the link first comes up the PMD will have a period of 2 to 3 seconds + * where the link will flutter due to link training. To avoid spamming the + * kernel log with messages about this we add a delay of 4 seconds from the + * time of the last PCS report of link so that we can guarantee we are unlikely + * to see any further link loss events due to link training. + **/ +void fbnic_phylink_pmd_training_complete_notify(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + + if (fbd->pmd_state != FBNIC_PMD_TRAINING) + return; + + /* Prevent reading end_of_pmd_training until we verified state */ + smp_rmb(); + + if (!time_before(READ_ONCE(fbd->end_of_pmd_training), jiffies)) + return; + + /* At this point we have verified that the link has been up for + * the full training duration. As a first step we will try + * transitioning to link ready. + */ + if (cmpxchg(&fbd->pmd_state, FBNIC_PMD_TRAINING, + FBNIC_PMD_LINK_READY) != FBNIC_PMD_TRAINING) + return; + + /* Perform a follow-up check to verify that the link didn't flap + * just before our transition by rechecking the training timer. + */ + if (!time_before(READ_ONCE(fbd->end_of_pmd_training), jiffies)) + return; + + /* The training timeout has been completed. We are good to swap out + * link_ready for send_data assuming no other events have occurred + * that would have pulled us back into initialization or training. 
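+ * Note the link-check path only ever moves pmd_state back to + * FBNIC_PMD_INITIALIZE on signal loss, so if either cmpxchg() fails we bail + * out and let a later service task pass retry once training has re-run.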
+ */ + if (cmpxchg(&fbd->pmd_state, FBNIC_PMD_LINK_READY, + FBNIC_PMD_SEND_DATA) != FBNIC_PMD_LINK_READY) + return; + + phylink_pcs_change(fbn->pcs, false); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_time.c b/drivers/net/ethernet/meta/fbnic/fbnic_time.c index 39d99677b71e..db7748189f45 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_time.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_time.c @@ -253,7 +253,7 @@ static void fbnic_ptp_reset(struct fbnic_dev *fbd) void fbnic_time_init(struct fbnic_net *fbn) { - /* This is not really a statistic, but the lockng primitive fits + /* This is not really a statistic, but the locking primitive fits * our usecase perfectly, we need an atomic 8 bytes READ_ONCE() / * WRITE_ONCE() behavior. */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h index c34bf87eeec9..3508b46ebdd0 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h @@ -80,7 +80,7 @@ struct fbnic_tlv_index { enum fbnic_tlv_type type; }; -#define TLV_MAX_DATA (PAGE_SIZE - 512) +#define TLV_MAX_DATA ((PAGE_SIZE - 512) & 0xFFFF) #define FBNIC_TLV_ATTR_ID_UNKNOWN USHRT_MAX #define FBNIC_TLV_ATTR_STRING(id, len) { id, len, FBNIC_TLV_STRING } #define FBNIC_TLV_ATTR_FLAG(id) { id, 0, FBNIC_TLV_FLAG } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c index b1e8ce89870f..13d508ce637f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c @@ -653,7 +653,8 @@ static void fbnic_clean_twq1(struct fbnic_napi_vector *nv, bool pp_allow_direct, FBNIC_TWD_TYPE_AL; total_bytes += FIELD_GET(FBNIC_TWD_LEN_MASK, twd); - page_pool_put_page(page->pp, page, -1, pp_allow_direct); + page_pool_put_page(pp_page_to_nmdesc(page)->pp, page, -1, + pp_allow_direct); next_desc: head++; head &= ring->size_mask; @@ -887,6 +888,7 @@ static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, netmem_ref netmem) *bdq_desc = cpu_to_le64(bd); bd += FIELD_PREP(FBNIC_BD_DESC_ADDR_MASK, 1) | FIELD_PREP(FBNIC_BD_DESC_ID_MASK, 1); + bdq_desc++; } while (--i); } @@ -1806,7 +1808,7 @@ int fbnic_alloc_napi_vectors(struct fbnic_net *fbn) free_vectors: fbnic_free_napi_vectors(fbn); - return -ENOMEM; + return err; } static void fbnic_free_ring_resources(struct device *dev, @@ -2573,11 +2575,15 @@ write_ctl: } static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv, - struct fbnic_ring *rcq) + struct fbnic_ring *rcq, bool tx_pause) { + struct fbnic_net *fbn = netdev_priv(nv->napi.dev); u32 drop_mode, rcq_ctl; - drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE; + if (!tx_pause && fbn->num_rx_queues > 1) + drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE; + else + drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_NEVER; /* Specify packet layout */ rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) | @@ -2587,6 +2593,21 @@ static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv, fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl); } +void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause) +{ + int i, t; + + for (i = 0; i < fbn->num_napi; i++) { + struct fbnic_napi_vector *nv = fbn->napi[i]; + + for (t = 0; t < nv->rxt_count; t++) { + struct fbnic_q_triad *qt = &nv->qt[nv->txt_count + t]; + + fbnic_config_drop_mode_rcq(nv, &qt->cmpl, tx_pause); + } + } +} + static void fbnic_config_rim_threshold(struct fbnic_ring *rcq, u16 nv_idx, u32 rx_desc) { u32 threshold; @@ -2636,7 +2657,7 @@ static 
void fbnic_enable_rcq(struct fbnic_napi_vector *nv, u32 hds_thresh = fbn->hds_thresh; u32 rcq_ctl = 0; - fbnic_config_drop_mode_rcq(nv, rcq); + fbnic_config_drop_mode_rcq(nv, rcq, fbn->tx_pause); /* Force lower bound on MAX_HEADER_BYTES. Below this, all frames should * be split at L4. It would also result in the frames being split at @@ -2699,7 +2720,6 @@ static void __fbnic_nv_enable(struct fbnic_napi_vector *nv) &nv->napi); fbnic_enable_bdq(&qt->sub0, &qt->sub1); - fbnic_config_drop_mode_rcq(nv, &qt->cmpl); fbnic_enable_rcq(nv, &qt->cmpl); } } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h index ca37da5a0b17..27776e844e29 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h @@ -184,6 +184,7 @@ void fbnic_reset_netif_queues(struct fbnic_net *fbn); irqreturn_t fbnic_msix_clean_rings(int irq, void *data); void fbnic_napi_enable(struct fbnic_net *fbn); void fbnic_napi_disable(struct fbnic_net *fbn); +void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause); void fbnic_enable(struct fbnic_net *fbn); void fbnic_disable(struct fbnic_net *fbn); void fbnic_flush(struct fbnic_net *fbn); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 40b1bfc600a7..582145713cfd 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -395,6 +395,8 @@ static int sparx5_create_port(struct sparx5 *sparx5, spx5_port->phylink = phylink; + spx5_port->ndev->dev.of_node = spx5_port->of_node; + return 0; } diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 43f034e180c4..efb4e412ec7e 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -15,6 +15,20 @@ struct dentry *mana_debugfs_root; +struct mana_dev_recovery { + struct list_head list; + struct pci_dev *pdev; + enum gdma_eqe_type type; +}; + +static struct mana_dev_recovery_work { + struct list_head dev_list; + struct delayed_work work; + + /* Lock for dev_list above */ + spinlock_t lock; +} mana_dev_recovery_work; + static u32 mana_gd_r32(struct gdma_context *g, u64 offset) { return readl(g->bar0_va + offset); @@ -387,6 +401,25 @@ EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA"); #define MANA_SERVICE_PERIOD 10 +static void mana_serv_rescan(struct pci_dev *pdev) +{ + struct pci_bus *parent; + + pci_lock_rescan_remove(); + + parent = pdev->bus; + if (!parent) { + dev_err(&pdev->dev, "MANA service: no parent bus\n"); + goto out; + } + + pci_stop_and_remove_bus_device(pdev); + pci_rescan_bus(parent); + +out: + pci_unlock_rescan_remove(); +} + static void mana_serv_fpga(struct pci_dev *pdev) { struct pci_bus *bus, *parent; @@ -419,9 +452,12 @@ static void mana_serv_reset(struct pci_dev *pdev) { struct gdma_context *gc = pci_get_drvdata(pdev); struct hw_channel_context *hwc; + int ret; if (!gc) { - dev_err(&pdev->dev, "MANA service: no GC\n"); + /* Perform PCI rescan on device if GC is not set up */ + dev_err(&pdev->dev, "MANA service: GC not setup, rescanning\n"); + mana_serv_rescan(pdev); return; } @@ -440,9 +476,18 @@ static void mana_serv_reset(struct pci_dev *pdev) msleep(MANA_SERVICE_PERIOD * 1000); - mana_gd_resume(pdev); + ret = mana_gd_resume(pdev); + if (ret == -ETIMEDOUT || ret == -EPROTO) { + /* Perform PCI rescan on device if we failed on HWC */ + dev_err(&pdev->dev, "MANA 
service: resume failed, rescanning\n"); + mana_serv_rescan(pdev); + goto out; + } - dev_info(&pdev->dev, "MANA reset cycle completed\n"); + if (ret) + dev_info(&pdev->dev, "MANA reset cycle failed err %d\n", ret); + else + dev_info(&pdev->dev, "MANA reset cycle completed\n"); out: gc->in_service = false; @@ -454,18 +499,9 @@ struct mana_serv_work { enum gdma_eqe_type type; }; -static void mana_serv_func(struct work_struct *w) +static void mana_do_service(enum gdma_eqe_type type, struct pci_dev *pdev) { - struct mana_serv_work *mns_wk; - struct pci_dev *pdev; - - mns_wk = container_of(w, struct mana_serv_work, serv_work); - pdev = mns_wk->pdev; - - if (!pdev) - goto out; - - switch (mns_wk->type) { + switch (type) { case GDMA_EQE_HWC_FPGA_RECONFIG: mana_serv_fpga(pdev); break; @@ -475,12 +511,48 @@ static void mana_serv_func(struct work_struct *w) break; default: - dev_err(&pdev->dev, "MANA service: unknown type %d\n", - mns_wk->type); + dev_err(&pdev->dev, "MANA service: unknown type %d\n", type); break; } +} + +static void mana_recovery_delayed_func(struct work_struct *w) +{ + struct mana_dev_recovery_work *work; + struct mana_dev_recovery *dev; + unsigned long flags; + + work = container_of(w, struct mana_dev_recovery_work, work.work); + + spin_lock_irqsave(&work->lock, flags); + + while (!list_empty(&work->dev_list)) { + dev = list_first_entry(&work->dev_list, + struct mana_dev_recovery, list); + list_del(&dev->list); + spin_unlock_irqrestore(&work->lock, flags); + + mana_do_service(dev->type, dev->pdev); + pci_dev_put(dev->pdev); + kfree(dev); + + spin_lock_irqsave(&work->lock, flags); + } + + spin_unlock_irqrestore(&work->lock, flags); +} + +static void mana_serv_func(struct work_struct *w) +{ + struct mana_serv_work *mns_wk; + struct pci_dev *pdev; + + mns_wk = container_of(w, struct mana_serv_work, serv_work); + pdev = mns_wk->pdev; + + if (pdev) + mana_do_service(mns_wk->type, pdev); -out: pci_dev_put(pdev); kfree(mns_wk); module_put(THIS_MODULE); @@ -528,6 +600,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq) case GDMA_EQE_HWC_INIT_DONE: case GDMA_EQE_HWC_SOC_SERVICE: case GDMA_EQE_RNIC_QP_FATAL: + case GDMA_EQE_HWC_SOC_RECONFIG_DATA: if (!eq->eq.callback) break; @@ -540,6 +613,17 @@ static void mana_gd_process_eqe(struct gdma_queue *eq) case GDMA_EQE_HWC_RESET_REQUEST: dev_info(gc->dev, "Recv MANA service type:%d\n", type); + if (!test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) { + /* + * Device is in probe and we received a hardware reset + * event, the probe function will detect that the flag + * has changed and perform service procedure. 
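+ * (The bit doubles as the hand-off: mana_gd_probe() performs the same + * test_and_set_bit() at the end of probe and treats an already-set bit as + * a request to roll back and schedule recovery.)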
+ */ + dev_info(gc->dev, + "Service is to be processed in probe\n"); + break; + } + if (gc->in_service) { dev_info(gc->dev, "Already in service\n"); break; @@ -1299,7 +1383,6 @@ int mana_gd_post_work_request(struct gdma_queue *wq, struct gdma_posted_wqe_info *wqe_info) { u32 client_oob_size = wqe_req->inline_oob_size; - struct gdma_context *gc; u32 sgl_data_size; u32 max_wqe_size; u32 wqe_size; @@ -1329,11 +1412,8 @@ int mana_gd_post_work_request(struct gdma_queue *wq, if (wqe_size > max_wqe_size) return -EINVAL; - if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) { - gc = wq->gdma_dev->gdma_context; - dev_err(gc->dev, "unsuccessful flow control!\n"); + if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) return -ENOSPC; - } if (wqe_info) wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE; @@ -1941,8 +2021,19 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto cleanup_mana; + /* + * If a hardware reset event has occurred over HWC during probe, + * rollback and perform hardware reset procedure. + */ + if (test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) { + err = -EPROTO; + goto cleanup_mana_rdma; + } + return 0; +cleanup_mana_rdma: + mana_rdma_remove(&gc->mana_ib); cleanup_mana: mana_remove(&gc->mana, false); cleanup_gd: @@ -1966,6 +2057,35 @@ release_region: disable_dev: pci_disable_device(pdev); dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err); + + /* + * Hardware could be in recovery mode and the HWC returns TIMEDOUT or + * EPROTO from mana_gd_setup(), mana_probe() or mana_rdma_probe(), or + * we received a hardware reset event over HWC interrupt. In this case, + * perform the device recovery procedure after MANA_SERVICE_PERIOD + * seconds. + */ + if (err == -ETIMEDOUT || err == -EPROTO) { + struct mana_dev_recovery *dev; + unsigned long flags; + + dev_info(&pdev->dev, "Start MANA recovery mode\n"); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return err; + + dev->pdev = pci_dev_get(pdev); + dev->type = GDMA_EQE_HWC_RESET_REQUEST; + + spin_lock_irqsave(&mana_dev_recovery_work.lock, flags); + list_add_tail(&dev->list, &mana_dev_recovery_work.dev_list); + spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags); + + schedule_delayed_work(&mana_dev_recovery_work.work, + secs_to_jiffies(MANA_SERVICE_PERIOD)); + } + return err; } @@ -2070,6 +2190,10 @@ static int __init mana_driver_init(void) { int err; + INIT_LIST_HEAD(&mana_dev_recovery_work.dev_list); + spin_lock_init(&mana_dev_recovery_work.lock); + INIT_DELAYED_WORK(&mana_dev_recovery_work.work, mana_recovery_delayed_func); + mana_debugfs_root = debugfs_create_dir("mana", NULL); err = pci_register_driver(&mana_driver); @@ -2083,6 +2207,21 @@ static int __init mana_driver_init(void) static void __exit mana_driver_exit(void) { + struct mana_dev_recovery *dev; + unsigned long flags; + + disable_delayed_work_sync(&mana_dev_recovery_work.work); + + spin_lock_irqsave(&mana_dev_recovery_work.lock, flags); + while (!list_empty(&mana_dev_recovery_work.dev_list)) { + dev = list_first_entry(&mana_dev_recovery_work.dev_list, + struct mana_dev_recovery, list); + list_del(&dev->list); + pci_dev_put(dev->pdev); + kfree(dev); + } + spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags); + pci_unregister_driver(&mana_driver); debugfs_remove(mana_debugfs_root); diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index ada6c78a2bef..aa4e2731e2ba 100644 --- 
a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -118,6 +118,7 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, struct gdma_dev *gd = hwc->gdma_dev; union hwc_init_type_data type_data; union hwc_init_eq_id_db eq_db; + struct mana_context *ac; u32 type, val; int ret; @@ -196,6 +197,17 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, hwc->hwc_timeout = val; break; + case HWC_DATA_HW_LINK_CONNECT: + case HWC_DATA_HW_LINK_DISCONNECT: + ac = gd->gdma_context->mana.driver_data; + if (!ac) + break; + + WRITE_ONCE(ac->link_event, type); + schedule_work(&ac->link_change_work); + + break; + default: dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type); break; diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 0142fd98392c..1ad154f9db1a 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -11,6 +11,7 @@ #include <linux/mm.h> #include <linux/pci.h> #include <linux/export.h> +#include <linux/skbuff.h> #include <net/checksum.h> #include <net/ip6_checksum.h> @@ -20,6 +21,7 @@ #include <net/mana/mana.h> #include <net/mana/mana_auxiliary.h> +#include <net/mana/hw_channel.h> static DEFINE_IDA(mana_adev_ida); @@ -84,7 +86,6 @@ static int mana_open(struct net_device *ndev) /* Ensure port state updated before txq state */ smp_wmb(); - netif_carrier_on(ndev); netif_tx_wake_all_queues(ndev); netdev_dbg(ndev, "%s successful\n", __func__); return 0; @@ -100,6 +101,46 @@ static int mana_close(struct net_device *ndev) return mana_detach(ndev, true); } +static void mana_link_state_handle(struct work_struct *w) +{ + struct mana_context *ac; + struct net_device *ndev; + u32 link_event; + bool link_up; + int i; + + ac = container_of(w, struct mana_context, link_change_work); + + rtnl_lock(); + + link_event = READ_ONCE(ac->link_event); + + if (link_event == HWC_DATA_HW_LINK_CONNECT) + link_up = true; + else if (link_event == HWC_DATA_HW_LINK_DISCONNECT) + link_up = false; + else + goto out; + + /* Process all ports */ + for (i = 0; i < ac->num_ports; i++) { + ndev = ac->ports[i]; + if (!ndev) + continue; + + if (link_up) { + netif_carrier_on(ndev); + + __netdev_notify_peers(ndev); + } else { + netif_carrier_off(ndev); + } + } + +out: + rtnl_unlock(); +} + static bool mana_can_tx(struct gdma_queue *wq) { return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE; @@ -289,6 +330,21 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) cq = &apc->tx_qp[txq_idx].tx_cq; tx_stats = &txq->stats; + BUILD_BUG_ON(MAX_TX_WQE_SGL_ENTRIES != MANA_MAX_TX_WQE_SGL_ENTRIES); + if (MAX_SKB_FRAGS + 2 > MAX_TX_WQE_SGL_ENTRIES && + skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) { + /* GSO skb with Hardware SGE limit exceeded is not expected here + * as they are handled in mana_features_check() callback + */ + if (skb_linearize(skb)) { + netdev_warn_once(ndev, "Failed to linearize skb with nr_frags=%d and is_gso=%d\n", + skb_shinfo(skb)->nr_frags, + skb_is_gso(skb)); + goto tx_drop_count; + } + apc->eth_stats.tx_linear_pkt_cnt++; + } + pkg.tx_oob.s_oob.vcq_num = cq->gdma_id; pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; @@ -402,8 +458,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) } } - WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES); - if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) { pkg.wqe_req.sgl = 
pkg.sgl_array; } else { @@ -438,9 +492,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (err) { (void)skb_dequeue_tail(&txq->pending_skbs); + mana_unmap_skb(skb, apc); netdev_warn(ndev, "Failed to post TX OOB: %d\n", err); - err = NETDEV_TX_BUSY; - goto tx_busy; + goto free_sgl_ptr; } err = NETDEV_TX_OK; @@ -460,7 +514,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs); u64_stats_update_end(&tx_stats->syncp); -tx_busy: if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) { netif_tx_wake_queue(net_txq); apc->eth_stats.wake_queue++; @@ -478,6 +531,25 @@ tx_drop: return NETDEV_TX_OK; } +#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES) +static netdev_features_t mana_features_check(struct sk_buff *skb, + struct net_device *ndev, + netdev_features_t features) +{ + if (skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) { + /* Exceeds HW SGE limit. + * GSO case: + * Disable GSO so the stack will software-segment the skb + * into smaller skbs that fit the SGE budget. + * Non-GSO case: + * The xmit path will attempt skb_linearize() as a fallback. + */ + features &= ~NETIF_F_GSO_MASK; + } + return features; +} +#endif + static void mana_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *st) { @@ -494,6 +566,11 @@ static void mana_get_stats64(struct net_device *ndev, netdev_stats_to_stats64(st, &ndev->stats); + if (apc->ac->hwc_timeout_occurred) + netdev_warn_once(ndev, "HWC timeout occurred\n"); + + st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe; + for (q = 0; q < num_queues; q++) { rx_stats = &apc->rxqs[q]->stats; @@ -814,7 +891,7 @@ static int mana_shaper_del(struct net_shaper_binding *binding, /* Reset mana port context parameters */ apc->handle.id = 0; apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC; - apc->speed = 0; + apc->speed = apc->max_speed; } return err; @@ -838,6 +915,9 @@ static const struct net_device_ops mana_devops = { .ndo_open = mana_open, .ndo_stop = mana_close, .ndo_select_queue = mana_select_queue, +#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES) + .ndo_features_check = mana_features_check, +#endif .ndo_start_xmit = mana_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_get_stats64 = mana_get_stats64, @@ -1606,7 +1686,7 @@ static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units) return 0; } -static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) +void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) { struct mana_skb_head *ash = (struct mana_skb_head *)skb->head; struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; @@ -2769,11 +2849,12 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, return 0; } -void mana_query_gf_stats(struct mana_port_context *apc) +int mana_query_gf_stats(struct mana_context *ac) { + struct gdma_context *gc = ac->gdma_dev->gdma_context; struct mana_query_gf_stat_resp resp = {}; struct mana_query_gf_stat_req req = {}; - struct net_device *ndev = apc->ndev; + struct device *dev = gc->dev; int err; mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT, @@ -2807,52 +2888,54 @@ void mana_query_gf_stats(struct mana_port_context *apc) STATISTICS_FLAGS_HC_TX_BCAST_BYTES | STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR; - err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp)); if (err) { - netdev_err(ndev, "Failed to query GF stats: %d\n", err); - return; + 
dev_err(dev, "Failed to query GF stats: %d\n", err); + return err; } err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT, sizeof(resp)); if (err || resp.hdr.status) { - netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err, - resp.hdr.status); - return; + dev_err(dev, "Failed to query GF stats: %d, 0x%x\n", err, + resp.hdr.status); + return err; } - apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe; - apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled; - apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes; - apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts; - apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes; - apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts; - apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes; - apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts; - apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes; - apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled; - apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled; - apc->eth_stats.hc_tx_err_inval_vportoffset_pkt = + ac->hc_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe; + ac->hc_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled; + ac->hc_stats.hc_rx_bytes = resp.hc_rx_bytes; + ac->hc_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts; + ac->hc_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes; + ac->hc_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts; + ac->hc_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes; + ac->hc_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts; + ac->hc_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes; + ac->hc_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled; + ac->hc_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled; + ac->hc_stats.hc_tx_err_inval_vportoffset_pkt = resp.tx_err_inval_vport_offset_pkt; - apc->eth_stats.hc_tx_err_vlan_enforcement = + ac->hc_stats.hc_tx_err_vlan_enforcement = resp.tx_err_vlan_enforcement; - apc->eth_stats.hc_tx_err_eth_type_enforcement = + ac->hc_stats.hc_tx_err_eth_type_enforcement = resp.tx_err_ethtype_enforcement; - apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement; - apc->eth_stats.hc_tx_err_sqpdid_enforcement = + ac->hc_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement; + ac->hc_stats.hc_tx_err_sqpdid_enforcement = resp.tx_err_SQPDID_enforcement; - apc->eth_stats.hc_tx_err_cqpdid_enforcement = + ac->hc_stats.hc_tx_err_cqpdid_enforcement = resp.tx_err_CQPDID_enforcement; - apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation; - apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob; - apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes; - apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts; - apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes; - apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts; - apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes; - apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts; - apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes; - apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma; + ac->hc_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation; + ac->hc_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob; + ac->hc_stats.hc_tx_bytes = resp.hc_tx_bytes; + ac->hc_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts; + ac->hc_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes; + ac->hc_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts; + ac->hc_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes; + ac->hc_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts; + 
ac->hc_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes; + ac->hc_stats.hc_tx_err_gdma = resp.tx_err_gdma; + + return 0; } void mana_query_phy_stats(struct mana_port_context *apc) @@ -3059,9 +3142,6 @@ int mana_attach(struct net_device *ndev) /* Ensure port state updated before txq state */ smp_wmb(); - if (apc->port_is_up) - netif_carrier_on(ndev); - netif_device_attach(ndev); return 0; @@ -3154,7 +3234,6 @@ int mana_detach(struct net_device *ndev, bool from_close) smp_wmb(); netif_tx_disable(ndev); - netif_carrier_off(ndev); if (apc->port_st_save) { err = mana_dealloc_queues(ndev); @@ -3243,6 +3322,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, goto free_indir; } + netif_carrier_on(ndev); + debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed); return 0; @@ -3389,6 +3470,24 @@ int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type even return 0; } +#define MANA_GF_STATS_PERIOD (2 * HZ) + +static void mana_gf_stats_work_handler(struct work_struct *work) +{ + struct mana_context *ac = + container_of(to_delayed_work(work), struct mana_context, gf_stats_work); + int err; + + err = mana_query_gf_stats(ac); + if (err == -ETIMEDOUT) { + /* HWC timeout detected - reset stats and stop rescheduling */ + ac->hwc_timeout_occurred = true; + memset(&ac->hc_stats, 0, sizeof(ac->hc_stats)); + return; + } + schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD); +} + int mana_probe(struct gdma_dev *gd, bool resuming) { struct gdma_context *gc = gd->gdma_context; @@ -3431,6 +3530,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming) if (!resuming) { ac->num_ports = num_ports; + + INIT_WORK(&ac->link_change_work, mana_link_state_handle); } else { if (ac->num_ports != num_ports) { dev_err(dev, "The number of vPorts changed: %d->%d\n", @@ -3438,6 +3539,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming) err = -EPROTO; goto out; } + + enable_work(&ac->link_change_work); } if (ac->num_ports == 0) @@ -3477,6 +3580,10 @@ int mana_probe(struct gdma_dev *gd, bool resuming) } err = add_adev(gd, "eth"); + + INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler); + schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD); + out: if (err) { mana_remove(gd, false); @@ -3500,6 +3607,9 @@ void mana_remove(struct gdma_dev *gd, bool suspending) int err; int i; + disable_work_sync(&ac->link_change_work); + cancel_delayed_work_sync(&ac->gf_stats_work); + /* adev currently doesn't support suspending, always remove it */ if (gd->adev) remove_adev(gd); diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index a1afa75a9463..0e2f4343ac67 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -15,66 +15,71 @@ struct mana_stats_desc { static const struct mana_stats_desc mana_eth_stats[] = { {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)}, {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)}, - {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats, + {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)}, + {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, + tx_cqe_unknown_type)}, + {"tx_linear_pkt_cnt", offsetof(struct mana_ethtool_stats, + tx_linear_pkt_cnt)}, + {"rx_coalesced_err", offsetof(struct mana_ethtool_stats, + rx_coalesced_err)}, + {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, + rx_cqe_unknown_type)}, +}; + +static const 
struct mana_stats_desc mana_hc_stats[] = { + {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_hc_stats, hc_rx_discards_no_wqe)}, - {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats, + {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats, hc_rx_err_vport_disabled)}, - {"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)}, - {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats, + {"hc_rx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_rx_bytes)}, + {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats, hc_rx_ucast_pkts)}, - {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats, + {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats, hc_rx_ucast_bytes)}, - {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats, + {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats, hc_rx_bcast_pkts)}, - {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats, + {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats, hc_rx_bcast_bytes)}, - {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats, - hc_rx_mcast_pkts)}, - {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats, + {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats, + hc_rx_mcast_pkts)}, + {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats, hc_rx_mcast_bytes)}, - {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats, + {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_hc_stats, hc_tx_err_gf_disabled)}, - {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats, + {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats, hc_tx_err_vport_disabled)}, {"hc_tx_err_inval_vportoffset_pkt", - offsetof(struct mana_ethtool_stats, + offsetof(struct mana_ethtool_hc_stats, hc_tx_err_inval_vportoffset_pkt)}, - {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats, + {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_hc_stats, hc_tx_err_vlan_enforcement)}, {"hc_tx_err_eth_type_enforcement", - offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)}, - {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats, + offsetof(struct mana_ethtool_hc_stats, hc_tx_err_eth_type_enforcement)}, + {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_hc_stats, hc_tx_err_sa_enforcement)}, {"hc_tx_err_sqpdid_enforcement", - offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)}, + offsetof(struct mana_ethtool_hc_stats, hc_tx_err_sqpdid_enforcement)}, {"hc_tx_err_cqpdid_enforcement", - offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)}, - {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats, + offsetof(struct mana_ethtool_hc_stats, hc_tx_err_cqpdid_enforcement)}, + {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_hc_stats, hc_tx_err_mtu_violation)}, - {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats, + {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_hc_stats, hc_tx_err_inval_oob)}, - {"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats, + {"hc_tx_err_gdma", offsetof(struct mana_ethtool_hc_stats, hc_tx_err_gdma)}, - {"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)}, - {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats, + {"hc_tx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_tx_bytes)}, + {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats, hc_tx_ucast_pkts)}, - {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats, + {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats, hc_tx_ucast_bytes)}, - 
{"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats, + {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats, hc_tx_bcast_pkts)}, - {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats, + {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats, hc_tx_bcast_bytes)}, - {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats, + {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats, hc_tx_mcast_pkts)}, - {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats, + {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats, hc_tx_mcast_bytes)}, - {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)}, - {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, - tx_cqe_unknown_type)}, - {"rx_coalesced_err", offsetof(struct mana_ethtool_stats, - rx_coalesced_err)}, - {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, - rx_cqe_unknown_type)}, }; static const struct mana_stats_desc mana_phy_stats[] = { @@ -138,7 +143,7 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset) if (stringset != ETH_SS_STATS) return -EINVAL; - return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) + + return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) + ARRAY_SIZE(mana_hc_stats) + num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT); } @@ -150,10 +155,12 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data) if (stringset != ETH_SS_STATS) return; - for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) ethtool_puts(&data, mana_eth_stats[i].name); + for (i = 0; i < ARRAY_SIZE(mana_hc_stats); i++) + ethtool_puts(&data, mana_hc_stats[i].name); + for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++) ethtool_puts(&data, mana_phy_stats[i].name); @@ -186,6 +193,7 @@ static void mana_get_ethtool_stats(struct net_device *ndev, struct mana_port_context *apc = netdev_priv(ndev); unsigned int num_queues = apc->num_queues; void *eth_stats = &apc->eth_stats; + void *hc_stats = &apc->ac->hc_stats; void *phy_stats = &apc->phy_stats; struct mana_stats_rx *rx_stats; struct mana_stats_tx *tx_stats; @@ -207,8 +215,6 @@ static void mana_get_ethtool_stats(struct net_device *ndev, if (!apc->port_is_up) return; - /* we call mana function to update stats from GDMA */ - mana_query_gf_stats(apc); /* We call this mana function to get the phy stats from GDMA and includes * aggregate tx/rx drop counters, Per-TC(Traffic Channel) tx/rx and pause @@ -219,6 +225,9 @@ static void mana_get_ethtool_stats(struct net_device *ndev, for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++) data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset); + for (q = 0; q < ARRAY_SIZE(mana_hc_stats); q++) + data[i++] = *(u64 *)(hc_stats + mana_hc_stats[q].offset); + for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++) data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset); diff --git a/drivers/net/ethernet/mucse/Kconfig b/drivers/net/ethernet/mucse/Kconfig new file mode 100644 index 000000000000..0b3e853d625f --- /dev/null +++ b/drivers/net/ethernet/mucse/Kconfig @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Mucse network device configuration +# + +config NET_VENDOR_MUCSE + bool "Mucse devices" + default y + help + If you have a network (Ethernet) card from Mucse(R), say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Mucse(R) cards. If you say Y, you will + be asked for your specific card in the following questions. 
+
+if NET_VENDOR_MUCSE
+
+config MGBE
+	tristate "Mucse(R) 1GbE PCI Express adapters support"
+	depends on PCI
+	help
+	  This driver supports Mucse(R) 1GbE PCI Express family of
+	  adapters.
+
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/device_drivers/ethernet/mucse/rnpgbe.rst>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called rnpgbe.
+
+endif # NET_VENDOR_MUCSE
+
diff --git a/drivers/net/ethernet/mucse/Makefile b/drivers/net/ethernet/mucse/Makefile
new file mode 100644
index 000000000000..675173fa05f7
--- /dev/null
+++ b/drivers/net/ethernet/mucse/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2020 - 2025 MUCSE Corporation.
+#
+# Makefile for the MUCSE(R) network device drivers
+#
+
+obj-$(CONFIG_MGBE) += rnpgbe/
diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile
new file mode 100644
index 000000000000..de8bcb7772ab
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2020 - 2025 MUCSE Corporation.
+#
+# Makefile for the MUCSE(R) 1GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_MGBE) += rnpgbe.o
+rnpgbe-objs := rnpgbe_main.o\
+	       rnpgbe_chip.o\
+	       rnpgbe_mbx.o\
+	       rnpgbe_mbx_fw.o
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
new file mode 100644
index 000000000000..5b024f9f7e17
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#ifndef _RNPGBE_H
+#define _RNPGBE_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+enum rnpgbe_boards {
+	board_n500,
+	board_n210
+};
+
+struct mucse_mbx_info {
+	u32 timeout_us;
+	u32 delay_us;
+	u16 fw_req;
+	u16 fw_ack;
+	/* lock to serialize mbx access, only one user at a time */
+	struct mutex lock;
+	/* fw <--> pf mbx */
+	u32 fwpf_shm_base;
+	u32 pf2fw_mbx_ctrl;
+	u32 fwpf_mbx_mask;
+	u32 fwpf_ctrl_base;
+};
+
+/* Enum for firmware notification modes,
+ * more modes (e.g., portup, link_report) will be added in the future
+ **/
+enum {
+	mucse_fw_powerup,
+};
+
+struct mucse_hw {
+	void __iomem *hw_addr;
+	struct pci_dev *pdev;
+	struct mucse_mbx_info mbx;
+	int port;
+	u8 pfvfnum;
+};
+
+struct mucse_stats {
+	u64 tx_dropped;
+};
+
+struct mucse {
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct mucse_hw hw;
+	struct mucse_stats stats;
+};
+
+int rnpgbe_get_permanent_mac(struct mucse_hw *hw, u8 *perm_addr);
+int rnpgbe_reset_hw(struct mucse_hw *hw);
+int rnpgbe_send_notify(struct mucse_hw *hw,
+		       bool enable,
+		       int mode);
+int rnpgbe_init_hw(struct mucse_hw *hw, int board_type);
+
+/* Device IDs */
+#define PCI_VENDOR_ID_MUCSE 0x8848
+#define RNPGBE_DEVICE_ID_N500_QUAD_PORT 0x8308
+#define RNPGBE_DEVICE_ID_N500_DUAL_PORT 0x8318
+#define RNPGBE_DEVICE_ID_N210 0x8208
+#define RNPGBE_DEVICE_ID_N210L 0x820a
+
+#define mucse_hw_wr32(hw, reg, val) \
+	writel((val), (hw)->hw_addr + (reg))
+#endif /* _RNPGBE_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
new file mode 100644
index 000000000000..ebc7b3750157
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+
+#include "rnpgbe.h"
+#include "rnpgbe_hw.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_mbx_fw.h"
+
+/**
+ * rnpgbe_get_permanent_mac - Get the permanent MAC address
+ * @hw: hw information structure
+ * @perm_addr: pointer to store perm_addr
+ *
+ * rnpgbe_get_permanent_mac tries to read the permanent MAC address
+ * from the hw.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int rnpgbe_get_permanent_mac(struct mucse_hw *hw, u8 *perm_addr)
+{
+	struct device *dev = &hw->pdev->dev;
+	int err;
+
+	err = mucse_mbx_get_macaddr(hw, hw->pfvfnum, perm_addr, hw->port);
+	if (err) {
+		dev_err(dev, "Failed to get MAC from FW %d\n", err);
+		return err;
+	}
+
+	if (!is_valid_ether_addr(perm_addr)) {
+		dev_err(dev, "Failed to get valid MAC from FW\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * rnpgbe_reset_hw - Do a hardware reset
+ * @hw: hw information structure
+ *
+ * rnpgbe_reset_hw asks the fw to perform a hardware reset and
+ * returns some registers to their defaults.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int rnpgbe_reset_hw(struct mucse_hw *hw)
+{
+	mucse_hw_wr32(hw, RNPGBE_DMA_AXI_EN, 0);
+	return mucse_mbx_reset_hw(hw);
+}
+
+/**
+ * rnpgbe_send_notify - Notify fw of a status change
+ * @hw: hw information structure
+ * @enable: the on/off status to report
+ * @mode: which status is being reported
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int rnpgbe_send_notify(struct mucse_hw *hw,
+		       bool enable,
+		       int mode)
+{
+	int err;
+	/* Keep switch struct to support more modes in the future */
+	switch (mode) {
+	case mucse_fw_powerup:
+		err = mucse_mbx_powerup(hw, enable);
+		break;
+	default:
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+/**
+ * rnpgbe_init_n500 - Setup n500 hw info
+ * @hw: hw information structure
+ *
+ * rnpgbe_init_n500 initializes the private structures
+ * for the n500
+ **/
+static void rnpgbe_init_n500(struct mucse_hw *hw)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+
+	mbx->fwpf_ctrl_base = MUCSE_N500_FWPF_CTRL_BASE;
+	mbx->fwpf_shm_base = MUCSE_N500_FWPF_SHM_BASE;
+}
+
+/**
+ * rnpgbe_init_n210 - Setup n210 hw info
+ * @hw: hw information structure
+ *
+ * rnpgbe_init_n210 initializes the private structures
+ * for the n210
+ **/
+static void rnpgbe_init_n210(struct mucse_hw *hw)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+
+	mbx->fwpf_ctrl_base = MUCSE_N210_FWPF_CTRL_BASE;
+	mbx->fwpf_shm_base = MUCSE_N210_FWPF_SHM_BASE;
+}
+
+/**
+ * rnpgbe_init_hw - Setup hw info according to board_type
+ * @hw: hw information structure
+ * @board_type: board type
+ *
+ * rnpgbe_init_hw initializes all hw data
+ *
+ * Return: 0 on success, -EINVAL on failure
+ **/
+int rnpgbe_init_hw(struct mucse_hw *hw, int board_type)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+
+	hw->port = 0;
+
+	mbx->pf2fw_mbx_ctrl = MUCSE_GBE_PFFW_MBX_CTRL_OFFSET;
+	mbx->fwpf_mbx_mask = MUCSE_GBE_FWPF_MBX_MASK_OFFSET;
+
+	switch (board_type) {
+	case board_n500:
+		rnpgbe_init_n500(hw);
+		break;
+	case board_n210:
+		rnpgbe_init_n210(hw);
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* init_params with mbx base */
+	mucse_init_mbx_params_pf(hw);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
new file mode 100644
index 000000000000..e77e6bc3d3e3
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#ifndef _RNPGBE_HW_H
+#define _RNPGBE_HW_H
+
+#define MUCSE_N500_FWPF_CTRL_BASE 0x28b00
+#define MUCSE_N500_FWPF_SHM_BASE 0x2d000
+#define MUCSE_GBE_PFFW_MBX_CTRL_OFFSET 0x5500
+#define MUCSE_GBE_FWPF_MBX_MASK_OFFSET 0x5700
+#define MUCSE_N210_FWPF_CTRL_BASE 0x29400
+#define MUCSE_N210_FWPF_SHM_BASE 0x2d900
+
+#define RNPGBE_DMA_AXI_EN 0x0010
+
+#define RNPGBE_MAX_QUEUES 8
+#endif /* _RNPGBE_HW_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
new file mode 100644
index 000000000000..316f941629d4
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#include <linux/pci.h>
+#include <net/rtnetlink.h>
+#include <linux/etherdevice.h>
+
+#include "rnpgbe.h"
+#include "rnpgbe_hw.h"
+#include "rnpgbe_mbx_fw.h"
+
+static const char rnpgbe_driver_name[] = "rnpgbe";
+
+/* rnpgbe_pci_tbl - PCI Device ID Table
+ *
+ * { PCI_VDEVICE(Vendor ID, Device ID),
+ *   private_data (used for different hw chip) }
+ */
+static const struct pci_device_id rnpgbe_pci_tbl[] = {
+	{ PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N210), board_n210 },
+	{ PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N210L), board_n210 },
+	{ PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N500_DUAL_PORT), board_n500 },
+	{ PCI_VDEVICE(MUCSE, RNPGBE_DEVICE_ID_N500_QUAD_PORT), board_n500 },
+	/* required last entry */
+	{0, },
+};
+
+/**
+ * rnpgbe_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).
+ *
+ * Return: 0
+ **/
+static int rnpgbe_open(struct net_device *netdev)
+{
+	return 0;
+}
+
+/**
+ * rnpgbe_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.
+ *
+ * Return: 0, this is not allowed to fail
+ **/
+static int rnpgbe_close(struct net_device *netdev)
+{
+	return 0;
+}
+
+/**
+ * rnpgbe_xmit_frame - Send a skb to driver
+ * @skb: skb structure to be sent
+ * @netdev: network interface device structure
+ *
+ * Return: NETDEV_TX_OK
+ **/
+static netdev_tx_t rnpgbe_xmit_frame(struct sk_buff *skb,
+				     struct net_device *netdev)
+{
+	struct mucse *mucse = netdev_priv(netdev);
+
+	dev_kfree_skb_any(skb);
+	mucse->stats.tx_dropped++;
+
+	return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops rnpgbe_netdev_ops = {
+	.ndo_open = rnpgbe_open,
+	.ndo_stop = rnpgbe_close,
+	.ndo_start_xmit = rnpgbe_xmit_frame,
+};
+
+/**
+ * rnpgbe_add_adapter - Add netdev for this pci_dev
+ * @pdev: PCI device information structure
+ * @board_type: board type
+ *
+ * rnpgbe_add_adapter initializes a netdev for this pci_dev: it maps
+ * the BAR, sets up the private structure and performs a hardware
+ * reset.
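+ *
+ * The bring-up order below matters: map BAR2, rnpgbe_init_hw(),
+ * notify the fw of powerup (it may be idling at a low frequency),
+ * sync the mailbox, reset the hw, query the permanent MAC, and only
+ * then register_netdev().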
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int rnpgbe_add_adapter(struct pci_dev *pdev,
+			      int board_type)
+{
+	struct net_device *netdev;
+	u8 perm_addr[ETH_ALEN];
+	void __iomem *hw_addr;
+	struct mucse *mucse;
+	struct mucse_hw *hw;
+	int err, err_notify;
+
+	netdev = alloc_etherdev_mq(sizeof(struct mucse), RNPGBE_MAX_QUEUES);
+	if (!netdev)
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	mucse = netdev_priv(netdev);
+	mucse->netdev = netdev;
+	mucse->pdev = pdev;
+	pci_set_drvdata(pdev, mucse);
+
+	hw = &mucse->hw;
+	hw_addr = devm_ioremap(&pdev->dev,
+			       pci_resource_start(pdev, 2),
+			       pci_resource_len(pdev, 2));
+	if (!hw_addr) {
+		err = -EIO;
+		goto err_free_net;
+	}
+
+	hw->hw_addr = hw_addr;
+	hw->pdev = pdev;
+
+	err = rnpgbe_init_hw(hw, board_type);
+	if (err) {
+		dev_err(&pdev->dev, "Init hw err %d\n", err);
+		goto err_free_net;
+	}
+	/* Step 1: Send power-up notification to firmware (no response expected)
+	 * This informs firmware to initialize hardware power state, but
+	 * firmware only acknowledges receipt without returning data. Must be
+	 * done before synchronization as firmware may be in low-power idle
+	 * state initially.
+	 */
+	err_notify = rnpgbe_send_notify(hw, true, mucse_fw_powerup);
+	if (err_notify) {
+		dev_warn(&pdev->dev, "Send powerup to hw failed %d\n",
+			 err_notify);
+		dev_warn(&pdev->dev, "Performance may be degraded\n");
+	}
+	/* Step 2: Synchronize mailbox communication with firmware (requires
+	 * response) After power-up, confirm firmware is ready to process
+	 * requests with responses. This ensures subsequent request/response
+	 * interactions work reliably.
+	 */
+	err = mucse_mbx_sync_fw(hw);
+	if (err) {
+		dev_err(&pdev->dev, "Sync fw failed! %d\n", err);
+		goto err_powerdown;
+	}
+
+	netdev->netdev_ops = &rnpgbe_netdev_ops;
+	err = rnpgbe_reset_hw(hw);
+	if (err) {
+		dev_err(&pdev->dev, "Hw reset failed %d\n", err);
+		goto err_powerdown;
+	}
+
+	err = rnpgbe_get_permanent_mac(hw, perm_addr);
+	if (!err) {
+		eth_hw_addr_set(netdev, perm_addr);
+	} else if (err == -EINVAL) {
+		dev_warn(&pdev->dev, "Using random MAC\n");
+		eth_hw_addr_random(netdev);
+	} else if (err) {
+		dev_err(&pdev->dev, "get perm_addr failed %d\n", err);
+		goto err_powerdown;
+	}
+
+	err = register_netdev(netdev);
+	if (err)
+		goto err_powerdown;
+
+	return 0;
+err_powerdown:
+	/* only notify powerdown if the powerup notify succeeded */
+	if (!err_notify) {
+		err_notify = rnpgbe_send_notify(hw, false, mucse_fw_powerup);
+		if (err_notify)
+			dev_warn(&pdev->dev, "Send powerdown to hw failed %d\n",
+				 err_notify);
+	}
+err_free_net:
+	free_netdev(netdev);
+	return err;
+}
+
+/**
+ * rnpgbe_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @id: entry in rnpgbe_pci_tbl
+ *
+ * rnpgbe_probe initializes a PF adapter identified by a pci_dev
+ * structure.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int rnpgbe_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int board_type = id->driver_data;
+	int err;
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
+	err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(56));
+	if (err) {
+		dev_err(&pdev->dev,
+			"No usable DMA configuration, aborting %d\n", err);
+		goto err_disable_dev;
+	}
+
+	err = pci_request_mem_regions(pdev, rnpgbe_driver_name);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_request_mem_regions failed %d\n", err);
+		goto err_disable_dev;
+	}
+
+	pci_set_master(pdev);
+	err = pci_save_state(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_save_state failed %d\n", err);
+		goto err_free_regions;
+	}
+
+	err = rnpgbe_add_adapter(pdev, board_type);
+	if (err)
+		goto err_free_regions;
+
+	return 0;
+err_free_regions:
+	pci_release_mem_regions(pdev);
+err_disable_dev:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * rnpgbe_rm_adapter - Remove netdev for this mucse structure
+ * @pdev: PCI device information struct
+ *
+ * rnpgbe_rm_adapter removes the netdev for this mucse structure
+ **/
+static void rnpgbe_rm_adapter(struct pci_dev *pdev)
+{
+	struct mucse *mucse = pci_get_drvdata(pdev);
+	struct net_device *netdev;
+	struct mucse_hw *hw;
+	int err;
+
+	if (!mucse)
+		return;
+	hw = &mucse->hw;
+	netdev = mucse->netdev;
+	unregister_netdev(netdev);
+	err = rnpgbe_send_notify(hw, false, mucse_fw_powerup);
+	if (err)
+		dev_warn(&pdev->dev, "Send powerdown to hw failed %d\n", err);
+	free_netdev(netdev);
+}
+
+/**
+ * rnpgbe_remove - Device removal routine
+ * @pdev: PCI device information struct
+ *
+ * rnpgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void rnpgbe_remove(struct pci_dev *pdev)
+{
+	rnpgbe_rm_adapter(pdev);
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * rnpgbe_dev_shutdown - Device shutdown routine
+ * @pdev: PCI device information struct
+ **/
+static void rnpgbe_dev_shutdown(struct pci_dev *pdev)
+{
+	struct mucse *mucse = pci_get_drvdata(pdev);
+	struct net_device *netdev = mucse->netdev;
+
+	rtnl_lock();
+	netif_device_detach(netdev);
+	if (netif_running(netdev))
+		rnpgbe_close(netdev);
+	rtnl_unlock();
+	pci_disable_device(pdev);
+}
+
+/**
+ * rnpgbe_shutdown - Device shutdown routine
+ * @pdev: PCI device information struct
+ *
+ * rnpgbe_shutdown is called by the PCI subsystem to notify the driver
+ * that the OS is shutting down. The device should set up its wakeup
+ * state here.
+ **/
+static void rnpgbe_shutdown(struct pci_dev *pdev)
+{
+	rnpgbe_dev_shutdown(pdev);
+}
+
+static struct pci_driver rnpgbe_driver = {
+	.name = rnpgbe_driver_name,
+	.id_table = rnpgbe_pci_tbl,
+	.probe = rnpgbe_probe,
+	.remove = rnpgbe_remove,
+	.shutdown = rnpgbe_shutdown,
+};
+
+module_pci_driver(rnpgbe_driver);
+
+MODULE_DEVICE_TABLE(pci, rnpgbe_pci_tbl);
+MODULE_AUTHOR("Yibo Dong, <dong100@mucse.com>");
+MODULE_DESCRIPTION("Mucse(R) 1 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
new file mode 100644
index 000000000000..de5e29230b3c
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2025 Mucse Corporation. */
+
+#include <linux/errno.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+
+#include "rnpgbe_mbx.h"
+
+/**
+ * mbx_data_rd32 - Reads reg with base mbx->fwpf_shm_base
+ * @mbx: pointer to the MBX structure
+ * @reg: register offset
+ *
+ * Return: register value
+ **/
+static u32 mbx_data_rd32(struct mucse_mbx_info *mbx, u32 reg)
+{
+	struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx);
+
+	return readl(hw->hw_addr + mbx->fwpf_shm_base + reg);
+}
+
+/**
+ * mbx_data_wr32 - Writes value to reg with base mbx->fwpf_shm_base
+ * @mbx: pointer to the MBX structure
+ * @reg: register offset
+ * @value: value to be written
+ *
+ **/
+static void mbx_data_wr32(struct mucse_mbx_info *mbx, u32 reg, u32 value)
+{
+	struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx);
+
+	writel(value, hw->hw_addr + mbx->fwpf_shm_base + reg);
+}
+
+/**
+ * mbx_ctrl_rd32 - Reads reg with base mbx->fwpf_ctrl_base
+ * @mbx: pointer to the MBX structure
+ * @reg: register offset
+ *
+ * Return: register value
+ **/
+static u32 mbx_ctrl_rd32(struct mucse_mbx_info *mbx, u32 reg)
+{
+	struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx);
+
+	return readl(hw->hw_addr + mbx->fwpf_ctrl_base + reg);
+}
+
+/**
+ * mbx_ctrl_wr32 - Writes value to reg with base mbx->fwpf_ctrl_base
+ * @mbx: pointer to the MBX structure
+ * @reg: register offset
+ * @value: value to be written
+ *
+ **/
+static void mbx_ctrl_wr32(struct mucse_mbx_info *mbx, u32 reg, u32 value)
+{
+	struct mucse_hw *hw = container_of(mbx, struct mucse_hw, mbx);
+
+	writel(value, hw->hw_addr + mbx->fwpf_ctrl_base + reg);
+}
+
+/**
+ * mucse_mbx_get_lock_pf - Write ctrl and read back lock status
+ * @hw: pointer to the HW structure
+ *
+ * Return: register value after write
+ **/
+static u32 mucse_mbx_get_lock_pf(struct mucse_hw *hw)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	u32 reg = MUCSE_MBX_PF2FW_CTRL(mbx);
+
+	mbx_ctrl_wr32(mbx, reg, MUCSE_MBX_PFU);
+
+	return mbx_ctrl_rd32(mbx, reg);
+}
+
+/**
+ * mucse_obtain_mbx_lock_pf - Obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * Pair with mucse_release_mbx_lock_pf()
+ * This function may be used in an irq handler.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_obtain_mbx_lock_pf(struct mucse_hw *hw)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	u32 val;
+
+	return read_poll_timeout_atomic(mucse_mbx_get_lock_pf,
+					val, val & MUCSE_MBX_PFU,
+					mbx->delay_us,
+					mbx->timeout_us,
+					false, hw);
+}
+
+/**
+ * mucse_release_mbx_lock_pf - Release mailbox lock
+ * @hw: pointer to the HW structure
+ * @req: send a request or not
+ *
+ * Pair with mucse_obtain_mbx_lock_pf():
+ * - Releases the mailbox lock by clearing MUCSE_MBX_PFU bit
+ * - Simultaneously sends the request by setting MUCSE_MBX_REQ bit
+ *   if req is true
+ * (Both bits are in the same mailbox control register,
+ * so operations are combined)
+ **/
+static void mucse_release_mbx_lock_pf(struct mucse_hw *hw, bool req)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	u32 reg = MUCSE_MBX_PF2FW_CTRL(mbx);
+
+	mbx_ctrl_wr32(mbx, reg, req ? MUCSE_MBX_REQ : 0);
+}
+
+/**
+ * mucse_mbx_get_fwreq - Read fw req from reg
+ * @mbx: pointer to the mbx structure
+ *
+ * Return: the fwreq value
+ **/
+static u16 mucse_mbx_get_fwreq(struct mucse_mbx_info *mbx)
+{
+	u32 val = mbx_data_rd32(mbx, MUCSE_MBX_FW2PF_CNT);
+
+	return FIELD_GET(GENMASK_U32(15, 0), val);
+}
+
+/**
+ * mucse_mbx_inc_pf_ack - Increase ack
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_inc_pf_ack reads pf_ack from hw, then writes
+ * the incremented value back
+ **/
+static void mucse_mbx_inc_pf_ack(struct mucse_hw *hw)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	u16 ack;
+	u32 val;
+
+	val = mbx_data_rd32(mbx, MUCSE_MBX_PF2FW_CNT);
+	ack = FIELD_GET(GENMASK_U32(31, 16), val);
+	ack++;
+	val &= ~GENMASK_U32(31, 16);
+	val |= FIELD_PREP(GENMASK_U32(31, 16), ack);
+	mbx_data_wr32(mbx, MUCSE_MBX_PF2FW_CNT, val);
+}
+
+/**
+ * mucse_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * mucse_read_mbx_pf copies a message from the mbx buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a fw request so no polling for message is needed.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_read_mbx_pf(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+	const int size_in_words = size / sizeof(u32);
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	int err;
+
+	err = mucse_obtain_mbx_lock_pf(hw);
+	if (err)
+		return err;
+
+	for (int i = 0; i < size_in_words; i++)
+		msg[i] = mbx_data_rd32(mbx, MUCSE_MBX_FWPF_SHM + 4 * i);
+	/* Hw requires the data reg to be written last */
+	mbx_data_wr32(mbx, MUCSE_MBX_FWPF_SHM, 0);
+	/* flush reqs as we have read this request data */
+	hw->mbx.fw_req = mucse_mbx_get_fwreq(mbx);
+	mucse_mbx_inc_pf_ack(hw);
+	mucse_release_mbx_lock_pf(hw, false);
+
+	return 0;
+}
+
+/**
+ * mucse_check_for_msg_pf - Check to see if the fw has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * Return: 0 if the fw has set the Status bit or else -EIO
+ **/
+static int mucse_check_for_msg_pf(struct mucse_hw *hw)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	u16 fw_req;
+
+	fw_req = mucse_mbx_get_fwreq(mbx);
+	/* The chip's register is reset to 0 when the RC sends the
+	 * reset mbx command. Return -EIO in that state; otherwise
+	 * fw_req == hw->mbx.fw_req means there is no new message.
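	 *
	 * Example: with hw->mbx.fw_req latched at 5, a register value
	 * of 6 means a new message is pending, 5 means nothing new,
	 * and 0 means the counters were reset.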
+	 **/
+	if (fw_req == 0 || fw_req == hw->mbx.fw_req)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * mucse_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_poll_for_msg(struct mucse_hw *hw)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	int val;
+
+	return read_poll_timeout(mucse_check_for_msg_pf,
+				 val, !val, mbx->delay_us,
+				 mbx->timeout_us,
+				 false, hw);
+}
+
+/**
+ * mucse_poll_and_read_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * Return: 0 if it successfully received a message notification and
+ * copied it into the receive buffer, negative errno on failure
+ **/
+int mucse_poll_and_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+	int err;
+
+	err = mucse_poll_for_msg(hw);
+	if (err)
+		return err;
+
+	return mucse_read_mbx_pf(hw, msg, size);
+}
+
+/**
+ * mucse_mbx_get_fwack - Read fw ack from reg
+ * @mbx: pointer to the MBX structure
+ *
+ * Return: the fwack value
+ **/
+static u16 mucse_mbx_get_fwack(struct mucse_mbx_info *mbx)
+{
+	u32 val = mbx_data_rd32(mbx, MUCSE_MBX_FW2PF_CNT);
+
+	return FIELD_GET(GENMASK_U32(31, 16), val);
+}
+
+/**
+ * mucse_mbx_inc_pf_req - Increase req
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_inc_pf_req reads pf_req from hw, then writes
+ * the incremented value back
+ **/
+static void mucse_mbx_inc_pf_req(struct mucse_hw *hw)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	u16 req;
+	u32 val;
+
+	val = mbx_data_rd32(mbx, MUCSE_MBX_PF2FW_CNT);
+	req = FIELD_GET(GENMASK_U32(15, 0), val);
+	req++;
+	val &= ~GENMASK_U32(15, 0);
+	val |= FIELD_PREP(GENMASK_U32(15, 0), req);
+	mbx_data_wr32(mbx, MUCSE_MBX_PF2FW_CNT, val);
+}
+
+/**
+ * mucse_write_mbx_pf - Place a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * Return: 0 if it successfully copied message into the buffer,
+ * negative errno on failure
+ **/
+static int mucse_write_mbx_pf(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+	const int size_in_words = size / sizeof(u32);
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	int err;
+
+	err = mucse_obtain_mbx_lock_pf(hw);
+	if (err)
+		return err;
+
+	for (int i = 0; i < size_in_words; i++)
+		mbx_data_wr32(mbx, MUCSE_MBX_FWPF_SHM + i * 4, msg[i]);
+
+	/* flush acks as we are overwriting the message buffer */
+	hw->mbx.fw_ack = mucse_mbx_get_fwack(mbx);
+	mucse_mbx_inc_pf_req(hw);
+	mucse_release_mbx_lock_pf(hw, true);
+
+	return 0;
+}
+
+/**
+ * mucse_check_for_ack_pf - Check to see if the fw has ACKed
+ * @hw: pointer to the HW structure
+ *
+ * Return: 0 if the fw has set the Status bit or else -EIO
+ **/
+static int mucse_check_for_ack_pf(struct mucse_hw *hw)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	u16 fw_ack;
+
+	fw_ack = mucse_mbx_get_fwack(mbx);
+	/* The chip's register is reset to 0 when the RC sends the
+	 * reset mbx command. Return -EIO in that state; otherwise
+	 * fw_ack == hw->mbx.fw_ack means there is no new ack.
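	 *
	 * Both counters live in one 32-bit register (req in bits 15:0,
	 * ack in bits 31:16), so a single read recovers the pair:
	 *   req = FIELD_GET(GENMASK_U32(15, 0), val);
	 *   ack = FIELD_GET(GENMASK_U32(31, 16), val);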
+	 **/
+	if (fw_ack == 0 || fw_ack == hw->mbx.fw_ack)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * mucse_poll_for_ack - Wait for message acknowledgment
+ * @hw: pointer to the HW structure
+ *
+ * Return: 0 if it successfully received a message acknowledgment,
+ * else negative errno
+ **/
+static int mucse_poll_for_ack(struct mucse_hw *hw)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	int val;
+
+	return read_poll_timeout(mucse_check_for_ack_pf,
+				 val, !val, mbx->delay_us,
+				 mbx->timeout_us,
+				 false, hw);
}
+
+/**
+ * mucse_write_and_wait_ack_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: the message buffer
+ * @size: length of buffer
+ *
+ * Return: 0 if it successfully copied the message into the buffer and
+ * received an ack to that message within the configured timeout
+ **/
+int mucse_write_and_wait_ack_mbx(struct mucse_hw *hw, u32 *msg, u16 size)
+{
+	int err;
+
+	err = mucse_write_mbx_pf(hw, msg, size);
+	if (err)
+		return err;
+
+	return mucse_poll_for_ack(hw);
+}
+
+/**
+ * mucse_mbx_reset - Reset mbx info, sync info from regs
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_reset resets all mbx variables to default.
+ **/
+static void mucse_mbx_reset(struct mucse_hw *hw)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	u32 val;
+
+	val = mbx_data_rd32(mbx, MUCSE_MBX_FW2PF_CNT);
+	hw->mbx.fw_req = FIELD_GET(GENMASK_U32(15, 0), val);
+	hw->mbx.fw_ack = FIELD_GET(GENMASK_U32(31, 16), val);
+	mbx_ctrl_wr32(mbx, MUCSE_MBX_PF2FW_CTRL(mbx), 0);
+	mbx_ctrl_wr32(mbx, MUCSE_MBX_FWPF_MASK(mbx), GENMASK_U32(31, 16));
+}
+
+/**
+ * mucse_init_mbx_params_pf - Set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void mucse_init_mbx_params_pf(struct mucse_hw *hw)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+
+	mbx->delay_us = 100;
+	mbx->timeout_us = 4 * USEC_PER_SEC;
+	mutex_init(&mbx->lock);
+	mucse_mbx_reset(hw);
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
new file mode 100644
index 000000000000..e6fcc8d1d3ca
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#ifndef _RNPGBE_MBX_H
+#define _RNPGBE_MBX_H
+
+#include "rnpgbe.h"
+
+#define MUCSE_MBX_FW2PF_CNT 0
+#define MUCSE_MBX_PF2FW_CNT 4
+#define MUCSE_MBX_FWPF_SHM 8
+#define MUCSE_MBX_PF2FW_CTRL(mbx) ((mbx)->pf2fw_mbx_ctrl)
+#define MUCSE_MBX_FWPF_MASK(mbx) ((mbx)->fwpf_mbx_mask)
+#define MUCSE_MBX_REQ BIT(0) /* Request a req to mailbox */
+#define MUCSE_MBX_PFU BIT(3) /* PF owns the mailbox buffer */
+
+int mucse_write_and_wait_ack_mbx(struct mucse_hw *hw, u32 *msg, u16 size);
+void mucse_init_mbx_params_pf(struct mucse_hw *hw);
+int mucse_poll_and_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size);
+#endif /* _RNPGBE_MBX_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
new file mode 100644
index 000000000000..8c8bd5e8e1db
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2025 Mucse Corporation. */
+
+#include <linux/if_ether.h>
+#include <linux/bitfield.h>
+
+#include "rnpgbe.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_mbx_fw.h"
+
+/**
+ * mucse_fw_send_cmd_wait_resp - Send cmd req and wait for response
+ * @hw: pointer to the HW structure
+ * @req: pointer to the cmd req structure
+ * @reply: pointer to the fw reply structure
+ *
+ * mucse_fw_send_cmd_wait_resp sends a request over the pf-fw mailbox
+ * and waits for the reply from fw.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_fw_send_cmd_wait_resp(struct mucse_hw *hw,
+				       struct mbx_fw_cmd_req *req,
+				       struct mbx_fw_cmd_reply *reply)
+{
+	int len = le16_to_cpu(req->datalen);
+	int retry_cnt = 3;
+	int err;
+
+	mutex_lock(&hw->mbx.lock);
+	err = mucse_write_and_wait_ack_mbx(hw, (u32 *)req, len);
+	if (err)
+		goto out;
+	do {
+		err = mucse_poll_and_read_mbx(hw, (u32 *)reply,
+					      sizeof(*reply));
+		if (err)
+			goto out;
+		/* A zero return from mucse_write_and_wait_ack_mbx means
+		 * the fw has received the request; poll up to 'retry_cnt'
+		 * times for a reply carrying the expected opcode.
+		 */
+	} while (--retry_cnt >= 0 && reply->opcode != req->opcode);
+out:
+	mutex_unlock(&hw->mbx.lock);
+	if (!err && retry_cnt < 0)
+		return -ETIMEDOUT;
+	if (!err && reply->error_code)
+		return -EIO;
+
+	return err;
+}
+
+/**
+ * mucse_mbx_get_info - Get hw info from fw
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_get_info queries the hw info from the firmware.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+static int mucse_mbx_get_info(struct mucse_hw *hw)
+{
+	struct mbx_fw_cmd_req req = {
+		.datalen = cpu_to_le16(MUCSE_MBX_REQ_HDR_LEN),
+		.opcode = cpu_to_le16(GET_HW_INFO),
+	};
+	struct mbx_fw_cmd_reply reply = {};
+	int err;
+
+	err = mucse_fw_send_cmd_wait_resp(hw, &req, &reply);
+	if (!err)
+		hw->pfvfnum = FIELD_GET(GENMASK_U16(7, 0),
+					le16_to_cpu(reply.hw_info.pfnum));
+
+	return err;
+}
+
+/**
+ * mucse_mbx_sync_fw - Try to sync with fw
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_sync_fw tries to sync with the fw. It is only called from
+ * probe; if it fails, no network device is registered. The sync is
+ * retried several times on timeout.
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int mucse_mbx_sync_fw(struct mucse_hw *hw)
+{
+	int try_cnt = 3;
+	int err;
+
+	do {
+		err = mucse_mbx_get_info(hw);
+	} while (err == -ETIMEDOUT && try_cnt--);
+
+	return err;
+}
+
+/**
+ * mucse_mbx_powerup - Notify fw of a powerup/powerdown
+ * @hw: pointer to the HW structure
+ * @is_powerup: true for powerup, false for powerdown
+ *
+ * mucse_mbx_powerup tells the fw to raise its working frequency back
+ * to normal on powerup (true), and to reduce it on powerdown (false).
+ *
+ * Return: 0 on success, negative errno on failure
+ **/
+int mucse_mbx_powerup(struct mucse_hw *hw, bool is_powerup)
+{
+	struct mbx_fw_cmd_req req = {
+		.datalen = cpu_to_le16(sizeof(req.powerup) +
+				       MUCSE_MBX_REQ_HDR_LEN),
+		.opcode = cpu_to_le16(POWER_UP),
+		.powerup = {
+			/* fw needs this to reply correct cmd */
+			.version = cpu_to_le32(GENMASK_U32(31, 0)),
+			.status = cpu_to_le32(is_powerup ? 1 : 0),
+		},
+	};
+	int len, err;
+
+	len = le16_to_cpu(req.datalen);
+	mutex_lock(&hw->mbx.lock);
+	err = mucse_write_and_wait_ack_mbx(hw, (u32 *)&req, len);
+	mutex_unlock(&hw->mbx.lock);
+
+	return err;
+}
+
+/**
+ * mucse_mbx_reset_hw - Posts a mbx req to reset hw
+ * @hw: pointer to the HW structure
+ *
+ * mucse_mbx_reset_hw posts a mbx req to the firmware to reset the hw.
+ * mucse_fw_send_cmd_wait_resp is used so that the call only returns
+ * once the hw reset has completed.
+ * + * Return: 0 on success, negative errno on failure + **/ +int mucse_mbx_reset_hw(struct mucse_hw *hw) +{ + struct mbx_fw_cmd_req req = { + .datalen = cpu_to_le16(MUCSE_MBX_REQ_HDR_LEN), + .opcode = cpu_to_le16(RESET_HW), + }; + struct mbx_fw_cmd_reply reply = {}; + + return mucse_fw_send_cmd_wait_resp(hw, &req, &reply); +} + +/** + * mucse_mbx_get_macaddr - Posts a mbx req to request macaddr + * @hw: pointer to the HW structure + * @pfvfnum: index of pf/vf num + * @mac_addr: pointer to store mac_addr + * @port: port index + * + * mucse_mbx_get_macaddr posts a mbx req to firmware to get mac_addr. + * + * Return: 0 on success, negative errno on failure + **/ +int mucse_mbx_get_macaddr(struct mucse_hw *hw, int pfvfnum, + u8 *mac_addr, + int port) +{ + struct mbx_fw_cmd_req req = { + .datalen = cpu_to_le16(sizeof(req.get_mac_addr) + + MUCSE_MBX_REQ_HDR_LEN), + .opcode = cpu_to_le16(GET_MAC_ADDRESS), + .get_mac_addr = { + .port_mask = cpu_to_le32(BIT(port)), + .pfvf_num = cpu_to_le32(pfvfnum), + }, + }; + struct mbx_fw_cmd_reply reply = {}; + int err; + + err = mucse_fw_send_cmd_wait_resp(hw, &req, &reply); + if (err) + return err; + + if (le32_to_cpu(reply.mac_addr.ports) & BIT(port)) + memcpy(mac_addr, reply.mac_addr.addrs[port].mac, ETH_ALEN); + else + return -ENODATA; + + return 0; +} diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h new file mode 100644 index 000000000000..fb24fc12b613 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2025 Mucse Corporation. */ + +#ifndef _RNPGBE_MBX_FW_H +#define _RNPGBE_MBX_FW_H + +#include <linux/types.h> + +#include "rnpgbe.h" + +#define MUCSE_MBX_REQ_HDR_LEN 24 + +enum MUCSE_FW_CMD { + GET_HW_INFO = 0x0601, + GET_MAC_ADDRESS = 0x0602, + RESET_HW = 0x0603, + POWER_UP = 0x0803, +}; + +struct mucse_hw_info { + u8 link_stat; + u8 port_mask; + __le32 speed; + __le16 phy_type; + __le16 nic_mode; + __le16 pfnum; + __le32 fw_version; + __le32 axi_mhz; + union { + u8 port_id[4]; + __le32 port_ids; + }; + __le32 bd_uid; + __le32 phy_id; + __le32 wol_status; + __le32 ext_info; +} __packed; + +struct mbx_fw_cmd_req { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 ret_value; + __le32 cookie_lo; + __le32 cookie_hi; + __le32 reply_lo; + __le32 reply_hi; + union { + u8 data[32]; + struct { + __le32 version; + __le32 status; + } powerup; + struct { + __le32 port_mask; + __le32 pfvf_num; + } get_mac_addr; + }; +} __packed; + +struct mbx_fw_cmd_reply { + __le16 flags; + __le16 opcode; + __le16 error_code; + __le16 datalen; + __le32 cookie_lo; + __le32 cookie_hi; + union { + u8 data[40]; + struct mac_addr { + __le32 ports; + struct _addr { + /* for macaddr:01:02:03:04:05:06 + * mac-hi=0x01020304 mac-lo=0x05060000 + */ + u8 mac[8]; + } addrs[4]; + } mac_addr; + struct mucse_hw_info hw_info; + }; +} __packed; + +int mucse_mbx_sync_fw(struct mucse_hw *hw); +int mucse_mbx_powerup(struct mucse_hw *hw, bool is_powerup); +int mucse_mbx_reset_hw(struct mucse_hw *hw); +int mucse_mbx_get_macaddr(struct mucse_hw *hw, int pfvfnum, + u8 *mac_addr, int port); +#endif /* _RNPGBE_MBX_FW_H */ diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c index 0e1a3800f371..85e3b19e6165 100644 --- a/drivers/net/ethernet/netronome/nfp/devlink_param.c +++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c @@ -81,7 +81,8 @@ static const struct 
nfp_devlink_param_u8_arg nfp_devlink_u8_args[] = { static int nfp_devlink_param_u8_get(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { const struct nfp_devlink_param_u8_arg *arg; struct nfp_pf *pf = devlink_priv(devlink); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index e5a6f59af0b6..62f05f4569b1 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -198,23 +198,21 @@ pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb) pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED); } -static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int pch_gbe_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { - struct hwtstamp_config cfg; struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev; u8 station[20]; - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - /* Get ieee1588's dev information */ pdev = adapter->ptp_pdev; - if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) + if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) return -ERANGE; - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: adapter->hwts_rx_en = 0; break; @@ -223,17 +221,17 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - adapter->hwts_rx_en = 1; + adapter->hwts_rx_en = cfg->rx_filter; pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0); break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - adapter->hwts_rx_en = 1; + adapter->hwts_rx_en = cfg->rx_filter; pch_ch_control_write(pdev, V2_MODE | CAP_MODE2); strcpy(station, PTP_L4_MULTICAST_SA); pch_set_station_address(station, pdev); break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - adapter->hwts_rx_en = 1; + adapter->hwts_rx_en = cfg->rx_filter; pch_ch_control_write(pdev, V2_MODE | CAP_MODE2); strcpy(station, PTP_L2_MULTICAST_SA); pch_set_station_address(station, pdev); @@ -242,12 +240,23 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -ERANGE; } - adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + adapter->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON; /* Clear out any old time stamps. */ pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; +} + +static int pch_gbe_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *cfg) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + + cfg->tx_type = adapter->hwts_tx_en ? 
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + cfg->rx_filter = adapter->hwts_rx_en; + + return 0; } static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) @@ -2234,9 +2243,6 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) netdev_dbg(netdev, "cmd : 0x%04x\n", cmd); - if (cmd == SIOCSHWTSTAMP) - return hwtstamp_ioctl(netdev, ifr, cmd); - return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); } @@ -2328,6 +2334,8 @@ static const struct net_device_ops pch_gbe_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = pch_gbe_netpoll, #endif + .ndo_hwtstamp_get = pch_gbe_hwtstamp_get, + .ndo_hwtstamp_set = pch_gbe_hwtstamp_set, }; static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index b28966ae50c2..058eea86e141 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -2335,20 +2335,6 @@ static int ionic_stop(struct net_device *netdev) return 0; } -static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - struct ionic_lif *lif = netdev_priv(netdev); - - switch (cmd) { - case SIOCSHWTSTAMP: - return ionic_lif_hwstamp_set(lif, ifr); - case SIOCGHWTSTAMP: - return ionic_lif_hwstamp_get(lif, ifr); - default: - return -EOPNOTSUPP; - } -} - static int ionic_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivf) { @@ -2812,7 +2798,6 @@ static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf) static const struct net_device_ops ionic_netdev_ops = { .ndo_open = ionic_open, .ndo_stop = ionic_stop, - .ndo_eth_ioctl = ionic_eth_ioctl, .ndo_start_xmit = ionic_start_xmit, .ndo_bpf = ionic_xdp, .ndo_xdp_xmit = ionic_xdp_xmit, @@ -2833,6 +2818,8 @@ static const struct net_device_ops ionic_netdev_ops = { .ndo_get_vf_config = ionic_get_vf_config, .ndo_set_vf_link_state = ionic_set_vf_link_state, .ndo_get_vf_stats = ionic_get_vf_stats, + .ndo_hwtstamp_get = ionic_hwstamp_get, + .ndo_hwtstamp_set = ionic_hwstamp_set, }; static int ionic_cmb_reconfig(struct ionic_lif *lif, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 43bdd0fb8733..8e10f66dc50e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -6,7 +6,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> -#include <uapi/linux/net_tstamp.h> +#include <linux/net_tstamp.h> #include <linux/dim.h> #include <linux/pci.h> #include "ionic_rx_filter.h" @@ -254,7 +254,7 @@ struct ionic_phc { struct timecounter tc; struct mutex config_lock; /* lock for ts_config */ - struct hwtstamp_config ts_config; + struct kernel_hwtstamp_config ts_config; u64 ts_config_rx_filt; u32 ts_config_tx_mode; @@ -362,8 +362,11 @@ int ionic_lif_size(struct ionic *ionic); #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) void ionic_lif_hwstamp_replay(struct ionic_lif *lif); void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif); -int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr); -int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr); +int ionic_hwstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); +int ionic_hwstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config); ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 
counter); void ionic_lif_register_phc(struct ionic_lif *lif); void ionic_lif_unregister_phc(struct ionic_lif *lif); @@ -373,12 +376,15 @@ void ionic_lif_free_phc(struct ionic_lif *lif); static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {} static inline void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) {} -static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) +static inline int ionic_hwstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } -static inline int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) +static inline int ionic_hwstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c index 9f5c81d44f99..05b44fc482f8 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c @@ -65,11 +65,12 @@ static u64 ionic_hwstamp_rx_filt(int config_rx_filter) } static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif, - struct hwtstamp_config *new_ts) + struct kernel_hwtstamp_config *new_ts, + struct netlink_ext_ack *extack) { + struct kernel_hwtstamp_config *config; + struct kernel_hwtstamp_config ts = {}; struct ionic *ionic = lif->ionic; - struct hwtstamp_config *config; - struct hwtstamp_config ts; int tx_mode = 0; u64 rx_filt = 0; int err, err2; @@ -99,12 +100,16 @@ static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif, tx_mode = ionic_hwstamp_tx_mode(config->tx_type); if (tx_mode < 0) { + NL_SET_ERR_MSG_MOD(extack, + "TX time stamping mode isn't supported"); err = tx_mode; goto err_queues; } mask = cpu_to_le64(BIT_ULL(tx_mode)); if ((ionic->ident.lif.eth.hwstamp_tx_modes & mask) != mask) { + NL_SET_ERR_MSG_MOD(extack, + "TX time stamping mode isn't supported"); err = -ERANGE; goto err_queues; } @@ -124,32 +129,47 @@ static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif, if (tx_mode) { err = ionic_lif_create_hwstamp_txq(lif); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Error creating TX timestamp queue"); goto err_queues; + } } if (rx_filt) { err = ionic_lif_create_hwstamp_rxq(lif); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Error creating RX timestamp queue"); goto err_queues; + } } if (tx_mode != lif->phc->ts_config_tx_mode) { err = ionic_lif_set_hwstamp_txmode(lif, tx_mode); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Error enabling TX timestamp mode"); goto err_txmode; + } } if (rx_filt != lif->phc->ts_config_rx_filt) { err = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Error enabling RX timestamp mode"); goto err_rxfilt; + } } if (rx_all != (lif->phc->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)) { err = ionic_lif_config_hwstamp_rxq_all(lif, rx_all); - if (err) + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Error enabling RX timestamp mode"); goto err_rxall; + } } memcpy(&lif->phc->ts_config, config, sizeof(*config)); @@ -183,28 +203,24 @@ err_queues: return err; } -int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) +int ionic_hwstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; + struct ionic_lif *lif = netdev_priv(netdev); int err; if (!lif->phc || !lif->phc->ptp) return -EOPNOTSUPP; - if 
(copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - mutex_lock(&lif->queue_lock); - err = ionic_lif_hwstamp_set_ts_config(lif, &config); + err = ionic_lif_hwstamp_set_ts_config(lif, config, extack); mutex_unlock(&lif->queue_lock); if (err) { netdev_info(lif->netdev, "hwstamp set failed: %d\n", err); return err; } - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; - return 0; } @@ -216,7 +232,7 @@ void ionic_lif_hwstamp_replay(struct ionic_lif *lif) return; mutex_lock(&lif->queue_lock); - err = ionic_lif_hwstamp_set_ts_config(lif, NULL); + err = ionic_lif_hwstamp_set_ts_config(lif, NULL, NULL); mutex_unlock(&lif->queue_lock); if (err) netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err); @@ -246,19 +262,18 @@ void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) mutex_unlock(&lif->phc->config_lock); } -int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) +int ionic_hwstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config config; + struct ionic_lif *lif = netdev_priv(netdev); if (!lif->phc || !lif->phc->ptp) return -EOPNOTSUPP; mutex_lock(&lif->phc->config_lock); - memcpy(&config, &lif->phc->ts_config, sizeof(config)); + memcpy(config, &lif->phc->ts_config, sizeof(*config)); mutex_unlock(&lif->phc->config_lock); - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c index 94c5689b5abd..0c5278c0598c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c +++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c @@ -121,7 +121,8 @@ void qed_fw_reporters_destroy(struct devlink *devlink) } static int qed_dl_param_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct qed_devlink *qed_dl = devlink_priv(dl); struct qed_dev *cdev; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index b5d744d2586f..66ab1b9d65a1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -506,25 +506,6 @@ static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting) } #endif -static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct qede_dev *edev = netdev_priv(dev); - - if (!netif_running(dev)) - return -EAGAIN; - - switch (cmd) { - case SIOCSHWTSTAMP: - return qede_ptp_hw_ts(edev, ifr); - default: - DP_VERBOSE(edev, QED_MSG_DEBUG, - "default IOCTL cmd 0x%x\n", cmd); - return -EOPNOTSUPP; - } - - return 0; -} - static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp) { char *p_sb = (char *)fp->sb_info->sb_virt; @@ -717,7 +698,6 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, - .ndo_eth_ioctl = qede_ioctl, .ndo_tx_timeout = qede_tx_timeout, #ifdef CONFIG_QED_SRIOV .ndo_set_vf_mac = qede_set_vf_mac, @@ -742,6 +722,8 @@ static const struct net_device_ops qede_netdev_ops = { #endif .ndo_xdp_xmit = qede_xdp_transmit, .ndo_setup_tc = qede_setup_tc_offload, + .ndo_hwtstamp_get = qede_hwtstamp_get, + .ndo_hwtstamp_set = qede_hwtstamp_set, }; static const struct net_device_ops qede_netdev_vf_ops = { diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c 
b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index a38f1e72c62b..d351be5fbda1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -199,18 +199,15 @@ static u64 qede_ptp_read_cc(struct cyclecounter *cc) return phc_cycles; } -static int qede_ptp_cfg_filters(struct qede_dev *edev) +static void qede_ptp_cfg_filters(struct qede_dev *edev) { enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON; enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE; struct qede_ptp *ptp = edev->ptp; - if (!ptp) - return -EIO; - if (!ptp->hw_ts_ioctl_called) { DP_INFO(edev, "TS IOCTL not called\n"); - return 0; + return; } switch (ptp->tx_type) { @@ -223,11 +220,6 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev) clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags); tx_type = QED_PTP_HWTSTAMP_TX_OFF; break; - - case HWTSTAMP_TX_ONESTEP_SYNC: - case HWTSTAMP_TX_ONESTEP_P2P: - DP_ERR(edev, "One-step timestamping is not supported\n"); - return -ERANGE; } spin_lock_bh(&ptp->lock); @@ -286,39 +278,65 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev) ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type); spin_unlock_bh(&ptp->lock); - - return 0; } -int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr) +int qede_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; + struct qede_dev *edev = netdev_priv(netdev); struct qede_ptp *ptp; - int rc; + + if (!netif_running(netdev)) { + NL_SET_ERR_MSG_MOD(extack, "Device is down"); + return -EAGAIN; + } ptp = edev->ptp; - if (!ptp) + if (!ptp) { + NL_SET_ERR_MSG_MOD(extack, "HW timestamping is not supported"); return -EIO; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; + } DP_VERBOSE(edev, QED_MSG_DEBUG, - "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n", - config.tx_type, config.rx_filter); + "HWTSTAMP SET: Requested tx_type = %d, requested rx_filters = %d\n", + config->tx_type, config->rx_filter); + + switch (config->tx_type) { + case HWTSTAMP_TX_ON: + case HWTSTAMP_TX_OFF: + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "One-step timestamping is not supported"); + return -ERANGE; + } ptp->hw_ts_ioctl_called = 1; - ptp->tx_type = config.tx_type; - ptp->rx_filter = config.rx_filter; + ptp->tx_type = config->tx_type; + ptp->rx_filter = config->rx_filter; - rc = qede_ptp_cfg_filters(edev); - if (rc) - return rc; + qede_ptp_cfg_filters(edev); + + config->rx_filter = ptp->rx_filter; + + return 0; +} - config.rx_filter = ptp->rx_filter; +int qede_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + struct qede_dev *edev = netdev_priv(netdev); + struct qede_ptp *ptp; - return copy_to_user(ifr->ifr_data, &config, - sizeof(config)) ? 
-EFAULT : 0; + ptp = edev->ptp; + if (!ptp) + return -EIO; + + config->tx_type = ptp->tx_type; + config->rx_filter = ptp->rx_filter; + + return 0; } int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *info) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h index adafc894797e..88f168395812 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -14,7 +14,11 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb); void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb); -int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req); +int qede_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config); +int qede_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); void qede_ptp_disable(struct qede_dev *edev); int qede_ptp_enable(struct qede_dev *edev); int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *ts); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 853aabedb128..405e91eb3141 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -57,7 +57,9 @@ #define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" #define FIRMWARE_8125D_1 "rtl_nic/rtl8125d-1.fw" #define FIRMWARE_8125D_2 "rtl_nic/rtl8125d-2.fw" +#define FIRMWARE_8125K_1 "rtl_nic/rtl8125k-1.fw" #define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw" +#define FIRMWARE_9151A_1 "rtl_nic/rtl9151a-1.fw" #define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw" #define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw" #define FIRMWARE_8127A_1 "rtl_nic/rtl8127a-1.fw" @@ -110,6 +112,8 @@ static const struct rtl_chip_info { { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66, "RTL8125BP", FIRMWARE_8125BP_2 }, /* 8125D family. 
*/ + { 0x7cf, 0x68b, RTL_GIGA_MAC_VER_64, "RTL9151A", FIRMWARE_9151A_1 }, + { 0x7cf, 0x68a, RTL_GIGA_MAC_VER_64, "RTL8125K", FIRMWARE_8125K_1 }, { 0x7cf, 0x689, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_2 }, { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_1 }, @@ -770,7 +774,9 @@ MODULE_FIRMWARE(FIRMWARE_8125A_3); MODULE_FIRMWARE(FIRMWARE_8125B_2); MODULE_FIRMWARE(FIRMWARE_8125D_1); MODULE_FIRMWARE(FIRMWARE_8125D_2); +MODULE_FIRMWARE(FIRMWARE_8125K_1); MODULE_FIRMWARE(FIRMWARE_8125BP_2); +MODULE_FIRMWARE(FIRMWARE_9151A_1); MODULE_FIRMWARE(FIRMWARE_8126A_2); MODULE_FIRMWARE(FIRMWARE_8126A_3); MODULE_FIRMWARE(FIRMWARE_8127A_1); @@ -2382,26 +2388,6 @@ void r8169_apply_firmware(struct rtl8169_private *tp) } } -static void rtl8168_config_eee_mac(struct rtl8169_private *tp) -{ - /* Adjust EEE LED frequency */ - if (tp->mac_version != RTL_GIGA_MAC_VER_38) - RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); - - rtl_eri_set_bits(tp, 0x1b0, 0x0003); -} - -static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) -{ - r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); - r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); -} - -static void rtl8125b_config_eee_mac(struct rtl8169_private *tp) -{ - r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); -} - static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr) { rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr)); @@ -3179,8 +3165,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); - rtl8168_config_eee_mac(tp); - RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); rtl_mod_config5(tp, Spi_en, 0); @@ -3205,8 +3189,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); rtl_mod_config5(tp, Spi_en, 0); - - rtl8168_config_eee_mac(tp); } static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) @@ -3256,8 +3238,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); - rtl8168_config_eee_mac(tp); - rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); rtl_eri_clear_bits(tp, 0x1b0, BIT(12)); @@ -3398,8 +3378,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); - rtl8168_config_eee_mac(tp); - RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); @@ -3447,8 +3425,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); - rtl8168_config_eee_mac(tp); - rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); @@ -3504,8 +3480,6 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); - rtl8168_config_eee_mac(tp); - RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); @@ -3746,11 +3720,6 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); - if (tp->mac_version == RTL_GIGA_MAC_VER_61) - rtl8125a_config_eee_mac(tp); - else - rtl8125b_config_eee_mac(tp); - rtl_disable_rxdvgate(tp); } 
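The pch_gbe, ionic and qede hunks above (and the ravb, rswitch and rtsn hunks further down) all perform the same conversion: the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctl plumbing, with its copy_from_user()/copy_to_user() of struct hwtstamp_config, is replaced by the dedicated .ndo_hwtstamp_get/.ndo_hwtstamp_set callbacks, which receive an already-unmarshalled struct kernel_hwtstamp_config plus a netlink extack for error reporting. A minimal sketch of the shape each driver ends up with (the foo_* names are hypothetical, not from any of these drivers):

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/net_tstamp.h>

struct foo_priv {
	enum hwtstamp_tx_types tstamp_tx_ctrl;
	enum hwtstamp_rx_filters tstamp_rx_ctrl;
};

static int foo_hwtstamp_get(struct net_device *ndev,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_priv *priv = netdev_priv(ndev);

	/* No copy_to_user(): the core marshals config back to user space. */
	config->tx_type = priv->tstamp_tx_ctrl;
	config->rx_filter = priv->tstamp_rx_ctrl;
	return 0;
}

static int foo_hwtstamp_set(struct net_device *ndev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(ndev);

	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported tx_type");
		return -ERANGE;
	}

	priv->tstamp_tx_ctrl = config->tx_type;
	priv->tstamp_rx_ctrl = config->rx_filter;
	return 0;
}

/*
 * Wired up in net_device_ops in place of the ioctl handler:
 *	.ndo_hwtstamp_get = foo_hwtstamp_get,
 *	.ndo_hwtstamp_set = foo_hwtstamp_set,
 */

With timestamping moved to dedicated callbacks, drivers whose remaining ioctl needs are PHY-only can point .ndo_eth_ioctl at phy_do_ioctl_running, as the ravb and rswitch hunks below do.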
@@ -4753,6 +4722,41 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) return work_done; } +static void rtl_enable_tx_lpi(struct rtl8169_private *tp, bool enable) +{ + if (!rtl_supports_eee(tp)) + return; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_52: + /* Adjust EEE LED frequency */ + if (tp->mac_version != RTL_GIGA_MAC_VER_38) + RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + if (enable) + rtl_eri_set_bits(tp, 0x1b0, 0x0003); + else + rtl_eri_clear_bits(tp, 0x1b0, 0x0003); + break; + case RTL_GIGA_MAC_VER_61: + if (enable) { + r8168_mac_ocp_modify(tp, 0xe040, 0, 0x0003); + r8168_mac_ocp_modify(tp, 0xeb62, 0, 0x0006); + } else { + r8168_mac_ocp_modify(tp, 0xe040, 0x0003, 0); + r8168_mac_ocp_modify(tp, 0xeb62, 0x0006, 0); + } + break; + case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST: + if (enable) + r8168_mac_ocp_modify(tp, 0xe040, 0, 0x0003); + else + r8168_mac_ocp_modify(tp, 0xe040, 0x0003, 0); + break; + default: + break; + } +} + static void r8169_phylink_handler(struct net_device *ndev) { struct rtl8169_private *tp = netdev_priv(ndev); @@ -4760,6 +4764,7 @@ static void r8169_phylink_handler(struct net_device *ndev) if (netif_carrier_ok(ndev)) { rtl_link_chg_patch(tp); + rtl_enable_tx_lpi(tp, tp->phydev->enable_tx_lpi); pm_request_resume(d); } else { pm_runtime_idle(d); @@ -5004,9 +5009,7 @@ static int rtl8169_resume(struct device *device) clk_prepare_enable(tp->clk); /* Some chip versions may truncate packets without this initialization */ - if (tp->mac_version == RTL_GIGA_MAC_VER_37 || - tp->mac_version == RTL_GIGA_MAC_VER_46) - rtl_init_rxcfg(tp); + rtl_init_rxcfg(tp); return rtl8169_runtime_resume(device); } @@ -5459,6 +5462,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } tp->aspm_manageable = !rc; + /* Fiber mode on RTL8127AF isn't supported */ + if (rtl_is_8125(tp)) { + u16 data = r8168_mac_ocp_read(tp, 0xd006); + + if ((data & 0xff) == 0x07) + return dev_err_probe(&pdev->dev, -ENODEV, + "Fiber mode not supported\n"); + } + tp->dash_type = rtl_get_dash_type(tp); tp->dash_enabled = rtl_dash_is_enabled(tp); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 7b48060c250b..5e56ec9b1013 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -35,16 +35,6 @@ /* Driver's parameters */ #define RAVB_ALIGN 128 -/* Hardware time stamp */ -#define RAVB_TXTSTAMP_VALID 0x00000001 /* TX timestamp valid */ -#define RAVB_TXTSTAMP_ENABLED 0x00000010 /* Enable TX timestamping */ - -#define RAVB_RXTSTAMP_VALID 0x00000001 /* RX timestamp valid */ -#define RAVB_RXTSTAMP_TYPE 0x00000006 /* RX type mask */ -#define RAVB_RXTSTAMP_TYPE_V2_L2_EVENT 0x00000002 -#define RAVB_RXTSTAMP_TYPE_ALL 0x00000006 -#define RAVB_RXTSTAMP_ENABLED 0x00000010 /* Enable RX timestamping */ - enum ravb_reg { /* AVB-DMAC registers */ CCC = 0x0000, @@ -1017,7 +1007,6 @@ enum CSR2_BIT { #define CSR2_CSUM_ENABLE (CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4 | \ CSR2_RTCP6 | CSR2_RUDP6 | CSR2_RICMP6) -#define DBAT_ENTRY_NUM 22 #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 #define NUM_TX_QUEUE 2 @@ -1062,6 +1051,7 @@ struct ravb_hw_info { u32 rx_max_frame_size; u32 rx_buffer_size; u32 rx_desc_size; + u32 dbat_entry_num; unsigned aligned_tx: 1; unsigned coalesce_irqs:1; /* Needs software IRQ coalescing */ @@ -1114,8 +1104,8 @@ struct ravb_private { u32 rx_over_errors; u32 rx_fifo_errors; struct net_device_stats stats[NUM_RX_QUEUE]; - u32 tstamp_tx_ctrl; - u32 
tstamp_rx_ctrl; + enum hwtstamp_tx_types tstamp_tx_ctrl; + enum hwtstamp_rx_filters tstamp_rx_ctrl; struct list_head ts_skb_list; u32 ts_skb_tag; struct ravb_ptp ptp; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index e2d7ce1a85e8..57b0db314fb5 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -946,6 +946,30 @@ refill: return rx_packets; } +static void ravb_rx_rcar_hwstamp(struct ravb_private *priv, int q, + struct ravb_ex_rx_desc *desc, + struct sk_buff *skb) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct timespec64 ts; + bool get_ts; + + if (q == RAVB_NC) + get_ts = priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE; + else + get_ts = priv->tstamp_rx_ctrl == HWTSTAMP_FILTER_ALL; + + if (!get_ts) + return; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + ts.tv_sec = ((u64)le16_to_cpu(desc->ts_sh) << 32) + | le32_to_cpu(desc->ts_sl); + ts.tv_nsec = le32_to_cpu(desc->ts_n); + shhwtstamps->hwtstamp = timespec64_to_ktime(ts); +} + /* Packet receive function for Ethernet AVB */ static int ravb_rx_rcar(struct net_device *ndev, int budget, int q) { @@ -955,7 +979,6 @@ static int ravb_rx_rcar(struct net_device *ndev, int budget, int q) struct ravb_ex_rx_desc *desc; unsigned int limit, i; struct sk_buff *skb; - struct timespec64 ts; int rx_packets = 0; u8 desc_status; u16 pkt_len; @@ -992,7 +1015,6 @@ static int ravb_rx_rcar(struct net_device *ndev, int budget, int q) if (desc_status & MSC_CEEF) stats->rx_missed_errors++; } else { - u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE; struct ravb_rx_buffer *rx_buff; void *rx_addr; @@ -1010,19 +1032,8 @@ static int ravb_rx_rcar(struct net_device *ndev, int budget, int q) break; } skb_mark_for_recycle(skb); - get_ts &= (q == RAVB_NC) ? - RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : - ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; - if (get_ts) { - struct skb_shared_hwtstamps *shhwtstamps; - - shhwtstamps = skb_hwtstamps(skb); - memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) << - 32) | le32_to_cpu(desc->ts_sl); - ts.tv_nsec = le32_to_cpu(desc->ts_n); - shhwtstamps->hwtstamp = timespec64_to_ktime(ts); - } + + ravb_rx_rcar_hwstamp(priv, q, desc, skb); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); @@ -1975,7 +1986,6 @@ out_ptp_stop: out_set_reset: ravb_set_opmode(ndev, CCC_OPC_RESET); out_rpm_put: - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); out_napi_off: if (info->nc_queues) @@ -2404,95 +2414,55 @@ static int ravb_close(struct net_device *ndev) if (error) return error; - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; } -static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req) +static int ravb_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) { struct ravb_private *priv = netdev_priv(ndev); - struct hwtstamp_config config; - config.flags = 0; - config.tx_type = priv->tstamp_tx_ctrl ? 
HWTSTAMP_TX_ON : - HWTSTAMP_TX_OFF; - switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) { - case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT: - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; - break; - case RAVB_RXTSTAMP_TYPE_ALL: - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - config.rx_filter = HWTSTAMP_FILTER_NONE; - } + config->flags = 0; + config->tx_type = priv->tstamp_tx_ctrl; + config->rx_filter = priv->tstamp_rx_ctrl; - return copy_to_user(req->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; } /* Control hardware time stamping */ -static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req) +static int ravb_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct ravb_private *priv = netdev_priv(ndev); - struct hwtstamp_config config; - u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED; - u32 tstamp_tx_ctrl; + enum hwtstamp_rx_filters tstamp_rx_ctrl; + enum hwtstamp_tx_types tstamp_tx_ctrl; - if (copy_from_user(&config, req->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: - tstamp_tx_ctrl = 0; - break; case HWTSTAMP_TX_ON: - tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED; + tstamp_tx_ctrl = config->tx_type; break; default: return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: - tstamp_rx_ctrl = 0; - break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; + tstamp_rx_ctrl = config->rx_filter; break; default: - config.rx_filter = HWTSTAMP_FILTER_ALL; - tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL; } priv->tstamp_tx_ctrl = tstamp_tx_ctrl; priv->tstamp_rx_ctrl = tstamp_rx_ctrl; - return copy_to_user(req->ifr_data, &config, sizeof(config)) ? 
- -EFAULT : 0; -} - -/* ioctl to device function */ -static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) -{ - struct phy_device *phydev = ndev->phydev; - - if (!netif_running(ndev)) - return -EINVAL; - - if (!phydev) - return -ENODEV; - - switch (cmd) { - case SIOCGHWTSTAMP: - return ravb_hwtstamp_get(ndev, req); - case SIOCSHWTSTAMP: - return ravb_hwtstamp_set(ndev, req); - } - - return phy_mii_ioctl(phydev, req, cmd); + return 0; } static int ravb_change_mtu(struct net_device *ndev, int new_mtu) @@ -2628,11 +2598,13 @@ static const struct net_device_ops ravb_netdev_ops = { .ndo_get_stats = ravb_get_stats, .ndo_set_rx_mode = ravb_set_rx_mode, .ndo_tx_timeout = ravb_tx_timeout, - .ndo_eth_ioctl = ravb_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_change_mtu = ravb_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_set_features = ravb_set_features, + .ndo_hwtstamp_get = ravb_hwtstamp_get, + .ndo_hwtstamp_set = ravb_hwtstamp_set, }; /* MDIO bus init function */ @@ -2714,6 +2686,7 @@ static const struct ravb_hw_info ravb_gen2_hw_info = { .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), + .dbat_entry_num = 22, .aligned_tx = 1, .gptp = 1, .nc_queues = 1, @@ -2737,6 +2710,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = { .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), + .dbat_entry_num = 22, .internal_delay = 1, .tx_counters = 1, .multi_irqs = 1, @@ -2763,6 +2737,7 @@ static const struct ravb_hw_info ravb_gen4_hw_info = { .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), + .dbat_entry_num = 22, .internal_delay = 1, .tx_counters = 1, .multi_irqs = 1, @@ -2789,6 +2764,7 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = { .rx_buffer_size = SZ_2K + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), + .dbat_entry_num = 22, .multi_irqs = 1, .err_mgmt_irqs = 1, .gptp = 1, @@ -2814,6 +2790,7 @@ static const struct ravb_hw_info gbeth_hw_info = { .rx_max_frame_size = SZ_8K, .rx_buffer_size = SZ_2K, .rx_desc_size = sizeof(struct ravb_rx_desc), + .dbat_entry_num = 2, .aligned_tx = 1, .coalesce_irqs = 1, .tx_counters = 1, @@ -2941,13 +2918,14 @@ static int ravb_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, PTR_ERR(rstc), "failed to get cpg reset\n"); + info = of_device_get_match_data(&pdev->dev); + ndev = alloc_etherdev_mqs(sizeof(struct ravb_private), - NUM_TX_QUEUE, NUM_RX_QUEUE); + info->nc_queues ? NUM_TX_QUEUE : 1, + info->nc_queues ? 
NUM_RX_QUEUE : 1); if (!ndev) return -ENOMEM; - info = of_device_get_match_data(&pdev->dev); - ndev->features = info->net_features; ndev->hw_features = info->net_hw_features; ndev->vlan_features = info->vlan_features; @@ -3045,7 +3023,7 @@ static int ravb_probe(struct platform_device *pdev) ravb_parse_delay_mode(np, ndev); /* Allocate descriptor base address table */ - priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; + priv->desc_bat_size = sizeof(struct ravb_desc) * info->dbat_entry_num; priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, &priv->desc_bat_dma, GFP_KERNEL); if (!priv->desc_bat) { @@ -3055,7 +3033,7 @@ static int ravb_probe(struct platform_device *pdev) error = -ENOMEM; goto out_rpm_put; } - for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++) + for (q = RAVB_BE; q < info->dbat_entry_num; q++) priv->desc_bat[q].die_dt = DT_EOS; /* Initialise HW timestamp list */ @@ -3110,7 +3088,6 @@ static int ravb_probe(struct platform_device *pdev) netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); - pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); return 0; @@ -3294,10 +3271,8 @@ static int ravb_resume(struct device *dev) return 0; out_rpm_put: - if (!priv->wol_enabled) { - pm_runtime_mark_last_busy(dev); + if (!priv->wol_enabled) pm_runtime_put_autosuspend(dev); - } return ret; } diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h index f77e79e47357..9a9c232c854e 100644 --- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h +++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h @@ -9,24 +9,11 @@ #include <linux/ptp_clock_kernel.h> -#define RCAR_GEN4_GPTP_OFFSET_S4 0x00018000 - -/* driver's definitions */ -#define RCAR_GEN4_RXTSTAMP_ENABLED BIT(0) -#define RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT BIT(1) -#define RCAR_GEN4_RXTSTAMP_TYPE_ALL (RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT | BIT(2)) -#define RCAR_GEN4_RXTSTAMP_TYPE RCAR_GEN4_RXTSTAMP_TYPE_ALL - -#define RCAR_GEN4_TXTSTAMP_ENABLED BIT(0) - - struct rcar_gen4_ptp_private { void __iomem *addr; struct ptp_clock *clock; struct ptp_clock_info info; spinlock_t lock; /* For multiple registers access */ - u32 tstamp_tx_ctrl; - u32 tstamp_rx_ctrl; s64 default_addend; bool initialized; }; diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index a1d4a877e5bd..aa605304fed0 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -1063,6 +1063,9 @@ struct rswitch_private { bool etha_no_runtime_change; bool gwca_halt; struct net_device *offload_brdev; + + enum hwtstamp_tx_types tstamp_tx_ctrl; + enum hwtstamp_rx_filters tstamp_rx_ctrl; }; bool is_rdev(const struct net_device *ndev); diff --git a/drivers/net/ethernet/renesas/rswitch_main.c b/drivers/net/ethernet/renesas/rswitch_main.c index 8d8acc2124b8..e14b21148f27 100644 --- a/drivers/net/ethernet/renesas/rswitch_main.c +++ b/drivers/net/ethernet/renesas/rswitch_main.c @@ -30,6 +30,8 @@ #include "rswitch.h" #include "rswitch_l2.h" +#define RSWITCH_GPTP_OFFSET_S4 0x00018000 + static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected) { u32 val; @@ -843,7 +845,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota) if (!skb) goto out; - get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + get_ts = rdev->priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE; if (get_ts) { struct skb_shared_hwtstamps 
*shhwtstamps; struct timespec64 ts; @@ -1793,88 +1795,54 @@ static struct net_device_stats *rswitch_get_stats(struct net_device *ndev) return &ndev->stats; } -static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req) +static int rswitch_hwstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) { struct rswitch_device *rdev = netdev_priv(ndev); - struct rcar_gen4_ptp_private *ptp_priv; - struct hwtstamp_config config; - - ptp_priv = rdev->priv->ptp_priv; + struct rswitch_private *priv = rdev->priv; - config.flags = 0; - config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : - HWTSTAMP_TX_OFF; - switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { - case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT: - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; - break; - case RCAR_GEN4_RXTSTAMP_TYPE_ALL: - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - config.rx_filter = HWTSTAMP_FILTER_NONE; - break; - } + config->flags = 0; + config->tx_type = priv->tstamp_tx_ctrl; + config->rx_filter = priv->tstamp_rx_ctrl; - return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; + return 0; } -static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req) +static int rswitch_hwstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct rswitch_device *rdev = netdev_priv(ndev); - u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED; - struct hwtstamp_config config; - u32 tstamp_tx_ctrl; - - if (copy_from_user(&config, req->ifr_data, sizeof(config))) - return -EFAULT; + enum hwtstamp_rx_filters tstamp_rx_ctrl; + enum hwtstamp_tx_types tstamp_tx_ctrl; - if (config.flags) + if (config->flags) return -EINVAL; - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: - tstamp_tx_ctrl = 0; - break; case HWTSTAMP_TX_ON: - tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED; + tstamp_tx_ctrl = config->tx_type; break; default: return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: - tstamp_rx_ctrl = 0; - break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + tstamp_rx_ctrl = config->rx_filter; break; default: - config.rx_filter = HWTSTAMP_FILTER_ALL; - tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL; break; } - rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; - rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; - - return copy_to_user(req->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; -} - -static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) -{ - if (!netif_running(ndev)) - return -EINVAL; + rdev->priv->tstamp_tx_ctrl = tstamp_tx_ctrl; + rdev->priv->tstamp_rx_ctrl = tstamp_rx_ctrl; - switch (cmd) { - case SIOCGHWTSTAMP: - return rswitch_hwstamp_get(ndev, req); - case SIOCSHWTSTAMP: - return rswitch_hwstamp_set(ndev, req); - default: - return phy_mii_ioctl(ndev->phydev, req, cmd); - } + return 0; } static int rswitch_get_port_parent_id(struct net_device *ndev, @@ -1905,11 +1873,13 @@ static const struct net_device_ops rswitch_netdev_ops = { .ndo_stop = rswitch_stop, .ndo_start_xmit = rswitch_start_xmit, .ndo_get_stats = rswitch_get_stats, - .ndo_eth_ioctl = rswitch_eth_ioctl, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_get_port_parent_id = rswitch_get_port_parent_id, .ndo_get_phys_port_name = rswitch_get_phys_port_name, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, + .ndo_hwtstamp_get = rswitch_hwstamp_get, + .ndo_hwtstamp_set = rswitch_hwstamp_set, }; bool is_rdev(const struct net_device *ndev) @@ -2190,7 +2160,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev) if (IS_ERR(priv->addr)) return PTR_ERR(priv->addr); - priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4; + priv->ptp_priv->addr = priv->addr + RSWITCH_GPTP_OFFSET_S4; ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); if (ret < 0) { diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c index 15a043e85431..fdb1e7b7fb06 100644 --- a/drivers/net/ethernet/renesas/rtsn.c +++ b/drivers/net/ethernet/renesas/rtsn.c @@ -62,6 +62,9 @@ struct rtsn_private { int tx_data_irq; int rx_data_irq; + + u32 tstamp_tx_ctrl; + u32 tstamp_rx_ctrl; }; static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg) @@ -162,8 +165,7 @@ static int rtsn_rx(struct net_device *ndev, int budget) unsigned int i; bool get_ts; - get_ts = priv->ptp_priv->tstamp_rx_ctrl & - RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + get_ts = priv->tstamp_rx_ctrl != HWTSTAMP_FILTER_NONE; ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx; rx_packets = 0; @@ -1122,31 +1124,16 @@ static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) static int rtsn_hwtstamp_get(struct net_device *ndev, struct kernel_hwtstamp_config *config) { - struct rcar_gen4_ptp_private *ptp_priv; struct rtsn_private *priv; if (!netif_running(ndev)) return -ENODEV; priv = netdev_priv(ndev); - ptp_priv = priv->ptp_priv; config->flags = 0; - - config->tx_type = - ptp_priv->tstamp_tx_ctrl ? 
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - - switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { - case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT: - config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; - break; - case RCAR_GEN4_RXTSTAMP_TYPE_ALL: - config->rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - config->rx_filter = HWTSTAMP_FILTER_NONE; - break; - } + config->tx_type = priv->tstamp_tx_ctrl; + config->rx_filter = priv->tstamp_rx_ctrl; return 0; } @@ -1155,26 +1142,22 @@ static int rtsn_hwtstamp_set(struct net_device *ndev, struct kernel_hwtstamp_config *config, struct netlink_ext_ack *extack) { - struct rcar_gen4_ptp_private *ptp_priv; + enum hwtstamp_rx_filters tstamp_rx_ctrl; + enum hwtstamp_tx_types tstamp_tx_ctrl; struct rtsn_private *priv; - u32 tstamp_rx_ctrl; - u32 tstamp_tx_ctrl; if (!netif_running(ndev)) return -ENODEV; priv = netdev_priv(ndev); - ptp_priv = priv->ptp_priv; if (config->flags) return -EINVAL; switch (config->tx_type) { case HWTSTAMP_TX_OFF: - tstamp_tx_ctrl = 0; - break; case HWTSTAMP_TX_ON: - tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED; + tstamp_tx_ctrl = config->tx_type; break; default: return -ERANGE; @@ -1182,21 +1165,17 @@ static int rtsn_hwtstamp_set(struct net_device *ndev, switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: - tstamp_rx_ctrl = 0; - break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED | - RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + tstamp_rx_ctrl = config->rx_filter; break; default: config->rx_filter = HWTSTAMP_FILTER_ALL; - tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED | - RCAR_GEN4_RXTSTAMP_TYPE_ALL; + tstamp_rx_ctrl = HWTSTAMP_FILTER_ALL; break; } - ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; - ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; + priv->tstamp_tx_ctrl = tstamp_tx_ctrl; + priv->tstamp_rx_ctrl = tstamp_rx_ctrl; return 0; } diff --git a/drivers/net/ethernet/spacemit/k1_emac.h b/drivers/net/ethernet/spacemit/k1_emac.h index 5a09e946a276..577efe66573e 100644 --- a/drivers/net/ethernet/spacemit/k1_emac.h +++ b/drivers/net/ethernet/spacemit/k1_emac.h @@ -363,7 +363,7 @@ struct emac_desc { /* Keep stats in this order, index used for accessing hardware */ union emac_hw_tx_stats { - struct { + struct individual_tx_stats { u64 tx_ok_pkts; u64 tx_total_pkts; u64 tx_ok_bytes; @@ -378,11 +378,11 @@ union emac_hw_tx_stats { u64 tx_pause_pkts; } stats; - DECLARE_FLEX_ARRAY(u64, array); + u64 array[sizeof(struct individual_tx_stats) / sizeof(u64)]; }; union emac_hw_rx_stats { - struct { + struct individual_rx_stats { u64 rx_ok_pkts; u64 rx_total_pkts; u64 rx_crc_err_pkts; @@ -410,7 +410,7 @@ union emac_hw_rx_stats { u64 rx_truncate_fifo_full_pkts; } stats; - DECLARE_FLEX_ARRAY(u64, array); + u64 array[sizeof(struct individual_rx_stats) / sizeof(u64)]; }; #endif /* _K1_EMAC_H_ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 9507131875b2..907fe2e927f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -10,6 +10,7 @@ config STMMAC_ETH select PHYLINK select CRC32 select RESET_CONTROLLER + select NET_DEVLINK help This is the driver for the Ethernet IPs built around a Synopsys IP Core. @@ -67,6 +68,15 @@ config DWMAC_ANARION This selects the Anarion SoC glue layer support for the stmmac driver. 
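The k1_emac hunk just above drops DECLARE_FLEX_ARRAY() in favour of an array whose length is derived from a now-named inner struct, so the union keeps a definite size (it can be embedded and memcpy'd) while still allowing index-based readback of the named counters. A standalone sketch of the idiom, with hypothetical counter names:

#include <stdint.h>
#include <stdio.h>

/* Same idiom as union emac_hw_tx_stats: naming the inner struct lets the
 * array length be computed from its size, keeping both views in sync. */
union hw_tx_stats {
	struct tx_counters {
		uint64_t tx_ok_pkts;
		uint64_t tx_total_pkts;
		uint64_t tx_ok_bytes;
	} stats;
	uint64_t array[sizeof(struct tx_counters) / sizeof(uint64_t)];
};

int main(void)
{
	union hw_tx_stats s = { 0 };
	size_t i;

	/* Readback loop uses indices (a stand-in for per-counter registers). */
	for (i = 0; i < sizeof(s.array) / sizeof(s.array[0]); i++)
		s.array[i] = i * 10;

	/* Consumers use names; both union members alias the same storage. */
	printf("ok=%llu total=%llu bytes=%llu\n",
	       (unsigned long long)s.stats.tx_ok_pkts,
	       (unsigned long long)s.stats.tx_total_pkts,
	       (unsigned long long)s.stats.tx_ok_bytes);
	return 0;
}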
+config DWMAC_EIC7700 + tristate "Support for Eswin eic7700 ethernet driver" + depends on OF && HAS_DMA && ARCH_ESWIN || COMPILE_TEST + help + This driver supports the Eswin EIC7700 Ethernet controller, + which integrates Synopsys DesignWare QoS features. It enables + high-speed networking with DMA acceleration and is optimized + for embedded systems. + config DWMAC_INGENIC tristate "Ingenic MAC support" default MACH_INGENIC @@ -339,6 +349,11 @@ config DWMAC_VISCONTI endif +config STMMAC_LIBPCI + tristate + help + This option enables the PCI bus helpers for the stmmac driver. + config DWMAC_INTEL tristate "Intel GMAC support" default X86 @@ -352,16 +367,18 @@ config DWMAC_INTEL config DWMAC_LOONGSON tristate "Loongson PCI DWMAC support" default MACH_LOONGSON64 - depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI + depends on (MACH_LOONGSON64 || COMPILE_TEST) && PCI depends on COMMON_CLK + select STMMAC_LIBPCI help This selects the LOONGSON PCI bus support for the stmmac driver, Support for ethernet controller on Loongson-2K1000 SoC and LS7A1000 bridge. config STMMAC_PCI tristate "STMMAC PCI bus support" - depends on STMMAC_ETH && PCI + depends on PCI depends on COMMON_CLK + select STMMAC_LIBPCI help This selects the platform specific bus support for the stmmac driver. This driver was tested on XLINX XC2V3000 FF1152AMT0221 diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 51e068e26ce4..7bf528731034 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -7,13 +7,14 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \ stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \ stmmac_xdp.o stmmac_est.o stmmac_fpe.o stmmac_vlan.o \ - $(stmmac-y) + stmmac_pcs.o $(stmmac-y) stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o # Ordering matters. Generic driver must be last. 
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o +obj-$(CONFIG_DWMAC_EIC7700) += dwmac-eic7700.o obj-$(CONFIG_DWMAC_INGENIC) += dwmac-ingenic.o obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o @@ -43,6 +44,7 @@ obj-$(CONFIG_DWMAC_VISCONTI) += dwmac-visconti.o stmmac-platform-objs:= stmmac_platform.o dwmac-altr-socfpga-objs := dwmac-socfpga.o +obj-$(CONFIG_STMMAC_LIBPCI) += stmmac_libpci.o obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o obj-$(CONFIG_DWMAC_LOONGSON) += dwmac-loongson.o diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index fb55efd52240..120a009c9992 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -83,14 +83,13 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb, return entry; } -static unsigned int is_jumbo_frm(int len, int enh_desc) +static bool is_jumbo_frm(unsigned int len, bool enh_desc) { - unsigned int ret = 0; + bool ret = false; if ((enh_desc && (len > BUF_SIZE_8KiB)) || - (!enh_desc && (len > BUF_SIZE_2KiB))) { - ret = 1; - } + (!enh_desc && (len > BUF_SIZE_2KiB))) + ret = true; return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 8f34c9ad457f..49df46be3669 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -26,6 +26,9 @@ #include "hwif.h" #include "mmc.h" +#define DWMAC_SNPSVER GENMASK_U32(7, 0) +#define DWMAC_USERVER GENMASK_U32(15, 8) + /* Synopsys Core versions */ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 @@ -43,6 +46,11 @@ #define DWXGMAC_ID 0x76 #define DWXLGMAC_ID 0x27 +static inline bool dwmac_is_xmac(enum dwmac_core_type core_type) +{ + return core_type == DWMAC_CORE_GMAC4 || core_type == DWMAC_CORE_XGMAC; +} + #define STMMAC_CHAN0 0 /* Always supported and default for all chips */ /* TX and RX Descriptor Length, these need to be power of two. 
@@ -192,9 +200,6 @@ struct stmmac_extra_stats { unsigned long irq_pcs_ane_n; unsigned long irq_pcs_link_n; unsigned long irq_rgmii_n; - unsigned long pcs_link; - unsigned long pcs_duplex; - unsigned long pcs_speed; /* debug register */ unsigned long mtl_tx_status_fifo_full; unsigned long mtl_tx_fifo_not_empty; @@ -273,7 +278,6 @@ struct stmmac_safety_stats { #define FLOW_AUTO (FLOW_TX | FLOW_RX) /* PCS defines */ -#define STMMAC_PCS_RGMII (1 << 0) #define STMMAC_PCS_SGMII (1 << 1) #define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ @@ -309,6 +313,16 @@ struct stmmac_safety_stats { #define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */ #define DEFAULT_DMA_PBL 8 +/* phy_intf_sel_i and ACTPHYIF encodings */ +#define PHY_INTF_SEL_GMII_MII 0 +#define PHY_INTF_SEL_RGMII 1 +#define PHY_INTF_SEL_SGMII 2 +#define PHY_INTF_SEL_TBI 3 +#define PHY_INTF_SEL_RMII 4 +#define PHY_INTF_SEL_RTBI 5 +#define PHY_INTF_SEL_SMII 6 +#define PHY_INTF_SEL_REVMII 7 + /* MSI defines */ #define STMMAC_MSI_VEC_MAX 32 @@ -534,6 +548,19 @@ struct dma_features { #define LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */ #define LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */ +/* Common definitions for AXI Master Bus Mode */ +#define DMA_AXI_AAL BIT(12) +#define DMA_AXI_BLEN256 BIT(7) +#define DMA_AXI_BLEN128 BIT(6) +#define DMA_AXI_BLEN64 BIT(5) +#define DMA_AXI_BLEN32 BIT(4) +#define DMA_AXI_BLEN16 BIT(3) +#define DMA_AXI_BLEN8 BIT(2) +#define DMA_AXI_BLEN4 BIT(1) +#define DMA_AXI_BLEN_MASK GENMASK(7, 1) + +void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len); + #define STMMAC_CHAIN_MODE 0x1 #define STMMAC_RING_MODE 0x2 @@ -603,13 +630,18 @@ struct mac_device_info { unsigned int mcast_bits_log2; unsigned int rx_csum; unsigned int pcs; - unsigned int ps; unsigned int xlgmac; unsigned int num_vlan; u32 vlan_filter[32]; bool vlan_fail_q_en; u8 vlan_fail_q; bool hw_vlan_en; + bool reverse_sgmii_enable; + + /* This spinlock protects read-modify-write of the interrupt + * mask/enable registers. 
+ */ + spinlock_t irq_ctrl_lock; }; struct stmmac_rx_routing { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index 84072c8ed741..5e0fc31762d9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -34,7 +34,7 @@ static void gmac_write_reg(struct anarion_gmac *gmac, uint8_t reg, uint32_t val) writel(val, gmac->ctl_block + reg); } -static int anarion_gmac_init(struct platform_device *pdev, void *priv) +static int anarion_gmac_init(struct device *dev, void *priv) { uint32_t sw_config; struct anarion_gmac *gmac = priv; @@ -52,7 +52,7 @@ static int anarion_gmac_init(struct platform_device *pdev, void *priv) return 0; } -static void anarion_gmac_exit(struct platform_device *pdev, void *priv) +static void anarion_gmac_exit(struct device *dev, void *priv) { struct anarion_gmac *gmac = priv; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index e8539cad4602..d043bad4a862 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -38,8 +38,6 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, { struct device *dev = &pdev->dev; u32 burst_map = 0; - u32 bit_index = 0; - u32 a_index = 0; if (!plat_dat->axi) { plat_dat->axi = devm_kzalloc(&pdev->dev, @@ -83,33 +81,11 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, } device_property_read_u32(dev, "snps,burst-map", &burst_map); - /* converts burst-map bitmask to burst array */ - for (bit_index = 0; bit_index < 7; bit_index++) { - if (burst_map & (1 << bit_index)) { - switch (bit_index) { - case 0: - plat_dat->axi->axi_blen[a_index] = 4; break; - case 1: - plat_dat->axi->axi_blen[a_index] = 8; break; - case 2: - plat_dat->axi->axi_blen[a_index] = 16; break; - case 3: - plat_dat->axi->axi_blen[a_index] = 32; break; - case 4: - plat_dat->axi->axi_blen[a_index] = 64; break; - case 5: - plat_dat->axi->axi_blen[a_index] = 128; break; - case 6: - plat_dat->axi->axi_blen[a_index] = 256; break; - default: - break; - } - a_index++; - } - } + plat_dat->axi->axi_blen_regval = FIELD_PREP(DMA_AXI_BLEN_MASK, + burst_map); /* dwc-qos needs GMAC4, AAL, TSO and PMT */ - plat_dat->has_gmac4 = 1; + plat_dat->core_type = DWMAC_CORE_GMAC4; plat_dat->dma_cfg->aal = 1; plat_dat->flags |= STMMAC_FLAG_TSO_EN; plat_dat->pmt = 1; @@ -162,7 +138,7 @@ static void tegra_eqos_fix_speed(void *bsp_priv, int speed, unsigned int mode) priv = netdev_priv(dev_get_drvdata(eqos->dev)); /* Calibration should be done with the MDIO bus idle */ - mutex_lock(&priv->mii->mdio_lock); + stmmac_mdio_lock(priv); /* calibrate */ value = readl(eqos->regs + SDMEMCOMPPADCTRL); @@ -198,7 +174,7 @@ static void tegra_eqos_fix_speed(void *bsp_priv, int speed, unsigned int mode) value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD; writel(value, eqos->regs + SDMEMCOMPPADCTRL); - mutex_unlock(&priv->mii->mdio_lock); + stmmac_mdio_unlock(priv); } else { value = readl(eqos->regs + AUTO_CAL_CONFIG); value &= ~AUTO_CAL_CONFIG_ENABLE; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c new file mode 100644 index 000000000000..bcb8e000e720 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Eswin DWC Ethernet linux driver + * + * Copyright 2025, 
Beijing ESWIN Computing Technology Co., Ltd. + * + * Authors: + * Zhi Li <lizhi2@eswincomputing.com> + * Shuang Liang <liangshuang@eswincomputing.com> + * Shangjuan Wei <weishangjuan@eswincomputing.com> + */ + +#include <linux/platform_device.h> +#include <linux/mfd/syscon.h> +#include <linux/pm_runtime.h> +#include <linux/stmmac.h> +#include <linux/regmap.h> +#include <linux/of.h> + +#include "stmmac_platform.h" + +/* eth_phy_ctrl_offset eth0:0x100 */ +#define EIC7700_ETH_TX_CLK_SEL BIT(16) +#define EIC7700_ETH_PHY_INTF_SELI BIT(0) + +/* eth_axi_lp_ctrl_offset eth0:0x108 */ +#define EIC7700_ETH_CSYSREQ_VAL BIT(0) + +/* + * TX/RX Clock Delay Bit Masks: + * - TX Delay: bits [14:8] — TX_CLK delay (unit: 0.1ns per bit) + * - RX Delay: bits [30:24] — RX_CLK delay (unit: 0.1ns per bit) + */ +#define EIC7700_ETH_TX_ADJ_DELAY GENMASK(14, 8) +#define EIC7700_ETH_RX_ADJ_DELAY GENMASK(30, 24) + +#define EIC7700_MAX_DELAY_UNIT 0x7F + +static const char * const eic7700_clk_names[] = { + "tx", "axi", "cfg", +}; + +struct eic7700_qos_priv { + struct plat_stmmacenet_data *plat_dat; +}; + +static int eic7700_clks_config(void *priv, bool enabled) +{ + struct eic7700_qos_priv *dwc = (struct eic7700_qos_priv *)priv; + struct plat_stmmacenet_data *plat = dwc->plat_dat; + int ret = 0; + + if (enabled) + ret = clk_bulk_prepare_enable(plat->num_clks, plat->clks); + else + clk_bulk_disable_unprepare(plat->num_clks, plat->clks); + + return ret; +} + +static int eic7700_dwmac_init(struct device *dev, void *priv) +{ + struct eic7700_qos_priv *dwc = priv; + + return eic7700_clks_config(dwc, true); +} + +static void eic7700_dwmac_exit(struct device *dev, void *priv) +{ + struct eic7700_qos_priv *dwc = priv; + + eic7700_clks_config(dwc, false); +} + +static int eic7700_dwmac_suspend(struct device *dev, void *priv) +{ + return pm_runtime_force_suspend(dev); +} + +static int eic7700_dwmac_resume(struct device *dev, void *priv) +{ + int ret; + + ret = pm_runtime_force_resume(dev); + if (ret) + dev_err(dev, "%s failed: %d\n", __func__, ret); + + return ret; +} + +static int eic7700_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct eic7700_qos_priv *dwc_priv; + struct regmap *eic7700_hsp_regmap; + u32 eth_axi_lp_ctrl_offset; + u32 eth_phy_ctrl_offset; + u32 eth_phy_ctrl_regset; + u32 eth_rxd_dly_offset; + u32 eth_dly_param = 0; + u32 delay_ps; + int i, ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "failed to get resources\n"); + + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat), + "dt configuration failed\n"); + + dwc_priv = devm_kzalloc(&pdev->dev, sizeof(*dwc_priv), GFP_KERNEL); + if (!dwc_priv) + return -ENOMEM; + + /* Read rx-internal-delay-ps and update rx_clk delay */ + if (!of_property_read_u32(pdev->dev.of_node, + "rx-internal-delay-ps", &delay_ps)) { + u32 val = min(delay_ps / 100, EIC7700_MAX_DELAY_UNIT); + + eth_dly_param &= ~EIC7700_ETH_RX_ADJ_DELAY; + eth_dly_param |= FIELD_PREP(EIC7700_ETH_RX_ADJ_DELAY, val); + } else { + return dev_err_probe(&pdev->dev, -EINVAL, + "missing required property rx-internal-delay-ps\n"); + } + + /* Read tx-internal-delay-ps and update tx_clk delay */ + if (!of_property_read_u32(pdev->dev.of_node, + "tx-internal-delay-ps", &delay_ps)) { + u32 val = min(delay_ps / 100, EIC7700_MAX_DELAY_UNIT); + + eth_dly_param &= ~EIC7700_ETH_TX_ADJ_DELAY; 
+ eth_dly_param |= FIELD_PREP(EIC7700_ETH_TX_ADJ_DELAY, val); + } else { + return dev_err_probe(&pdev->dev, -EINVAL, + "missing required property tx-internal-delay-ps\n"); + } + + eic7700_hsp_regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "eswin,hsp-sp-csr"); + if (IS_ERR(eic7700_hsp_regmap)) + return dev_err_probe(&pdev->dev, + PTR_ERR(eic7700_hsp_regmap), + "Failed to get hsp-sp-csr regmap\n"); + + ret = of_property_read_u32_index(pdev->dev.of_node, + "eswin,hsp-sp-csr", + 1, &eth_phy_ctrl_offset); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "can't get eth_phy_ctrl_offset\n"); + + regmap_read(eic7700_hsp_regmap, eth_phy_ctrl_offset, + &eth_phy_ctrl_regset); + eth_phy_ctrl_regset |= + (EIC7700_ETH_TX_CLK_SEL | EIC7700_ETH_PHY_INTF_SELI); + regmap_write(eic7700_hsp_regmap, eth_phy_ctrl_offset, + eth_phy_ctrl_regset); + + ret = of_property_read_u32_index(pdev->dev.of_node, + "eswin,hsp-sp-csr", + 2, &eth_axi_lp_ctrl_offset); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "can't get eth_axi_lp_ctrl_offset\n"); + + regmap_write(eic7700_hsp_regmap, eth_axi_lp_ctrl_offset, + EIC7700_ETH_CSYSREQ_VAL); + + ret = of_property_read_u32_index(pdev->dev.of_node, + "eswin,hsp-sp-csr", + 3, &eth_rxd_dly_offset); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "can't get eth_rxd_dly_offset\n"); + + regmap_write(eic7700_hsp_regmap, eth_rxd_dly_offset, + eth_dly_param); + + plat_dat->num_clks = ARRAY_SIZE(eic7700_clk_names); + plat_dat->clks = devm_kcalloc(&pdev->dev, + plat_dat->num_clks, + sizeof(*plat_dat->clks), + GFP_KERNEL); + if (!plat_dat->clks) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(eic7700_clk_names); i++) + plat_dat->clks[i].id = eic7700_clk_names[i]; + + ret = devm_clk_bulk_get_optional(&pdev->dev, + plat_dat->num_clks, + plat_dat->clks); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to get clocks\n"); + + plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx"); + plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate; + plat_dat->clks_config = eic7700_clks_config; + plat_dat->bsp_priv = dwc_priv; + dwc_priv->plat_dat = plat_dat; + plat_dat->init = eic7700_dwmac_init; + plat_dat->exit = eic7700_dwmac_exit; + plat_dat->suspend = eic7700_dwmac_suspend; + plat_dat->resume = eic7700_dwmac_resume; + + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); +} + +static const struct of_device_id eic7700_dwmac_match[] = { + { .compatible = "eswin,eic7700-qos-eth" }, + { } +}; +MODULE_DEVICE_TABLE(of, eic7700_dwmac_match); + +static struct platform_driver eic7700_dwmac_driver = { + .probe = eic7700_dwmac_probe, + .driver = { + .name = "eic7700-eth-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = eic7700_dwmac_match, + }, +}; +module_platform_driver(eic7700_dwmac_driver); + +MODULE_AUTHOR("Zhi Li <lizhi2@eswincomputing.com>"); +MODULE_AUTHOR("Shuang Liang <liangshuang@eswincomputing.com>"); +MODULE_AUTHOR("Shangjuan Wei <weishangjuan@eswincomputing.com>"); +MODULE_DESCRIPTION("Eswin eic7700 qos ethernet driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 4268b9987237..db288fbd5a4d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -23,18 +23,13 @@ #include "stmmac_platform.h" #define GPR_ENET_QOS_INTF_MODE_MASK GENMASK(21, 16) -#define GPR_ENET_QOS_INTF_SEL_MII (0x0 << 16) -#define GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 16) -#define GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 16) +#define
GPR_ENET_QOS_INTF_SEL_MASK GENMASK(20, 16) #define GPR_ENET_QOS_CLK_GEN_EN (0x1 << 19) #define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20) #define GPR_ENET_QOS_RGMII_EN (0x1 << 21) #define MX93_GPR_ENET_QOS_INTF_MODE_MASK GENMASK(3, 0) -#define MX93_GPR_ENET_QOS_INTF_MASK GENMASK(3, 1) -#define MX93_GPR_ENET_QOS_INTF_SEL_MII (0x0 << 1) -#define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1) -#define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1) +#define MX93_GPR_ENET_QOS_INTF_SEL_MASK GENMASK(3, 1) #define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0) #define MX93_GPR_ENET_QOS_CLK_SEL_MASK BIT_MASK(0) #define MX93_GPR_CLK_SEL_OFFSET (4) @@ -44,13 +39,15 @@ #define RMII_RESET_SPEED (0x3 << 14) #define CTRL_SPEED_MASK GENMASK(15, 14) +struct imx_priv_data; + struct imx_dwmac_ops { u32 addr_width; u32 flags; bool mac_rgmii_txclk_auto_adj; int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr); - int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat); + int (*set_intf_mode)(struct imx_priv_data *dwmac, u8 phy_intf_sel); void (*fix_mac_speed)(void *priv, int speed, unsigned int mode); }; @@ -67,79 +64,46 @@ struct imx_priv_data { struct plat_stmmacenet_data *plat_dat; }; -static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat) +static int imx8mp_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel) { - struct imx_priv_data *dwmac = plat_dat->bsp_priv; - int val; - - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_MII: - val = GPR_ENET_QOS_INTF_SEL_MII; - break; - case PHY_INTERFACE_MODE_RMII: - val = GPR_ENET_QOS_INTF_SEL_RMII; - val |= (dwmac->rmii_refclk_ext ? 0 : GPR_ENET_QOS_CLK_TX_CLK_SEL); - break; - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - val = GPR_ENET_QOS_INTF_SEL_RGMII | - GPR_ENET_QOS_RGMII_EN; - break; - default: - pr_debug("imx dwmac doesn't support %s interface\n", - phy_modes(plat_dat->phy_interface)); - return -EINVAL; - } + unsigned int val; + + val = FIELD_PREP(GPR_ENET_QOS_INTF_SEL_MASK, phy_intf_sel) | + GPR_ENET_QOS_CLK_GEN_EN; + + if (phy_intf_sel == PHY_INTF_SEL_RMII && !dwmac->rmii_refclk_ext) + val |= GPR_ENET_QOS_CLK_TX_CLK_SEL; + else if (phy_intf_sel == PHY_INTF_SEL_RGMII) + val |= GPR_ENET_QOS_RGMII_EN; - val |= GPR_ENET_QOS_CLK_GEN_EN; return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off, GPR_ENET_QOS_INTF_MODE_MASK, val); }; static int -imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat) +imx8dxl_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel) { - int ret = 0; - /* TBD: depends on imx8dxl scu interfaces to be upstreamed */ - return ret; + return 0; } -static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat) +static int imx93_set_intf_mode(struct imx_priv_data *dwmac, u8 phy_intf_sel) { - struct imx_priv_data *dwmac = plat_dat->bsp_priv; - int val, ret; - - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_MII: - val = MX93_GPR_ENET_QOS_INTF_SEL_MII; - break; - case PHY_INTERFACE_MODE_RMII: - if (dwmac->rmii_refclk_ext) { - ret = regmap_clear_bits(dwmac->intf_regmap, - dwmac->intf_reg_off + - MX93_GPR_CLK_SEL_OFFSET, - MX93_GPR_ENET_QOS_CLK_SEL_MASK); - if (ret) - return ret; - } - val = MX93_GPR_ENET_QOS_INTF_SEL_RMII; - break; - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - val = MX93_GPR_ENET_QOS_INTF_SEL_RGMII; - break; - default: - dev_dbg(dwmac->dev, "imx dwmac 
doesn't support %s interface\n", - phy_modes(plat_dat->phy_interface)); - return -EINVAL; + unsigned int val; + int ret; + + if (phy_intf_sel == PHY_INTF_SEL_RMII && dwmac->rmii_refclk_ext) { + ret = regmap_clear_bits(dwmac->intf_regmap, + dwmac->intf_reg_off + + MX93_GPR_CLK_SEL_OFFSET, + MX93_GPR_ENET_QOS_CLK_SEL_MASK); + if (ret) + return ret; } - val |= MX93_GPR_ENET_QOS_CLK_GEN_EN; + val = FIELD_PREP(MX93_GPR_ENET_QOS_INTF_SEL_MASK, phy_intf_sel) | + MX93_GPR_ENET_QOS_CLK_GEN_EN; + return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off, MX93_GPR_ENET_QOS_INTF_MODE_MASK, val); }; @@ -170,34 +134,24 @@ static int imx_dwmac_clks_config(void *priv, bool enabled) return ret; } -static int imx_dwmac_init(struct platform_device *pdev, void *priv) +static int imx_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel) { - struct plat_stmmacenet_data *plat_dat; - struct imx_priv_data *dwmac = priv; - int ret; - - plat_dat = dwmac->plat_dat; + struct imx_priv_data *dwmac = bsp_priv; - if (dwmac->ops->set_intf_mode) { - ret = dwmac->ops->set_intf_mode(plat_dat); - if (ret) - return ret; - } + if (!dwmac->ops->set_intf_mode) + return 0; - return 0; -} + if (phy_intf_sel != PHY_INTF_SEL_GMII_MII && + phy_intf_sel != PHY_INTF_SEL_RGMII && + phy_intf_sel != PHY_INTF_SEL_RMII) + return -EINVAL; -static void imx_dwmac_exit(struct platform_device *pdev, void *priv) -{ - /* nothing to do now */ + return dwmac->ops->set_intf_mode(dwmac, phy_intf_sel); } static int imx_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, phy_interface_t interface, int speed) { - struct imx_priv_data *dwmac = bsp_priv; - - interface = dwmac->plat_dat->phy_interface; if (interface == PHY_INTERFACE_MODE_RMII || interface == PHY_INTERFACE_MODE_MII) return 0; @@ -244,8 +198,8 @@ static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode) if (regmap_read(dwmac->intf_regmap, dwmac->intf_reg_off, &iface)) return; - iface &= MX93_GPR_ENET_QOS_INTF_MASK; - if (iface != MX93_GPR_ENET_QOS_INTF_SEL_RGMII) + if (FIELD_GET(MX93_GPR_ENET_QOS_INTF_SEL_MASK, iface) != + PHY_INTF_SEL_RGMII) return; old_ctrl = readl(dwmac->base_addr + MAC_CTRL_REG); @@ -258,6 +212,7 @@ static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode) readl(dwmac->base_addr + MAC_CTRL_REG); usleep_range(10, 20); + iface &= MX93_GPR_ENET_QOS_INTF_SEL_MASK; iface |= MX93_GPR_ENET_QOS_CLK_GEN_EN; regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off, MX93_GPR_ENET_QOS_INTF_MODE_MASK, iface); @@ -370,8 +325,7 @@ static int imx_dwmac_probe(struct platform_device *pdev) plat_dat->tx_queues_cfg[i].tbs_en = 1; plat_dat->host_dma_width = dwmac->ops->addr_width; - plat_dat->init = imx_dwmac_init; - plat_dat->exit = imx_dwmac_exit; + plat_dat->set_phy_intf_sel = imx_set_phy_intf_sel; plat_dat->clks_config = imx_dwmac_clks_config; plat_dat->bsp_priv = dwmac; dwmac->plat_dat = plat_dat; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c index c1670f6bae14..8e4a30c11db0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c @@ -35,10 +35,6 @@ #define MACPHYC_RX_DELAY_MASK GENMASK(10, 4) #define MACPHYC_SOFT_RST_MASK GENMASK(3, 3) #define MACPHYC_PHY_INFT_MASK GENMASK(2, 0) -#define MACPHYC_PHY_INFT_RMII 0x4 -#define MACPHYC_PHY_INFT_RGMII 0x1 -#define MACPHYC_PHY_INFT_GMII 0x0 -#define MACPHYC_PHY_INFT_MII 0x0 #define MACPHYC_TX_DELAY_PS_MAX 2496 #define MACPHYC_TX_DELAY_PS_MIN 20 @@ -68,172 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index c1670f6bae14..8e4a30c11db0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -35,10 +35,6 @@
 #define MACPHYC_RX_DELAY_MASK		GENMASK(10, 4)
 #define MACPHYC_SOFT_RST_MASK		GENMASK(3, 3)
 #define MACPHYC_PHY_INFT_MASK		GENMASK(2, 0)
-#define MACPHYC_PHY_INFT_RMII		0x4
-#define MACPHYC_PHY_INFT_RGMII		0x1
-#define MACPHYC_PHY_INFT_GMII		0x0
-#define MACPHYC_PHY_INFT_MII		0x0
 
 #define MACPHYC_TX_DELAY_PS_MAX		2496
 #define MACPHYC_TX_DELAY_PS_MIN		20
@@ -68,172 +64,93 @@ struct ingenic_soc_info {
 	enum ingenic_mac_version version;
 	u32 mask;
 
-	int (*set_mode)(struct plat_stmmacenet_data *plat_dat);
-};
-
-static int ingenic_mac_init(struct platform_device *pdev, void *bsp_priv)
-{
-	struct ingenic_mac *mac = bsp_priv;
-	int ret;
+	int (*set_mode)(struct ingenic_mac *mac, u8 phy_intf_sel);
 
-	if (mac->soc_info->set_mode) {
-		ret = mac->soc_info->set_mode(mac->plat_dat);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
+	u8 valid_phy_intf_sel;
+};
 
-static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int jz4775_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
 {
-	struct ingenic_mac *mac = plat_dat->bsp_priv;
 	unsigned int val;
 
-	switch (plat_dat->phy_interface) {
-	case PHY_INTERFACE_MODE_MII:
-		val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
-		      FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII);
-		dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_MII\n");
-		break;
-
-	case PHY_INTERFACE_MODE_GMII:
-		val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
-		      FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_GMII);
-		dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_GMII\n");
-		break;
-
-	case PHY_INTERFACE_MODE_RMII:
-		val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
-		      FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
-		dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
-		break;
-
-	case PHY_INTERFACE_MODE_RGMII:
-	case PHY_INTERFACE_MODE_RGMII_ID:
-	case PHY_INTERFACE_MODE_RGMII_TXID:
-	case PHY_INTERFACE_MODE_RGMII_RXID:
-		val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
-		      FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII);
-		dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n");
-		break;
-
-	default:
-		dev_err(mac->dev, "Unsupported interface %s\n",
-			phy_modes(plat_dat->phy_interface));
-		return -EINVAL;
-	}
+	val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel) |
+	      FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT);
 
 	/* Update MAC PHY control register */
 	return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
 }
 
-static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int x1000_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
 {
-	struct ingenic_mac *mac = plat_dat->bsp_priv;
-
-	switch (plat_dat->phy_interface) {
-	case PHY_INTERFACE_MODE_RMII:
-		dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
-		break;
-
-	default:
-		dev_err(mac->dev, "Unsupported interface %s\n",
-			phy_modes(plat_dat->phy_interface));
-		return -EINVAL;
-	}
-
 	/* Update MAC PHY control register */
 	return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, 0);
 }
 
-static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int x1600_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
 {
-	struct ingenic_mac *mac = plat_dat->bsp_priv;
 	unsigned int val;
 
-	switch (plat_dat->phy_interface) {
-	case PHY_INTERFACE_MODE_RMII:
-		val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
-		dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
-		break;
-
-	default:
-		dev_err(mac->dev, "Unsupported interface %s\n",
-			phy_modes(plat_dat->phy_interface));
-		return -EINVAL;
-	}
+	val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel);
 
 	/* Update MAC PHY control register */
 	return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
 }
 
-static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int x1830_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
 {
-	struct ingenic_mac *mac = plat_dat->bsp_priv;
 	unsigned int val;
 
-	switch (plat_dat->phy_interface) {
-	case PHY_INTERFACE_MODE_RMII:
-		val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
-		      FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
-		dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
-		break;
-
-	default:
-		dev_err(mac->dev, "Unsupported interface %s\n",
-			phy_modes(plat_dat->phy_interface));
-		return -EINVAL;
-	}
+	val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
+	      FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel);
 
 	/* Update MAC PHY control register */
 	return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
 }
 
-static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int x2000_mac_set_mode(struct ingenic_mac *mac, u8 phy_intf_sel)
 {
-	struct ingenic_mac *mac = plat_dat->bsp_priv;
 	unsigned int val;
 
-	switch (plat_dat->phy_interface) {
-	case PHY_INTERFACE_MODE_RMII:
-		val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
-		      FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) |
-		      FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
-		dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
-		break;
-
-	case PHY_INTERFACE_MODE_RGMII:
-	case PHY_INTERFACE_MODE_RGMII_ID:
-	case PHY_INTERFACE_MODE_RGMII_TXID:
-	case PHY_INTERFACE_MODE_RGMII_RXID:
-		val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII);
+	val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, phy_intf_sel);
 
+	if (phy_intf_sel == PHY_INTF_SEL_RMII) {
+		val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
+		       FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN);
+	} else if (phy_intf_sel == PHY_INTF_SEL_RGMII) {
 		if (mac->tx_delay == 0)
 			val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN);
 		else
 			val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_DELAY) |
-			       FIELD_PREP(MACPHYC_TX_DELAY_MASK, (mac->tx_delay + 9750) / 19500 - 1);
+			       FIELD_PREP(MACPHYC_TX_DELAY_MASK, (mac->tx_delay + 9750) / 19500 - 1);
 
 		if (mac->rx_delay == 0)
 			val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN);
 		else
 			val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_DELAY) |
 			       FIELD_PREP(MACPHYC_RX_DELAY_MASK, (mac->rx_delay + 9750) / 19500 - 1);
-
-		dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n");
-		break;
-
-	default:
-		dev_err(mac->dev, "Unsupported interface %s\n",
-			phy_modes(plat_dat->phy_interface));
-		return -EINVAL;
 	}
 
 	/* Update MAC PHY control register */
 	return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
 }
 
+static int ingenic_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel)
+{
+	struct ingenic_mac *mac = bsp_priv;
+
+	if (!mac->soc_info->set_mode)
+		return 0;
+
+	if (phy_intf_sel >= BITS_PER_BYTE ||
+	    ~mac->soc_info->valid_phy_intf_sel & BIT(phy_intf_sel))
+		return -EINVAL;
+
+	dev_dbg(mac->dev, "MAC PHY control register: interface %s\n",
		phy_modes(mac->plat_dat->phy_interface));
+
+	return mac->soc_info->set_mode(mac, phy_intf_sel);
+}
+
 static int ingenic_mac_probe(struct platform_device *pdev)
 {
 	struct plat_stmmacenet_data *plat_dat;
@@ -293,7 +210,7 @@ static int ingenic_mac_probe(struct platform_device *pdev)
 	mac->plat_dat = plat_dat;
 
 	plat_dat->bsp_priv = mac;
-	plat_dat->init = ingenic_mac_init;
+	plat_dat->set_phy_intf_sel = ingenic_set_phy_intf_sel;
 
 	return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
 }
@@ -303,6 +220,9 @@ static struct ingenic_soc_info jz4775_soc_info = {
 	.mask = MACPHYC_TXCLK_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
 
 	.set_mode = jz4775_mac_set_mode,
+	.valid_phy_intf_sel = BIT(PHY_INTF_SEL_GMII_MII) |
+			      BIT(PHY_INTF_SEL_RGMII) |
+			      BIT(PHY_INTF_SEL_RMII),
 };
 
 static struct ingenic_soc_info x1000_soc_info = {
@@ -310,6 +230,7 @@ static struct ingenic_soc_info x1000_soc_info = {
 	.mask = MACPHYC_SOFT_RST_MASK,
 
 	.set_mode = x1000_mac_set_mode,
+	.valid_phy_intf_sel = BIT(PHY_INTF_SEL_RMII),
 };
 
 static struct ingenic_soc_info x1600_soc_info = {
@@ -317,6 +238,7 @@
 	.mask = MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
 
 	.set_mode = x1600_mac_set_mode,
+	.valid_phy_intf_sel = BIT(PHY_INTF_SEL_RMII),
 };
 
 static struct ingenic_soc_info x1830_soc_info = {
@@ -324,6 +246,7 @@
 	.mask = MACPHYC_MODE_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
 
 	.set_mode = x1830_mac_set_mode,
+	.valid_phy_intf_sel = BIT(PHY_INTF_SEL_RMII),
 };
 
 static struct ingenic_soc_info x2000_soc_info = {
@@ -332,6 +255,8 @@
 	MACPHYC_RX_DELAY_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
 
 	.set_mode = x2000_mac_set_mode,
+	.valid_phy_intf_sel = BIT(PHY_INTF_SEL_RGMII) |
+			      BIT(PHY_INTF_SEL_RMII),
 };
 
 static const struct of_device_id ingenic_mac_of_matches[] = {
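[Editor's note] The new valid_phy_intf_sel member lets one shared entry point reject unsupported modes instead of duplicating a default: case per SoC. A self-contained sketch of that bitmap check (illustrative names, mirroring the ingenic_set_phy_intf_sel() test above):

#include <linux/bits.h>	/* BIT(), BITS_PER_BYTE */
#include <stdbool.h>

static bool intf_sel_supported(u8 valid_mask, u8 phy_intf_sel)
{
	/* codes 0..7 fit in the u8 bitmap; anything larger is invalid */
	if (phy_intf_sel >= BITS_PER_BYTE)
		return false;

	return valid_mask & BIT(phy_intf_sel);
}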
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index e74d00984b88..aad1be1ec4c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -565,30 +565,10 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
 {
 	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
 	plat->clk_csr = STMMAC_CSR_20_35M;
-	plat->has_gmac = 1;
+	plat->core_type = DWMAC_CORE_GMAC;
 	plat->force_sf_dma_mode = 1;
 
 	plat->mdio_bus_data->needs_reset = true;
-
-	/* Set default value for multicast hash bins */
-	plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-	/* Set default value for unicast filter entries */
-	plat->unicast_filter_entries = 1;
-
-	/* Set the maxmtu to a default of JUMBO_LEN */
-	plat->maxmtu = JUMBO_LEN;
-
-	/* Set default number of RX and TX queues to use */
-	plat->tx_queues_to_use = 1;
-	plat->rx_queues_to_use = 1;
-
-	/* Disable Priority config by default */
-	plat->tx_queues_cfg[0].use_prio = false;
-	plat->rx_queues_cfg[0].use_prio = false;
-
-	/* Disable RX queues routing by default */
-	plat->rx_queues_cfg[0].pkt_route = 0x0;
 }
 
 static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
@@ -612,8 +592,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
 	plat->pdev = pdev;
 	plat->phy_addr = -1;
 	plat->clk_csr = STMMAC_CSR_250_300M;
-	plat->has_gmac = 0;
-	plat->has_gmac4 = 1;
+	plat->core_type = DWMAC_CORE_GMAC4;
 	plat->force_sf_dma_mode = 0;
 	plat->flags |= (STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE);
 
@@ -630,22 +609,12 @@
 	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
 
-	for (i = 0; i < plat->rx_queues_to_use; i++) {
+	for (i = 0; i < plat->rx_queues_to_use; i++)
 		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
-		plat->rx_queues_cfg[i].chan = i;
-
-		/* Disable Priority config by default */
-		plat->rx_queues_cfg[i].use_prio = false;
-
-		/* Disable RX queues routing by default */
-		plat->rx_queues_cfg[i].pkt_route = 0x0;
-	}
 
 	for (i = 0; i < plat->tx_queues_to_use; i++) {
 		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
 
-		/* Disable Priority config by default */
-		plat->tx_queues_cfg[i].use_prio = false;
 		/* Default TX Q0 to use TSO and rest TXQ for TBS */
 		if (i > 0)
 			plat->tx_queues_cfg[i].tbs_en = 1;
@@ -681,9 +650,8 @@
 	plat->axi->axi_xit_frm = 0;
 	plat->axi->axi_wr_osr_lmt = 1;
 	plat->axi->axi_rd_osr_lmt = 1;
-	plat->axi->axi_blen[0] = 4;
-	plat->axi->axi_blen[1] = 8;
-	plat->axi->axi_blen[2] = 16;
+	plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
				     DMA_AXI_BLEN16;
 
 	plat->ptp_max_adj = plat->clk_ptp_rate;
 
@@ -707,15 +675,6 @@
 	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
 
-	/* Set default value for multicast hash bins */
-	plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-	/* Set default value for unicast filter entries */
-	plat->unicast_filter_entries = 1;
-
-	/* Set the maxmtu to a default of JUMBO_LEN */
-	plat->maxmtu = JUMBO_LEN;
-
 	plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;
 
 	/* Use the last Rx queue */
@@ -1287,7 +1246,7 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
 	if (!intel_priv)
 		return -ENOMEM;
 
-	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+	plat = stmmac_plat_dat_alloc(&pdev->dev);
 	if (!plat)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index ca4035cbb55b..c05f85534f0c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -473,7 +473,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
 		return err;
 	}
 
-	plat_dat->has_gmac = true;
+	plat_dat->core_type = DWMAC_CORE_GMAC;
 	plat_dat->bsp_priv = gmac;
 	plat_dat->set_clk_tx_rate = ipq806x_gmac_set_clk_tx_rate;
 	plat_dat->multicast_filter_bins = 0;
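[Editor's note] The blocks of per-driver defaults deleted above are the same in every PCI glue driver, which is why a single allocator can absorb them. A hedged sketch of what a helper like stmmac_plat_dat_alloc() plausibly centralizes, reconstructed from the deleted lines; the real helper lives in the stmmac core and may differ:

/* sketch only: sketch_plat_dat_alloc() is a hypothetical name */
static struct plat_stmmacenet_data *sketch_plat_dat_alloc(struct device *dev)
{
	struct plat_stmmacenet_data *plat;

	plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return NULL;

	/* defaults formerly duplicated in every glue driver */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;	/* stmmac constant */
	plat->unicast_filter_entries = 1;
	plat->maxmtu = JUMBO_LEN;			/* stmmac constant */
	plat->tx_queues_to_use = 1;
	plat->rx_queues_to_use = 1;
	plat->phy_addr = -1;				/* auto-detect PHY */

	return plat;
}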
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 592aa9d636e5..107a7c84ace8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -8,6 +8,7 @@
 #include <linux/device.h>
 #include <linux/of_irq.h>
 #include "stmmac.h"
+#include "stmmac_libpci.h"
 #include "dwmac_dma.h"
 #include "dwmac1000.h"
 
@@ -92,31 +93,15 @@ static void loongson_default_data(struct pci_dev *pdev,
 	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
 	plat->clk_csr = STMMAC_CSR_20_35M;
-	plat->has_gmac = 1;
+	plat->core_type = DWMAC_CORE_GMAC;
 	plat->force_sf_dma_mode = 1;
 
-	/* Set default value for multicast hash bins */
+	/* Increase the default value for multicast hash bins */
 	plat->multicast_filter_bins = 256;
 
-	/* Set default value for unicast filter entries */
-	plat->unicast_filter_entries = 1;
-
-	/* Set the maxmtu to a default of JUMBO_LEN */
-	plat->maxmtu = JUMBO_LEN;
-
-	/* Disable Priority config by default */
-	plat->tx_queues_cfg[0].use_prio = false;
-	plat->rx_queues_cfg[0].use_prio = false;
-
-	/* Disable RX queues routing by default */
-	plat->rx_queues_cfg[0].pkt_route = 0x0;
-
 	plat->clk_ref_rate = 125000000;
 	plat->clk_ptp_rate = 125000000;
 
-	/* Default to phy auto-detection */
-	plat->phy_addr = -1;
-
 	plat->dma_cfg->pbl = 32;
 	plat->dma_cfg->pblx8 = true;
 
@@ -140,8 +125,6 @@
 		break;
 	default:
 		ld->multichan = 0;
-		plat->tx_queues_to_use = 1;
-		plat->rx_queues_to_use = 1;
 		break;
 	}
 }
@@ -320,10 +303,9 @@
 	return ret;
 }
 
-static struct mac_device_info *loongson_dwmac_setup(void *apriv)
+static int loongson_dwmac_setup(void *apriv, struct mac_device_info *mac)
 {
 	struct stmmac_priv *priv = apriv;
-	struct mac_device_info *mac;
 	struct stmmac_dma_ops *dma;
 	struct loongson_data *ld;
 	struct pci_dev *pdev;
@@ -331,13 +313,9 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv)
 	ld = priv->plat->bsp_priv;
 	pdev = to_pci_dev(priv->device);
 
-	mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
-	if (!mac)
-		return NULL;
-
 	dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL);
 	if (!dma)
-		return NULL;
+		return -ENOMEM;
 
 	/* The Loongson GMAC and GNET devices are based on the DW GMAC
 	 * v3.50a and v3.73a IP-cores. But the HW designers have changed
@@ -396,7 +374,7 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv)
 	mac->mii.clk_csr_shift = 2;
 	mac->mii.clk_csr_mask = GENMASK(5, 2);
 
-	return mac;
+	return 0;
 }
 
 static int loongson_dwmac_msi_config(struct pci_dev *pdev,
@@ -525,37 +503,6 @@ static int loongson_dwmac_fix_reset(struct stmmac_priv *priv, void __iomem *ioad
 				  10000, 2000000);
 }
 
-static int loongson_dwmac_suspend(struct device *dev, void *bsp_priv)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	int ret;
-
-	ret = pci_save_state(pdev);
-	if (ret)
-		return ret;
-
-	pci_disable_device(pdev);
-	pci_wake_from_d3(pdev, true);
-	return 0;
-}
-
-static int loongson_dwmac_resume(struct device *dev, void *bsp_priv)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	int ret;
-
-	pci_restore_state(pdev);
-	pci_set_power_state(pdev, PCI_D0);
-
-	ret = pci_enable_device(pdev);
-	if (ret)
-		return ret;
-
-	pci_set_master(pdev);
-
-	return 0;
-}
-
 static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct plat_stmmacenet_data *plat;
@@ -564,7 +511,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 	struct loongson_data *ld;
 	int ret;
 
-	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+	plat = stmmac_plat_dat_alloc(&pdev->dev);
 	if (!plat)
 		return -ENOMEM;
 
@@ -598,10 +545,10 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
 		goto err_disable_device;
 
 	plat->bsp_priv = ld;
-	plat->setup = loongson_dwmac_setup;
+	plat->mac_setup = loongson_dwmac_setup;
 	plat->fix_soc_reset = loongson_dwmac_fix_reset;
-	plat->suspend = loongson_dwmac_suspend;
-	plat->resume = loongson_dwmac_resume;
+	plat->suspend = stmmac_pci_plat_suspend;
+	plat->resume = stmmac_pci_plat_resume;
 
 	ld->dev = &pdev->dev;
 	ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
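[Editor's note] The deleted loongson_dwmac_suspend()/resume() callbacks were the stock PCI power sequence, so they can be replaced by shared helpers from the new stmmac_libpci. A sketch of the suspend half, assuming stmmac_pci_plat_suspend() keeps the semantics of the deleted code (sketch_ name is hypothetical):

#include <linux/pci.h>

static int sketch_pci_plat_suspend(struct device *dev, void *bsp_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = pci_save_state(pdev);	/* snapshot config space */
	if (ret)
		return ret;

	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);	/* allow wake from D3 */
	return 0;
}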
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
index 32b5d1492e2e..de9aba756aac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
@@ -38,8 +38,6 @@
 #define GMAC_SHUT		BIT(6)
 
 #define PHY_INTF_SELI		GENMASK(30, 28)
-#define PHY_INTF_MII		FIELD_PREP(PHY_INTF_SELI, 0)
-#define PHY_INTF_RMII		FIELD_PREP(PHY_INTF_SELI, 4)
 
 struct ls1x_dwmac {
 	struct plat_stmmacenet_data *plat_dat;
@@ -50,7 +48,7 @@ struct ls1x_dwmac {
 struct ls1x_data {
 	int (*setup)(struct platform_device *pdev,
		     struct plat_stmmacenet_data *plat_dat);
-	int (*init)(struct platform_device *pdev, void *bsp_priv);
+	int (*init)(struct device *dev, void *bsp_priv);
 };
 
 static int ls1b_dwmac_setup(struct platform_device *pdev,
@@ -81,7 +79,7 @@ static int ls1b_dwmac_setup(struct platform_device *pdev,
 	return 0;
 }
 
-static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+static int ls1b_dwmac_syscon_init(struct device *dev, void *priv)
 {
 	struct ls1x_dwmac *dwmac = priv;
 	struct plat_stmmacenet_data *plat = dwmac->plat_dat;
@@ -100,7 +98,7 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
 					   GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
 			break;
 		default:
-			dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+			dev_err(dev, "Unsupported PHY mode %u\n",
				plat->phy_interface);
 			return -EOPNOTSUPP;
 		}
@@ -124,7 +122,7 @@
 					   GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
 			break;
 		default:
-			dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+			dev_err(dev, "Unsupported PHY mode %u\n",
				plat->phy_interface);
 			return -EOPNOTSUPP;
 		}
@@ -135,27 +133,23 @@
 	return 0;
 }
 
-static int ls1c_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+static int ls1c_dwmac_syscon_init(struct device *dev, void *priv)
 {
 	struct ls1x_dwmac *dwmac = priv;
 	struct plat_stmmacenet_data *plat = dwmac->plat_dat;
 	struct regmap *regmap = dwmac->regmap;
+	int phy_intf_sel;
 
-	switch (plat->phy_interface) {
-	case PHY_INTERFACE_MODE_MII:
-		regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
				   PHY_INTF_MII);
-		break;
-	case PHY_INTERFACE_MODE_RMII:
-		regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
				   PHY_INTF_RMII);
-		break;
-	default:
-		dev_err(&pdev->dev, "Unsupported PHY-mode %u\n",
+	phy_intf_sel = stmmac_get_phy_intf_sel(plat->phy_interface);
+	if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+	    phy_intf_sel != PHY_INTF_SEL_RMII) {
+		dev_err(dev, "Unsupported PHY-mode %u\n",
			plat->phy_interface);
 		return -EOPNOTSUPP;
 	}
 
+	regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
			   FIELD_PREP(PHY_INTF_SELI, phy_intf_sel));
+
 	regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0);
 
 	return 0;
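[Editor's note] Several conversions in this series test the result of stmmac_get_phy_intf_sel() against PHY_INTF_SEL_* constants or a negative errno. A sketch of the mapping such a helper performs, inferred from the callers and the 0x0/0x1/0x4 defines removed in these drivers; the actual core implementation may differ:

#include <linux/errno.h>
#include <linux/phy.h>

static int sketch_get_phy_intf_sel(phy_interface_t interface)
{
	switch (interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		return 0;	/* PHY_INTF_SEL_GMII_MII (assumed) */
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		return 1;	/* PHY_INTF_SEL_RGMII: delay variants collapse */
	case PHY_INTERFACE_MODE_RMII:
		return 4;	/* PHY_INTF_SEL_RMII (assumed) */
	default:
		return -EINVAL;
	}
}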
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 2562a6d036a2..c68d7de1f8ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -21,16 +21,29 @@
 /* Register defines for CREG syscon */
 #define LPC18XX_CREG_CREG6			0x12c
-# define LPC18XX_CREG_CREG6_ETHMODE_MASK	0x7
-# define LPC18XX_CREG_CREG6_ETHMODE_MII		0x0
-# define LPC18XX_CREG_CREG6_ETHMODE_RMII	0x4
+# define LPC18XX_CREG_CREG6_ETHMODE_MASK	GENMASK(2, 0)
+
+static int lpc18xx_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel)
+{
+	struct regmap *reg = bsp_priv;
+
+	if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+	    phy_intf_sel != PHY_INTF_SEL_RMII)
+		return -EINVAL;
+
+	regmap_update_bits(reg, LPC18XX_CREG_CREG6,
			   LPC18XX_CREG_CREG6_ETHMODE_MASK,
			   FIELD_PREP(LPC18XX_CREG_CREG6_ETHMODE_MASK,
				      phy_intf_sel));
+
+	return 0;
+}
 
 static int lpc18xx_dwmac_probe(struct platform_device *pdev)
 {
 	struct plat_stmmacenet_data *plat_dat;
 	struct stmmac_resources stmmac_res;
-	struct regmap *reg;
-	u8 ethmode;
+	struct regmap *regmap;
 	int ret;
 
 	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -41,25 +54,16 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
 	if (IS_ERR(plat_dat))
 		return PTR_ERR(plat_dat);
 
-	plat_dat->has_gmac = true;
+	plat_dat->core_type = DWMAC_CORE_GMAC;
 
-	reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
-	if (IS_ERR(reg)) {
+	regmap = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+	if (IS_ERR(regmap)) {
 		dev_err(&pdev->dev, "syscon lookup failed\n");
-		return PTR_ERR(reg);
-	}
-
-	if (plat_dat->phy_interface == PHY_INTERFACE_MODE_MII) {
-		ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
-	} else if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) {
-		ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
-	} else {
-		dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
-		return -EINVAL;
+		return PTR_ERR(regmap);
 	}
 
-	regmap_update_bits(reg, LPC18XX_CREG_CREG6,
			   LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
+	plat_dat->bsp_priv = regmap;
+	plat_dat->set_phy_intf_sel = lpc18xx_set_phy_intf_sel;
 
 	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
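[Editor's note] With the mode check moved out of probe and into the callback, the core presumably resolves the code once and delegates. An assumed call shape (sketch only; the real core-side sequencing is not shown in this diff):

static int sketch_core_apply_intf_sel(struct plat_stmmacenet_data *plat)
{
	int sel = stmmac_get_phy_intf_sel(plat->phy_interface);

	if (sel < 0)
		return sel;		/* mode has no intf-sel encoding */

	if (!plat->set_phy_intf_sel)
		return 0;		/* glue has nothing to program */

	/* glue returns -EINVAL for codes its syscon cannot express */
	return plat->set_phy_intf_sel(plat->bsp_priv, sel);
}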
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index f1b36f0a401d..1f2d7d19ca56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -17,9 +17,6 @@
 
 /* Peri Configuration register for mt2712 */
 #define PERI_ETH_PHY_INTF_SEL	0x418
-#define PHY_INTF_MII		0
-#define PHY_INTF_RGMII		1
-#define PHY_INTF_RMII		4
 #define RMII_CLK_SRC_RXC	BIT(4)
 #define RMII_CLK_SRC_INTERNAL	BIT(5)
 
@@ -88,7 +85,8 @@ struct mediatek_dwmac_plat_data {
 };
 
 struct mediatek_dwmac_variant {
-	int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
+	int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat,
				       u8 phy_intf_sel);
 	int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
 
 	/* clock ids to be requested */
@@ -109,29 +107,16 @@ static const char * const mt8195_dwmac_clk_l[] = {
	"axi", "apb", "mac_cg", "mac_main", "ptp_ref"
 };
 
-static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
+static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat,
				u8 phy_intf_sel)
 {
-	int rmii_clk_from_mac = plat->rmii_clk_from_mac ? RMII_CLK_SRC_INTERNAL : 0;
-	int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
-	u32 intf_val = 0;
+	u32 intf_val = phy_intf_sel;
 
-	/* select phy interface in top control domain */
-	switch (plat->phy_mode) {
-	case PHY_INTERFACE_MODE_MII:
-		intf_val |= PHY_INTF_MII;
-		break;
-	case PHY_INTERFACE_MODE_RMII:
-		intf_val |= (PHY_INTF_RMII | rmii_rxc | rmii_clk_from_mac);
-		break;
-	case PHY_INTERFACE_MODE_RGMII:
-	case PHY_INTERFACE_MODE_RGMII_TXID:
-	case PHY_INTERFACE_MODE_RGMII_RXID:
-	case PHY_INTERFACE_MODE_RGMII_ID:
-		intf_val |= PHY_INTF_RGMII;
-		break;
-	default:
-		dev_err(plat->dev, "phy interface not supported\n");
-		return -EINVAL;
+	if (phy_intf_sel == PHY_INTF_SEL_RMII) {
+		if (plat->rmii_clk_from_mac)
+			intf_val |= RMII_CLK_SRC_INTERNAL;
+		if (plat->rmii_rxc)
+			intf_val |= RMII_CLK_SRC_RXC;
 	}
 
 	regmap_write(plat->peri_regmap, PERI_ETH_PHY_INTF_SEL, intf_val);
@@ -288,30 +273,16 @@ static const struct mediatek_dwmac_variant mt2712_gmac_variant = {
 	.tx_delay_max = 17600,
 };
 
-static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat)
+static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat,
				u8 phy_intf_sel)
 {
-	int rmii_clk_from_mac = plat->rmii_clk_from_mac ? MT8195_RMII_CLK_SRC_INTERNAL : 0;
-	int rmii_rxc = plat->rmii_rxc ? MT8195_RMII_CLK_SRC_RXC : 0;
-	u32 intf_val = 0;
+	u32 intf_val = FIELD_PREP(MT8195_ETH_INTF_SEL, phy_intf_sel);
 
-	/* select phy interface in top control domain */
-	switch (plat->phy_mode) {
-	case PHY_INTERFACE_MODE_MII:
-		intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_MII);
-		break;
-	case PHY_INTERFACE_MODE_RMII:
-		intf_val |= (rmii_rxc | rmii_clk_from_mac);
-		intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RMII);
-		break;
-	case PHY_INTERFACE_MODE_RGMII:
-	case PHY_INTERFACE_MODE_RGMII_TXID:
-	case PHY_INTERFACE_MODE_RGMII_RXID:
-	case PHY_INTERFACE_MODE_RGMII_ID:
-		intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RGMII);
-		break;
-	default:
-		dev_err(plat->dev, "phy interface not supported\n");
-		return -EINVAL;
+	if (phy_intf_sel == PHY_INTF_SEL_RMII) {
+		if (plat->rmii_clk_from_mac)
+			intf_val |= MT8195_RMII_CLK_SRC_INTERNAL;
+		if (plat->rmii_rxc)
+			intf_val |= MT8195_RMII_CLK_SRC_RXC;
 	}
 
 	/* MT8195 only support external PHY */
@@ -527,10 +498,18 @@ static int mediatek_dwmac_init(struct device *dev, void *priv)
 {
 	struct mediatek_dwmac_plat_data *plat = priv;
 	const struct mediatek_dwmac_variant *variant = plat->variant;
-	int ret;
+	int phy_intf_sel, ret;
 
 	if (variant->dwmac_set_phy_interface) {
-		ret = variant->dwmac_set_phy_interface(plat);
+		phy_intf_sel = stmmac_get_phy_intf_sel(plat->phy_mode);
+		if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
+		    phy_intf_sel != PHY_INTF_SEL_RGMII &&
+		    phy_intf_sel != PHY_INTF_SEL_RMII) {
+			dev_err(plat->dev, "phy interface not supported\n");
+			return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL;
+		}
+
+		ret = variant->dwmac_set_phy_interface(plat, phy_intf_sel);
 		if (ret) {
 			dev_err(dev, "failed to set phy interface, err = %d\n", ret);
 			return ret;
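[Editor's note] The MediaTek glue folds the RMII reference-clock source into the same syscon word as the interface-select code; the switch statements above reduce to flag composition. A sketch of that composition under the BIT(4)/BIT(5) encoding visible in the mt2712 defines (sketch_ name is hypothetical):

static u32 sketch_mtk_intf_word(u8 phy_intf_sel, bool clk_from_mac,
				bool clk_from_rxc)
{
	u32 val = phy_intf_sel;		/* low bits: interface select */

	if (phy_intf_sel == 4 /* RMII */) {
		if (clk_from_mac)
			val |= BIT(5);	/* RMII_CLK_SRC_INTERNAL */
		if (clk_from_rxc)
			val |= BIT(4);	/* RMII_CLK_SRC_RXC */
	}

	return val;
}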
phy_intf_sel : -EINVAL;
 	}
 
+	meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_EXT_PHY_MODE_MASK,
				FIELD_PREP(PRG_ETH0_EXT_PHY_MODE_MASK,
					   phy_intf_sel));
+
 	return 0;
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index d8fd4d8f6ced..0826a7bd32ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -76,10 +76,6 @@
 #define RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL	BIT(6)
 #define RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN	BIT(5)
 
-/* MAC_CTRL_REG bits */
-#define ETHQOS_MAC_CTRL_SPEED_MODE		BIT(14)
-#define ETHQOS_MAC_CTRL_PORT_SEL		BIT(15)
-
 /* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */
 #define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN	BIT(3)
 
@@ -96,7 +92,6 @@ struct ethqos_emac_driver_data {
 	bool rgmii_config_loopback_en;
 	bool has_emac_ge_3;
 	const char *link_clk_name;
-	bool has_integrated_pcs;
 	u32 dma_addr_width;
 	struct dwmac4_addrs dwmac4_addrs;
 	bool needs_sgmii_loopback;
@@ -121,27 +116,39 @@ struct qcom_ethqos {
 	bool needs_sgmii_loopback;
 };
 
-static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
+static u32 rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
 {
 	return readl(ethqos->rgmii_base + offset);
 }
 
-static void rgmii_writel(struct qcom_ethqos *ethqos,
			 int value, unsigned int offset)
+static void rgmii_writel(struct qcom_ethqos *ethqos, u32 value,
			 unsigned int offset)
 {
 	writel(value, ethqos->rgmii_base + offset);
 }
 
-static void rgmii_updatel(struct qcom_ethqos *ethqos,
			  int mask, int val, unsigned int offset)
+static void rgmii_updatel(struct qcom_ethqos *ethqos, u32 mask, u32 val,
			  unsigned int offset)
 {
-	unsigned int temp;
+	u32 temp;
 
 	temp = rgmii_readl(ethqos, offset);
 	temp = (temp & ~(mask)) | val;
 	rgmii_writel(ethqos, temp, offset);
 }
 
+static void rgmii_setmask(struct qcom_ethqos *ethqos, u32 mask,
			  unsigned int offset)
+{
+	rgmii_updatel(ethqos, mask, mask, offset);
+}
+
+static void rgmii_clrmask(struct qcom_ethqos *ethqos, u32 mask,
			  unsigned int offset)
+{
+	rgmii_updatel(ethqos, mask, 0, offset);
+}
+
 static void rgmii_dump(void *priv)
 {
 	struct qcom_ethqos *ethqos = priv;
@@ -199,8 +206,7 @@ qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable)
 static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
 {
 	qcom_ethqos_set_sgmii_loopback(ethqos, true);
-	rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
		      RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
+	rgmii_setmask(ethqos, RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
 }
 
 static const struct ethqos_emac_por emac_v2_3_0_por[] = {
@@ -282,7 +288,6 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
 	.rgmii_config_loopback_en = false,
 	.has_emac_ge_3 = true,
 	.link_clk_name = "phyaux",
-	.has_integrated_pcs = true,
 	.needs_sgmii_loopback = true,
 	.dma_addr_width = 36,
 	.dwmac4_addrs = {
@@ -306,69 +311,55 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
 {
 	struct device *dev = &ethqos->pdev->dev;
-	unsigned int val;
-	int retry = 1000;
+	u32 val;
 
 	/* Set CDR_EN */
-	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
		      SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
+	rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
 
 	/* Set CDR_EXT_EN */
-	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
		      SDCC_DLL_CONFIG_CDR_EXT_EN, SDCC_HC_REG_DLL_CONFIG);
+	rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
		      SDCC_HC_REG_DLL_CONFIG);
 
 	/* Clear CK_OUT_EN */
-	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
		      0, SDCC_HC_REG_DLL_CONFIG);
+	rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
		      SDCC_HC_REG_DLL_CONFIG);
 
 	/* Set DLL_EN */
-	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
		      SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+	rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
 
 	if (!ethqos->has_emac_ge_3) {
-		rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
			      0, SDCC_HC_REG_DLL_CONFIG);
+		rgmii_clrmask(ethqos, SDCC_DLL_MCLK_GATING_EN,
			      SDCC_HC_REG_DLL_CONFIG);
 
-		rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
			      0, SDCC_HC_REG_DLL_CONFIG);
+		rgmii_clrmask(ethqos, SDCC_DLL_CDR_FINE_PHASE,
			      SDCC_HC_REG_DLL_CONFIG);
 	}
 
 	/* Wait for CK_OUT_EN clear */
-	do {
-		val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
-		val &= SDCC_DLL_CONFIG_CK_OUT_EN;
-		if (!val)
-			break;
-		mdelay(1);
-		retry--;
-	} while (retry > 0);
-	if (!retry)
+	if (read_poll_timeout_atomic(rgmii_readl, val,
				     !(val & SDCC_DLL_CONFIG_CK_OUT_EN),
				     1000, 1000000, false,
				     ethqos, SDCC_HC_REG_DLL_CONFIG))
 		dev_err(dev, "Clear CK_OUT_EN timedout\n");
 
 	/* Set CK_OUT_EN */
-	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
		      SDCC_DLL_CONFIG_CK_OUT_EN, SDCC_HC_REG_DLL_CONFIG);
+	rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
		      SDCC_HC_REG_DLL_CONFIG);
 
 	/* Wait for CK_OUT_EN set */
-	retry = 1000;
-	do {
-		val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
-		val &= SDCC_DLL_CONFIG_CK_OUT_EN;
-		if (val)
-			break;
-		mdelay(1);
-		retry--;
-	} while (retry > 0);
-	if (!retry)
+	if (read_poll_timeout_atomic(rgmii_readl, val,
				     val & SDCC_DLL_CONFIG_CK_OUT_EN,
				     1000, 1000000, false,
				     ethqos, SDCC_HC_REG_DLL_CONFIG))
 		dev_err(dev, "Set CK_OUT_EN timedout\n");
 
 	/* Set DDR_CAL_EN */
-	rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
		      SDCC_DLL_CONFIG2_DDR_CAL_EN, SDCC_HC_REG_DLL_CONFIG2);
+	rgmii_setmask(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
		      SDCC_HC_REG_DLL_CONFIG2);
 
 	if (!ethqos->has_emac_ge_3) {
-		rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
			      0, SDCC_HC_REG_DLL_CONFIG2);
+		rgmii_clrmask(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
			      SDCC_HC_REG_DLL_CONFIG2);
 
 		rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
			      0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
@@ -376,8 +367,7 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
 		rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
			      BIT(2), SDCC_HC_REG_DLL_CONFIG2);
 
-		rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
			      SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+		rgmii_setmask(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
			      SDCC_HC_REG_DLL_CONFIG2);
 	}
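[Editor's note] The open-coded mdelay()/retry loops above become read_poll_timeout_atomic() calls (linux/iopoll.h), with the same 1 ms poll period and 1 s budget. A generic, self-contained sketch of the transformation; my_readl, the BIT(7) condition, and the 0x100 offset are hypothetical stand-ins:

#include <linux/io.h>
#include <linux/iopoll.h>

static u32 my_readl(void __iomem *base, unsigned int off)
{
	return readl(base + off);
}

static int wait_ready(void __iomem *base)
{
	u32 val;

	/* args: op, val, condition, delay_us, timeout_us,
	 * delay-before-first-read, then op's arguments */
	return read_poll_timeout_atomic(my_readl, val, val & BIT(7),
					1000, 1000000, false, base, 0x100);
}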
@@ -398,8 +388,8 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
 		phase_shift = RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN;
 
 	/* Disable loopback mode */
-	rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
		      0, RGMII_IO_MACRO_CONFIG2);
+	rgmii_clrmask(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
		      RGMII_IO_MACRO_CONFIG2);
 
 	/* Determine if this platform wants loopback enabled after programming */
 	if (ethqos->rgmii_config_loopback_en)
@@ -408,29 +398,26 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
 		loopback = 0;
 
 	/* Select RGMII, write 0 to interface select */
-	rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
		      0, RGMII_IO_MACRO_CONFIG);
+	rgmii_clrmask(ethqos, RGMII_CONFIG_INTF_SEL, RGMII_IO_MACRO_CONFIG);
 
 	switch (speed) {
 	case SPEED_1000:
-		rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
			      RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
			      0, RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
			      RGMII_CONFIG_POS_NEG_DATA_SEL,
+		rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
			      RGMII_IO_MACRO_CONFIG);
+		rgmii_clrmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
			      RGMII_IO_MACRO_CONFIG);
+		rgmii_setmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
			      RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
			      RGMII_CONFIG_PROG_SWAP, RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
			      0, RGMII_IO_MACRO_CONFIG2);
+		rgmii_setmask(ethqos, RGMII_CONFIG_PROG_SWAP,
			      RGMII_IO_MACRO_CONFIG);
+		rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
			      RGMII_IO_MACRO_CONFIG2);
 		rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
			      phase_shift, RGMII_IO_MACRO_CONFIG2);
-		rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
			      0, RGMII_IO_MACRO_CONFIG2);
-		rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
			      RGMII_CONFIG2_RX_PROG_SWAP,
+		rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
			      RGMII_IO_MACRO_CONFIG2);
+		rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
			      RGMII_IO_MACRO_CONFIG2);
 
 		/* PRG_RCLK_DLY = TCXO period * TCXO_CYCLES_CNT / 2 * RX delay ns,
@@ -445,87 +432,78 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
 			rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
				      57, SDCC_HC_REG_DDR_CONFIG);
 		}
-		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
			      SDCC_DDR_CONFIG_PRG_DLY_EN,
+		rgmii_setmask(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
			      SDCC_HC_REG_DDR_CONFIG);
 		rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
			      loopback, RGMII_IO_MACRO_CONFIG);
 		break;
 
 	case SPEED_100:
-		rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
			      RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
			      RGMII_CONFIG_BYPASS_TX_ID_EN,
+		rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
			      RGMII_IO_MACRO_CONFIG);
+		rgmii_setmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
			      RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
			      0, RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
			      0, RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
			      0, RGMII_IO_MACRO_CONFIG2);
+		rgmii_clrmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
			      RGMII_IO_MACRO_CONFIG);
+		rgmii_clrmask(ethqos, RGMII_CONFIG_PROG_SWAP,
			      RGMII_IO_MACRO_CONFIG);
+		rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
			      RGMII_IO_MACRO_CONFIG2);
 		rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
			      phase_shift, RGMII_IO_MACRO_CONFIG2);
 		rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2,
			      BIT(6), RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
			      0, RGMII_IO_MACRO_CONFIG2);
+		rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
			      RGMII_IO_MACRO_CONFIG2);
 		if (ethqos->has_emac_ge_3)
-			rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
				      RGMII_CONFIG2_RX_PROG_SWAP,
+			rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
				      RGMII_IO_MACRO_CONFIG2);
 		else
-			rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
				      0, RGMII_IO_MACRO_CONFIG2);
+			rgmii_clrmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
				      RGMII_IO_MACRO_CONFIG2);
 
 		/* Write 0x5 to PRG_RCLK_DLY_CODE */
 		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
			      (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
-		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
			      SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+		rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
			      SDCC_HC_REG_DDR_CONFIG);
-		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
			      SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+		rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
			      SDCC_HC_REG_DDR_CONFIG);
 		rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
			      loopback, RGMII_IO_MACRO_CONFIG);
 		break;
 
 	case SPEED_10:
-		rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
			      RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
			      RGMII_CONFIG_BYPASS_TX_ID_EN,
+		rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
			      RGMII_IO_MACRO_CONFIG);
+		rgmii_setmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
			      RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
			      0, RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
			      0, RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
			      0, RGMII_IO_MACRO_CONFIG2);
+		rgmii_clrmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
			      RGMII_IO_MACRO_CONFIG);
+		rgmii_clrmask(ethqos, RGMII_CONFIG_PROG_SWAP,
			      RGMII_IO_MACRO_CONFIG);
+		rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
			      RGMII_IO_MACRO_CONFIG2);
 		rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
			      phase_shift, RGMII_IO_MACRO_CONFIG2);
 		rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
			      BIT(12) | GENMASK(9, 8), RGMII_IO_MACRO_CONFIG);
-		rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
			      0, RGMII_IO_MACRO_CONFIG2);
+		rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
			      RGMII_IO_MACRO_CONFIG2);
 		if (ethqos->has_emac_ge_3)
-			rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
				      RGMII_CONFIG2_RX_PROG_SWAP,
+			rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
				      RGMII_IO_MACRO_CONFIG2);
 		else
-			rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
				      0, RGMII_IO_MACRO_CONFIG2);
+			rgmii_clrmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
				      RGMII_IO_MACRO_CONFIG2);
 
 		/* Write 0x5 to PRG_RCLK_DLY_CODE */
 		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
			      (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
-		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
			      SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+		rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
			      SDCC_HC_REG_DDR_CONFIG);
-		rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
			      SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+		rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
			      SDCC_HC_REG_DDR_CONFIG);
 		rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
			      loopback, RGMII_IO_MACRO_CONFIG);
@@ -541,8 +519,8 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
 static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
 {
 	struct device *dev = &ethqos->pdev->dev;
-	volatile unsigned int dll_lock;
-	unsigned int i, retry = 1000;
+	unsigned int i;
+	u32 val;
 
 	/* Reset to POR values and enable clk */
 	for (i = 0; i < ethqos->num_por; i++)
@@ -553,12 +531,12 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
 	/* Initialize the DLL first */
 
 	/* Set DLL_RST */
-	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
		      SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
+	rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_RST,
		      SDCC_HC_REG_DLL_CONFIG);
 
 	/* Set PDN */
-	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
		      SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
+	rgmii_setmask(ethqos, SDCC_DLL_CONFIG_PDN,
		      SDCC_HC_REG_DLL_CONFIG);
 
 	if (ethqos->has_emac_ge_3) {
 		if (speed == SPEED_1000) {
@@ -572,21 +550,18 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
 	}
 
 	/* Clear DLL_RST */
-	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, 0,
		      SDCC_HC_REG_DLL_CONFIG);
+	rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
 
 	/* Clear PDN */
-	rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
		      SDCC_HC_REG_DLL_CONFIG);
+	rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
 
 	if (speed != SPEED_100 && speed != SPEED_10) {
 		/* Set DLL_EN */
-		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
			      SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+		rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_EN,
			      SDCC_HC_REG_DLL_CONFIG);
 
 		/* Set CK_OUT_EN */
-		rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
			      SDCC_DLL_CONFIG_CK_OUT_EN,
+		rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
			      SDCC_HC_REG_DLL_CONFIG);
 
 		/* Set USR_CTL bit 26 with mask of 3 bits */
@@ -595,14 +570,10 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
			      SDCC_USR_CTL);
 
 		/* wait for DLL LOCK */
-		do {
-			mdelay(1);
-			dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
-			if (dll_lock & SDC4_STATUS_DLL_LOCK)
-				break;
-			retry--;
-		} while (retry > 0);
-		if (!retry)
+		if (read_poll_timeout_atomic(rgmii_readl, val,
					     val & SDC4_STATUS_DLL_LOCK,
					     1000, 1000000, true,
					     ethqos, SDC4_STATUS))
 			dev_err(dev, "Timeout while waiting for DLL lock\n");
 	}
 
@@ -624,7 +595,7 @@ static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed)
 
 static void ethqos_pcs_set_inband(struct stmmac_priv *priv, bool enable)
 {
-	stmmac_pcs_ctrl_ane(priv, enable, 0, 0);
+	stmmac_pcs_ctrl_ane(priv, enable, 0);
 }
 
 /* On interface toggle MAC registers gets reset.
@@ -634,35 +605,25 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
 {
 	struct net_device *dev = platform_get_drvdata(ethqos->pdev);
 	struct stmmac_priv *priv = netdev_priv(dev);
-	int val;
-
-	val = readl(ethqos->mac_base + MAC_CTRL_REG);
 
 	switch (speed) {
 	case SPEED_2500:
-		val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
-		rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+		rgmii_setmask(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_IO_MACRO_CONFIG2);
 		ethqos_set_serdes_speed(ethqos, SPEED_2500);
 		ethqos_pcs_set_inband(priv, false);
 		break;
 	case SPEED_1000:
-		val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
-		rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+		rgmii_setmask(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
			      RGMII_IO_MACRO_CONFIG2);
 		ethqos_set_serdes_speed(ethqos, SPEED_1000);
 		ethqos_pcs_set_inband(priv, true);
 		break;
 	case SPEED_100:
-		val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE;
 		ethqos_set_serdes_speed(ethqos, SPEED_1000);
 		ethqos_pcs_set_inband(priv, true);
 		break;
 	case SPEED_10:
-		val |= ETHQOS_MAC_CTRL_PORT_SEL;
-		val &= ~ETHQOS_MAC_CTRL_SPEED_MODE;
 		rgmii_updatel(ethqos, RGMII_CONFIG_SGMII_CLK_DVDR,
			      FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
					 SGMII_10M_RX_CLK_DVDR),
@@ -672,9 +633,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
 		break;
 	}
 
-	writel(val, ethqos->mac_base + MAC_CTRL_REG);
-
-	return val;
+	return 0;
 }
 
 static int ethqos_configure(struct qcom_ethqos *ethqos, int speed)
@@ -848,7 +807,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
 	plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
 	plat_dat->dump_debug_regs = rgmii_dump;
 	plat_dat->ptp_clk_freq_config = ethqos_ptp_clk_freq_config;
-	plat_dat->has_gmac4 = 1;
+	plat_dat->core_type = DWMAC_CORE_GMAC4;
 	if (ethqos->has_emac_ge_3)
 		plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
 	plat_dat->pmt = 1;
@@ -856,8 +815,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
 	plat_dat->flags |= STMMAC_FLAG_TSO_EN;
 	if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
 		plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
-	if (data->has_integrated_pcs)
-		plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
 	if (data->dma_addr_width)
 		plat_dat->host_dma_width = data->dma_addr_width;
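[Editor's note] The bulk of the churn above is mechanical: rgmii_updatel(x, mask, mask, reg) becomes rgmii_setmask() and rgmii_updatel(x, mask, 0, reg) becomes rgmii_clrmask(). The wrappers are thin sugar over a read-modify-write helper; a generic, regmap-free sketch with hypothetical mmio_ names:

#include <linux/io.h>

static void mmio_updatel(void __iomem *addr, u32 mask, u32 val)
{
	writel((readl(addr) & ~mask) | val, addr);
}

static void mmio_setmask(void __iomem *addr, u32 mask)
{
	mmio_updatel(addr, mask, mask);	/* set every bit in mask */
}

static void mmio_clrmask(void __iomem *addr, u32 mask)
{
	mmio_updatel(addr, mask, 0);	/* clear every bit in mask */
}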
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
index bc7bb975803c..be7f5eb2cdcf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -91,7 +91,7 @@ static struct phylink_pcs *renesas_gmac_select_pcs(struct stmmac_priv *priv,
 	return priv->hw->phylink_pcs;
 }
 
-static int renesas_gbeth_init(struct platform_device *pdev, void *priv)
+static int renesas_gbeth_init(struct device *dev, void *priv)
 {
 	struct plat_stmmacenet_data *plat_dat;
 	struct renesas_gbeth *gbeth = priv;
@@ -113,7 +113,7 @@ static int renesas_gbeth_init(struct platform_device *pdev, void *priv)
 	return ret;
 }
 
-static void renesas_gbeth_exit(struct platform_device *pdev, void *priv)
+static void renesas_gbeth_exit(struct device *dev, void *priv)
 {
 	struct plat_stmmacenet_data *plat_dat;
 	struct renesas_gbeth *gbeth = priv;
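[Editor's note] The init/exit callback signatures switch from struct platform_device * to struct device *, so the same BSP hooks work regardless of bus type (the loongson PCI conversion above relies on this). A trivial sketch of the new hook shape, with a hypothetical name:

#include <linux/device.h>

static int my_bsp_init(struct device *dev, void *bsp_priv)
{
	/* no to_platform_device()/&pdev->dev dance needed any more */
	dev_dbg(dev, "bsp init\n");
	return 0;
}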
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 0786816e05f0..0a95f54e725e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -149,11 +149,13 @@ static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
 	return clk_set_rate(clk_mac_speed, rate);
 }
 
-#define HIWORD_UPDATE(val, mask, shift) \
		(FIELD_PREP_WM16((mask) << (shift), (val)))
+#define GRF_FIELD(hi, lo, val) \
		FIELD_PREP_WM16(GENMASK_U16(hi, lo), val)
+#define GRF_FIELD_CONST(hi, lo, val) \
		FIELD_PREP_WM16_CONST(GENMASK_U16(hi, lo), val)
 
-#define GRF_BIT(nr)	(BIT(nr) | BIT(nr+16))
-#define GRF_CLR_BIT(nr)	(BIT(nr+16))
+#define GRF_BIT(nr)	(BIT(nr) | BIT(nr+16))
+#define GRF_CLR_BIT(nr)	(BIT(nr+16))
 
 #define DELAY_ENABLE(soc, tx, rx) \
	(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
@@ -167,9 +169,9 @@ static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
 #define RK_MACPHY_ENABLE		GRF_BIT(0)
 #define RK_MACPHY_DISABLE		GRF_CLR_BIT(0)
 #define RK_MACPHY_CFG_CLK_50M		GRF_BIT(14)
-#define RK_GMAC2PHY_RMII_MODE		(GRF_BIT(6) | GRF_CLR_BIT(7))
-#define RK_GRF_CON2_MACPHY_ID		HIWORD_UPDATE(0x1234, 0xffff, 0)
-#define RK_GRF_CON3_MACPHY_ID		HIWORD_UPDATE(0x35, 0x3f, 0)
+#define RK_GMAC2PHY_RMII_MODE		GRF_FIELD(7, 6, 1)
+#define RK_GRF_CON2_MACPHY_ID		GRF_FIELD(15, 0, 0x1234)
+#define RK_GRF_CON3_MACPHY_ID		GRF_FIELD(5, 0, 0x35)
 
 static void rk_gmac_integrated_ephy_powerup(struct rk_priv_data *priv)
 {
@@ -203,7 +205,7 @@ static void rk_gmac_integrated_ephy_powerdown(struct rk_priv_data *priv)
 #define RK_FEPHY_SHUTDOWN		GRF_BIT(1)
 #define RK_FEPHY_POWERUP		GRF_CLR_BIT(1)
 #define RK_FEPHY_INTERNAL_RMII_SEL	GRF_BIT(6)
-#define RK_FEPHY_24M_CLK_SEL		(GRF_BIT(8) | GRF_BIT(9))
+#define RK_FEPHY_24M_CLK_SEL		GRF_FIELD(9, 8, 3)
 #define RK_FEPHY_PHY_ID			GRF_BIT(11)
 
 static void rk_gmac_integrated_fephy_powerup(struct rk_priv_data *priv,
@@ -232,15 +234,14 @@ static void rk_gmac_integrated_fephy_powerdown(struct rk_priv_data *priv,
 #define PX30_GRF_GMAC_CON1		0x0904
 
 /* PX30_GRF_GMAC_CON1 */
-#define PX30_GMAC_PHY_INTF_SEL_RMII	(GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
					 GRF_BIT(6))
+#define PX30_GMAC_PHY_INTF_SEL(val)	GRF_FIELD(6, 4, val)
 #define PX30_GMAC_SPEED_10M		GRF_CLR_BIT(2)
 #define PX30_GMAC_SPEED_100M		GRF_BIT(2)
 
 static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
 {
 	regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
		     PX30_GMAC_PHY_INTF_SEL_RMII);
+	regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
		     PX30_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
 }
 
 static int px30_set_speed(struct rk_priv_data *bsp_priv,
@@ -285,23 +286,20 @@ static const struct rk_gmac_ops px30_ops = {
 #define RK3128_GMAC_TXCLK_DLY_DISABLE	GRF_CLR_BIT(14)
 #define RK3128_GMAC_RXCLK_DLY_ENABLE	GRF_BIT(15)
 #define RK3128_GMAC_RXCLK_DLY_DISABLE	GRF_CLR_BIT(15)
-#define RK3128_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3128_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3128_GMAC_CLK_RX_DL_CFG(val)	GRF_FIELD(13, 7, val)
+#define RK3128_GMAC_CLK_TX_DL_CFG(val)	GRF_FIELD(6, 0, val)
 
 /* RK3128_GRF_MAC_CON1 */
-#define RK3128_GMAC_PHY_INTF_SEL_RGMII	\
		(GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
-#define RK3128_GMAC_PHY_INTF_SEL_RMII	\
		(GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define RK3128_GMAC_PHY_INTF_SEL(val)	GRF_FIELD(8, 6, val)
 #define RK3128_GMAC_FLOW_CTRL		GRF_BIT(9)
 #define RK3128_GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(9)
 #define RK3128_GMAC_SPEED_10M		GRF_CLR_BIT(10)
 #define RK3128_GMAC_SPEED_100M		GRF_BIT(10)
 #define RK3128_GMAC_RMII_CLK_25M	GRF_BIT(11)
 #define RK3128_GMAC_RMII_CLK_2_5M	GRF_CLR_BIT(11)
-#define RK3128_GMAC_CLK_125M		(GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
-#define RK3128_GMAC_CLK_25M		(GRF_BIT(12) | GRF_BIT(13))
-#define RK3128_GMAC_CLK_2_5M		(GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_CLK_125M		GRF_FIELD_CONST(13, 12, 0)
+#define RK3128_GMAC_CLK_25M		GRF_FIELD_CONST(13, 12, 3)
+#define RK3128_GMAC_CLK_2_5M		GRF_FIELD_CONST(13, 12, 2)
 #define RK3128_GMAC_RMII_MODE		GRF_BIT(14)
 #define RK3128_GMAC_RMII_MODE_CLR	GRF_CLR_BIT(14)
 
@@ -309,7 +307,7 @@
 static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
				int tx_delay, int rx_delay)
 {
 	regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
-		     RK3128_GMAC_PHY_INTF_SEL_RGMII |
+		     RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
		     RK3128_GMAC_RMII_MODE_CLR);
 	regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
		     DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
@@ -320,7 +318,8 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
 static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
 {
 	regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
-		     RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
+		     RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+		     RK3128_GMAC_RMII_MODE);
 }
 
 static const struct rk_reg_speed_data rk3128_reg_speed_data = {
@@ -350,23 +349,20 @@ static const struct rk_gmac_ops rk3128_ops = {
 #define RK3228_GRF_CON_MUX	0x50
 
 /* RK3228_GRF_MAC_CON0 */
-#define RK3228_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3228_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3228_GMAC_CLK_RX_DL_CFG(val)	GRF_FIELD(13, 7, val)
+#define RK3228_GMAC_CLK_TX_DL_CFG(val)	GRF_FIELD(6, 0, val)
 
 /* RK3228_GRF_MAC_CON1 */
-#define RK3228_GMAC_PHY_INTF_SEL_RGMII	\
		(GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3228_GMAC_PHY_INTF_SEL_RMII	\
		(GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3228_GMAC_PHY_INTF_SEL(val)	GRF_FIELD(6, 4, val)
 #define RK3228_GMAC_FLOW_CTRL		GRF_BIT(3)
 #define RK3228_GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(3)
 #define RK3228_GMAC_SPEED_10M		GRF_CLR_BIT(2)
 #define RK3228_GMAC_SPEED_100M		GRF_BIT(2)
 #define RK3228_GMAC_RMII_CLK_25M	GRF_BIT(7)
 #define RK3228_GMAC_RMII_CLK_2_5M	GRF_CLR_BIT(7)
-#define RK3228_GMAC_CLK_125M		(GRF_CLR_BIT(8) | GRF_CLR_BIT(9))
-#define RK3228_GMAC_CLK_25M		(GRF_BIT(8) | GRF_BIT(9))
-#define RK3228_GMAC_CLK_2_5M		(GRF_CLR_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_CLK_125M		GRF_FIELD_CONST(9, 8, 0)
+#define RK3228_GMAC_CLK_25M		GRF_FIELD_CONST(9, 8, 3)
+#define RK3228_GMAC_CLK_2_5M		GRF_FIELD_CONST(9, 8, 2)
 #define RK3228_GMAC_RMII_MODE		GRF_BIT(10)
 #define RK3228_GMAC_RMII_MODE_CLR	GRF_CLR_BIT(10)
 #define RK3228_GMAC_TXCLK_DLY_ENABLE	GRF_BIT(0)
@@ -381,7 +377,7 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
				int tx_delay, int rx_delay)
 {
 	regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
-		     RK3228_GMAC_PHY_INTF_SEL_RGMII |
+		     RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
		     RK3228_GMAC_RMII_MODE_CLR |
		     DELAY_ENABLE(RK3228, tx_delay, rx_delay));
 
@@ -393,7 +389,7 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
 static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
 {
 	regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
-		     RK3228_GMAC_PHY_INTF_SEL_RMII |
+		     RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
		     RK3228_GMAC_RMII_MODE);
 
 	/* set MAC to RMII mode */
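[Editor's note] Rockchip GRF registers use "hiword mask" semantics: the top 16 bits of a write select which of the low 16 bits may change, so fields can be updated with a plain regmap_write() and no read-modify-write. A sketch of what the new GRF_FIELD() macro expands to and how a caller uses it; SKETCH_FIELD/sketch_set_rmii are illustrative names:

#include <linux/bitfield.h>
#include <linux/regmap.h>

#define SKETCH_FIELD(hi, lo, val) \
	FIELD_PREP_WM16(GENMASK_U16(hi, lo), val)

static void sketch_set_rmii(struct regmap *grf, unsigned int reg)
{
	/* SKETCH_FIELD(8, 6, 4): write-enable bits 8..6 in the high
	 * halfword and set the field to 4 (the RMII intf-sel code) */
	regmap_write(grf, reg, SKETCH_FIELD(8, 6, 4));
}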
@@ -435,19 +431,16 @@ static const struct rk_gmac_ops rk3228_ops = {
 #define RK3288_GRF_SOC_CON1	0x0248
 #define RK3288_GRF_SOC_CON3	0x0250
 
 /*RK3288_GRF_SOC_CON1*/
-#define RK3288_GMAC_PHY_INTF_SEL_RGMII	(GRF_BIT(6) | GRF_CLR_BIT(7) | \
					 GRF_CLR_BIT(8))
-#define RK3288_GMAC_PHY_INTF_SEL_RMII	(GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | \
					 GRF_BIT(8))
+#define RK3288_GMAC_PHY_INTF_SEL(val)	GRF_FIELD(8, 6, val)
 #define RK3288_GMAC_FLOW_CTRL		GRF_BIT(9)
 #define RK3288_GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(9)
 #define RK3288_GMAC_SPEED_10M		GRF_CLR_BIT(10)
 #define RK3288_GMAC_SPEED_100M		GRF_BIT(10)
 #define RK3288_GMAC_RMII_CLK_25M	GRF_BIT(11)
 #define RK3288_GMAC_RMII_CLK_2_5M	GRF_CLR_BIT(11)
-#define RK3288_GMAC_CLK_125M		(GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
-#define RK3288_GMAC_CLK_25M		(GRF_BIT(12) | GRF_BIT(13))
-#define RK3288_GMAC_CLK_2_5M		(GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3288_GMAC_CLK_125M		GRF_FIELD_CONST(13, 12, 0)
+#define RK3288_GMAC_CLK_25M		GRF_FIELD_CONST(13, 12, 3)
+#define RK3288_GMAC_CLK_2_5M		GRF_FIELD_CONST(13, 12, 2)
 #define RK3288_GMAC_RMII_MODE		GRF_BIT(14)
 #define RK3288_GMAC_RMII_MODE_CLR	GRF_CLR_BIT(14)
 
@@ -456,14 +449,14 @@ static const struct rk_gmac_ops rk3228_ops = {
 #define RK3288_GMAC_TXCLK_DLY_DISABLE	GRF_CLR_BIT(14)
 #define RK3288_GMAC_RXCLK_DLY_ENABLE	GRF_BIT(15)
 #define RK3288_GMAC_RXCLK_DLY_DISABLE	GRF_CLR_BIT(15)
-#define RK3288_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3288_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3288_GMAC_CLK_RX_DL_CFG(val)	GRF_FIELD(13, 7, val)
+#define RK3288_GMAC_CLK_TX_DL_CFG(val)	GRF_FIELD(6, 0, val)
 
 static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
				int tx_delay, int rx_delay)
 {
 	regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
-		     RK3288_GMAC_PHY_INTF_SEL_RGMII |
+		     RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
		     RK3288_GMAC_RMII_MODE_CLR);
 	regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
		     DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
@@ -474,7 +467,8 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
 static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
 {
 	regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
-		     RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
+		     RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+		     RK3288_GMAC_RMII_MODE);
 }
 
 static const struct rk_reg_speed_data rk3288_reg_speed_data = {
@@ -501,8 +495,7 @@ static const struct rk_gmac_ops rk3288_ops = {
 #define RK3308_GRF_MAC_CON0	0x04a0
 
 /* RK3308_GRF_MAC_CON0 */
-#define RK3308_GMAC_PHY_INTF_SEL_RMII	(GRF_CLR_BIT(2) | GRF_CLR_BIT(3) | \
					 GRF_BIT(4))
+#define RK3308_GMAC_PHY_INTF_SEL(val)	GRF_FIELD(4, 2, val)
 #define RK3308_GMAC_FLOW_CTRL		GRF_BIT(3)
 #define RK3308_GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(3)
 #define RK3308_GMAC_SPEED_10M		GRF_CLR_BIT(0)
@@ -511,7 +504,7 @@ static const struct rk_gmac_ops rk3288_ops = {
 static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
 {
 	regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
-		     RK3308_GMAC_PHY_INTF_SEL_RMII);
+		     RK3308_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
 }
 
 static const struct rk_reg_speed_data rk3308_reg_speed_data = {
@@ -537,23 +530,20 @@ static const struct rk_gmac_ops rk3308_ops = {
 #define RK3328_GRF_MACPHY_CON1		0xb04
 
 /* RK3328_GRF_MAC_CON0 */
-#define RK3328_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3328_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3328_GMAC_CLK_RX_DL_CFG(val)	GRF_FIELD(13, 7, val)
+#define RK3328_GMAC_CLK_TX_DL_CFG(val)	GRF_FIELD(6, 0, val)
 
 /* RK3328_GRF_MAC_CON1 */
-#define RK3328_GMAC_PHY_INTF_SEL_RGMII	\
		(GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3328_GMAC_PHY_INTF_SEL_RMII	\
		(GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3328_GMAC_PHY_INTF_SEL(val)	GRF_FIELD(6, 4, val)
 #define RK3328_GMAC_FLOW_CTRL		GRF_BIT(3)
 #define RK3328_GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(3)
 #define RK3328_GMAC_SPEED_10M		GRF_CLR_BIT(2)
 #define RK3328_GMAC_SPEED_100M		GRF_BIT(2)
 #define RK3328_GMAC_RMII_CLK_25M	GRF_BIT(7)
 #define RK3328_GMAC_RMII_CLK_2_5M	GRF_CLR_BIT(7)
-#define RK3328_GMAC_CLK_125M		(GRF_CLR_BIT(11) | GRF_CLR_BIT(12))
-#define RK3328_GMAC_CLK_25M		(GRF_BIT(11) | GRF_BIT(12))
-#define RK3328_GMAC_CLK_2_5M		(GRF_CLR_BIT(11) | GRF_BIT(12))
+#define RK3328_GMAC_CLK_125M		GRF_FIELD_CONST(12, 11, 0)
+#define RK3328_GMAC_CLK_25M		GRF_FIELD_CONST(12, 11, 3)
+#define RK3328_GMAC_CLK_2_5M		GRF_FIELD_CONST(12, 11, 2)
 #define RK3328_GMAC_RMII_MODE		GRF_BIT(9)
 #define RK3328_GMAC_RMII_MODE_CLR	GRF_CLR_BIT(9)
 #define RK3328_GMAC_TXCLK_DLY_ENABLE	GRF_BIT(0)
@@ -566,7 +556,7 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
				int tx_delay, int rx_delay)
 {
 	regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
-		     RK3328_GMAC_PHY_INTF_SEL_RGMII |
+		     RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
		     RK3328_GMAC_RMII_MODE_CLR |
		     RK3328_GMAC_RXCLK_DLY_ENABLE |
		     RK3328_GMAC_TXCLK_DLY_ENABLE);
@@ -584,7 +574,7 @@ static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
		RK3328_GRF_MAC_CON1;
 
 	regmap_write(bsp_priv->grf, reg,
-		     RK3328_GMAC_PHY_INTF_SEL_RMII |
+		     RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
		     RK3328_GMAC_RMII_MODE);
 }
 
@@ -630,19 +620,16 @@ static const struct rk_gmac_ops rk3328_ops = {
 #define RK3366_GRF_SOC_CON7	0x041c
 
 /* RK3366_GRF_SOC_CON6 */
-#define RK3366_GMAC_PHY_INTF_SEL_RGMII	(GRF_BIT(9) | GRF_CLR_BIT(10) | \
					 GRF_CLR_BIT(11))
-#define RK3366_GMAC_PHY_INTF_SEL_RMII	(GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
					 GRF_BIT(11))
+#define RK3366_GMAC_PHY_INTF_SEL(val)	GRF_FIELD(11, 9, val)
 #define RK3366_GMAC_FLOW_CTRL		GRF_BIT(8)
 #define RK3366_GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(8)
 #define RK3366_GMAC_SPEED_10M		GRF_CLR_BIT(7)
 #define RK3366_GMAC_SPEED_100M		GRF_BIT(7)
 #define RK3366_GMAC_RMII_CLK_25M	GRF_BIT(3)
 #define RK3366_GMAC_RMII_CLK_2_5M	GRF_CLR_BIT(3)
-#define RK3366_GMAC_CLK_125M		(GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3366_GMAC_CLK_25M		(GRF_BIT(4) | GRF_BIT(5))
-#define RK3366_GMAC_CLK_2_5M		(GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3366_GMAC_CLK_125M		GRF_FIELD_CONST(5, 4, 0)
+#define RK3366_GMAC_CLK_25M		GRF_FIELD_CONST(5, 4, 3)
+#define RK3366_GMAC_CLK_2_5M		GRF_FIELD_CONST(5, 4, 2)
 #define RK3366_GMAC_RMII_MODE		GRF_BIT(6)
 #define RK3366_GMAC_RMII_MODE_CLR	GRF_CLR_BIT(6)
 
@@ -651,14 +638,14 @@ static const struct rk_gmac_ops rk3328_ops = {
 #define RK3366_GMAC_TXCLK_DLY_DISABLE	GRF_CLR_BIT(7)
 #define RK3366_GMAC_RXCLK_DLY_ENABLE	GRF_BIT(15)
 #define RK3366_GMAC_RXCLK_DLY_DISABLE	GRF_CLR_BIT(15)
-#define RK3366_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3366_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3366_GMAC_CLK_RX_DL_CFG(val)	GRF_FIELD(14, 8, val)
+#define RK3366_GMAC_CLK_TX_DL_CFG(val)	GRF_FIELD(6, 0, val)
 
 static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
				int tx_delay, int rx_delay)
 {
 	regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
-		     RK3366_GMAC_PHY_INTF_SEL_RGMII |
+		     RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
		     RK3366_GMAC_RMII_MODE_CLR);
 	regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
		     DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
@@ -669,7 +656,8 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
 static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
 {
 	regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
-		     RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
+		     RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+		     RK3366_GMAC_RMII_MODE);
 }
 
 static const struct rk_reg_speed_data rk3366_reg_speed_data = {
@@ -697,19 +685,16 @@ static const struct rk_gmac_ops rk3366_ops = {
 #define RK3368_GRF_SOC_CON16	0x0440
 
 /* RK3368_GRF_SOC_CON15 */
-#define RK3368_GMAC_PHY_INTF_SEL_RGMII	(GRF_BIT(9) | GRF_CLR_BIT(10) | \
					 GRF_CLR_BIT(11))
-#define RK3368_GMAC_PHY_INTF_SEL_RMII	(GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
					 GRF_BIT(11))
+#define RK3368_GMAC_PHY_INTF_SEL(val)	GRF_FIELD(11, 9, val)
 #define RK3368_GMAC_FLOW_CTRL		GRF_BIT(8)
 #define RK3368_GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(8)
 #define RK3368_GMAC_SPEED_10M		GRF_CLR_BIT(7)
 #define RK3368_GMAC_SPEED_100M		GRF_BIT(7)
 #define RK3368_GMAC_RMII_CLK_25M	GRF_BIT(3)
 #define RK3368_GMAC_RMII_CLK_2_5M	GRF_CLR_BIT(3)
-#define RK3368_GMAC_CLK_125M		(GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3368_GMAC_CLK_25M		(GRF_BIT(4) | GRF_BIT(5))
-#define RK3368_GMAC_CLK_2_5M		(GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3368_GMAC_CLK_125M		GRF_FIELD_CONST(5, 4, 0)
+#define RK3368_GMAC_CLK_25M		GRF_FIELD_CONST(5, 4, 3)
+#define RK3368_GMAC_CLK_2_5M		GRF_FIELD_CONST(5, 4, 2)
 #define RK3368_GMAC_RMII_MODE		GRF_BIT(6)
 #define RK3368_GMAC_RMII_MODE_CLR	GRF_CLR_BIT(6)
 
@@ -718,14 +703,14 @@ static const struct rk_gmac_ops rk3366_ops = {
 #define RK3368_GMAC_TXCLK_DLY_DISABLE	GRF_CLR_BIT(7)
 #define RK3368_GMAC_RXCLK_DLY_ENABLE	GRF_BIT(15)
 #define RK3368_GMAC_RXCLK_DLY_DISABLE	GRF_CLR_BIT(15)
-#define RK3368_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3368_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3368_GMAC_CLK_RX_DL_CFG(val)	GRF_FIELD(14, 8, val)
+#define RK3368_GMAC_CLK_TX_DL_CFG(val)	GRF_FIELD(6, 0, val)
 
 static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
				int tx_delay, int rx_delay)
 {
 	regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
-		     RK3368_GMAC_PHY_INTF_SEL_RGMII |
+		     RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
		     RK3368_GMAC_RMII_MODE_CLR);
 	regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
		     DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
@@ -736,7 +721,8 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
 static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
 {
 	regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
-		     RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
+		     RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+		     RK3368_GMAC_RMII_MODE);
 }
 
 static const struct rk_reg_speed_data rk3368_reg_speed_data = {
@@ -764,19 +750,16 @@ static const struct rk_gmac_ops rk3368_ops = {
 #define RK3399_GRF_SOC_CON6	0xc218
 
 /* RK3399_GRF_SOC_CON5 */
-#define RK3399_GMAC_PHY_INTF_SEL_RGMII	(GRF_BIT(9) | GRF_CLR_BIT(10) | \
					 GRF_CLR_BIT(11))
-#define RK3399_GMAC_PHY_INTF_SEL_RMII	(GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
					 GRF_BIT(11))
+#define RK3399_GMAC_PHY_INTF_SEL(val)	GRF_FIELD(11, 9, val)
 #define RK3399_GMAC_FLOW_CTRL		GRF_BIT(8)
 #define RK3399_GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(8)
 #define RK3399_GMAC_SPEED_10M		GRF_CLR_BIT(7)
 #define RK3399_GMAC_SPEED_100M		GRF_BIT(7)
 #define RK3399_GMAC_RMII_CLK_25M	GRF_BIT(3)
 #define RK3399_GMAC_RMII_CLK_2_5M	GRF_CLR_BIT(3)
-#define RK3399_GMAC_CLK_125M		(GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3399_GMAC_CLK_25M		(GRF_BIT(4) | GRF_BIT(5))
-#define RK3399_GMAC_CLK_2_5M		(GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3399_GMAC_CLK_125M		GRF_FIELD_CONST(5, 4, 0)
+#define RK3399_GMAC_CLK_25M		GRF_FIELD_CONST(5, 4, 3)
+#define RK3399_GMAC_CLK_2_5M		GRF_FIELD_CONST(5, 4, 2)
 #define RK3399_GMAC_RMII_MODE		GRF_BIT(6)
 #define RK3399_GMAC_RMII_MODE_CLR	GRF_CLR_BIT(6)
 
@@ -785,14 +768,14 @@ static const struct rk_gmac_ops rk3368_ops = {
 #define RK3399_GMAC_TXCLK_DLY_DISABLE	GRF_CLR_BIT(7)
 #define RK3399_GMAC_RXCLK_DLY_ENABLE	GRF_BIT(15)
 #define RK3399_GMAC_RXCLK_DLY_DISABLE	GRF_CLR_BIT(15)
-#define RK3399_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3399_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3399_GMAC_CLK_RX_DL_CFG(val)	GRF_FIELD(14, 8, val)
+#define RK3399_GMAC_CLK_TX_DL_CFG(val)	GRF_FIELD(6, 0, val)
 
 static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
				int tx_delay, int rx_delay)
 {
 	regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
-		     RK3399_GMAC_PHY_INTF_SEL_RGMII |
+		     RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
		     RK3399_GMAC_RMII_MODE_CLR);
 	regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
		     DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
@@ -803,7 +786,8 @@ static void 
rk3399_set_to_rmii(struct rk_priv_data *bsp_priv) { regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5, - RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE); + RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) | + RK3399_GMAC_RMII_MODE); } static const struct rk_reg_speed_data rk3399_reg_speed_data = { @@ -827,6 +811,69 @@ static const struct rk_gmac_ops rk3399_ops = { .set_speed = rk3399_set_speed, }; +#define RK3506_GRF_SOC_CON8 0x0020 +#define RK3506_GRF_SOC_CON11 0x002c + +#define RK3506_GMAC_RMII_MODE GRF_BIT(1) + +#define RK3506_GMAC_CLK_RMII_DIV2 GRF_BIT(3) +#define RK3506_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(3) + +#define RK3506_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(5) +#define RK3506_GMAC_CLK_SELECT_IO GRF_BIT(5) + +#define RK3506_GMAC_CLK_RMII_GATE GRF_BIT(2) +#define RK3506_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(2) + +static void rk3506_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + unsigned int id = bsp_priv->id, offset; + + offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8; + regmap_write(bsp_priv->grf, offset, RK3506_GMAC_RMII_MODE); +} + +static const struct rk_reg_speed_data rk3506_reg_speed_data = { + .rmii_10 = RK3506_GMAC_CLK_RMII_DIV20, + .rmii_100 = RK3506_GMAC_CLK_RMII_DIV2, +}; + +static int rk3506_set_speed(struct rk_priv_data *bsp_priv, + phy_interface_t interface, int speed) +{ + unsigned int id = bsp_priv->id, offset; + + offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8; + return rk_set_reg_speed(bsp_priv, &rk3506_reg_speed_data, + offset, interface, speed); +} + +static void rk3506_set_clock_selection(struct rk_priv_data *bsp_priv, + bool input, bool enable) +{ + unsigned int value, offset, id = bsp_priv->id; + + offset = (id == 1) ? RK3506_GRF_SOC_CON11 : RK3506_GRF_SOC_CON8; + + value = input ? RK3506_GMAC_CLK_SELECT_IO : + RK3506_GMAC_CLK_SELECT_CRU; + value |= enable ? 
RK3506_GMAC_CLK_RMII_NOGATE : + RK3506_GMAC_CLK_RMII_GATE; + regmap_write(bsp_priv->grf, offset, value); +} + +static const struct rk_gmac_ops rk3506_ops = { + .set_to_rmii = rk3506_set_to_rmii, + .set_speed = rk3506_set_speed, + .set_clock_selection = rk3506_set_clock_selection, + .regs_valid = true, + .regs = { + 0xff4c8000, /* gmac0 */ + 0xff4d0000, /* gmac1 */ + 0x0, /* sentinel */ + }, +}; + #define RK3528_VO_GRF_GMAC_CON 0x0018 #define RK3528_VO_GRF_MACPHY_CON0 0x001c #define RK3528_VO_GRF_MACPHY_CON1 0x0020 @@ -838,8 +885,8 @@ static const struct rk_gmac_ops rk3399_ops = { #define RK3528_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14) #define RK3528_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14) -#define RK3528_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8) -#define RK3528_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0) +#define RK3528_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(15, 8, val) +#define RK3528_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(7, 0, val) #define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1) #define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8) @@ -853,9 +900,9 @@ static const struct rk_gmac_ops rk3399_ops = { #define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10) #define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10) -#define RK3528_GMAC1_CLK_RGMII_DIV1 (GRF_CLR_BIT(11) | GRF_CLR_BIT(10)) -#define RK3528_GMAC1_CLK_RGMII_DIV5 (GRF_BIT(11) | GRF_BIT(10)) -#define RK3528_GMAC1_CLK_RGMII_DIV50 (GRF_BIT(11) | GRF_CLR_BIT(10)) +#define RK3528_GMAC1_CLK_RGMII_DIV1 GRF_FIELD_CONST(11, 10, 0) +#define RK3528_GMAC1_CLK_RGMII_DIV5 GRF_FIELD_CONST(11, 10, 3) +#define RK3528_GMAC1_CLK_RGMII_DIV50 GRF_FIELD_CONST(11, 10, 2) #define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2) #define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2) @@ -966,10 +1013,7 @@ static const struct rk_gmac_ops rk3528_ops = { #define RK3568_GRF_GMAC1_CON1 0x038c /* RK3568_GRF_GMAC0_CON1 && RK3568_GRF_GMAC1_CON1 */ -#define RK3568_GMAC_PHY_INTF_SEL_RGMII \ - (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6)) -#define RK3568_GMAC_PHY_INTF_SEL_RMII \ - (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6)) +#define RK3568_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val) #define RK3568_GMAC_FLOW_CTRL GRF_BIT(3) #define RK3568_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3) #define RK3568_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1) @@ -978,8 +1022,8 @@ static const struct rk_gmac_ops rk3528_ops = { #define RK3568_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0) /* RK3568_GRF_GMAC0_CON0 && RK3568_GRF_GMAC1_CON0 */ -#define RK3568_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8) -#define RK3568_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) +#define RK3568_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val) +#define RK3568_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val) static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) @@ -996,7 +1040,7 @@ static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv, RK3568_GMAC_CLK_TX_DL_CFG(tx_delay)); regmap_write(bsp_priv->grf, con1, - RK3568_GMAC_PHY_INTF_SEL_RGMII | + RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) | RK3568_GMAC_RXCLK_DLY_ENABLE | RK3568_GMAC_TXCLK_DLY_ENABLE); } @@ -1007,7 +1051,8 @@ static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv) con1 = (bsp_priv->id == 1) ? 
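rk3506_set_clock_selection() above can fold the clock-source and gate fields into a single regmap_write() precisely because of the hiword write-enable convention sketched earlier: only the bits named in the value's high half are touched. Illustrative calls (hypothetical call sites, not from this patch):

	/* select the CRU-generated RMII clock and ungate it */
	rk3506_set_clock_selection(bsp_priv, false, true);

	/* gate it again, e.g. across a suspend cycle */
	rk3506_set_clock_selection(bsp_priv, false, false);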
RK3568_GRF_GMAC1_CON1 : RK3568_GRF_GMAC0_CON1; - regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII); + regmap_write(bsp_priv->grf, con1, + RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII)); } static const struct rk_gmac_ops rk3568_ops = { @@ -1033,8 +1078,8 @@ static const struct rk_gmac_ops rk3568_ops = { #define RK3576_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7) #define RK3576_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7) -#define RK3576_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8) -#define RK3576_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) +#define RK3576_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val) +#define RK3576_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val) /* SDGMAC_GRF */ #define RK3576_GRF_GMAC_CON0 0X0020 @@ -1049,12 +1094,9 @@ static const struct rk_gmac_ops rk3568_ops = { #define RK3576_GMAC_CLK_RMII_DIV2 GRF_BIT(5) #define RK3576_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(5) -#define RK3576_GMAC_CLK_RGMII_DIV1 \ - (GRF_CLR_BIT(6) | GRF_CLR_BIT(5)) -#define RK3576_GMAC_CLK_RGMII_DIV5 \ - (GRF_BIT(6) | GRF_BIT(5)) -#define RK3576_GMAC_CLK_RGMII_DIV50 \ - (GRF_BIT(6) | GRF_CLR_BIT(5)) +#define RK3576_GMAC_CLK_RGMII_DIV1 GRF_FIELD_CONST(6, 5, 0) +#define RK3576_GMAC_CLK_RGMII_DIV5 GRF_FIELD_CONST(6, 5, 3) +#define RK3576_GMAC_CLK_RGMII_DIV50 GRF_FIELD_CONST(6, 5, 2) #define RK3576_GMAC_CLK_RMII_GATE GRF_BIT(4) #define RK3576_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(4) @@ -1157,17 +1199,15 @@ static const struct rk_gmac_ops rk3576_ops = { #define RK3588_GMAC_TXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 2) #define RK3588_GMAC_TXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 2) -#define RK3588_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8) -#define RK3588_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0) +#define RK3588_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(15, 8, val) +#define RK3588_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(7, 0, val) /* php_grf */ #define RK3588_GRF_GMAC_CON0 0X0008 #define RK3588_GRF_CLK_CON1 0X0070 -#define RK3588_GMAC_PHY_INTF_SEL_RGMII(id) \ - (GRF_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_CLR_BIT(5 + (id) * 6)) -#define RK3588_GMAC_PHY_INTF_SEL_RMII(id) \ - (GRF_CLR_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_BIT(5 + (id) * 6)) +#define RK3588_GMAC_PHY_INTF_SEL(id, val) \ + (GRF_FIELD(5, 3, val) << ((id) * 6)) #define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id)) #define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id)) @@ -1179,11 +1219,11 @@ static const struct rk_gmac_ops rk3576_ops = { #define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2) #define RK3588_GMAC_CLK_RGMII_DIV1(id) \ - (GRF_CLR_BIT(5 * (id) + 2) | GRF_CLR_BIT(5 * (id) + 3)) + (GRF_FIELD_CONST(3, 2, 0) << ((id) * 5)) #define RK3588_GMAC_CLK_RGMII_DIV5(id) \ - (GRF_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3)) + (GRF_FIELD_CONST(3, 2, 3) << ((id) * 5)) #define RK3588_GMAC_CLK_RGMII_DIV50(id) \ - (GRF_CLR_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3)) + (GRF_FIELD_CONST(3, 2, 2) << ((id) * 5)) #define RK3588_GMAC_CLK_RMII_GATE(id) GRF_BIT(5 * (id) + 1) #define RK3588_GMAC_CLK_RMII_NOGATE(id) GRF_CLR_BIT(5 * (id) + 1) @@ -1197,7 +1237,7 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv, RK3588_GRF_GMAC_CON8; regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0, - RK3588_GMAC_PHY_INTF_SEL_RGMII(id)); + RK3588_GMAC_PHY_INTF_SEL(id, PHY_INTF_SEL_RGMII)); regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, RK3588_GMAC_CLK_RGMII_MODE(id)); @@ -1214,7 +1254,7 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv) { 
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0, - RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id)); + RK3588_GMAC_PHY_INTF_SEL(bsp_priv->id, PHY_INTF_SEL_RMII)); regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id)); @@ -1284,8 +1324,7 @@ static const struct rk_gmac_ops rk3588_ops = { #define RV1108_GRF_GMAC_CON0 0X0900 /* RV1108_GRF_GMAC_CON0 */ -#define RV1108_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \ - GRF_BIT(6)) +#define RV1108_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val) #define RV1108_GMAC_FLOW_CTRL GRF_BIT(3) #define RV1108_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3) #define RV1108_GMAC_SPEED_10M GRF_CLR_BIT(2) @@ -1296,7 +1335,7 @@ static const struct rk_gmac_ops rk3588_ops = { static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv) { regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0, - RV1108_GMAC_PHY_INTF_SEL_RMII); + RV1108_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII)); } static const struct rk_reg_speed_data rv1108_reg_speed_data = { @@ -1321,10 +1360,7 @@ static const struct rk_gmac_ops rv1108_ops = { #define RV1126_GRF_GMAC_CON2 0X0078 /* RV1126_GRF_GMAC_CON0 */ -#define RV1126_GMAC_PHY_INTF_SEL_RGMII \ - (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6)) -#define RV1126_GMAC_PHY_INTF_SEL_RMII \ - (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6)) +#define RV1126_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val) #define RV1126_GMAC_FLOW_CTRL GRF_BIT(7) #define RV1126_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(7) #define RV1126_GMAC_M0_RXCLK_DLY_ENABLE GRF_BIT(1) @@ -1337,17 +1373,17 @@ static const struct rk_gmac_ops rv1108_ops = { #define RV1126_GMAC_M1_TXCLK_DLY_DISABLE GRF_CLR_BIT(2) /* RV1126_GRF_GMAC_CON1 */ -#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8) -#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) +#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val) +#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val) /* RV1126_GRF_GMAC_CON2 */ -#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8) -#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) +#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val) +#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val) static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0, - RV1126_GMAC_PHY_INTF_SEL_RGMII | + RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) | RV1126_GMAC_M0_RXCLK_DLY_ENABLE | RV1126_GMAC_M0_TXCLK_DLY_ENABLE | RV1126_GMAC_M1_RXCLK_DLY_ENABLE | @@ -1365,7 +1401,7 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv) { regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0, - RV1126_GMAC_PHY_INTF_SEL_RMII); + RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII)); } static const struct rk_gmac_ops rv1126_ops = { @@ -1699,8 +1735,7 @@ static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i, struct rk_priv_data *bsp_priv = bsp_priv_; if (bsp_priv->ops->set_speed) - return bsp_priv->ops->set_speed(bsp_priv, bsp_priv->phy_iface, - speed); + return bsp_priv->ops->set_speed(bsp_priv, interface, speed); return -EINVAL; } @@ -1727,6 +1762,22 @@ static int rk_gmac_resume(struct device *dev, void *bsp_priv_) return 0; } +static int rk_gmac_init(struct device *dev, void *bsp_priv) +{ + return rk_gmac_powerup(bsp_priv); +} + +static void rk_gmac_exit(struct device *dev, void *bsp_priv_) +{ + struct stmmac_priv *priv = 
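The rk_gmac hunks around this point convert the driver from an open-coded probe/remove pair to plat_dat->init/exit callbacks plus devm_stmmac_pltfr_probe(), which runs the init hook before the core probe and tears everything down via devres. A minimal sketch of glue code under the reworked hook signatures; the "foo" names are hypothetical, only the shape is taken from the hunks:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include "stmmac_platform.h"

struct foo_priv {
	struct clk *clk;
};

static int foo_dwmac_init(struct device *dev, void *bsp_priv)
{
	struct foo_priv *foo = bsp_priv;

	/* bring up clocks/power before the core touches the MAC */
	return clk_prepare_enable(foo->clk);
}

static void foo_dwmac_exit(struct device *dev, void *bsp_priv)
{
	struct foo_priv *foo = bsp_priv;

	clk_disable_unprepare(foo->clk);
}

static int foo_dwmac_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat_dat;
	struct stmmac_resources stmmac_res;
	struct foo_priv *foo;
	int ret;

	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
	if (ret)
		return ret;

	plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
	if (IS_ERR(plat_dat))
		return PTR_ERR(plat_dat);

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(foo->clk))
		return PTR_ERR(foo->clk);

	plat_dat->bsp_priv = foo;
	plat_dat->init = foo_dwmac_init;
	plat_dat->exit = foo_dwmac_exit;

	/* runs ->init(), probes the core, and undoes both on teardown */
	return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}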
netdev_priv(dev_get_drvdata(dev)); + struct rk_priv_data *bsp_priv = bsp_priv_; + + rk_gmac_powerdown(bsp_priv); + + if (priv->plat->phy_node && bsp_priv->integrated_phy) + clk_put(bsp_priv->clk_phy); +} + static int rk_gmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -1751,14 +1802,16 @@ static int rk_gmac_probe(struct platform_device *pdev) /* If the stmmac is not already selected as gmac4, * then make sure we fallback to gmac. */ - if (!plat_dat->has_gmac4) { - plat_dat->has_gmac = true; + if (plat_dat->core_type != DWMAC_CORE_GMAC4) { + plat_dat->core_type = DWMAC_CORE_GMAC; plat_dat->rx_fifo_size = 4096; plat_dat->tx_fifo_size = 2048; } plat_dat->get_interfaces = rk_get_interfaces; plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate; + plat_dat->init = rk_gmac_init; + plat_dat->exit = rk_gmac_exit; plat_dat->suspend = rk_gmac_suspend; plat_dat->resume = rk_gmac_resume; @@ -1770,33 +1823,7 @@ static int rk_gmac_probe(struct platform_device *pdev) if (ret) return ret; - ret = rk_gmac_powerup(plat_dat->bsp_priv); - if (ret) - return ret; - - ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) - goto err_gmac_powerdown; - - return 0; - -err_gmac_powerdown: - rk_gmac_powerdown(plat_dat->bsp_priv); - - return ret; -} - -static void rk_gmac_remove(struct platform_device *pdev) -{ - struct stmmac_priv *priv = netdev_priv(platform_get_drvdata(pdev)); - struct rk_priv_data *bsp_priv = priv->plat->bsp_priv; - - stmmac_dvr_remove(&pdev->dev); - - rk_gmac_powerdown(bsp_priv); - - if (priv->plat->phy_node && bsp_priv->integrated_phy) - clk_put(bsp_priv->clk_phy); + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } static const struct of_device_id rk_gmac_dwmac_match[] = { @@ -1809,6 +1836,7 @@ static const struct of_device_id rk_gmac_dwmac_match[] = { { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops }, { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops }, + { .compatible = "rockchip,rk3506-gmac", .data = &rk3506_ops }, { .compatible = "rockchip,rk3528-gmac", .data = &rk3528_ops }, { .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops }, { .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops }, @@ -1821,7 +1849,6 @@ MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match); static struct platform_driver rk_gmac_dwmac_driver = { .probe = rk_gmac_probe, - .remove = rk_gmac_remove, .driver = { .name = "rk_gmac-dwmac", .pm = &stmmac_simple_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c index 221539d760bc..5a485ee98fa7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c @@ -24,10 +24,10 @@ #define GMAC_INTF_RATE_125M 125000000 /* 125MHz */ /* SoC PHY interface control register */ -#define PHY_INTF_SEL_MII 0x00 -#define PHY_INTF_SEL_SGMII 0x01 -#define PHY_INTF_SEL_RGMII 0x02 -#define PHY_INTF_SEL_RMII 0x08 +#define S32_PHY_INTF_SEL_MII 0x00 +#define S32_PHY_INTF_SEL_SGMII 0x01 +#define S32_PHY_INTF_SEL_RGMII 0x02 +#define S32_PHY_INTF_SEL_RMII 0x08 struct s32_priv_data { void __iomem *ioaddr; @@ -40,14 +40,14 @@ struct s32_priv_data { static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac) { - writel(PHY_INTF_SEL_RGMII, gmac->ctrl_sts); + writel(S32_PHY_INTF_SEL_RGMII, gmac->ctrl_sts); dev_dbg(gmac->dev, "PHY mode set to %s\n", phy_modes(*gmac->intf_mode)); return 0; } -static int s32_gmac_init(struct platform_device 
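The has_gmac/has_gmac4/has_xgmac booleans in plat_stmmacenet_data were mutually exclusive; this series folds them into a single plat->core_type selector, which is what the rk_gmac_probe hunk above and several later hunks switch to. The three named constants appear in this diff; the enum's shape and its zero default are assumptions:

enum dwmac_core_type {
	DWMAC_CORE_MAC100 = 0,	/* assumed default, the original DWMAC100 */
	DWMAC_CORE_GMAC,	/* dwmac1000 */
	DWMAC_CORE_GMAC4,	/* dwmac4/dwmac5 */
	DWMAC_CORE_XGMAC,	/* dwxgmac2 */
};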
*pdev, void *priv) +static int s32_gmac_init(struct device *dev, void *priv) { struct s32_priv_data *gmac = priv; int ret; @@ -55,31 +55,31 @@ static int s32_gmac_init(struct platform_device *pdev, void *priv) /* Set initial TX interface clock */ ret = clk_prepare_enable(gmac->tx_clk); if (ret) { - dev_err(&pdev->dev, "Can't enable tx clock\n"); + dev_err(dev, "Can't enable tx clock\n"); return ret; } ret = clk_set_rate(gmac->tx_clk, GMAC_INTF_RATE_125M); if (ret) { - dev_err(&pdev->dev, "Can't set tx clock\n"); + dev_err(dev, "Can't set tx clock\n"); goto err_tx_disable; } /* Set initial RX interface clock */ ret = clk_prepare_enable(gmac->rx_clk); if (ret) { - dev_err(&pdev->dev, "Can't enable rx clock\n"); + dev_err(dev, "Can't enable rx clock\n"); goto err_tx_disable; } ret = clk_set_rate(gmac->rx_clk, GMAC_INTF_RATE_125M); if (ret) { - dev_err(&pdev->dev, "Can't set rx clock\n"); + dev_err(dev, "Can't set rx clock\n"); goto err_txrx_disable; } /* Set interface mode */ ret = s32_gmac_write_phy_intf_select(gmac); if (ret) { - dev_err(&pdev->dev, "Can't set PHY interface mode\n"); + dev_err(dev, "Can't set PHY interface mode\n"); goto err_txrx_disable; } @@ -92,7 +92,7 @@ err_tx_disable: return ret; } -static void s32_gmac_exit(struct platform_device *pdev, void *priv) +static void s32_gmac_exit(struct device *dev, void *priv) { struct s32_priv_data *gmac = priv; @@ -146,7 +146,7 @@ static int s32_dwmac_probe(struct platform_device *pdev) gmac->ioaddr = res.addr; /* S32CC core feature set */ - plat->has_gmac4 = true; + plat->core_type = DWMAC_CORE_GMAC4; plat->pmt = 1; plat->flags |= STMMAC_FLAG_SPH_DISABLE; plat->rx_fifo_size = 20480; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 354f01184e6c..a2b52d2c4eb6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -5,6 +5,7 @@ */ #include <linux/mfd/altera-sysmgr.h> +#include <linux/clocksource_ids.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_net.h> @@ -15,8 +16,10 @@ #include <linux/reset.h> #include <linux/stmmac.h> +#include "dwxgmac2.h" #include "stmmac.h" #include "stmmac_platform.h" +#include "stmmac_ptp.h" #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 @@ -41,9 +44,17 @@ #define SGMII_ADAPTER_ENABLE 0x0000 #define SGMII_ADAPTER_DISABLE 0x0001 +#define SMTG_MDIO_ADDR 0x15 +#define SMTG_TSC_WORD0 0xC +#define SMTG_TSC_WORD1 0xD +#define SMTG_TSC_WORD2 0xE +#define SMTG_TSC_WORD3 0xF +#define SMTG_TSC_SHIFT 16 + struct socfpga_dwmac; struct socfpga_dwmac_ops { int (*set_phy_mode)(struct socfpga_dwmac *dwmac_priv); + void (*setup_plat_dat)(struct socfpga_dwmac *dwmac_priv); }; struct socfpga_dwmac { @@ -268,6 +279,112 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val) return 0; } +static void get_smtgtime(struct mii_bus *mii, int smtg_addr, u64 *smtg_time) +{ + u64 ns; + + ns = mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD3); + ns <<= SMTG_TSC_SHIFT; + ns |= mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD2); + ns <<= SMTG_TSC_SHIFT; + ns |= mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD1); + ns <<= SMTG_TSC_SHIFT; + ns |= mdiobus_read(mii, smtg_addr, SMTG_TSC_WORD0); + + *smtg_time = ns; +} + +static int smtg_crosststamp(ktime_t *device, struct system_counterval_t *system, + void *ctx) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)ctx; + u32 num_snapshot, gpio_value, acr_value; + void __iomem 
*ptpaddr = priv->ptpaddr; + void __iomem *ioaddr = priv->hw->pcsr; + unsigned long flags; + u64 smtg_time = 0; + u64 ptp_time = 0; + int i, ret; + u32 v; + + /* Both internal crosstimestamping and external triggered event + * timestamping cannot be run concurrently. + */ + if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN) + return -EBUSY; + + mutex_lock(&priv->aux_ts_lock); + /* Enable Internal snapshot trigger */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value &= ~PTP_ACR_MASK; + switch (priv->plat->int_snapshot_num) { + case AUX_SNAPSHOT0: + acr_value |= PTP_ACR_ATSEN0; + break; + case AUX_SNAPSHOT1: + acr_value |= PTP_ACR_ATSEN1; + break; + case AUX_SNAPSHOT2: + acr_value |= PTP_ACR_ATSEN2; + break; + case AUX_SNAPSHOT3: + acr_value |= PTP_ACR_ATSEN3; + break; + default: + mutex_unlock(&priv->aux_ts_lock); + return -EINVAL; + } + writel(acr_value, ptpaddr + PTP_ACR); + + /* Clear FIFO */ + acr_value = readl(ptpaddr + PTP_ACR); + acr_value |= PTP_ACR_ATSFC; + writel(acr_value, ptpaddr + PTP_ACR); + /* Release the mutex */ + mutex_unlock(&priv->aux_ts_lock); + + /* Trigger Internal snapshot signal. Create a rising edge by just toggle + * the GPO0 to low and back to high. + */ + gpio_value = readl(ioaddr + XGMAC_GPIO_STATUS); + gpio_value &= ~XGMAC_GPIO_GPO0; + writel(gpio_value, ioaddr + XGMAC_GPIO_STATUS); + gpio_value |= XGMAC_GPIO_GPO0; + writel(gpio_value, ioaddr + XGMAC_GPIO_STATUS); + + /* Poll for time sync operation done */ + ret = readl_poll_timeout(priv->ioaddr + XGMAC_INT_STATUS, v, + (v & XGMAC_INT_TSIS), 100, 10000); + if (ret) { + netdev_err(priv->dev, "%s: Wait for time sync operation timeout\n", + __func__); + return ret; + } + + *system = (struct system_counterval_t) { + .cycles = 0, + .cs_id = CSID_ARM_ARCH_COUNTER, + .use_nsecs = false, + }; + + num_snapshot = (readl(ioaddr + XGMAC_TIMESTAMP_STATUS) & + XGMAC_TIMESTAMP_ATSNS_MASK) >> + XGMAC_TIMESTAMP_ATSNS_SHIFT; + + /* Repeat until the timestamps are from the FIFO last segment */ + for (i = 0; i < num_snapshot; i++) { + read_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_ptptime(priv, ptpaddr, &ptp_time); + *device = ns_to_ktime(ptp_time); + read_unlock_irqrestore(&priv->ptp_lock, flags); + } + + get_smtgtime(priv->mii, SMTG_MDIO_ADDR, &smtg_time); + system->cycles = smtg_time; + + return 0; +} + static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac) { struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr; @@ -434,13 +551,50 @@ static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv, return priv->hw->phylink_pcs; } -static int socfpga_dwmac_init(struct platform_device *pdev, void *bsp_priv) +static int socfpga_dwmac_init(struct device *dev, void *bsp_priv) { struct socfpga_dwmac *dwmac = bsp_priv; return dwmac->ops->set_phy_mode(dwmac); } +static void socfpga_gen5_setup_plat_dat(struct socfpga_dwmac *dwmac) +{ + struct plat_stmmacenet_data *plat_dat = dwmac->plat_dat; + + plat_dat->core_type = DWMAC_CORE_GMAC; + + /* Rx watchdog timer in dwmac is buggy in this hw */ + plat_dat->riwt_off = 1; +} + +static void socfpga_agilex5_setup_plat_dat(struct socfpga_dwmac *dwmac) +{ + struct plat_stmmacenet_data *plat_dat = dwmac->plat_dat; + + plat_dat->core_type = DWMAC_CORE_XGMAC; + + /* Enable TSO */ + plat_dat->flags |= STMMAC_FLAG_TSO_EN; + + /* Enable TBS */ + switch (plat_dat->tx_queues_to_use) { + case 8: + plat_dat->tx_queues_cfg[7].tbs_en = true; + fallthrough; + case 7: + plat_dat->tx_queues_cfg[6].tbs_en = true; + break; + default: + /* Tx Queues 0 - 5 doesn't support 
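smtg_crosststamp() above follows the get_device_system_crosststamp() contract: fill *device with the PTP hardware time and *system with a counter value the timekeeping core can correlate (here the ARM architected counter, read back as four 16-bit words over MDIO by get_smtgtime()). A hedged sketch of how such a ->crosststamp() callback is consumed on the PTP_SYS_OFFSET_PRECISE path; the stmmac core's actual wiring may differ in detail:

#include <linux/ptp_clock_kernel.h>
#include <linux/timekeeping.h>
#include "stmmac.h"

static int demo_getcrosststamp(struct ptp_clock_info *ptp,
			       struct system_device_crosststamp *xtstamp)
{
	struct stmmac_priv *priv =
		container_of(ptp, struct stmmac_priv, ptp_clock_ops);

	if (!priv->plat->crosststamp)
		return -EOPNOTSUPP;

	/* Invokes smtg_crosststamp(priv) under the timekeeping core,
	 * which converts the returned counter into system time. */
	return get_device_system_crosststamp(priv->plat->crosststamp,
					     priv, NULL, xtstamp);
}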
TBS on Agilex5 */ + break; + } + + /* Hw supported cross-timestamp */ + plat_dat->int_snapshot_num = AUX_SNAPSHOT0; + plat_dat->crosststamp = smtg_crosststamp; +} + static int socfpga_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -497,25 +651,31 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->pcs_init = socfpga_dwmac_pcs_init; plat_dat->pcs_exit = socfpga_dwmac_pcs_exit; plat_dat->select_pcs = socfpga_dwmac_select_pcs; - plat_dat->has_gmac = true; - plat_dat->riwt_off = 1; + ops->setup_plat_dat(dwmac); return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } static const struct socfpga_dwmac_ops socfpga_gen5_ops = { .set_phy_mode = socfpga_gen5_set_phy_mode, + .setup_plat_dat = socfpga_gen5_setup_plat_dat, }; static const struct socfpga_dwmac_ops socfpga_gen10_ops = { .set_phy_mode = socfpga_gen10_set_phy_mode, + .setup_plat_dat = socfpga_gen5_setup_plat_dat, +}; + +static const struct socfpga_dwmac_ops socfpga_agilex5_ops = { + .set_phy_mode = socfpga_gen10_set_phy_mode, + .setup_plat_dat = socfpga_agilex5_setup_plat_dat, }; static const struct of_device_id socfpga_dwmac_match[] = { { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gen5_ops }, { .compatible = "altr,socfpga-stmmac-a10-s10", .data = &socfpga_gen10_ops }, - { .compatible = "altr,socfpga-stmmac-agilex5", .data = &socfpga_gen10_ops }, + { .compatible = "altr,socfpga-stmmac-agilex5", .data = &socfpga_agilex5_ops }, { } }; MODULE_DEVICE_TABLE(of, socfpga_dwmac_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c index 3b7947a7a7ba..44d4ceb8415f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c @@ -7,11 +7,16 @@ #include <linux/clk.h> #include <linux/module.h> +#include <linux/property.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include "stmmac_platform.h" +struct sophgo_dwmac_data { + bool has_internal_rx_delay; +}; + static int sophgo_sg2044_dwmac_init(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *stmmac_res) @@ -24,7 +29,6 @@ static int sophgo_sg2044_dwmac_init(struct platform_device *pdev, plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE; plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate; plat_dat->multicast_filter_bins = 0; - plat_dat->unicast_filter_entries = 1; return 0; } @@ -32,6 +36,7 @@ static int sophgo_sg2044_dwmac_init(struct platform_device *pdev, static int sophgo_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; + const struct sophgo_dwmac_data *data; struct stmmac_resources stmmac_res; struct device *dev = &pdev->dev; int ret; @@ -50,11 +55,23 @@ static int sophgo_dwmac_probe(struct platform_device *pdev) if (ret) return ret; + data = device_get_match_data(&pdev->dev); + if (data && data->has_internal_rx_delay) { + plat_dat->phy_interface = phy_fix_phy_mode_for_mac_delays(plat_dat->phy_interface, + false, true); + if (plat_dat->phy_interface == PHY_INTERFACE_MODE_NA) + return -EINVAL; + } + return stmmac_dvr_probe(dev, plat_dat, &stmmac_res); } +static const struct sophgo_dwmac_data sg2042_dwmac_data = { + .has_internal_rx_delay = true, +}; + static const struct of_device_id sophgo_dwmac_match[] = { - { .compatible = "sophgo,sg2042-dwmac" }, + { .compatible = "sophgo,sg2042-dwmac", .data = &sg2042_dwmac_data }, { .compatible = "sophgo,sg2044-dwmac" }, { /* sentinel */ } }; diff --git 
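The sg2042 hunk above keys off has_internal_rx_delay to run the new phy_fix_phy_mode_for_mac_delays() helper, telling the PHY layer that the MAC path already inserts the RX delay. A sketch of the semantics this presumably has, with PHY_INTERFACE_MODE_NA as the "cannot be expressed" result the driver checks for; the real helper's exact rules are not shown in this diff:

#include <linux/phy.h>

static phy_interface_t demo_fix_mode_for_mac_rx_delay(phy_interface_t mode)
{
	switch (mode) {
	case PHY_INTERFACE_MODE_RGMII_ID:
		/* the PHY no longer needs to add the RX delay itself */
		return PHY_INTERFACE_MODE_RGMII_TXID;
	case PHY_INTERFACE_MODE_RGMII_RXID:
		return PHY_INTERFACE_MODE_RGMII;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		/* an unwanted second RX delay cannot be removed */
		return PHY_INTERFACE_MODE_NA;
	default:
		return mode;	/* non-RGMII modes are unaffected */
	}
}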
a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c index 6938dd2a79b7..16b955a6d77b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c @@ -15,8 +15,6 @@ #include "stmmac_platform.h" -#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1 -#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4 #define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U #define JH7100_SYSMAIN_REGISTER49_DLYCHAIN 0xc8 @@ -35,25 +33,15 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat) struct starfive_dwmac *dwmac = plat_dat->bsp_priv; struct regmap *regmap; unsigned int args[2]; - unsigned int mode; + int phy_intf_sel; int err; - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RMII: - mode = STARFIVE_DWMAC_PHY_INFT_RMII; - break; - - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - mode = STARFIVE_DWMAC_PHY_INFT_RGMII; - break; - - default: + phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface); + if (phy_intf_sel != PHY_INTF_SEL_RGMII && + phy_intf_sel != PHY_INTF_SEL_RMII) { dev_err(dwmac->dev, "unsupported interface %s\n", phy_modes(plat_dat->phy_interface)); - return -EINVAL; + return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL; } regmap = syscon_regmap_lookup_by_phandle_args(dwmac->dev->of_node, @@ -65,7 +53,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat) /* args[0]:offset args[1]: shift */ err = regmap_update_bits(regmap, args[0], STARFIVE_DWMAC_PHY_INFT_FIELD << args[1], - mode << args[1]); + phy_intf_sel << args[1]); if (err) return dev_err_probe(dwmac->dev, err, "error setting phy mode\n"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index 53d5ce1f6dc6..f50547b67fbc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -77,13 +77,9 @@ * 001-RGMII * 010-SGMII * 100-RMII + * These are the DW MAC phy_intf_sel values. 
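Several glue drivers in this diff (starfive, sti, stm32, visconti) now derive their mux register value from a common stmmac_get_phy_intf_sel() instead of private lookup tables. Its assumed mapping, matching the encoding listed in the comment above (000 GMII/MII, 001 RGMII, 010 SGMII, 100 RMII); this also explains the S32_ prefix added to the NXP S32 defines earlier in the diff, since that SoC's register uses different values (RMII is 0x08 there, not the DW MAC's 0x4):

#include <linux/errno.h>
#include <linux/phy.h>

#define PHY_INTF_SEL_GMII_MII	0x0
#define PHY_INTF_SEL_RGMII	0x1
#define PHY_INTF_SEL_SGMII	0x2
#define PHY_INTF_SEL_RMII	0x4

/* Assumed shape; the in-tree helper may cover more modes. */
static int demo_get_phy_intf_sel(phy_interface_t interface)
{
	switch (interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		return PHY_INTF_SEL_GMII_MII;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		return PHY_INTF_SEL_RGMII;
	case PHY_INTERFACE_MODE_SGMII:
		return PHY_INTF_SEL_SGMII;
	case PHY_INTERFACE_MODE_RMII:
		return PHY_INTF_SEL_RMII;
	default:
		return -EINVAL;
	}
}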
*/ #define MII_PHY_SEL_MASK GENMASK(4, 2) -#define ETH_PHY_SEL_RMII BIT(4) -#define ETH_PHY_SEL_SGMII BIT(3) -#define ETH_PHY_SEL_RGMII BIT(2) -#define ETH_PHY_SEL_GMII 0x0 -#define ETH_PHY_SEL_MII 0x0 struct sti_dwmac { phy_interface_t interface; /* MII interface */ @@ -102,15 +98,6 @@ struct sti_dwmac_of_data { void (*fix_retime_src)(void *priv, int speed, unsigned int mode); }; -static u32 phy_intf_sels[] = { - [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII, - [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII, - [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII, - [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII, - [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII, - [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII, -}; - enum { TX_RETIME_SRC_NA = 0, TX_RETIME_SRC_TXCLK = 1, @@ -159,19 +146,28 @@ static void stih4xx_fix_retime_src(void *priv, int spd, unsigned int mode) stih4xx_tx_retime_val[src]); } -static int sti_dwmac_set_mode(struct sti_dwmac *dwmac) +static int sti_set_phy_intf_sel(void *bsp_priv, u8 phy_intf_sel) { - struct regmap *regmap = dwmac->regmap; - int iface = dwmac->interface; - u32 reg = dwmac->ctrl_reg; - u32 val; + struct sti_dwmac *dwmac = bsp_priv; + struct regmap *regmap; + u32 reg, val; + + regmap = dwmac->regmap; + reg = dwmac->ctrl_reg; if (dwmac->gmac_en) regmap_update_bits(regmap, reg, EN_MASK, EN); - regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]); + if (phy_intf_sel != PHY_INTF_SEL_GMII_MII && + phy_intf_sel != PHY_INTF_SEL_RGMII && + phy_intf_sel != PHY_INTF_SEL_SGMII && + phy_intf_sel != PHY_INTF_SEL_RMII) + phy_intf_sel = PHY_INTF_SEL_GMII_MII; + + regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, + FIELD_PREP(MII_PHY_SEL_MASK, phy_intf_sel)); - val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII; + val = (dwmac->interface == PHY_INTERFACE_MODE_REVMII) ? 
0 : ENMII; regmap_update_bits(regmap, reg, ENMII_MASK, val); dwmac->fix_retime_src(dwmac, dwmac->speed, 0); @@ -233,23 +229,14 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, return 0; } -static int sti_dwmac_init(struct platform_device *pdev, void *bsp_priv) +static int sti_dwmac_init(struct device *dev, void *bsp_priv) { struct sti_dwmac *dwmac = bsp_priv; - int ret; - - ret = clk_prepare_enable(dwmac->clk); - if (ret) - return ret; - - ret = sti_dwmac_set_mode(dwmac); - if (ret) - clk_disable_unprepare(dwmac->clk); - return ret; + return clk_prepare_enable(dwmac->clk); } -static void sti_dwmac_exit(struct platform_device *pdev, void *bsp_priv) +static void sti_dwmac_exit(struct device *dev, void *bsp_priv) { struct sti_dwmac *dwmac = bsp_priv; @@ -291,6 +278,7 @@ static int sti_dwmac_probe(struct platform_device *pdev) dwmac->fix_retime_src = data->fix_retime_src; plat_dat->bsp_priv = dwmac; + plat_dat->set_phy_intf_sel = sti_set_phy_intf_sel; plat_dat->fix_mac_speed = data->fix_retime_src; plat_dat->init = sti_dwmac_init; plat_dat->exit = sti_dwmac_exit; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 6c179911ef3f..e1b260ed4790 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -47,23 +47,18 @@ *------------------------------------------ */ #define SYSCFG_PMCR_ETH_SEL_MII BIT(20) -#define SYSCFG_PMCR_ETH_SEL_RGMII BIT(21) -#define SYSCFG_PMCR_ETH_SEL_RMII BIT(23) -#define SYSCFG_PMCR_ETH_SEL_GMII 0 +#define SYSCFG_PMCR_PHY_INTF_SEL_MASK GENMASK(23, 21) #define SYSCFG_MCU_ETH_SEL_MII 0 #define SYSCFG_MCU_ETH_SEL_RMII 1 /* STM32MP2 register definitions */ #define SYSCFG_MP2_ETH_MASK GENMASK(31, 0) +#define SYSCFG_ETHCR_ETH_SEL_MASK GENMASK(6, 4) #define SYSCFG_ETHCR_ETH_PTP_CLK_SEL BIT(2) #define SYSCFG_ETHCR_ETH_CLK_SEL BIT(1) #define SYSCFG_ETHCR_ETH_REF_CLK_SEL BIT(0) -#define SYSCFG_ETHCR_ETH_SEL_MII 0 -#define SYSCFG_ETHCR_ETH_SEL_RGMII BIT(4) -#define SYSCFG_ETHCR_ETH_SEL_RMII BIT(6) - /* STM32MPx register definitions * * Below table summarizes the clock requirement and clock sources for @@ -232,11 +227,14 @@ static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat) return -EINVAL; } -static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat) +static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat, + u8 phy_intf_sel) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; u32 reg = dwmac->mode_reg; - int val = 0; + int val; + + val = FIELD_PREP(SYSCFG_PMCR_PHY_INTF_SEL_MASK, phy_intf_sel); switch (plat_dat->phy_interface) { case PHY_INTERFACE_MODE_MII: @@ -250,12 +248,10 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat) val |= SYSCFG_PMCR_ETH_SEL_MII; break; case PHY_INTERFACE_MODE_GMII: - val = SYSCFG_PMCR_ETH_SEL_GMII; if (dwmac->enable_eth_ck) val |= SYSCFG_PMCR_ETH_CLK_SEL; break; case PHY_INTERFACE_MODE_RMII: - val = SYSCFG_PMCR_ETH_SEL_RMII; if (dwmac->enable_eth_ck) val |= SYSCFG_PMCR_ETH_REF_CLK_SEL; break; @@ -263,7 +259,6 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat) case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: - val = SYSCFG_PMCR_ETH_SEL_RGMII; if (dwmac->enable_eth_ck) val |= SYSCFG_PMCR_ETH_CLK_SEL; break; @@ -288,18 +283,20 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat) dwmac->mode_mask, val); } -static int 
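stm32mp1_configure_pmcr() above now seeds val with FIELD_PREP(SYSCFG_PMCR_PHY_INTF_SEL_MASK, phy_intf_sel) instead of per-mode defines. A quick demonstration that the new encoding reproduces the old bits, using the generic selector values sketched earlier (demonstration only, not patch code):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/bug.h>

static void demo_pmcr_encoding(void)
{
	/* RGMII: sel = 0x1 -> BIT(21), the old SYSCFG_PMCR_ETH_SEL_RGMII */
	u32 rgmii = FIELD_PREP(GENMASK(23, 21), 0x1);
	/* RMII:  sel = 0x4 -> BIT(23), the old SYSCFG_PMCR_ETH_SEL_RMII */
	u32 rmii = FIELD_PREP(GENMASK(23, 21), 0x4);

	WARN_ON(rgmii != BIT(21) || rmii != BIT(23));
}

The same arithmetic holds for the MP2 path below, where SYSCFG_ETHCR_ETH_SEL_MASK is GENMASK(6, 4) and the old SEL_RGMII/SEL_RMII defines were BIT(4) and BIT(6).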
stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat) +static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat, + u8 phy_intf_sel) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; u32 reg = dwmac->mode_reg; - int val = 0; + int val; + + val = FIELD_PREP(SYSCFG_ETHCR_ETH_SEL_MASK, phy_intf_sel); switch (plat_dat->phy_interface) { case PHY_INTERFACE_MODE_MII: /* ETH_REF_CLK_SEL bit in SYSCFG register is not applicable in MII mode */ break; case PHY_INTERFACE_MODE_RMII: - val = SYSCFG_ETHCR_ETH_SEL_RMII; if (dwmac->enable_eth_ck) { /* Internal clock ETH_CLK of 50MHz from RCC is used */ val |= SYSCFG_ETHCR_ETH_REF_CLK_SEL; @@ -309,8 +306,6 @@ static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat) case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: - val = SYSCFG_ETHCR_ETH_SEL_RGMII; - fallthrough; case PHY_INTERFACE_MODE_GMII: if (dwmac->enable_eth_ck) { /* Internal clock ETH_CLK of 125MHz from RCC is used */ @@ -337,7 +332,7 @@ static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat) static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; - int ret; + int phy_intf_sel, ret; ret = stm32mp1_select_ethck_external(plat_dat); if (ret) @@ -347,10 +342,19 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) if (ret) return ret; + phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface); + if (phy_intf_sel != PHY_INTF_SEL_GMII_MII && + phy_intf_sel != PHY_INTF_SEL_RGMII && + phy_intf_sel != PHY_INTF_SEL_RMII) { + dev_err(dwmac->dev, "Mode %s not supported\n", + phy_modes(plat_dat->phy_interface)); + return phy_intf_sel < 0 ? phy_intf_sel : -EINVAL; + } + if (!dwmac->ops->is_mp2) - return stm32mp1_configure_pmcr(plat_dat); + return stm32mp1_configure_pmcr(plat_dat, phy_intf_sel); else - return stm32mp2_configure_syscfg(plat_dat); + return stm32mp2_configure_syscfg(plat_dat, phy_intf_sel); } static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 5d871b2cd111..8aa496ac85cc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -571,16 +571,16 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = { static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv); -static int sun8i_dwmac_init(struct platform_device *pdev, void *priv) +static int sun8i_dwmac_init(struct device *dev, void *priv) { - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct sunxi_priv_data *gmac = priv; int ret; if (gmac->regulator) { ret = regulator_enable(gmac->regulator); if (ret) { - dev_err(&pdev->dev, "Fail to enable regulator\n"); + dev_err(dev, "Fail to enable regulator\n"); return ret; } } @@ -1005,7 +1005,7 @@ static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac) (H3_EPHY_SHUTDOWN | H3_EPHY_SELECT)); } -static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv) +static void sun8i_dwmac_exit(struct device *dev, void *priv) { struct sunxi_priv_data *gmac = priv; @@ -1040,15 +1040,10 @@ static const struct stmmac_ops sun8i_dwmac_ops = { .set_mac_loopback = sun8i_dwmac_set_mac_loopback, }; -static struct mac_device_info *sun8i_dwmac_setup(void *ppriv) +static int sun8i_dwmac_setup(void *ppriv, struct mac_device_info *mac) { - 
struct mac_device_info *mac; struct stmmac_priv *priv = ppriv; - mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); - if (!mac) - return NULL; - mac->pcsr = priv->ioaddr; mac->mac = &sun8i_dwmac_ops; mac->dma = &sun8i_dwmac_dma_ops; @@ -1079,7 +1074,7 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv) /* Synopsys Id is not available */ priv->synopsys_id = 0; - return mac; + return 0; } static struct regmap *sun8i_dwmac_get_syscon_from_dev(struct device_node *node) @@ -1192,7 +1187,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) plat_dat->bsp_priv = gmac; plat_dat->init = sun8i_dwmac_init; plat_dat->exit = sun8i_dwmac_exit; - plat_dat->setup = sun8i_dwmac_setup; + plat_dat->mac_setup = sun8i_dwmac_setup; plat_dat->tx_fifo_size = 4096; plat_dat->rx_fifo_size = 16384; @@ -1270,7 +1265,7 @@ static void sun8i_dwmac_shutdown(struct platform_device *pdev) struct stmmac_priv *priv = netdev_priv(ndev); struct sunxi_priv_data *gmac = priv->plat->bsp_priv; - sun8i_dwmac_exit(pdev, gmac); + sun8i_dwmac_exit(&pdev->dev, gmac); } static const struct of_device_id sun8i_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 1eadcf5d1ad6..52593ba3a3a3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -27,7 +27,7 @@ struct sunxi_priv_data { #define SUN7I_GMAC_GMII_RGMII_RATE 125000000 #define SUN7I_GMAC_MII_RATE 25000000 -static int sun7i_gmac_init(struct platform_device *pdev, void *priv) +static int sun7i_gmac_init(struct device *dev, void *priv) { struct sunxi_priv_data *gmac = priv; int ret = 0; @@ -58,7 +58,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv) return ret; } -static void sun7i_gmac_exit(struct platform_device *pdev, void *priv) +static void sun7i_gmac_exit(struct device *dev, void *priv) { struct sunxi_priv_data *gmac = priv; @@ -136,7 +136,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev) /* platform data specifying hardware features and callbacks. * hardware features were copied from Allwinner drivers. 
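The sun8i hunks above turn plat_dat->setup, where the glue allocated and returned a mac_device_info, into plat_dat->mac_setup, where the core owns the allocation and the glue only fills it in and can return a real error code. A minimal sketch of the new hook, with hypothetical "foo" ops tables standing in for the glue's own:

static const struct stmmac_ops foo_mac_ops;		/* placeholder */
static const struct stmmac_dma_ops foo_dma_ops;		/* placeholder */

static int foo_mac_setup(void *ppriv, struct mac_device_info *mac)
{
	struct stmmac_priv *priv = ppriv;

	/* no devm_kzalloc() here any more: the core allocated @mac */
	mac->pcsr = priv->ioaddr;
	mac->mac = &foo_mac_ops;
	mac->dma = &foo_dma_ops;

	return 0;	/* errors now propagate instead of returning NULL */
}

/* wired up in probe: plat_dat->mac_setup = foo_mac_setup; */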
*/ plat_dat->tx_coe = 1; - plat_dat->has_gmac = true; + plat_dat->core_type = DWMAC_CORE_GMAC; plat_dat->bsp_priv = gmac; plat_dat->init = sun7i_gmac_init; plat_dat->exit = sun7i_gmac_exit; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c index dc903b846b1b..d765acbe3754 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c @@ -308,7 +308,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev) goto disable_clks; } - plat->has_xgmac = 1; + plat->core_type = DWMAC_CORE_XGMAC; plat->flags |= STMMAC_FLAG_TSO_EN; plat->pmt = 1; plat->bsp_priv = mgbe; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c index a3378046b061..e291028ba56e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c @@ -186,7 +186,7 @@ static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat) return 0; } -static int thead_dwmac_init(struct platform_device *pdev, void *priv) +static int thead_dwmac_init(struct device *dev, void *priv) { struct thead_dwmac *dwmac = priv; unsigned int reg; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index bd65d4239054..9497b13a5753 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -42,10 +42,6 @@ #define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN) -#define ETHER_CONFIG_INTF_MII 0 -#define ETHER_CONFIG_INTF_RGMII BIT(0) -#define ETHER_CONFIG_INTF_RMII BIT(2) - struct visconti_eth { void __iomem *reg; struct clk *phy_ref_clk; @@ -150,22 +146,12 @@ static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmac { struct visconti_eth *dwmac = plat_dat->bsp_priv; unsigned int clk_sel_val; - u32 phy_intf_sel; - - switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - phy_intf_sel = ETHER_CONFIG_INTF_RGMII; - break; - case PHY_INTERFACE_MODE_MII: - phy_intf_sel = ETHER_CONFIG_INTF_MII; - break; - case PHY_INTERFACE_MODE_RMII: - phy_intf_sel = ETHER_CONFIG_INTF_RMII; - break; - default: + int phy_intf_sel; + + phy_intf_sel = stmmac_get_phy_intf_sel(plat_dat->phy_interface); + if (phy_intf_sel != PHY_INTF_SEL_GMII_MII && + phy_intf_sel != PHY_INTF_SEL_RGMII && + phy_intf_sel != PHY_INTF_SEL_RMII) { dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", plat_dat->phy_interface); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 0c011a47d5a3..697bba641e05 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -38,11 +38,10 @@ #define GMAC_INT_DISABLE_PCSAN BIT(2) #define GMAC_INT_DISABLE_PMT BIT(3) #define GMAC_INT_DISABLE_TIMESTAMP BIT(9) -#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \ +#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_RGMII | \ GMAC_INT_DISABLE_PCSLINK | \ - GMAC_INT_DISABLE_PCSAN) -#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \ - GMAC_INT_DISABLE_PCS) + GMAC_INT_DISABLE_PCSAN | \ + GMAC_INT_DISABLE_TIMESTAMP) /* PMT Control and Status */ #define GMAC_PMT 0x0000002c diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index fe776ddf6889..a2ae136d2c0e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -22,47 +22,35 @@ #include "stmmac_ptp.h" #include "dwmac1000.h" +static int dwmac1000_pcs_init(struct stmmac_priv *priv) +{ + if (!priv->dma_cap.pcs) + return 0; + + return stmmac_integrated_pcs_init(priv, GMAC_PCS_BASE, + GMAC_INT_DISABLE_PCSLINK | + GMAC_INT_DISABLE_PCSAN); +} + static void dwmac1000_core_init(struct mac_device_info *hw, struct net_device *dev) { void __iomem *ioaddr = hw->pcsr; - u32 value = readl(ioaddr + GMAC_CONTROL); int mtu = dev->mtu; + u32 value; /* Configure GMAC core */ - value |= GMAC_CORE_INIT; + value = readl(ioaddr + GMAC_CONTROL); if (mtu > 1500) value |= GMAC_CONTROL_2K; if (mtu > 2000) value |= GMAC_CONTROL_JE; - if (hw->ps) { - value |= GMAC_CONTROL_TE; - - value &= ~hw->link.speed_mask; - switch (hw->ps) { - case SPEED_1000: - value |= hw->link.speed1000; - break; - case SPEED_100: - value |= hw->link.speed100; - break; - case SPEED_10: - value |= hw->link.speed10; - break; - } - } - - writel(value, ioaddr + GMAC_CONTROL); + writel(value | GMAC_CORE_INIT, ioaddr + GMAC_CONTROL); /* Mask GMAC interrupts */ - value = GMAC_INT_DEFAULT_MASK; - - if (hw->pcs) - value &= ~GMAC_INT_DISABLE_PCS; - - writel(value, ioaddr + GMAC_INT_MASK); + writel(GMAC_INT_DEFAULT_MASK, ioaddr + GMAC_INT_MASK); #ifdef STMMAC_VLAN_TAG_USED /* Tag detection without filtering */ @@ -70,6 +58,20 @@ static void dwmac1000_core_init(struct mac_device_info *hw, #endif } +static void dwmac1000_irq_modify(struct mac_device_info *hw, u32 disable, + u32 enable) +{ + void __iomem *int_mask = hw->pcsr + GMAC_INT_MASK; + unsigned long flags; + u32 value; + + spin_lock_irqsave(&hw->irq_ctrl_lock, flags); + value = readl(int_mask) | disable; + value &= ~enable; + writel(value, int_mask); + spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags); +} + static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw) { void __iomem *ioaddr = hw->pcsr; @@ -263,39 +265,6 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode) writel(pmt, ioaddr + GMAC_PMT); } -/* RGMII or SMII interface */ -static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x) -{ - u32 status; - - status = readl(ioaddr + GMAC_RGSMIIIS); - x->irq_rgmii_n++; - - /* Check the link status */ - if (status & GMAC_RGSMIIIS_LNKSTS) { - int speed_value; - - x->pcs_link = 1; - - speed_value = ((status & GMAC_RGSMIIIS_SPEED) >> - GMAC_RGSMIIIS_SPEED_SHIFT); - if (speed_value == GMAC_RGSMIIIS_SPEED_125) - x->pcs_speed = SPEED_1000; - else if (speed_value == GMAC_RGSMIIIS_SPEED_25) - x->pcs_speed = SPEED_100; - else - x->pcs_speed = SPEED_10; - - x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK); - - pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, - x->pcs_duplex ? 
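dwmac1000_irq_modify() below and its dwmac4 twin later in this diff give callers one locked primitive for interrupt-mask updates, hiding the fact that the two cores invert the polarity internally (dwmac1000 writes a disable-mask register, dwmac4 an enable register): callers only say which sources to disable or enable. A hedged usage sketch with the dwmac1000 bit names; the core normally goes through its hwif wrappers rather than calling the op directly:

static void demo_toggle_pcs_irqs(struct stmmac_priv *priv, bool enable)
{
	u32 bits = GMAC_INT_DISABLE_PCSLINK | GMAC_INT_DISABLE_PCSAN;

	if (!priv->hw->mac->irq_modify)
		return;

	if (enable)
		priv->hw->mac->irq_modify(priv->hw, 0, bits);
	else
		priv->hw->mac->irq_modify(priv->hw, bits, 0);
}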
"Full" : "Half"); - } else { - x->pcs_link = 0; - pr_info("Link is Down\n"); - } -} - static int dwmac1000_irq_status(struct mac_device_info *hw, struct stmmac_extra_stats *x) { @@ -337,9 +306,6 @@ static int dwmac1000_irq_status(struct mac_device_info *hw, dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); - if (intr_status & PCS_RGSMIIIS_IRQ) - dwmac1000_rgsmii(ioaddr, x); - return ret; } @@ -394,9 +360,9 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw) } static void dwmac1000_ctrl_ane(struct stmmac_priv *priv, bool ane, - bool srgmi_ral, bool loopback) + bool srgmi_ral) { - dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); + dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral); } static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -488,7 +454,9 @@ static void dwmac1000_set_mac_loopback(void __iomem *ioaddr, bool enable) } const struct stmmac_ops dwmac1000_ops = { + .pcs_init = dwmac1000_pcs_init, .core_init = dwmac1000_core_init, + .irq_modify = dwmac1000_irq_modify, .set_mac = stmmac_set_mac, .rx_ipc = dwmac1000_rx_ipc_enable, .dump_regs = dwmac1000_dump_regs, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 118a22406a2e..5877fec9f6c3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -19,7 +19,6 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) { u32 value = readl(ioaddr + DMA_AXI_BUS_MODE); - int i; pr_info("dwmac1000: Master AXI performs %s burst length\n", !(value & DMA_AXI_UNDEF) ? "fixed" : "any"); @@ -39,33 +38,10 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) /* Depending on the UNDEF bit the Master AXI will perform any burst * length according to the BLEN programmed (by default all BLEN are - * set). + * set). Note that the UNDEF bit is readonly, and is the inverse of + * Bus Mode bit 16. 
*/ - for (i = 0; i < AXI_BLEN; i++) { - switch (axi->axi_blen[i]) { - case 256: - value |= DMA_AXI_BLEN256; - break; - case 128: - value |= DMA_AXI_BLEN128; - break; - case 64: - value |= DMA_AXI_BLEN64; - break; - case 32: - value |= DMA_AXI_BLEN32; - break; - case 16: - value |= DMA_AXI_BLEN16; - break; - case 8: - value |= DMA_AXI_BLEN8; - break; - case 4: - value |= DMA_AXI_BLEN4; - break; - } - } + value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval; writel(value, ioaddr + DMA_AXI_BUS_MODE); } @@ -159,10 +135,10 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv, if (mode == SF_DMA_MODE) { pr_debug("GMAC: enable RX store and forward mode\n"); - csr6 |= DMA_CONTROL_RSF; + csr6 |= DMA_CONTROL_RSF | DMA_CONTROL_DFF; } else { pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode); - csr6 &= ~DMA_CONTROL_RSF; + csr6 &= ~(DMA_CONTROL_RSF | DMA_CONTROL_DFF); csr6 &= DMA_CONTROL_TC_RX_MASK; if (mode <= 32) csr6 |= DMA_CONTROL_RTC_32; @@ -286,6 +262,7 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = { .dma_rx_mode = dwmac1000_dma_operation_mode_rx, .dma_tx_mode = dwmac1000_dma_operation_mode_tx, .enable_dma_transmission = dwmac_enable_dma_transmission, + .enable_dma_reception = dwmac_enable_dma_reception, .enable_dma_irq = dwmac_enable_dma_irq, .disable_dma_irq = dwmac_disable_dma_irq, .start_tx = dwmac_dma_start_tx, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 3dec1a264cf6..3cb733781e1e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -106,9 +106,6 @@ #define GMAC_INT_LPI_EN BIT(5) #define GMAC_INT_TSIE BIT(12) -#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ - GMAC_INT_PCS_ANE) - #define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN | \ GMAC_INT_TSIE) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index d85bc0bb5c3c..a4282fd7c3c7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -22,51 +22,51 @@ #include "dwmac4.h" #include "dwmac5.h" +static int dwmac4_pcs_init(struct stmmac_priv *priv) +{ + if (!priv->dma_cap.pcs) + return 0; + + return stmmac_integrated_pcs_init(priv, GMAC_PCS_BASE, + GMAC_INT_PCS_LINK | GMAC_INT_PCS_ANE); +} + static void dwmac4_core_init(struct mac_device_info *hw, struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = hw->pcsr; - u32 value = readl(ioaddr + GMAC_CONFIG); unsigned long clk_rate; + u32 value; - value |= GMAC_CORE_INIT; - - if (hw->ps) { - value |= GMAC_CONFIG_TE; - - value &= hw->link.speed_mask; - switch (hw->ps) { - case SPEED_1000: - value |= hw->link.speed1000; - break; - case SPEED_100: - value |= hw->link.speed100; - break; - case SPEED_10: - value |= hw->link.speed10; - break; - } - } - - writel(value, ioaddr + GMAC_CONFIG); + value = readl(ioaddr + GMAC_CONFIG); + writel(value | GMAC_CORE_INIT, ioaddr + GMAC_CONFIG); /* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */ clk_rate = clk_get_rate(priv->plat->stmmac_clk); writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER); /* Enable GMAC interrupts */ - value = GMAC_INT_DEFAULT_ENABLE; - - if (hw->pcs) - value |= GMAC_PCS_IRQ_DEFAULT; - - writel(value, ioaddr + GMAC_INT_EN); + writel(GMAC_INT_DEFAULT_ENABLE, ioaddr + GMAC_INT_EN); if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE) 
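Both dwmac1000_dma_axi() above and dwmac4_dma_axi() below lose their per-call switch over axi->axi_blen[] in favour of a precomputed axi->axi_blen_regval. A sketch of how that value can be derived once at parse time, relying on DMA_AXI_BLENn being bit (log2(n) - 1), as the removed defines confirm (BLEN4 is BIT(1), ..., BLEN256 is BIT(7)); the helper name is hypothetical:

#include <linux/bits.h>
#include <linux/log2.h>

static u32 demo_axi_blen_to_regval(const u32 *blen, int count)
{
	u32 regval = 0;
	int i;

	for (i = 0; i < count; i++)
		if (blen[i] >= 4 && blen[i] <= 256 && is_power_of_2(blen[i]))
			regval |= BIT(ilog2(blen[i]) - 1);

	return regval;
}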
init_waitqueue_head(&priv->tstamp_busy_wait); } +static void dwmac4_irq_modify(struct mac_device_info *hw, u32 disable, + u32 enable) +{ + void __iomem *int_mask = hw->pcsr + GMAC_INT_EN; + unsigned long flags; + u32 value; + + spin_lock_irqsave(&hw->irq_ctrl_lock, flags); + value = readl(int_mask) & ~disable; + value |= enable; + writel(value, int_mask); + spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags); +} + static void dwmac4_update_caps(struct stmmac_priv *priv) { if (priv->plat->tx_queues_to_use > 1) @@ -583,43 +583,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, } } -static void dwmac4_ctrl_ane(struct stmmac_priv *priv, bool ane, bool srgmi_ral, - bool loopback) -{ - dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); -} - -/* RGMII or SMII interface */ -static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x) +static void dwmac4_ctrl_ane(struct stmmac_priv *priv, bool ane, bool srgmi_ral) { - u32 status; - - status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS); - x->irq_rgmii_n++; - - /* Check the link status */ - if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) { - int speed_value; - - x->pcs_link = 1; - - speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >> - GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT); - if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125) - x->pcs_speed = SPEED_1000; - else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25) - x->pcs_speed = SPEED_100; - else - x->pcs_speed = SPEED_10; - - x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD); - - pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, - x->pcs_duplex ? "Full" : "Half"); - } else { - x->pcs_link = 0; - pr_info("Link is Down\n"); - } + dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral); } static int dwmac4_irq_mtl_status(struct stmmac_priv *priv, @@ -693,8 +659,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw, } dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); - if (intr_status & PCS_RGSMIIIS_IRQ) - dwmac4_phystatus(ioaddr, x); return ret; } @@ -929,7 +893,9 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no, } const struct stmmac_ops dwmac4_ops = { + .pcs_init = dwmac4_pcs_init, .core_init = dwmac4_core_init, + .irq_modify = dwmac4_irq_modify, .update_caps = dwmac4_update_caps, .set_mac = stmmac_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, @@ -963,7 +929,9 @@ const struct stmmac_ops dwmac4_ops = { }; const struct stmmac_ops dwmac410_ops = { + .pcs_init = dwmac4_pcs_init, .core_init = dwmac4_core_init, + .irq_modify = dwmac4_irq_modify, .update_caps = dwmac4_update_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, @@ -999,7 +967,9 @@ const struct stmmac_ops dwmac410_ops = { }; const struct stmmac_ops dwmac510_ops = { + .pcs_init = dwmac4_pcs_init, .core_init = dwmac4_core_init, + .irq_modify = dwmac4_irq_modify, .update_caps = dwmac4_update_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index d87a8b595e6a..7b513324cfb0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -18,7 +18,6 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) { u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); - int i; pr_info("dwmac4: Master AXI performs %s burst length\n", (value & DMA_SYS_BUS_FB) ? 
"fixed" : "any"); @@ -38,33 +37,10 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) /* Depending on the UNDEF bit the Master AXI will perform any burst * length according to the BLEN programmed (by default all BLEN are - * set). + * set). Note that the UNDEF bit is readonly, and is the inverse of + * Bus Mode bit 16. */ - for (i = 0; i < AXI_BLEN; i++) { - switch (axi->axi_blen[i]) { - case 256: - value |= DMA_AXI_BLEN256; - break; - case 128: - value |= DMA_AXI_BLEN128; - break; - case 64: - value |= DMA_AXI_BLEN64; - break; - case 32: - value |= DMA_AXI_BLEN32; - break; - case 16: - value |= DMA_AXI_BLEN16; - break; - case 8: - value |= DMA_AXI_BLEN8; - break; - case 4: - value |= DMA_AXI_BLEN4; - break; - } - } + value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval; writel(value, ioaddr + DMA_SYS_BUS_MODE); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index 4f980dcd3958..f27126f05551 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -69,15 +69,8 @@ #define DMA_SYS_BUS_MB BIT(14) #define DMA_AXI_1KBBE BIT(13) -#define DMA_SYS_BUS_AAL BIT(12) +#define DMA_SYS_BUS_AAL DMA_AXI_AAL #define DMA_SYS_BUS_EAME BIT(11) -#define DMA_AXI_BLEN256 BIT(7) -#define DMA_AXI_BLEN128 BIT(6) -#define DMA_AXI_BLEN64 BIT(5) -#define DMA_AXI_BLEN32 BIT(4) -#define DMA_AXI_BLEN16 BIT(3) -#define DMA_AXI_BLEN8 BIT(2) -#define DMA_AXI_BLEN4 BIT(1) #define DMA_SYS_BUS_FB BIT(0) #define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \ @@ -85,8 +78,6 @@ DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \ DMA_AXI_BLEN4) -#define DMA_AXI_BURST_LEN_MASK 0x000000FE - /* DMA TBS Control */ #define DMA_TBS_FTOS GENMASK(31, 8) #define DMA_TBS_FTOV BIT(0) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 5d9c18f5bbf5..054ecb20ce3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -68,23 +68,14 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan) #define DMA_AXI_OSR_MAX 0xf #define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \ (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT)) -#define DMA_AXI_1KBBE BIT(13) -#define DMA_AXI_AAL BIT(12) -#define DMA_AXI_BLEN256 BIT(7) -#define DMA_AXI_BLEN128 BIT(6) -#define DMA_AXI_BLEN64 BIT(5) -#define DMA_AXI_BLEN32 BIT(4) -#define DMA_AXI_BLEN16 BIT(3) -#define DMA_AXI_BLEN8 BIT(2) -#define DMA_AXI_BLEN4 BIT(1) #define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \ DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \ DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \ DMA_AXI_BLEN4) -#define DMA_AXI_UNDEF BIT(0) +#define DMA_AXI_1KBBE BIT(13) -#define DMA_AXI_BURST_LEN_MASK 0x000000FE +#define DMA_AXI_UNDEF BIT(0) #define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ #define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ @@ -178,6 +169,7 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan) #define NUM_DWMAC4_DMA_REGS 27 void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan); +void dwmac_enable_dma_reception(void __iomem *ioaddr, u32 chan); void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx); void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 
467f1a05747e..97a803d68e3a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -33,6 +33,11 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan) writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan)); } +void dwmac_enable_dma_reception(void __iomem *ioaddr, u32 chan) +{ + writel(1, ioaddr + DMA_CHAN_RCV_POLL_DEMAND(chan)); +} + void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 0d408ee17f33..fecda3034d36 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -79,6 +79,7 @@ #define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8) #define XGMAC_PSRQ_SHIFT(x) ((x) * 8) #define XGMAC_INT_STATUS 0x000000b0 +#define XGMAC_INT_TSIS BIT(12) #define XGMAC_LPIIS BIT(5) #define XGMAC_PMTIS BIT(4) #define XGMAC_INT_EN 0x000000b4 @@ -173,6 +174,8 @@ #define XGMAC_MDIO_ADDR 0x00000200 #define XGMAC_MDIO_DATA 0x00000204 #define XGMAC_MDIO_C22P 0x00000220 +#define XGMAC_GPIO_STATUS 0x0000027c +#define XGMAC_GPIO_GPO0 BIT(16) #define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8) #define XGMAC_ADDR_MAX 32 #define XGMAC_AE BIT(31) @@ -220,6 +223,8 @@ #define XGMAC_OB BIT(0) #define XGMAC_RSS_DATA 0x00000c8c #define XGMAC_TIMESTAMP_STATUS 0x00000d20 +#define XGMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25) +#define XGMAC_TIMESTAMP_ATSNS_SHIFT 25 #define XGMAC_TXTSC BIT(15) #define XGMAC_TXTIMESTAMP_NSEC 0x00000d30 #define XGMAC_TXTSSTSLO GENMASK(30, 0) @@ -333,16 +338,9 @@ #define XGMAC_RD_OSR_LMT_SHIFT 16 #define XGMAC_EN_LPI BIT(15) #define XGMAC_LPI_XIT_PKT BIT(14) -#define XGMAC_AAL BIT(12) +#define XGMAC_AAL DMA_AXI_AAL #define XGMAC_EAME BIT(11) -#define XGMAC_BLEN GENMASK(7, 1) -#define XGMAC_BLEN256 BIT(7) -#define XGMAC_BLEN128 BIT(6) -#define XGMAC_BLEN64 BIT(5) -#define XGMAC_BLEN32 BIT(4) -#define XGMAC_BLEN16 BIT(3) -#define XGMAC_BLEN8 BIT(2) -#define XGMAC_BLEN4 BIT(1) +/* XGMAC_BLEN* are now defined as DMA_AXI_BLEN* in common.h */ #define XGMAC_UNDEF BIT(0) #define XGMAC_TX_EDMA_CTRL 0x00003040 #define XGMAC_TDPS GENMASK(29, 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index 00e929bf280b..b40b3ea50e25 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -23,30 +23,23 @@ static void dwxgmac2_core_init(struct mac_device_info *hw, tx = readl(ioaddr + XGMAC_TX_CONFIG); rx = readl(ioaddr + XGMAC_RX_CONFIG); - tx |= XGMAC_CORE_INIT_TX; - rx |= XGMAC_CORE_INIT_RX; - - if (hw->ps) { - tx |= XGMAC_CONFIG_TE; - tx &= ~hw->link.speed_mask; + writel(tx | XGMAC_CORE_INIT_TX, ioaddr + XGMAC_TX_CONFIG); + writel(rx | XGMAC_CORE_INIT_RX, ioaddr + XGMAC_RX_CONFIG); + writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN); +} - switch (hw->ps) { - case SPEED_10000: - tx |= hw->link.xgmii.speed10000; - break; - case SPEED_2500: - tx |= hw->link.speed2500; - break; - case SPEED_1000: - default: - tx |= hw->link.speed1000; - break; - } - } +static void dwxgmac2_irq_modify(struct mac_device_info *hw, u32 disable, + u32 enable) +{ + void __iomem *int_mask = hw->pcsr + XGMAC_INT_EN; + unsigned long flags; + u32 value; - writel(tx, ioaddr + XGMAC_TX_CONFIG); - writel(rx, ioaddr + XGMAC_RX_CONFIG); - writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN); + 
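+	/* Locked read-modify-write of XGMAC_INT_EN: clearing the "disable"
+	 * bits before OR-ing in the "enable" bits is what gives enable
+	 * precedence, matching the irq_modify contract noted in hwif.h.
+	 */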
spin_lock_irqsave(&hw->irq_ctrl_lock, flags); + value = readl(int_mask) & ~disable; + value |= enable; + writel(value, int_mask); + spin_unlock_irqrestore(&hw->irq_ctrl_lock, flags); } static void dwxgmac2_update_caps(struct stmmac_priv *priv) @@ -1432,6 +1425,7 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en, const struct stmmac_ops dwxgmac210_ops = { .core_init = dwxgmac2_core_init, + .irq_modify = dwxgmac2_irq_modify, .update_caps = dwxgmac2_update_caps, .set_mac = dwxgmac2_set_mac, .rx_ipc = dwxgmac2_rx_ipc, @@ -1487,6 +1481,7 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode, const struct stmmac_ops dwxlgmac2_ops = { .core_init = dwxgmac2_core_init, + .irq_modify = dwxgmac2_irq_modify, .set_mac = dwxgmac2_set_mac, .rx_ipc = dwxgmac2_rx_ipc, .rx_queue_enable = dwxlgmac2_rx_queue_enable, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 4d6bb995d8d8..cc1bdc0975d5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -84,7 +84,6 @@ static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv, static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) { u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE); - int i; if (axi->axi_lpi_en) value |= XGMAC_EN_LPI; @@ -102,32 +101,12 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) if (!axi->axi_fb) value |= XGMAC_UNDEF; - value &= ~XGMAC_BLEN; - for (i = 0; i < AXI_BLEN; i++) { - switch (axi->axi_blen[i]) { - case 256: - value |= XGMAC_BLEN256; - break; - case 128: - value |= XGMAC_BLEN128; - break; - case 64: - value |= XGMAC_BLEN64; - break; - case 32: - value |= XGMAC_BLEN32; - break; - case 16: - value |= XGMAC_BLEN16; - break; - case 8: - value |= XGMAC_BLEN8; - break; - case 4: - value |= XGMAC_BLEN4; - break; - } - } + /* Depending on the UNDEF bit the Master AXI will perform any burst + * length according to the BLEN programmed (by default all BLEN are + * set). Note that the UNDEF bit is readonly, and is the inverse of + * Bus Mode bit 16. 
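+ * The former XGMAC_BLEN* field definitions are gone; this path now
+ * shares the common DMA_AXI_BLEN_MASK / axi->axi_blen_regval encoding
+ * used by the dwmac1000 and dwmac4 variants of this function.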
+ */ + value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval; writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE); writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 3f7c765dcb79..014f7cd79a3c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -13,31 +13,42 @@ #include "dwmac4_descs.h" #include "dwxgmac2.h" -static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg) +struct stmmac_version { + u8 snpsver; + u8 dev_id; +}; + +static void stmmac_get_version(struct stmmac_priv *priv, + struct stmmac_version *ver) { - u32 reg = readl(priv->ioaddr + id_reg); + enum dwmac_core_type core_type = priv->plat->core_type; + unsigned int version_offset; + u32 version; - if (!reg) { - dev_info(priv->device, "Version ID not available\n"); - return 0x0; - } + ver->snpsver = 0; + ver->dev_id = 0; - dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n", - (unsigned int)(reg & GENMASK(15, 8)) >> 8, - (unsigned int)(reg & GENMASK(7, 0))); - return reg & GENMASK(7, 0); -} + if (core_type == DWMAC_CORE_MAC100) + return; -static u32 stmmac_get_dev_id(struct stmmac_priv *priv, u32 id_reg) -{ - u32 reg = readl(priv->ioaddr + id_reg); + if (core_type == DWMAC_CORE_GMAC) + version_offset = GMAC_VERSION; + else + version_offset = GMAC4_VERSION; - if (!reg) { + version = readl(priv->ioaddr + version_offset); + if (version == 0) { dev_info(priv->device, "Version ID not available\n"); - return 0x0; + return; } - return (reg & GENMASK(15, 8)) >> 8; + dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n", + FIELD_GET(DWMAC_USERVER, version), + FIELD_GET(DWMAC_SNPSVER, version)); + + ver->snpsver = FIELD_GET(DWMAC_SNPSVER, version); + if (core_type == DWMAC_CORE_XGMAC) + ver->dev_id = FIELD_GET(DWMAC_USERVER, version); } static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv) @@ -92,12 +103,10 @@ static int stmmac_dwxlgmac_quirks(struct stmmac_priv *priv) return 0; } -int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr) +int stmmac_reset(struct stmmac_priv *priv) { - struct plat_stmmacenet_data *plat = priv ? 
priv->plat : NULL; - - if (!priv) - return -EINVAL; + struct plat_stmmacenet_data *plat = priv->plat; + void __iomem *ioaddr = priv->ioaddr; if (plat && plat->fix_soc_reset) return plat->fix_soc_reset(priv, ioaddr); @@ -106,9 +115,7 @@ int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr) } static const struct stmmac_hwif_entry { - bool gmac; - bool gmac4; - bool xgmac; + enum dwmac_core_type core_type; u32 min_id; u32 dev_id; const struct stmmac_regs_off regs; @@ -127,9 +134,7 @@ static const struct stmmac_hwif_entry { } stmmac_hw[] = { /* NOTE: New HW versions shall go to the end of this table */ { - .gmac = false, - .gmac4 = false, - .xgmac = false, + .core_type = DWMAC_CORE_MAC100, .min_id = 0, .regs = { .ptp_off = PTP_GMAC3_X_OFFSET, @@ -146,9 +151,7 @@ static const struct stmmac_hwif_entry { .setup = dwmac100_setup, .quirks = stmmac_dwmac1_quirks, }, { - .gmac = true, - .gmac4 = false, - .xgmac = false, + .core_type = DWMAC_CORE_GMAC, .min_id = 0, .regs = { .ptp_off = PTP_GMAC3_X_OFFSET, @@ -165,9 +168,7 @@ static const struct stmmac_hwif_entry { .setup = dwmac1000_setup, .quirks = stmmac_dwmac1_quirks, }, { - .gmac = false, - .gmac4 = true, - .xgmac = false, + .core_type = DWMAC_CORE_GMAC4, .min_id = 0, .regs = { .ptp_off = PTP_GMAC4_OFFSET, @@ -187,9 +188,7 @@ static const struct stmmac_hwif_entry { .setup = dwmac4_setup, .quirks = stmmac_dwmac4_quirks, }, { - .gmac = false, - .gmac4 = true, - .xgmac = false, + .core_type = DWMAC_CORE_GMAC4, .min_id = DWMAC_CORE_4_00, .regs = { .ptp_off = PTP_GMAC4_OFFSET, @@ -210,9 +209,7 @@ static const struct stmmac_hwif_entry { .setup = dwmac4_setup, .quirks = NULL, }, { - .gmac = false, - .gmac4 = true, - .xgmac = false, + .core_type = DWMAC_CORE_GMAC4, .min_id = DWMAC_CORE_4_10, .regs = { .ptp_off = PTP_GMAC4_OFFSET, @@ -233,9 +230,7 @@ static const struct stmmac_hwif_entry { .setup = dwmac4_setup, .quirks = NULL, }, { - .gmac = false, - .gmac4 = true, - .xgmac = false, + .core_type = DWMAC_CORE_GMAC4, .min_id = DWMAC_CORE_5_10, .regs = { .ptp_off = PTP_GMAC4_OFFSET, @@ -256,9 +251,7 @@ static const struct stmmac_hwif_entry { .setup = dwmac4_setup, .quirks = NULL, }, { - .gmac = false, - .gmac4 = false, - .xgmac = true, + .core_type = DWMAC_CORE_XGMAC, .min_id = DWXGMAC_CORE_2_10, .dev_id = DWXGMAC_ID, .regs = { @@ -280,9 +273,7 @@ static const struct stmmac_hwif_entry { .setup = dwxgmac2_setup, .quirks = NULL, }, { - .gmac = false, - .gmac4 = false, - .xgmac = true, + .core_type = DWMAC_CORE_XGMAC, .min_id = DWXLGMAC_CORE_2_00, .dev_id = DWXLGMAC_ID, .regs = { @@ -306,100 +297,114 @@ static const struct stmmac_hwif_entry { }, }; +static const struct stmmac_hwif_entry * +stmmac_hwif_find(enum dwmac_core_type core_type, u8 snpsver, u8 dev_id) +{ + const struct stmmac_hwif_entry *entry; + int i; + + for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) { + entry = &stmmac_hw[i]; + + if (core_type != entry->core_type) + continue; + /* Use synopsys_id var because some setups can override this */ + if (snpsver < entry->min_id) + continue; + if (core_type == DWMAC_CORE_XGMAC && + dev_id != entry->dev_id) + continue; + + return entry; + } + + return NULL; +} + int stmmac_hwif_init(struct stmmac_priv *priv) { - bool needs_xgmac = priv->plat->has_xgmac; - bool needs_gmac4 = priv->plat->has_gmac4; - bool needs_gmac = priv->plat->has_gmac; + enum dwmac_core_type core_type = priv->plat->core_type; const struct stmmac_hwif_entry *entry; + struct stmmac_version version; struct mac_device_info *mac; bool needs_setup = true; - u32 id, dev_id = 0; - int 
i, ret; - - if (needs_gmac) { - id = stmmac_get_id(priv, GMAC_VERSION); - } else if (needs_gmac4 || needs_xgmac) { - id = stmmac_get_id(priv, GMAC4_VERSION); - if (needs_xgmac) - dev_id = stmmac_get_dev_id(priv, GMAC4_VERSION); - } else { - id = 0; - } + int ret; + + stmmac_get_version(priv, &version); /* Save ID for later use */ - priv->synopsys_id = id; + priv->synopsys_id = version.snpsver; /* Lets assume some safe values first */ - priv->ptpaddr = priv->ioaddr + - (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET); - priv->mmcaddr = priv->ioaddr + - (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET); - if (needs_gmac4) + if (core_type == DWMAC_CORE_GMAC4) { + priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET; + priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; priv->estaddr = priv->ioaddr + EST_GMAC4_OFFSET; - else if (needs_xgmac) - priv->estaddr = priv->ioaddr + EST_XGMAC_OFFSET; - - /* Check for HW specific setup first */ - if (priv->plat->setup) { - mac = priv->plat->setup(priv); - needs_setup = false; } else { - mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); + priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET; + priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; + if (core_type == DWMAC_CORE_XGMAC) + priv->estaddr = priv->ioaddr + EST_XGMAC_OFFSET; } + mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); if (!mac) return -ENOMEM; + /* Check for HW specific setup first */ + if (priv->plat->mac_setup) { + ret = priv->plat->mac_setup(priv, mac); + if (ret) + return ret; + + needs_setup = false; + } + + spin_lock_init(&mac->irq_ctrl_lock); + /* Fallback to generic HW */ - for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) { - entry = &stmmac_hw[i]; - if (needs_gmac ^ entry->gmac) - continue; - if (needs_gmac4 ^ entry->gmac4) - continue; - if (needs_xgmac ^ entry->xgmac) - continue; - /* Use synopsys_id var because some setups can override this */ - if (priv->synopsys_id < entry->min_id) - continue; - if (needs_xgmac && (dev_id ^ entry->dev_id)) - continue; + /* Use synopsys_id var because some setups can override this */ + entry = stmmac_hwif_find(core_type, priv->synopsys_id, version.dev_id); + if (!entry) { + dev_err(priv->device, + "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n", + version.snpsver, core_type == DWMAC_CORE_GMAC, + core_type == DWMAC_CORE_GMAC4); - /* Only use generic HW helpers if needed */ - mac->desc = mac->desc ? : entry->desc; - mac->dma = mac->dma ? : entry->dma; - mac->mac = mac->mac ? : entry->mac; - mac->ptp = mac->ptp ? : entry->hwtimestamp; - mac->mode = mac->mode ? : entry->mode; - mac->tc = mac->tc ? : entry->tc; - mac->mmc = mac->mmc ? : entry->mmc; - mac->est = mac->est ? : entry->est; - mac->vlan = mac->vlan ? : entry->vlan; - - priv->hw = mac; - priv->fpe_cfg.reg = entry->regs.fpe_reg; - priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off; - priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off; - memcpy(&priv->ptp_clock_ops, entry->ptp, - sizeof(struct ptp_clock_info)); - if (entry->est) - priv->estaddr = priv->ioaddr + entry->regs.est_off; - - /* Entry found */ - if (needs_setup) { - ret = entry->setup(priv); - if (ret) - return ret; - } + return -EINVAL; + } - /* Save quirks, if needed for posterior use */ - priv->hwif_quirks = entry->quirks; - return 0; + /* Only use generic HW helpers if needed */ + mac->desc = mac->desc ? : entry->desc; + mac->dma = mac->dma ? : entry->dma; + mac->mac = mac->mac ? : entry->mac; + mac->ptp = mac->ptp ? : entry->hwtimestamp; + mac->mode = mac->mode ? : entry->mode; + mac->tc = mac->tc ? 
: entry->tc; + mac->mmc = mac->mmc ? : entry->mmc; + mac->est = mac->est ? : entry->est; + mac->vlan = mac->vlan ? : entry->vlan; + + priv->hw = mac; + priv->fpe_cfg.reg = entry->regs.fpe_reg; + priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off; + priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off; + memcpy(&priv->ptp_clock_ops, entry->ptp, + sizeof(struct ptp_clock_info)); + + if (entry->est) + priv->estaddr = priv->ioaddr + entry->regs.est_off; + + /* Entry found */ + if (needs_setup) { + ret = entry->setup(priv); + if (ret) + return ret; } - dev_err(priv->device, "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n", - id, needs_gmac, needs_gmac4); - return -EINVAL; + /* Save quirks, if needed for posterior use */ + priv->hwif_quirks = entry->quirks; + + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 14dbe0685997..df6e8a567b1f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -201,6 +201,7 @@ struct stmmac_dma_ops { void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x, void __iomem *ioaddr); void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan); + void (*enable_dma_reception)(void __iomem *ioaddr, u32 chan); void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx); void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -261,6 +262,8 @@ struct stmmac_dma_ops { stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args) #define stmmac_enable_dma_transmission(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args) +#define stmmac_enable_dma_reception(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_dma_reception, __args) #define stmmac_enable_dma_irq(__priv, __args...) \ stmmac_do_void_callback(__priv, dma, enable_dma_irq, __priv, __args) #define stmmac_disable_dma_irq(__priv, __args...) \ @@ -313,10 +316,14 @@ enum stmmac_lpi_mode { /* Helpers to program the MAC core */ struct stmmac_ops { + /* Initialise any PCS instances */ + int (*pcs_init)(struct stmmac_priv *priv); /* MAC core initialization */ void (*core_init)(struct mac_device_info *hw, struct net_device *dev); /* Update MAC capabilities */ void (*update_caps)(struct stmmac_priv *priv); + /* Change the interrupt enable setting. Enable takes precedence. */ + void (*irq_modify)(struct mac_device_info *hw, u32 disable, u32 enable); /* Enable the MAC RX/TX */ void (*set_mac)(void __iomem *ioaddr, bool enable); /* Enable and verify that the IPC module is supported */ @@ -374,8 +381,8 @@ struct stmmac_ops { struct stmmac_extra_stats *x, u32 rx_queues, u32 tx_queues); /* PCS calls */ - void (*pcs_ctrl_ane)(struct stmmac_priv *priv, bool ane, bool srgmi_ral, - bool loopback); + void (*pcs_ctrl_ane)(struct stmmac_priv *priv, bool ane, + bool srgmi_ral); /* Safety Features */ int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp, struct stmmac_safety_feature_cfg *safety_cfg); @@ -413,10 +420,14 @@ struct stmmac_ops { u32 pclass); }; +#define stmmac_mac_pcs_init(__priv) \ + stmmac_do_callback(__priv, mac, pcs_init, __priv) #define stmmac_core_init(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, core_init, __args) #define stmmac_mac_update_caps(__priv) \ stmmac_do_void_callback(__priv, mac, update_caps, __priv) +#define stmmac_mac_irq_modify(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, mac, irq_modify, (__priv)->hw, __args) #define stmmac_mac_set(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, set_mac, __args) #define stmmac_rx_ipc(__priv, __args...) \ @@ -533,7 +544,7 @@ struct stmmac_rx_queue; struct stmmac_mode_ops { void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, unsigned int extend_desc); - unsigned int (*is_jumbo_frm) (int len, int ehn_desc); + bool (*is_jumbo_frm)(unsigned int len, bool enh_desc); int (*jumbo_frm)(struct stmmac_tx_queue *tx_q, struct sk_buff *skb, int csum); int (*set_16kib_bfsize)(int mtu); @@ -690,7 +701,7 @@ extern const struct stmmac_tc_ops dwmac510_tc_ops; #define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ #define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ -int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr); +int stmmac_reset(struct stmmac_priv *priv); int stmmac_hwif_init(struct stmmac_priv *priv); #endif /* __STMMAC_HWIF_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index d218412ca832..382d94a3b972 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -91,14 +91,9 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb, return entry; } -static unsigned int is_jumbo_frm(int len, int enh_desc) +static bool is_jumbo_frm(unsigned int len, bool enh_desc) { - unsigned int ret = 0; - - if (len >= BUF_SIZE_4KiB) - ret = 1; - - return ret; + return len >= BUF_SIZE_4KiB; } static void refill_desc3(struct stmmac_rx_queue *rx_q, struct dma_desc *p) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 7ca5477be390..012b0a477255 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -25,6 +25,8 @@ #include <net/xdp.h> #include <uapi/linux/bpf.h> +struct stmmac_pcs; + struct stmmac_resources { void __iomem *addr; u8 mac[ETH_ALEN]; @@ -252,11 +254,12 @@ struct stmmac_priv { int hwts_tx_en; bool tx_path_in_lpi_mode; bool tso; - int sph; - int sph_cap; + bool sph_active; + bool sph_capable; u32 sarc_type; u32 rx_riwt[MTL_MAX_RX_QUEUES]; int hwts_rx_en; + bool tsfupdt_coarse; void __iomem *ioaddr; struct net_device *dev; @@ -273,6 +276,8 @@ struct stmmac_priv { unsigned int pause_time; struct mii_bus *mii; + struct stmmac_pcs *integrated_pcs; + struct phylink_config phylink_config; struct phylink *phylink; @@ -287,6 +292,7 @@ struct stmmac_priv { int hw_cap_support; int synopsys_id; u32 msg_enable; + /* Our MAC Wake-on-Lan options */ int wolopts; int wol_irq; u32 gmii_address_bus_config; @@ -364,6 +370,8 @@ struct stmmac_priv { /* XDP BPF Program */ unsigned long *af_xdp_zc_qps; struct bpf_prog *xdp_prog; + + struct devlink *devlink; }; enum stmmac_state { @@ -375,19 +383,11 @@ enum stmmac_state { extern const struct dev_pm_ops stmmac_simple_pm_ops; -static inline bool stmmac_wol_enabled_mac(struct stmmac_priv *priv) -{ - return priv->plat->pmt && device_may_wakeup(priv->device); -} - -static inline bool stmmac_wol_enabled_phy(struct stmmac_priv *priv) -{ - return !priv->plat->pmt && device_may_wakeup(priv->device); -} - int stmmac_mdio_unregister(struct net_device *ndev); int stmmac_mdio_register(struct net_device *ndev); int stmmac_mdio_reset(struct mii_bus *mii); +void stmmac_mdio_lock(struct stmmac_priv *priv); +void stmmac_mdio_unlock(struct stmmac_priv *priv); int stmmac_pcs_setup(struct net_device 
*ndev); void stmmac_pcs_clean(struct net_device *ndev); void stmmac_set_ethtool_ops(struct net_device *netdev); @@ -396,6 +396,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); int stmmac_xdp_open(struct net_device *dev); void stmmac_xdp_release(struct net_device *dev); +int stmmac_get_phy_intf_sel(phy_interface_t interface); int stmmac_resume(struct device *dev); int stmmac_suspend(struct device *dev); void stmmac_dvr_remove(struct device *dev); @@ -407,6 +408,8 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size); int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, phy_interface_t interface, int speed); +struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev); + static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv) { return !!priv->xdp_prog; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c index 4b513d27a988..afc516059b89 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c @@ -53,7 +53,7 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg, } ctrl = readl(est_addr + EST_CONTROL); - if (priv->plat->has_xgmac) { + if (priv->plat->core_type == DWMAC_CORE_XGMAC) { ctrl &= ~EST_XGMAC_PTOV; ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_XGMAC_PTOV_MUL) << EST_XGMAC_PTOV_SHIFT; @@ -148,7 +148,7 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev, } if (status & EST_BTRE) { - if (priv->plat->has_xgmac) { + if (priv->plat->core_type == DWMAC_CORE_XGMAC) { btrl = FIELD_GET(EST_XGMAC_BTRL, status); btrl_max = FIELD_MAX(EST_XGMAC_BTRL); } else { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 39fa1ec92f82..b155e71aac51 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -303,9 +303,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev, { struct stmmac_priv *priv = netdev_priv(dev); - if (priv->plat->has_gmac || priv->plat->has_gmac4) + if (priv->plat->core_type == DWMAC_CORE_GMAC || + priv->plat->core_type == DWMAC_CORE_GMAC4) strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver)); - else if (priv->plat->has_xgmac) + else if (priv->plat->core_type == DWMAC_CORE_XGMAC) strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver)); else strscpy(info->driver, MAC100_ETHTOOL_NAME, @@ -322,47 +323,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, { struct stmmac_priv *priv = netdev_priv(dev); - if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) && - (priv->hw->pcs & STMMAC_PCS_RGMII || - priv->hw->pcs & STMMAC_PCS_SGMII)) { - u32 supported, advertising, lp_advertising; - - if (!priv->xstats.pcs_link) { - cmd->base.speed = SPEED_UNKNOWN; - cmd->base.duplex = DUPLEX_UNKNOWN; - return 0; - } - cmd->base.duplex = priv->xstats.pcs_duplex; - - cmd->base.speed = priv->xstats.pcs_speed; - - /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ - - ethtool_convert_link_mode_to_legacy_u32( - &supported, cmd->link_modes.supported); - ethtool_convert_link_mode_to_legacy_u32( - &advertising, cmd->link_modes.advertising); - ethtool_convert_link_mode_to_legacy_u32( - &lp_advertising, cmd->link_modes.lp_advertising); - - /* Reg49[3] always set because ANE is always supported */ - cmd->base.autoneg = 
ADVERTISED_Autoneg; - supported |= SUPPORTED_Autoneg; - advertising |= ADVERTISED_Autoneg; - lp_advertising |= ADVERTISED_Autoneg; - - cmd->base.port = PORT_OTHER; - - ethtool_convert_legacy_u32_to_link_mode( - cmd->link_modes.supported, supported); - ethtool_convert_legacy_u32_to_link_mode( - cmd->link_modes.advertising, advertising); - ethtool_convert_legacy_u32_to_link_mode( - cmd->link_modes.lp_advertising, lp_advertising); - - return 0; - } - return phylink_ethtool_ksettings_get(priv->phylink, cmd); } @@ -372,20 +332,6 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, { struct stmmac_priv *priv = netdev_priv(dev); - if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) && - (priv->hw->pcs & STMMAC_PCS_RGMII || - priv->hw->pcs & STMMAC_PCS_SGMII)) { - /* Only support ANE */ - if (cmd->base.autoneg != AUTONEG_ENABLE) - return -EINVAL; - - mutex_lock(&priv->lock); - stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0); - mutex_unlock(&priv->lock); - - return 0; - } - return phylink_ethtool_ksettings_set(priv->phylink, cmd); } @@ -406,9 +352,9 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - if (priv->plat->has_xgmac) + if (priv->plat->core_type == DWMAC_CORE_XGMAC) return XGMAC_REGSIZE * 4; - else if (priv->plat->has_gmac4) + else if (priv->plat->core_type == DWMAC_CORE_GMAC4) return GMAC4_REG_SPACE_SIZE; return REG_SPACE_SIZE; } @@ -423,12 +369,12 @@ static void stmmac_ethtool_gregs(struct net_device *dev, stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); /* Copy DMA registers to where ethtool expects them */ - if (priv->plat->has_gmac4) { + if (priv->plat->core_type == DWMAC_CORE_GMAC4) { /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */ memcpy(®_space[ETHTOOL_DMA_OFFSET], ®_space[GMAC4_DMA_CHAN_BASE_ADDR / 4], NUM_DWMAC4_DMA_REGS * 4); - } else if (!priv->plat->has_xgmac) { + } else if (priv->plat->core_type != DWMAC_CORE_XGMAC) { memcpy(®_space[ETHTOOL_DMA_OFFSET], ®_space[DMA_BUS_MODE / 4], NUM_DWMAC1000_DMA_REGS * 4); @@ -479,11 +425,7 @@ stmmac_get_pauseparam(struct net_device *netdev, { struct stmmac_priv *priv = netdev_priv(netdev); - if (priv->hw->pcs) { - pause->autoneg = 1; - } else { - phylink_ethtool_get_pauseparam(priv->phylink, pause); - } + phylink_ethtool_get_pauseparam(priv->phylink, pause); } static int @@ -492,12 +434,7 @@ stmmac_set_pauseparam(struct net_device *netdev, { struct stmmac_priv *priv = netdev_priv(netdev); - if (priv->hw->pcs) { - pause->autoneg = 1; - return 0; - } else { - return phylink_ethtool_set_pauseparam(priv->phylink, pause); - } + return phylink_ethtool_set_pauseparam(priv->phylink, pause); } static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q) @@ -787,41 +724,14 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct stmmac_priv *priv = netdev_priv(dev); - if (!priv->plat->pmt) - return phylink_ethtool_get_wol(priv->phylink, wol); - - mutex_lock(&priv->lock); - if (device_can_wakeup(priv->device)) { - wol->supported = WAKE_MAGIC | WAKE_UCAST; - if (priv->hw_cap_support && !priv->dma_cap.pmt_magic_frame) - wol->supported &= ~WAKE_MAGIC; - wol->wolopts = priv->wolopts; - } - mutex_unlock(&priv->lock); + return phylink_ethtool_get_wol(priv->phylink, wol); } static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct stmmac_priv *priv = netdev_priv(dev); - if (!device_can_wakeup(priv->device)) - return -EOPNOTSUPP; - - if (!priv->plat->pmt) { - int ret = 
phylink_ethtool_set_wol(priv->phylink, wol); - - if (!ret) - device_set_wakeup_enable(priv->device, !!wol->wolopts); - return ret; - } - - device_set_wakeup_enable(priv->device, !!wol->wolopts); - - mutex_lock(&priv->lock); - priv->wolopts = wol->wolopts; - mutex_unlock(&priv->lock); - - return 0; + return phylink_ethtool_set_wol(priv->phylink, wol); } static int stmmac_ethtool_op_get_eee(struct net_device *dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c index 75b470ee621a..c54c70224351 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c @@ -70,8 +70,10 @@ static void stmmac_fpe_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enabl struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg); const struct stmmac_fpe_reg *reg = cfg->reg; void __iomem *ioaddr = priv->ioaddr; + unsigned long flags; u32 value; + spin_lock_irqsave(&priv->hw->irq_ctrl_lock, flags); value = readl(ioaddr + reg->int_en_reg); if (pmac_enable) { @@ -86,6 +88,7 @@ static void stmmac_fpe_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enabl } writel(value, ioaddr + reg->int_en_reg); + spin_unlock_irqrestore(&priv->hw->irq_ctrl_lock, flags); } static void stmmac_fpe_send_mpacket(struct ethtool_mmsv *mmsv, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c new file mode 100644 index 000000000000..5c5dd502f79a --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PCI bus helpers for STMMAC driver + * Copyright (C) 2025 Yao Zi <ziyao@disroot.org> + */ + +#include <linux/device.h> +#include <linux/pci.h> + +#include "stmmac_libpci.h" + +int stmmac_pci_plat_suspend(struct device *dev, void *bsp_priv) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + ret = pci_save_state(pdev); + if (ret) + return ret; + + pci_disable_device(pdev); + pci_wake_from_d3(pdev, true); + + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_pci_plat_suspend); + +int stmmac_pci_plat_resume(struct device *dev, void *bsp_priv) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + + pci_restore_state(pdev); + pci_set_power_state(pdev, PCI_D0); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_pci_plat_resume); + +MODULE_DESCRIPTION("STMMAC PCI helper library"); +MODULE_AUTHOR("Yao Zi <ziyao@disroot.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h new file mode 100644 index 000000000000..71553184f982 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2025 Yao Zi <ziyao@disroot.org> + */ + +#ifndef __STMMAC_LIBPCI_H__ +#define __STMMAC_LIBPCI_H__ + +int stmmac_pci_plat_suspend(struct device *dev, void *bsp_priv); +int stmmac_pci_plat_resume(struct device *dev, void *bsp_priv); + +#endif /* __STMMAC_LIBPCI_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7b90ecd3a55e..da206b24aaed 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -40,12 +40,14 @@ #include <linux/phylink.h> #include <linux/udp.h> #include 
<linux/bpf_trace.h> +#include <net/devlink.h> #include <net/page_pool/helpers.h> #include <net/pkt_cls.h> #include <net/xdp_sock_drv.h> #include "stmmac_ptp.h" #include "stmmac_fpe.h" #include "stmmac.h" +#include "stmmac_pcs.h" #include "stmmac_xdp.h" #include <linux/reset.h> #include <linux/of_mdio.h> @@ -57,8 +59,7 @@ * with fine resolution and binary rollover. This avoid non-monotonic behavior * (clock jumps) when changing timestamping settings at runtime. */ -#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ - PTP_TCR_TSCTRLSSR) +#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR) #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) @@ -147,6 +148,15 @@ static void stmmac_exit_fs(struct net_device *dev); #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) +struct stmmac_devlink_priv { + struct stmmac_priv *stmmac_priv; +}; + +enum stmmac_dl_param_id { + STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + STMMAC_DEVLINK_PARAM_ID_TS_COARSE, +}; + /** * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock * @bsp_priv: BSP private data structure (unused) @@ -180,6 +190,44 @@ int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate); /** + * stmmac_axi_blen_to_mask() - convert a burst length array to reg value + * @regval: pointer to a u32 for the resulting register value + * @blen: pointer to an array of u32 containing the burst length values in bytes + * @len: the number of entries in the @blen array + */ +void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len) +{ + size_t i; + u32 val; + + for (val = i = 0; i < len; i++) { + u32 burst = blen[i]; + + /* Burst values of zero must be skipped. */ + if (!burst) + continue; + + /* The valid range for the burst length is 4 to 256 inclusive, + * and it must be a power of two. + */ + if (burst < 4 || burst > 256 || !is_power_of_2(burst)) { + pr_err("stmmac: invalid burst length %u at index %zu\n", + burst, i); + continue; + } + + /* Since burst is a power of two, and the register field starts + * with burst = 4, shift right by two bits so bit 0 of the field + * corresponds with the minimum value. + */ + val |= burst >> 2; + } + + *regval = FIELD_PREP(DMA_AXI_BLEN_MASK, val); +} +EXPORT_SYMBOL_GPL(stmmac_axi_blen_to_mask); + +/** * stmmac_verify_args - verify the driver parameters. * Description: it checks the driver parameters and set a default in case of * errors. @@ -445,7 +493,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, if (!priv->hwts_rx_en) return; /* For GMAC4, the valid timestamp is from CTX next desc. 
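 * (dwmac_is_xmac() replaces the old has_gmac4 || has_xgmac test here
 * and elsewhere in this file, i.e. it matches both the GMAC4 and
 * XGMAC core types.)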
*/ - if (priv->plat->has_gmac4 || priv->plat->has_xgmac) + if (dwmac_is_xmac(priv->plat->core_type)) desc = np; /* Check if timestamp is available */ @@ -463,6 +511,33 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, } } +static void stmmac_update_subsecond_increment(struct stmmac_priv *priv) +{ + bool xmac = dwmac_is_xmac(priv->plat->core_type); + u32 sec_inc = 0; + u64 temp = 0; + + stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); + + /* program Sub Second Increment reg */ + stmmac_config_sub_second_increment(priv, priv->ptpaddr, + priv->plat->clk_ptp_rate, + xmac, &sec_inc); + temp = div_u64(1000000000ULL, sec_inc); + + /* Store sub second increment for later use */ + priv->sub_second_inc = sec_inc; + + /* calculate default added value: + * formula is : + * addend = (2^32)/freq_div_ratio; + * where, freq_div_ratio = 1e9ns/sec_inc + */ + temp = (u64)(temp << 32); + priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); +} + /** * stmmac_hwtstamp_set - control hardware timestamping. * @dev: device pointer. @@ -647,6 +722,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON; priv->systime_flags = STMMAC_HWTS_ACTIVE; + if (!priv->tsfupdt_coarse) + priv->systime_flags |= PTP_TCR_TSCFUPDT; if (priv->hwts_tx_en || priv->hwts_rx_en) { priv->systime_flags |= tstamp_all | ptp_v2 | @@ -696,10 +773,7 @@ static int stmmac_hwtstamp_get(struct net_device *dev, static int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) { - bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; struct timespec64 now; - u32 sec_inc = 0; - u64 temp = 0; if (!priv->plat->clk_ptp_rate) { netdev_err(priv->dev, "Invalid PTP clock rate"); @@ -709,23 +783,7 @@ static int stmmac_init_tstamp_counter(struct stmmac_priv *priv, stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); priv->systime_flags = systime_flags; - /* program Sub Second Increment reg */ - stmmac_config_sub_second_increment(priv, priv->ptpaddr, - priv->plat->clk_ptp_rate, - xmac, &sec_inc); - temp = div_u64(1000000000ULL, sec_inc); - - /* Store sub second increment for later use */ - priv->sub_second_inc = sec_inc; - - /* calculate default added value: - * formula is : - * addend = (2^32)/freq_div_ratio; - * where, freq_div_ratio = 1e9ns/sec_inc - */ - temp = (u64)(temp << 32); - priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); - stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); + stmmac_update_subsecond_increment(priv); /* initialize system time */ ktime_get_real_ts64(&now); @@ -745,7 +803,7 @@ static int stmmac_init_tstamp_counter(struct stmmac_priv *priv, */ static int stmmac_init_timestamping(struct stmmac_priv *priv) { - bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + bool xmac = dwmac_is_xmac(priv->plat->core_type); int ret; if (priv->plat->ptp_clk_freq_config) @@ -756,7 +814,8 @@ static int stmmac_init_timestamping(struct stmmac_priv *priv) return -EOPNOTSUPP; } - ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE); + ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE | + PTP_TCR_TSCFUPDT); if (ret) { netdev_warn(priv->dev, "PTP init failed\n"); return ret; @@ -850,6 +909,13 @@ static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, return pcs; } + /* The PCS control register is only relevant for SGMII, TBI and RTBI + * modes. 
We no longer support TBI or RTBI, so only configure this + * register when operating in SGMII mode with the integrated PCS. + */ + if (priv->hw->pcs & STMMAC_PCS_SGMII && priv->integrated_pcs) + return &priv->integrated_pcs->pcs; + return NULL; } @@ -859,6 +925,18 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, /* Nothing to do, xpcs_config() handles everything */ } +static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + if (priv->plat->mac_finish) + priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface); + + return 0; +} + static void stmmac_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { @@ -1053,14 +1131,16 @@ static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, return 0; } -static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode, - phy_interface_t interface) +static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts, + const u8 *sopass) { - struct net_device *ndev = to_net_dev(config->dev); - struct stmmac_priv *priv = netdev_priv(ndev); + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); - if (priv->plat->mac_finish) - priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface); + device_set_wakeup_enable(priv->device, !!wolopts); + + mutex_lock(&priv->lock); + priv->wolopts = wolopts; + mutex_unlock(&priv->lock); return 0; } @@ -1069,11 +1149,12 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = { .mac_get_caps = stmmac_mac_get_caps, .mac_select_pcs = stmmac_mac_select_pcs, .mac_config = stmmac_mac_config, + .mac_finish = stmmac_mac_finish, .mac_link_down = stmmac_mac_link_down, .mac_link_up = stmmac_mac_link_up, .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi, .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi, - .mac_finish = stmmac_mac_finish, + .mac_wol_set = stmmac_mac_wol_set, }; /** @@ -1086,17 +1167,25 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = { static void stmmac_check_pcs_mode(struct stmmac_priv *priv) { int interface = priv->plat->phy_interface; + int speed = priv->plat->mac_port_sel_speed; + + if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) { + netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); + priv->hw->pcs = STMMAC_PCS_SGMII; + + switch (speed) { + case SPEED_10: + case SPEED_100: + case SPEED_1000: + priv->hw->reverse_sgmii_enable = true; + break; - if (priv->dma_cap.pcs) { - if ((interface == PHY_INTERFACE_MODE_RGMII) || - (interface == PHY_INTERFACE_MODE_RGMII_ID) || - (interface == PHY_INTERFACE_MODE_RGMII_RXID) || - (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { - netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); - priv->hw->pcs = STMMAC_PCS_RGMII; - } else if (interface == PHY_INTERFACE_MODE_SGMII) { - netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); - priv->hw->pcs = STMMAC_PCS_SGMII; + default: + dev_warn(priv->device, "invalid port speed\n"); + fallthrough; + case 0: + priv->hw->reverse_sgmii_enable = false; + break; } } } @@ -1174,18 +1263,10 @@ static int stmmac_init_phy(struct net_device *dev) phylink_ethtool_set_eee(priv->phylink, &eee); } - if (!priv->plat->pmt) { - struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; - - phylink_ethtool_get_wol(priv->phylink, &wol); - device_set_wakeup_capable(priv->device, !!wol.supported); - 
device_set_wakeup_enable(priv->device, !!wol.wolopts); - } - return 0; } -static int stmmac_phy_setup(struct stmmac_priv *priv) +static int stmmac_phylink_setup(struct stmmac_priv *priv) { struct stmmac_mdio_bus_data *mdio_bus_data; struct phylink_config *config; @@ -1202,7 +1283,11 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) /* Stmmac always requires an RX clock for hardware initialization */ config->mac_requires_rxc = true; - if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) + /* Disable EEE RX clock stop to ensure VLAN register access works + * correctly. + */ + if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI) && + !(priv->dev->features & NETIF_F_VLAN_FEATURES)) config->eee_rx_clk_stop_enable = true; /* Set the default transmit clock stop bit based on the platform glue */ @@ -1250,6 +1335,16 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) config->eee_enabled_default = true; } + config->wol_phy_speed_ctrl = true; + if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) { + config->wol_phy_legacy = true; + } else { + if (priv->dma_cap.pmt_remote_wake_up) + config->wol_mac_support |= WAKE_UCAST; + if (priv->dma_cap.pmt_magic_frame) + config->wol_mac_support |= WAKE_MAGIC; + } + fwnode = priv->plat->port_node; if (!fwnode) fwnode = dev_fwnode(priv->device); @@ -1339,9 +1434,9 @@ static unsigned int stmmac_rx_offset(struct stmmac_priv *priv) return NET_SKB_PAD; } -static int stmmac_set_bfsize(int mtu, int bufsize) +static int stmmac_set_bfsize(int mtu) { - int ret = bufsize; + int ret; if (mtu >= BUF_SIZE_8KiB) ret = BUF_SIZE_16KiB; @@ -1470,7 +1565,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, buf->page_offset = stmmac_rx_offset(priv); } - if (priv->sph && !buf->sec_page) { + if (priv->sph_active && !buf->sec_page) { buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); if (!buf->sec_page) return -ENOMEM; @@ -2056,7 +2151,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, pp_params.offset = stmmac_rx_offset(priv); pp_params.max_len = dma_conf->dma_buf_sz; - if (priv->sph) { + if (priv->sph_active) { pp_params.offset = 0; pp_params.max_len += stmmac_rx_offset(priv); } @@ -2397,7 +2492,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) txfifosz = priv->dma_cap.tx_fifo_size; /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */ - if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { + if (dwmac_is_xmac(priv->plat->core_type)) { rxfifosz /= rx_channels_count; txfifosz /= tx_channels_count; } @@ -3029,6 +3124,56 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv) } } +int stmmac_get_phy_intf_sel(phy_interface_t interface) +{ + int phy_intf_sel = -EINVAL; + + if (interface == PHY_INTERFACE_MODE_MII || + interface == PHY_INTERFACE_MODE_GMII) + phy_intf_sel = PHY_INTF_SEL_GMII_MII; + else if (phy_interface_mode_is_rgmii(interface)) + phy_intf_sel = PHY_INTF_SEL_RGMII; + else if (interface == PHY_INTERFACE_MODE_SGMII) + phy_intf_sel = PHY_INTF_SEL_SGMII; + else if (interface == PHY_INTERFACE_MODE_RMII) + phy_intf_sel = PHY_INTF_SEL_RMII; + else if (interface == PHY_INTERFACE_MODE_REVMII) + phy_intf_sel = PHY_INTF_SEL_REVMII; + + return phy_intf_sel; +} +EXPORT_SYMBOL_GPL(stmmac_get_phy_intf_sel); + +static int stmmac_prereset_configure(struct stmmac_priv *priv) +{ + struct plat_stmmacenet_data *plat_dat = priv->plat; + phy_interface_t interface; + int phy_intf_sel, ret; + + if (!plat_dat->set_phy_intf_sel) + return 0; + + interface = plat_dat->phy_interface; + phy_intf_sel 
= stmmac_get_phy_intf_sel(interface); + if (phy_intf_sel < 0) { + netdev_err(priv->dev, + "failed to get phy_intf_sel for %s: %pe\n", + phy_modes(interface), ERR_PTR(phy_intf_sel)); + return phy_intf_sel; + } + + ret = plat_dat->set_phy_intf_sel(plat_dat->bsp_priv, phy_intf_sel); + if (ret == -EINVAL) + netdev_err(priv->dev, "platform does not support %s\n", + phy_modes(interface)); + else if (ret < 0) + netdev_err(priv->dev, + "platform failed to set interface %s: %pe\n", + phy_modes(interface), ERR_PTR(ret)); + + return ret; +} + /** * stmmac_init_dma_engine - DMA init. * @priv: driver private structure @@ -3055,7 +3200,11 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) priv->plat->dma_cfg->atds = 1; - ret = stmmac_reset(priv, priv->ioaddr); + ret = stmmac_prereset_configure(priv); + if (ret) + return ret; + + ret = stmmac_reset(priv); if (ret) { netdev_err(priv->dev, "Failed to reset the dma\n"); return ret; @@ -3443,19 +3592,6 @@ static int stmmac_hw_setup(struct net_device *dev) stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); phylink_rx_clk_stop_unblock(priv->phylink); - /* PS and related bits will be programmed according to the speed */ - if (priv->hw->pcs) { - int speed = priv->plat->mac_port_sel_speed; - - if ((speed == SPEED_10) || (speed == SPEED_100) || - (speed == SPEED_1000)) { - priv->hw->ps = speed; - } else { - dev_warn(priv->device, "invalid port speed\n"); - priv->hw->ps = 0; - } - } - /* Initialize the MAC Core */ stmmac_core_init(priv, priv->hw, dev); @@ -3492,9 +3628,6 @@ static int stmmac_hw_setup(struct net_device *dev) } } - if (priv->hw->pcs) - stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0); - /* set TX and RX rings length */ stmmac_set_rings_length(priv); @@ -3512,7 +3645,7 @@ static int stmmac_hw_setup(struct net_device *dev) } /* Enable Split Header */ - sph_en = (priv->hw->rx_csum > 0) && priv->sph; + sph_en = (priv->hw->rx_csum > 0) && priv->sph_active; for (chan = 0; chan < rx_cnt; chan++) stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); @@ -3867,12 +4000,13 @@ stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) return ERR_PTR(-ENOMEM); } + /* Returns 0 or BUF_SIZE_16KiB if mtu > 8KiB and dwmac4 or ring mode */ bfsize = stmmac_set_16kib_bfsize(priv, mtu); if (bfsize < 0) bfsize = 0; if (bfsize < BUF_SIZE_16KiB) - bfsize = stmmac_set_bfsize(mtu, 0); + bfsize = stmmac_set_bfsize(mtu); dma_conf->dma_buf_sz = bfsize; /* Chose the tx/rx size from the already defined one in the @@ -3963,8 +4097,6 @@ static int __stmmac_open(struct net_device *dev, stmmac_init_coalesce(priv); phylink_start(priv->phylink); - /* We may have called phylink_speed_down before */ - phylink_speed_up(priv->phylink); ret = stmmac_request_irq(dev); if (ret) @@ -4015,6 +4147,9 @@ static int stmmac_open(struct net_device *dev) kfree(dma_conf); + /* We may have called phylink_speed_down before */ + phylink_speed_up(priv->phylink); + return ret; err_disconnect_phy: @@ -4032,13 +4167,6 @@ static void __stmmac_release(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); u32 chan; - /* If the PHY or MAC has WoL enabled, then the PHY will not be - * suspended when phylink_stop() is called below. Set the PHY - * to its slowest speed to save power. 
- */ - if (device_may_wakeup(priv->device)) - phylink_speed_down(priv->phylink, false); - /* Stop and disconnect the PHY */ phylink_stop(priv->phylink); @@ -4078,6 +4206,13 @@ static int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + /* If the PHY or MAC has WoL enabled, then the PHY will not be + * suspended when phylink_stop() is called below. Set the PHY + * to its slowest speed to save power. + */ + if (device_may_wakeup(priv->device)) + phylink_speed_down(priv->phylink, false); + __stmmac_release(dev); phylink_disconnect_phy(priv->phylink); @@ -4486,18 +4621,18 @@ static bool stmmac_has_ip_ethertype(struct sk_buff *skb) */ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) { - unsigned int first_entry, tx_packets, enh_desc; + bool enh_desc, has_vlan, set_ic, is_jumbo = false; struct stmmac_priv *priv = netdev_priv(dev); unsigned int nopaged_len = skb_headlen(skb); - int i, csum_insertion = 0, is_jumbo = 0; u32 queue = skb_get_queue_mapping(skb); int nfrags = skb_shinfo(skb)->nr_frags; + unsigned int first_entry, tx_packets; int gso = skb_shinfo(skb)->gso_type; struct stmmac_txq_stats *txq_stats; struct dma_edesc *tbs_desc = NULL; struct dma_desc *desc, *first; struct stmmac_tx_queue *tx_q; - bool has_vlan, set_ic; + int i, csum_insertion = 0; int entry, first_tx; dma_addr_t des; u32 sdu_len; @@ -4513,7 +4648,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (skb_is_gso(skb) && priv->tso) { if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) return stmmac_tso_xmit(skb, dev); - if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) + if (priv->plat->core_type == DWMAC_CORE_GMAC4 && + (gso & SKB_GSO_UDP_L4)) return stmmac_tso_xmit(skb, dev); } @@ -4801,7 +4937,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) break; } - if (priv->sph && !buf->sec_page) { + if (priv->sph_active && !buf->sec_page) { buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); if (!buf->sec_page) break; @@ -4812,7 +4948,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; stmmac_set_desc_addr(priv, p, buf->addr); - if (priv->sph) + if (priv->sph_active) stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); else stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); @@ -4837,6 +4973,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->dirty_rx * sizeof(struct dma_desc)); stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); + /* Wake up Rx DMA from the suspend state if required */ + stmmac_enable_dma_reception(priv, priv->ioaddr, queue); } static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, @@ -4847,12 +4985,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, int coe = priv->hw->rx_csum; /* Not first descriptor, buffer is always zero */ - if (priv->sph && len) + if (priv->sph_active && len) return 0; /* First descriptor, get split header length */ stmmac_get_rx_header_len(priv, p, &hlen); - if (priv->sph && hlen) { + if (priv->sph_active && hlen) { priv->xstats.rx_split_hdr_pkt_n++; return hlen; } @@ -4875,7 +5013,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, unsigned int plen = 0; /* Not split header, buffer is not available */ - if (!priv->sph) + if (!priv->sph_active) return 0; /* Not last descriptor */ @@ -5258,10 +5396,10 @@ static int 
stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) len = 0; } +read_again: if (count >= limit) break; -read_again: buf1_len = 0; entry = next_entry; buf = &rx_q->buf_pool[entry]; @@ -5943,8 +6081,8 @@ static int stmmac_set_features(struct net_device *netdev, */ stmmac_rx_ipc(priv, priv->hw); - if (priv->sph_cap) { - bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; + if (priv->sph_capable) { + bool sph_en = (priv->hw->rx_csum > 0) && priv->sph_active; u32 chan; for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) @@ -5971,7 +6109,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) u32 queue; bool xmac; - xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + xmac = dwmac_is_xmac(priv->plat->core_type); queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; if (priv->irq_wake) @@ -5985,7 +6123,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) stmmac_fpe_irq_status(priv); /* To handle GMAC own interrupts */ - if ((priv->plat->has_gmac) || xmac) { + if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) { int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); if (unlikely(status)) { @@ -5999,15 +6137,6 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) for (queue = 0; queue < queues_count; queue++) stmmac_host_mtl_irq_status(priv, priv->hw, queue); - /* PCS link status */ - if (priv->hw->pcs && - !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { - if (priv->xstats.pcs_link) - netif_carrier_on(priv->dev); - else - netif_carrier_off(priv->dev); - } - stmmac_timestamp_interrupt(priv, priv); } } @@ -6355,7 +6484,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) (priv->dma_cap.mbps_1000) ? "Y" : "N"); seq_printf(seq, "\tHalf duplex: %s\n", (priv->dma_cap.half_duplex) ? "Y" : "N"); - if (priv->plat->has_xgmac) { + if (priv->plat->core_type == DWMAC_CORE_XGMAC) { seq_printf(seq, "\tNumber of Additional MAC address registers: %d\n", priv->dma_cap.multi_addr); @@ -6379,7 +6508,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) (priv->dma_cap.time_stamp) ? "Y" : "N"); seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", (priv->dma_cap.atime_stamp) ? "Y" : "N"); - if (priv->plat->has_xgmac) + if (priv->plat->core_type == DWMAC_CORE_XGMAC) seq_printf(seq, "\tTimestamp System Time Source: %s\n", dwxgmac_timestamp_source[priv->dma_cap.tssrc]); seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", @@ -6388,7 +6517,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) seq_printf(seq, "\tChecksum Offload in TX: %s\n", (priv->dma_cap.tx_coe) ? "Y" : "N"); if (priv->synopsys_id >= DWMAC_CORE_4_00 || - priv->plat->has_xgmac) { + priv->plat->core_type == DWMAC_CORE_XGMAC) { seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", (priv->dma_cap.rx_coe) ? "Y" : "N"); } else { @@ -6902,7 +7031,7 @@ int stmmac_xdp_open(struct net_device *dev) } /* Adjust Split header */ - sph_en = (priv->hw->rx_csum > 0) && priv->sph; + sph_en = (priv->hw->rx_csum > 0) && priv->sph_active; /* DMA RX Channel Configuration */ for (chan = 0; chan < rx_cnt; chan++) { @@ -7240,13 +7369,21 @@ static int stmmac_hw_init(struct stmmac_priv *priv) * has to be disable and this can be done by passing the * riwt_off field from the platform. 
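
The has_gmac/has_gmac4/has_xgmac booleans collapse into a single core_type enum across this series, and dwmac_is_xmac() is then presumably a trivial predicate covering the two cores that share the "xmac" register layout. A minimal sketch, with the enum spelling taken from the call sites in this diff (the MAC100 entry is an assumption):

	enum dwmac_core_type {
		DWMAC_CORE_MAC100,	/* assumed */
		DWMAC_CORE_GMAC,
		DWMAC_CORE_GMAC4,
		DWMAC_CORE_XGMAC,
	};

	static inline bool dwmac_is_xmac(enum dwmac_core_type core_type)
	{
		/* GMAC4 and XGMAC share the "xmac" register layout */
		return core_type == DWMAC_CORE_GMAC4 ||
		       core_type == DWMAC_CORE_XGMAC;
	}
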
*/ - if (((priv->synopsys_id >= DWMAC_CORE_3_50) || - (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { + if ((priv->synopsys_id >= DWMAC_CORE_3_50 || + priv->plat->core_type == DWMAC_CORE_XGMAC) && + !priv->plat->riwt_off) { priv->use_riwt = 1; dev_info(priv->device, "Enable RX Mitigation via HW Watchdog Timer\n"); } + /* Unimplemented PCS init (as indicated by stmmac_do_callback() + * perversely returning -EINVAL) is non-fatal. + */ + ret = stmmac_mac_pcs_init(priv); + if (ret != -EINVAL) + return ret; + return 0; } @@ -7355,7 +7492,7 @@ static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) return -ENODATA; /* For GMAC4, the valid timestamp is from CTX next desc. */ - if (priv->plat->has_gmac4 || priv->plat->has_xgmac) + if (dwmac_is_xmac(priv->plat->core_type)) desc_contains_ts = ndesc; /* Check if timestamp is available */ @@ -7373,19 +7510,133 @@ static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = { .xmo_rx_timestamp = stmmac_xdp_rx_timestamp, }; -/** - * stmmac_dvr_probe - * @device: device pointer - * @plat_dat: platform data pointer - * @res: stmmac resource pointer - * Description: this is the main probe function used to - * call the alloc_etherdev, allocate the priv structure. - * Return: - * returns 0 on success, otherwise errno. - */ -int stmmac_dvr_probe(struct device *device, - struct plat_stmmacenet_data *plat_dat, - struct stmmac_resources *res) +static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + struct stmmac_devlink_priv *dl_priv = devlink_priv(dl); + struct stmmac_priv *priv = dl_priv->stmmac_priv; + + priv->tsfupdt_coarse = ctx->val.vbool; + + if (priv->tsfupdt_coarse) + priv->systime_flags &= ~PTP_TCR_TSCFUPDT; + else + priv->systime_flags |= PTP_TCR_TSCFUPDT; + + /* In Coarse mode, we can use a smaller subsecond increment, let's + * reconfigure the systime, subsecond increment and addend. 
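
Assuming the standard iproute2 devlink tool, the runtime-only knob added below would be flipped with "devlink dev param set pci/0000:02:00.0 name phc_coarse_adj value true cmode runtime" and read back with "devlink dev param show pci/0000:02:00.0 name phc_coarse_adj" (device name illustrative); BIT(DEVLINK_PARAM_CMODE_RUNTIME) in the parameter declaration is what restricts it to the runtime cmode.
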
+ */ + stmmac_update_subsecond_increment(priv); + + return 0; +} + +static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + struct stmmac_devlink_priv *dl_priv = devlink_priv(dl); + struct stmmac_priv *priv = dl_priv->stmmac_priv; + + ctx->val.vbool = priv->tsfupdt_coarse; + + return 0; +} + +static const struct devlink_param stmmac_devlink_params[] = { + DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "phc_coarse_adj", + DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + stmmac_dl_ts_coarse_get, + stmmac_dl_ts_coarse_set, NULL), +}; + +/* None of the generic devlink parameters are implemented */ +static const struct devlink_ops stmmac_devlink_ops = {}; + +static int stmmac_register_devlink(struct stmmac_priv *priv) +{ + struct stmmac_devlink_priv *dl_priv; + int ret; + + /* For now, what is exposed over devlink is only relevant when + * timestamping is available and we have a valid ptp clock rate + */ + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) || + !priv->plat->clk_ptp_rate) + return 0; + + priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv), + priv->device); + if (!priv->devlink) + return -ENOMEM; + + dl_priv = devlink_priv(priv->devlink); + dl_priv->stmmac_priv = priv; + + ret = devlink_params_register(priv->devlink, stmmac_devlink_params, + ARRAY_SIZE(stmmac_devlink_params)); + if (ret) + goto dl_free; + + devlink_register(priv->devlink); + return 0; + +dl_free: + devlink_free(priv->devlink); + + return ret; +} + +static void stmmac_unregister_devlink(struct stmmac_priv *priv) +{ + if (!priv->devlink) + return; + + devlink_unregister(priv->devlink); + devlink_params_unregister(priv->devlink, stmmac_devlink_params, + ARRAY_SIZE(stmmac_devlink_params)); + devlink_free(priv->devlink); +} + +struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev) +{ + struct plat_stmmacenet_data *plat_dat; + int i; + + plat_dat = devm_kzalloc(dev, sizeof(*plat_dat), GFP_KERNEL); + if (!plat_dat) + return NULL; + + /* Set the defaults: + * - phy autodetection + * - determine GMII_Address CR field from CSR clock + * - allow MTU up to JUMBO_LEN + * - hash table size + * - one unicast filter entry + */ + plat_dat->phy_addr = -1; + plat_dat->clk_csr = -1; + plat_dat->maxmtu = JUMBO_LEN; + plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; + plat_dat->unicast_filter_entries = 1; + + /* Set the mtl defaults */ + plat_dat->tx_queues_to_use = 1; + plat_dat->rx_queues_to_use = 1; + + /* Setup the default RX queue channel map */ + for (i = 0; i < ARRAY_SIZE(plat_dat->rx_queues_cfg); i++) + plat_dat->rx_queues_cfg[i].chan = i; + + return plat_dat; +} +EXPORT_SYMBOL_GPL(stmmac_plat_dat_alloc); + +static int __stmmac_dvr_probe(struct device *device, + struct plat_stmmacenet_data *plat_dat, + struct stmmac_resources *res) { struct net_device *ndev = NULL; struct stmmac_priv *priv; @@ -7511,7 +7762,7 @@ int stmmac_dvr_probe(struct device *device, if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; - if (priv->plat->has_gmac4) + if (priv->plat->core_type == DWMAC_CORE_GMAC4) ndev->hw_features |= NETIF_F_GSO_UDP_L4; priv->tso = true; dev_info(priv->device, "TSO feature enabled\n"); @@ -7520,8 +7771,8 @@ int stmmac_dvr_probe(struct device *device, if (priv->dma_cap.sphen && !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) { ndev->hw_features |= NETIF_F_GRO; - priv->sph_cap = true; - priv->sph = 
priv->sph_cap; + priv->sph_capable = true; + priv->sph_active = priv->sph_capable; dev_info(priv->device, "SPH feature enabled\n"); } @@ -7564,7 +7815,7 @@ int stmmac_dvr_probe(struct device *device, #ifdef STMMAC_VLAN_TAG_USED /* Both mac100 and gmac support receive VLAN tag detection */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; - if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { + if (dwmac_is_xmac(priv->plat->core_type)) { ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; priv->hw->hw_vlan_en = true; } @@ -7592,22 +7843,23 @@ int stmmac_dvr_probe(struct device *device, /* MTU range: 46 - hw-specific max */ ndev->min_mtu = ETH_ZLEN - ETH_HLEN; - if (priv->plat->has_xgmac) + + if (priv->plat->core_type == DWMAC_CORE_XGMAC) ndev->max_mtu = XGMAC_JUMBO_LEN; - else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) + else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00) ndev->max_mtu = JUMBO_LEN; else ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); - /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu - * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. + + /* Warn if the platform's maxmtu is smaller than the minimum MTU, + * otherwise clamp the maximum MTU above to the platform's maxmtu. */ - if ((priv->plat->maxmtu < ndev->max_mtu) && - (priv->plat->maxmtu >= ndev->min_mtu)) - ndev->max_mtu = priv->plat->maxmtu; - else if (priv->plat->maxmtu < ndev->min_mtu) + if (priv->plat->maxmtu < ndev->min_mtu) dev_warn(priv->device, "%s: warning: maxmtu having invalid value (%d)\n", __func__, priv->plat->maxmtu); + else if (priv->plat->maxmtu < ndev->max_mtu) + ndev->max_mtu = priv->plat->maxmtu; ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; @@ -7637,12 +7889,16 @@ int stmmac_dvr_probe(struct device *device, if (ret) goto error_pcs_setup; - ret = stmmac_phy_setup(priv); + ret = stmmac_phylink_setup(priv); if (ret) { netdev_err(ndev, "failed to setup phy (%d)\n", ret); goto error_phy_setup; } + ret = stmmac_register_devlink(priv); + if (ret) + goto error_devlink_setup; + ret = register_netdev(ndev); if (ret) { dev_err(priv->device, "%s: ERROR %i registering the device\n", @@ -7665,6 +7921,8 @@ int stmmac_dvr_probe(struct device *device, return ret; error_netdev_register: + stmmac_unregister_devlink(priv); +error_devlink_setup: phylink_destroy(priv->phylink); error_phy_setup: stmmac_pcs_clean(ndev); @@ -7679,6 +7937,34 @@ error_wq_init: return ret; } + +/** + * stmmac_dvr_probe + * @dev: device pointer + * @plat_dat: platform data pointer + * @res: stmmac resource pointer + * Description: this is the main probe function used to + * call the alloc_etherdev, allocate the priv structure. + * Return: + * returns 0 on success, otherwise errno. 
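
A worked example of the rewritten maxmtu clamp above, assuming ndev->min_mtu == 46 and a computed ndev->max_mtu == 9000 (JUMBO_LEN): plat->maxmtu == 1500 clamps ndev->max_mtu down to 1500, while plat->maxmtu == 20 only triggers the warning and leaves ndev->max_mtu at 9000.
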
+ */ +int stmmac_dvr_probe(struct device *dev, struct plat_stmmacenet_data *plat_dat, + struct stmmac_resources *res) +{ + int ret; + + if (plat_dat->init) { + ret = plat_dat->init(dev, plat_dat->bsp_priv); + if (ret) + return ret; + } + + ret = __stmmac_dvr_probe(dev, plat_dat, res); + if (ret && plat_dat->exit) + plat_dat->exit(dev, plat_dat->bsp_priv); + + return ret; +} EXPORT_SYMBOL_GPL(stmmac_dvr_probe); /** @@ -7701,6 +7987,8 @@ void stmmac_dvr_remove(struct device *dev) #ifdef CONFIG_DEBUG_FS stmmac_exit_fs(ndev); #endif + stmmac_unregister_devlink(priv); + phylink_destroy(priv->phylink); if (priv->plat->stmmac_rst) reset_control_assert(priv->plat->stmmac_rst); @@ -7715,6 +8003,9 @@ void stmmac_dvr_remove(struct device *dev) pm_runtime_disable(dev); pm_runtime_put_noidle(dev); + + if (priv->plat->exit) + priv->plat->exit(dev, priv->plat->bsp_priv); } EXPORT_SYMBOL_GPL(stmmac_dvr_remove); @@ -7755,7 +8046,7 @@ int stmmac_suspend(struct device *dev) priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); /* Enable Power down mode by programming the PMT regs */ - if (stmmac_wol_enabled_mac(priv)) { + if (priv->wolopts) { stmmac_pmt(priv, priv->hw, priv->wolopts); priv->irq_wake = 1; } else { @@ -7766,10 +8057,7 @@ int stmmac_suspend(struct device *dev) mutex_unlock(&priv->lock); rtnl_lock(); - if (stmmac_wol_enabled_phy(priv)) - phylink_speed_down(priv->phylink, false); - - phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv)); + phylink_suspend(priv->phylink, !!priv->wolopts); rtnl_unlock(); if (stmmac_fpe_supported(priv)) @@ -7845,7 +8133,7 @@ int stmmac_resume(struct device *dev) * this bit because it can generate problems while resuming * from another devices (e.g. serial console). */ - if (stmmac_wol_enabled_mac(priv)) { + if (priv->wolopts) { mutex_lock(&priv->lock); stmmac_pmt(priv, priv->hw, 0); mutex_unlock(&priv->lock); @@ -7907,9 +8195,6 @@ int stmmac_resume(struct device *dev) * workqueue thread, which will race with initialisation. */ phylink_resume(priv->phylink); - if (stmmac_wol_enabled_phy(priv)) - phylink_speed_up(priv->phylink); - rtnl_unlock(); netif_device_attach(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index f408737f6fc7..1e82850f2a25 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -301,7 +301,7 @@ static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg) struct stmmac_priv *priv = netdev_priv(bus->priv); u32 cmd; - if (priv->plat->has_gmac4) + if (priv->plat->core_type == DWMAC_CORE_GMAC4) cmd = MII_GMAC4_READ; else cmd = 0; @@ -344,7 +344,7 @@ static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg, struct stmmac_priv *priv = netdev_priv(bus->priv); u32 cmd; - if (priv->plat->has_gmac4) + if (priv->plat->core_type == DWMAC_CORE_GMAC4) cmd = MII_GMAC4_WRITE; else cmd = MII_ADDR_GWRITE; @@ -417,7 +417,7 @@ int stmmac_mdio_reset(struct mii_bus *bus) * on MDC, so perform a dummy mdio read. To be updated for GMAC4 * if needed. 
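
With plat->init()/plat->exit() now driven from stmmac_dvr_probe() and stmmac_dvr_remove() and taking a struct device, a glue driver's callbacks reduce to something like the following sketch (foo_dwmac and its clock are illustrative, not a real glue driver):

	static int foo_dwmac_init(struct device *dev, void *bsp_priv)
	{
		struct foo_dwmac *dwmac = bsp_priv;

		/* power up whatever probe/resume need before the MAC is touched */
		return clk_prepare_enable(dwmac->refclk);
	}

	static void foo_dwmac_exit(struct device *dev, void *bsp_priv)
	{
		struct foo_dwmac *dwmac = bsp_priv;

		clk_disable_unprepare(dwmac->refclk);
	}

	/* ... in the glue driver's probe ... */
	plat_dat->bsp_priv = dwmac;
	plat_dat->init = foo_dwmac_init;
	plat_dat->exit = foo_dwmac_exit;
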
*/ - if (!priv->plat->has_gmac4) + if (priv->plat->core_type != DWMAC_CORE_GMAC4) writel(0, priv->ioaddr + mii_address); #endif return 0; @@ -528,7 +528,7 @@ static u32 stmmac_clk_csr_set(struct stmmac_priv *priv) value = 0; } - if (priv->plat->has_xgmac) { + if (priv->plat->core_type == DWMAC_CORE_XGMAC) { if (clk_rate > 400000000) value = 0x5; else if (clk_rate > 350000000) @@ -583,8 +583,9 @@ int stmmac_mdio_register(struct net_device *ndev) struct device_node *mdio_node = priv->plat->mdio_node; struct device *dev = ndev->dev.parent; struct fwnode_handle *fixed_node; + int max_addr = PHY_MAX_ADDR - 1; struct fwnode_handle *fwnode; - int addr, found, max_addr; + struct phy_device *phydev; if (!mdio_bus_data) return 0; @@ -600,7 +601,7 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->name = "stmmac"; - if (priv->plat->has_xgmac) { + if (priv->plat->core_type == DWMAC_CORE_XGMAC) { new_bus->read = &stmmac_xgmac2_mdio_read_c22; new_bus->write = &stmmac_xgmac2_mdio_write_c22; new_bus->read_c45 = &stmmac_xgmac2_mdio_read_c45; @@ -608,25 +609,20 @@ int stmmac_mdio_register(struct net_device *ndev) if (priv->synopsys_id < DWXGMAC_CORE_2_20) { /* Right now only C22 phys are supported */ - max_addr = MII_XGMAC_MAX_C22ADDR + 1; + max_addr = MII_XGMAC_MAX_C22ADDR; /* Check if DT specified an unsupported phy addr */ if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR) dev_err(dev, "Unsupported phy_addr (max=%d)\n", MII_XGMAC_MAX_C22ADDR); - } else { - /* XGMAC version 2.20 onwards support 32 phy addr */ - max_addr = PHY_MAX_ADDR; } } else { new_bus->read = &stmmac_mdio_read_c22; new_bus->write = &stmmac_mdio_write_c22; - if (priv->plat->has_gmac4) { + if (priv->plat->core_type == DWMAC_CORE_GMAC4) { new_bus->read_c45 = &stmmac_mdio_read_c45; new_bus->write_c45 = &stmmac_mdio_write_c45; } - - max_addr = PHY_MAX_ADDR; } if (mdio_bus_data->needs_reset) @@ -649,7 +645,7 @@ int stmmac_mdio_register(struct net_device *ndev) } /* Looks like we need a dummy read for XGMAC only and C45 PHYs */ - if (priv->plat->has_xgmac) + if (priv->plat->core_type == DWMAC_CORE_XGMAC) stmmac_xgmac2_mdio_read_c45(new_bus, 0, 0, 0); /* If fixed-link is set, skip PHY scanning */ @@ -668,41 +664,31 @@ int stmmac_mdio_register(struct net_device *ndev) if (priv->plat->phy_node || mdio_node) goto bus_register_done; - found = 0; - for (addr = 0; addr < max_addr; addr++) { - struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); - - if (!phydev) - continue; - - /* - * If an IRQ was provided to be assigned after - * the bus probe, do it here. - */ - if (!mdio_bus_data->irqs && - (mdio_bus_data->probed_phy_irq > 0)) { - new_bus->irq[addr] = mdio_bus_data->probed_phy_irq; - phydev->irq = mdio_bus_data->probed_phy_irq; - } - - /* - * If we're going to bind the MAC to this PHY bus, - * and no PHY number was provided to the MAC, - * use the one probed here. - */ - if (priv->plat->phy_addr == -1) - priv->plat->phy_addr = addr; - - phy_attached_info(phydev); - found = 1; - } - - if (!found && !mdio_node) { + phydev = phy_find_first(new_bus); + if (!phydev || phydev->mdio.addr > max_addr) { dev_warn(dev, "No PHY found\n"); err = -ENODEV; goto no_phy_found; } + /* + * If an IRQ was provided to be assigned after + * the bus probe, do it here. 
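
phy_find_first() is what makes the removed scan loop redundant: it returns the lowest-address PHY registered on the bus, or NULL. Roughly (the real helper lives in drivers/net/phy/phy_device.c):

	struct phy_device *phy_find_first(struct mii_bus *bus)
	{
		int addr;

		for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
			struct phy_device *phydev = mdiobus_get_phy(bus, addr);

			if (phydev)
				return phydev;
		}
		return NULL;
	}

The extra phydev->mdio.addr > max_addr test above then preserves the old XGMAC < 2.20 restriction to C22-reachable addresses.
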
+ */ + if (!mdio_bus_data->irqs && mdio_bus_data->probed_phy_irq > 0) { + new_bus->irq[phydev->mdio.addr] = mdio_bus_data->probed_phy_irq; + phydev->irq = mdio_bus_data->probed_phy_irq; + } + + /* + * If we're going to bind the MAC to this PHY bus, and no PHY number + * was provided to the MAC, use the one probed here. + */ + if (priv->plat->phy_addr == -1) + priv->plat->phy_addr = phydev->mdio.addr; + + phy_attached_info(phydev); + bus_register_done: priv->mii = new_bus; @@ -734,3 +720,17 @@ int stmmac_mdio_unregister(struct net_device *ndev) return 0; } + +void stmmac_mdio_lock(struct stmmac_priv *priv) +{ + if (priv->mii) + mutex_lock(&priv->mii->mdio_lock); +} +EXPORT_SYMBOL_GPL(stmmac_mdio_lock); + +void stmmac_mdio_unlock(struct stmmac_priv *priv) +{ + if (priv->mii) + mutex_unlock(&priv->mii->mdio_lock); +} +EXPORT_SYMBOL_GPL(stmmac_mdio_unlock); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 4e3aa611fda8..270ad066ced3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -14,6 +14,7 @@ #include <linux/dmi.h> #include "stmmac.h" +#include "stmmac_libpci.h" struct stmmac_pci_info { int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat); @@ -23,30 +24,10 @@ static void common_default_data(struct plat_stmmacenet_data *plat) { /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ plat->clk_csr = STMMAC_CSR_20_35M; - plat->has_gmac = 1; + plat->core_type = DWMAC_CORE_GMAC; plat->force_sf_dma_mode = 1; plat->mdio_bus_data->needs_reset = true; - - /* Set default value for multicast hash bins */ - plat->multicast_filter_bins = HASH_TABLE_SIZE; - - /* Set default value for unicast filter entries */ - plat->unicast_filter_entries = 1; - - /* Set the maxmtu to a default of JUMBO_LEN */ - plat->maxmtu = JUMBO_LEN; - - /* Set default number of RX and TX queues to use */ - plat->tx_queues_to_use = 1; - plat->rx_queues_to_use = 1; - - /* Disable Priority config by default */ - plat->tx_queues_cfg[0].use_prio = false; - plat->rx_queues_cfg[0].use_prio = false; - - /* Disable RX queues routing by default */ - plat->rx_queues_cfg[0].pkt_route = 0x0; } static int stmmac_default_data(struct pci_dev *pdev, @@ -76,27 +57,17 @@ static int snps_gmac5_default_data(struct pci_dev *pdev, int i; plat->clk_csr = STMMAC_CSR_250_300M; - plat->has_gmac4 = 1; + plat->core_type = DWMAC_CORE_GMAC4; plat->force_sf_dma_mode = 1; plat->flags |= STMMAC_FLAG_TSO_EN; plat->pmt = 1; - /* Set default value for multicast hash bins */ - plat->multicast_filter_bins = HASH_TABLE_SIZE; - - /* Set default value for unicast filter entries */ - plat->unicast_filter_entries = 1; - - /* Set the maxmtu to a default of JUMBO_LEN */ - plat->maxmtu = JUMBO_LEN; - /* Set default number of RX and TX queues to use */ plat->tx_queues_to_use = 4; plat->rx_queues_to_use = 4; plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR; for (i = 0; i < plat->tx_queues_to_use; i++) { - plat->tx_queues_cfg[i].use_prio = false; plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; plat->tx_queues_cfg[i].weight = 25; if (i > 0) @@ -104,15 +75,10 @@ static int snps_gmac5_default_data(struct pci_dev *pdev, } plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; - for (i = 0; i < plat->rx_queues_to_use; i++) { - plat->rx_queues_cfg[i].use_prio = false; + for (i = 0; i < plat->rx_queues_to_use; i++) plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB; - plat->rx_queues_cfg[i].pkt_route = 0x0; - plat->rx_queues_cfg[i].chan = i; - } 
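
The new stmmac_mdio_lock()/stmmac_mdio_unlock() helpers above take the mii_bus's own mdio_lock mutex, presumably so callers outside the MDIO layer (the PCS code elsewhere in this series is the likely user) can serialize a register sequence against in-flight bus transactions; both are no-ops when no bus was registered. Illustrative caller, assuming such a sequence exists:

	stmmac_mdio_lock(priv);
	/* registers shared with MDIO bus accesses are quiesced here */
	stmmac_mdio_unlock(priv);
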
plat->bus_id = 1; - plat->phy_addr = -1; plat->phy_interface = PHY_INTERFACE_MODE_GMII; plat->dma_cfg->pbl = 32; @@ -127,10 +93,8 @@ static int snps_gmac5_default_data(struct pci_dev *pdev, plat->axi->axi_rd_osr_lmt = 31; plat->axi->axi_fb = false; - plat->axi->axi_blen[0] = 4; - plat->axi->axi_blen[1] = 8; - plat->axi->axi_blen[2] = 16; - plat->axi->axi_blen[3] = 32; + plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 | + DMA_AXI_BLEN16 | DMA_AXI_BLEN32; return 0; } @@ -139,37 +103,6 @@ static const struct stmmac_pci_info snps_gmac5_pci_info = { .setup = snps_gmac5_default_data, }; -static int stmmac_pci_suspend(struct device *dev, void *bsp_priv) -{ - struct pci_dev *pdev = to_pci_dev(dev); - int ret; - - ret = pci_save_state(pdev); - if (ret) - return ret; - - pci_disable_device(pdev); - pci_wake_from_d3(pdev, true); - return 0; -} - -static int stmmac_pci_resume(struct device *dev, void *bsp_priv) -{ - struct pci_dev *pdev = to_pci_dev(dev); - int ret; - - pci_restore_state(pdev); - pci_set_power_state(pdev, PCI_D0); - - ret = pci_enable_device(pdev); - if (ret) - return ret; - - pci_set_master(pdev); - - return 0; -} - /** * stmmac_pci_probe * @@ -191,7 +124,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev, int ret; int i; - plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + plat = stmmac_plat_dat_alloc(&pdev->dev); if (!plat) return -ENOMEM; @@ -249,8 +182,8 @@ static int stmmac_pci_probe(struct pci_dev *pdev, plat->safety_feat_cfg->prtyen = 1; plat->safety_feat_cfg->tmouten = 1; - plat->suspend = stmmac_pci_suspend; - plat->resume = stmmac_pci_resume; + plat->suspend = stmmac_pci_plat_suspend; + plat->resume = stmmac_pci_plat_resume; return stmmac_dvr_probe(&pdev->dev, plat, &res); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c new file mode 100644 index 000000000000..e2f531c11986 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "stmmac.h" +#include "stmmac_pcs.h" + +static int dwmac_integrated_pcs_enable(struct phylink_pcs *pcs) +{ + struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs); + + stmmac_mac_irq_modify(spcs->priv, 0, spcs->int_mask); + + return 0; +} + +static void dwmac_integrated_pcs_disable(struct phylink_pcs *pcs) +{ + struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs); + + stmmac_mac_irq_modify(spcs->priv, spcs->int_mask, 0); +} + +static void dwmac_integrated_pcs_get_state(struct phylink_pcs *pcs, + unsigned int neg_mode, + struct phylink_link_state *state) +{ + state->link = false; +} + +static int dwmac_integrated_pcs_config(struct phylink_pcs *pcs, + unsigned int neg_mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct stmmac_pcs *spcs = phylink_pcs_to_stmmac_pcs(pcs); + + dwmac_ctrl_ane(spcs->base, 0, 1, spcs->priv->hw->reverse_sgmii_enable); + + return 0; +} + +static const struct phylink_pcs_ops dwmac_integrated_pcs_ops = { + .pcs_enable = dwmac_integrated_pcs_enable, + .pcs_disable = dwmac_integrated_pcs_disable, + .pcs_get_state = dwmac_integrated_pcs_get_state, + .pcs_config = dwmac_integrated_pcs_config, +}; + +int stmmac_integrated_pcs_init(struct stmmac_priv *priv, unsigned int offset, + u32 int_mask) +{ + struct stmmac_pcs *spcs; + + spcs = devm_kzalloc(priv->device, sizeof(*spcs), GFP_KERNEL); + if (!spcs) + return -ENOMEM; + + spcs->priv = priv; + spcs->base = priv->ioaddr + offset; + spcs->int_mask = 
int_mask; + spcs->pcs.ops = &dwmac_integrated_pcs_ops; + + __set_bit(PHY_INTERFACE_MODE_SGMII, spcs->pcs.supported_interfaces); + + priv->integrated_pcs = spcs; + + return 0; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h index 4a684c97dfae..cda93894168e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h @@ -9,6 +9,7 @@ #ifndef __STMMAC_PCS_H__ #define __STMMAC_PCS_H__ +#include <linux/phylink.h> #include <linux/slab.h> #include <linux/io.h> #include "common.h" @@ -46,6 +47,24 @@ #define GMAC_ANE_RFE_SHIFT 12 #define GMAC_ANE_ACK BIT(14) +struct stmmac_priv; + +struct stmmac_pcs { + struct stmmac_priv *priv; + void __iomem *base; + u32 int_mask; + struct phylink_pcs pcs; +}; + +static inline struct stmmac_pcs * +phylink_pcs_to_stmmac_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct stmmac_pcs, pcs); +} + +int stmmac_integrated_pcs_init(struct stmmac_priv *priv, unsigned int offset, + u32 int_mask); + /** * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR * @ioaddr: IO registers pointer @@ -82,13 +101,12 @@ static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg, * @reg: Base address of the AN Control Register. * @ane: to enable the auto-negotiation * @srgmi_ral: to manage MAC-2-MAC SGMII connections. - * @loopback: to cause the PHY to loopback tx data into rx path. * Description: this is the main function to configure the AN control register * and init the ANE, select loopback (usually for debugging purpose) and * configure SGMII RAL. */ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane, - bool srgmi_ral, bool loopback) + bool srgmi_ral) { u32 value = readl(ioaddr + GMAC_AN_CTRL(reg)); @@ -104,9 +122,6 @@ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane, if (srgmi_ral) value |= GMAC_AN_CTRL_SGMRAL; - if (loopback) - value |= GMAC_AN_CTRL_ELE; - writel(value, ioaddr + GMAC_AN_CTRL(reg)); } #endif /* __STMMAC_PCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 27bcaae07a7f..8979a50b5507 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -95,6 +95,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) { struct device_node *np; struct stmmac_axi *axi; + u32 axi_blen[AXI_BLEN]; np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0); if (!np) @@ -117,7 +118,8 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) axi->axi_wr_osr_lmt = 1; if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt)) axi->axi_rd_osr_lmt = 1; - of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN); + of_property_read_u32_array(np, "snps,blen", axi_blen, AXI_BLEN); + stmmac_axi_blen_to_mask(&axi->axi_blen_regval, axi_blen, AXI_BLEN); of_node_put(np); return axi; @@ -137,13 +139,6 @@ static int stmmac_mtl_setup(struct platform_device *pdev, u8 queue = 0; int ret = 0; - /* For backwards-compatibility with device trees that don't have any - * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back - * to one RX and TX queues each. - */ - plat->rx_queues_to_use = 1; - plat->tx_queues_to_use = 1; - /* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need * to always set this, otherwise Queue will be classified as AVB * (because MTL_QUEUE_AVB = 0). 
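
stmmac_axi_blen_to_mask() itself is not shown in these hunks; from the call site above and the DMA_AXI_BLEN* values the PCI path now sets directly, it presumably folds the DT "snps,blen" burst-length list into the register bitmask. A plausible sketch:

	static void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, int len)
	{
		int i;

		*regval = 0;
		for (i = 0; i < len; i++) {
			switch (blen[i]) {
			case 4:
				*regval |= DMA_AXI_BLEN4;
				break;
			case 8:
				*regval |= DMA_AXI_BLEN8;
				break;
			case 16:
				*regval |= DMA_AXI_BLEN16;
				break;
			case 32:
				*regval |= DMA_AXI_BLEN32;
				break;
			case 64:
				*regval |= DMA_AXI_BLEN64;
				break;
			case 128:
				*regval |= DMA_AXI_BLEN128;
				break;
			case 256:
				*regval |= DMA_AXI_BLEN256;
				break;
			}
		}
	}
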
@@ -162,9 +157,8 @@ static int stmmac_mtl_setup(struct platform_device *pdev, } /* Processing RX queues common config */ - if (of_property_read_u32(rx_node, "snps,rx-queues-to-use", - &plat->rx_queues_to_use)) - plat->rx_queues_to_use = 1; + of_property_read_u32(rx_node, "snps,rx-queues-to-use", + &plat->rx_queues_to_use); if (of_property_read_bool(rx_node, "snps,rx-sched-sp")) plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP; @@ -185,18 +179,13 @@ static int stmmac_mtl_setup(struct platform_device *pdev, else plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; - if (of_property_read_u32(q_node, "snps,map-to-dma-channel", - &plat->rx_queues_cfg[queue].chan)) - plat->rx_queues_cfg[queue].chan = queue; + of_property_read_u32(q_node, "snps,map-to-dma-channel", + &plat->rx_queues_cfg[queue].chan); /* TODO: Dynamic mapping to be included in the future */ - if (of_property_read_u32(q_node, "snps,priority", - &plat->rx_queues_cfg[queue].prio)) { - plat->rx_queues_cfg[queue].prio = 0; - plat->rx_queues_cfg[queue].use_prio = false; - } else { + if (!of_property_read_u32(q_node, "snps,priority", + &plat->rx_queues_cfg[queue].prio)) plat->rx_queues_cfg[queue].use_prio = true; - } /* RX queue specific packet type routing */ if (of_property_read_bool(q_node, "snps,route-avcp")) @@ -209,8 +198,6 @@ static int stmmac_mtl_setup(struct platform_device *pdev, plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ; else if (of_property_read_bool(q_node, "snps,route-multi-broad")) plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ; - else - plat->rx_queues_cfg[queue].pkt_route = 0x0; queue++; } @@ -221,9 +208,8 @@ static int stmmac_mtl_setup(struct platform_device *pdev, } /* Processing TX queues common config */ - if (of_property_read_u32(tx_node, "snps,tx-queues-to-use", - &plat->tx_queues_to_use)) - plat->tx_queues_to_use = 1; + of_property_read_u32(tx_node, "snps,tx-queues-to-use", + &plat->tx_queues_to_use); if (of_property_read_bool(tx_node, "snps,tx-sched-wrr")) plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR; @@ -268,13 +254,9 @@ static int stmmac_mtl_setup(struct platform_device *pdev, plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; } - if (of_property_read_u32(q_node, "snps,priority", - &plat->tx_queues_cfg[queue].prio)) { - plat->tx_queues_cfg[queue].prio = 0; - plat->tx_queues_cfg[queue].use_prio = false; - } else { + if (!of_property_read_u32(q_node, "snps,priority", + &plat->tx_queues_cfg[queue].prio)) plat->tx_queues_cfg[queue].use_prio = true; - } plat->tx_queues_cfg[queue].coe_unsupported = of_property_read_bool(q_node, "snps,coe-unsupported"); @@ -436,7 +418,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) void *ret; int rc; - plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + plat = stmmac_plat_dat_alloc(&pdev->dev); if (!plat) return ERR_PTR(-ENOMEM); @@ -480,13 +462,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) plat->bus_id = ++bus_id; } - /* Default to phy auto-detection */ - plat->phy_addr = -1; - - /* Default to get clk_csr from stmmac_clk_csr_set(), - * or get clk_csr from device tree. - */ - plat->clk_csr = -1; if (of_property_read_u32(np, "snps,clk-csr", &plat->clk_csr)) of_property_read_u32(np, "clk_csr", &plat->clk_csr); @@ -515,17 +490,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING; } - /* Set the maxmtu to a default of JUMBO_LEN in case the - * parameter is not present in the device tree. 
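
The dropped fallbacks in this file rely on two things: of_property_read_u32() leaves its output untouched on any failure (it returns -EINVAL when the property is absent), and stmmac_plat_dat_alloc() now preloads the defaults. The idiom, sketched:

	u32 chan = queue;	/* default preloaded by the caller */

	/* 'chan' is only written if the property actually exists */
	of_property_read_u32(q_node, "snps,map-to-dma-channel", &chan);
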
- */ - plat->maxmtu = JUMBO_LEN; - - /* Set default value for multicast hash bins */ - plat->multicast_filter_bins = HASH_TABLE_SIZE; - - /* Set default value for unicast filter entries */ - plat->unicast_filter_entries = 1; - /* * Currently only the properties needed on SPEAr600 * are provided. All other properties should be added @@ -552,12 +516,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) &pdev->dev, plat->unicast_filter_entries); plat->multicast_filter_bins = dwmac1000_validate_mcast_bins( &pdev->dev, plat->multicast_filter_bins); - plat->has_gmac = 1; + plat->core_type = DWMAC_CORE_GMAC; plat->pmt = 1; } if (of_device_is_compatible(np, "snps,dwmac-3.40a")) { - plat->has_gmac = 1; + plat->core_type = DWMAC_CORE_GMAC; plat->enh_desc = 1; plat->tx_coe = 1; plat->bugged_jumbo = 1; @@ -565,8 +529,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) } if (of_device_compatible_match(np, stmmac_gmac4_compats)) { - plat->has_gmac4 = 1; - plat->has_gmac = 0; + plat->core_type = DWMAC_CORE_GMAC4; plat->pmt = 1; if (of_property_read_bool(np, "snps,tso")) plat->flags |= STMMAC_FLAG_TSO_EN; @@ -580,7 +543,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) } if (of_device_is_compatible(np, "snps,dwxgmac")) { - plat->has_xgmac = 1; + plat->core_type = DWMAC_CORE_XGMAC; plat->pmt = 1; if (of_property_read_bool(np, "snps,tso")) plat->flags |= STMMAC_FLAG_TSO_EN; @@ -786,40 +749,40 @@ EXPORT_SYMBOL_GPL(stmmac_get_platform_resources); /** * stmmac_pltfr_init - * @pdev: pointer to the platform device + * @dev: pointer to the device structure * @plat: driver data platform structure * Description: Call the platform's init callback (if any) and propagate * the return value. */ -static int stmmac_pltfr_init(struct platform_device *pdev, +static int stmmac_pltfr_init(struct device *dev, struct plat_stmmacenet_data *plat) { int ret = 0; if (plat->init) - ret = plat->init(pdev, plat->bsp_priv); + ret = plat->init(dev, plat->bsp_priv); return ret; } /** * stmmac_pltfr_exit - * @pdev: pointer to the platform device + * @dev: pointer to the device structure * @plat: driver data platform structure * Description: Call the platform's exit callback (if any). 
*/ -static void stmmac_pltfr_exit(struct platform_device *pdev, +static void stmmac_pltfr_exit(struct device *dev, struct plat_stmmacenet_data *plat) { if (plat->exit) - plat->exit(pdev, plat->bsp_priv); + plat->exit(dev, plat->bsp_priv); } static int stmmac_plat_suspend(struct device *dev, void *bsp_priv) { struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev)); - stmmac_pltfr_exit(to_platform_device(dev), priv->plat); + stmmac_pltfr_exit(dev, priv->plat); return 0; } @@ -828,7 +791,7 @@ static int stmmac_plat_resume(struct device *dev, void *bsp_priv) { struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev)); - return stmmac_pltfr_init(to_platform_device(dev), priv->plat); + return stmmac_pltfr_init(dev, priv->plat); } /** @@ -843,24 +806,12 @@ int stmmac_pltfr_probe(struct platform_device *pdev, struct plat_stmmacenet_data *plat, struct stmmac_resources *res) { - int ret; - if (!plat->suspend && plat->exit) plat->suspend = stmmac_plat_suspend; if (!plat->resume && plat->init) plat->resume = stmmac_plat_resume; - ret = stmmac_pltfr_init(pdev, plat); - if (ret) - return ret; - - ret = stmmac_dvr_probe(&pdev->dev, plat, res); - if (ret) { - stmmac_pltfr_exit(pdev, plat); - return ret; - } - - return ret; + return stmmac_dvr_probe(&pdev->dev, plat, res); } EXPORT_SYMBOL_GPL(stmmac_pltfr_probe); @@ -902,12 +853,7 @@ EXPORT_SYMBOL_GPL(devm_stmmac_pltfr_probe); */ void stmmac_pltfr_remove(struct platform_device *pdev) { - struct net_device *ndev = platform_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); - struct plat_stmmacenet_data *plat = priv->plat; - stmmac_dvr_remove(&pdev->dev); - stmmac_pltfr_exit(pdev, plat); } EXPORT_SYMBOL_GPL(stmmac_pltfr_remove); @@ -970,7 +916,7 @@ static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev) if (!netif_running(ndev)) return 0; - if (!stmmac_wol_enabled_mac(priv)) { + if (!priv->wolopts) { /* Disable clock in case of PWM is off */ clk_disable_unprepare(priv->plat->clk_ptp_ref); @@ -991,7 +937,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) if (!netif_running(ndev)) return 0; - if (!stmmac_wol_enabled_mac(priv)) { + if (!priv->wolopts) { /* enable the clk previously disabled */ ret = pm_runtime_force_resume(dev); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 993ff4e87e55..3e30172fa129 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -57,7 +57,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) bool xmac, est_rst = false; int ret; - xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; + xmac = dwmac_is_xmac(priv->plat->core_type); if (delta < 0) { neg_adj = 1; @@ -344,7 +344,7 @@ void stmmac_ptp_register(struct stmmac_priv *priv) /* Calculate the clock domain crossing (CDC) error if necessary */ priv->plat->cdc_error_adj = 0; - if (priv->plat->has_gmac4) + if (priv->plat->core_type == DWMAC_CORE_GMAC4) priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate; /* Update the ptp clock parameters based on feature discovery, when diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index a01bc394d1ac..e90a2c469b9a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -1721,7 +1721,7 @@ static int stmmac_test_sph(struct stmmac_priv *priv) struct 
stmmac_packet_attrs attr = { }; int ret; - if (!priv->sph) + if (!priv->sph_active) return -EOPNOTSUPP; /* Check for UDP first */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 3b4d4696afe9..d78652718599 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -262,10 +262,10 @@ static int tc_init(struct stmmac_priv *priv) unsigned int count; int ret, i; - if (dma_cap->l3l4fnum) { - priv->flow_entries_max = dma_cap->l3l4fnum; + priv->flow_entries_max = dma_cap->l3l4fnum; + if (priv->flow_entries_max) { priv->flow_entries = devm_kcalloc(priv->device, - dma_cap->l3l4fnum, + priv->flow_entries_max, sizeof(*priv->flow_entries), GFP_KERNEL); if (!priv->flow_entries) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c index ff02a79c00d4..b18404dd5a8b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c @@ -122,7 +122,8 @@ static int vlan_del_hw_rx_fltr(struct net_device *dev, /* Extended Rx VLAN Filter Enable */ for (i = 0; i < hw->num_vlan; i++) { - if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid) { + if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) && + ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid)) { ret = vlan_write_filter(dev, hw, i, 0); if (!ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c index aa6f16d3df64..d7e4db7224b0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c @@ -129,7 +129,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, bpf_prog_put(old_prog); /* Disable RX SPH for XDP operation */ - priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv); + priv->sph_active = priv->sph_capable && !stmmac_xdp_is_enabled(priv); if (if_running && need_update) stmmac_xdp_open(dev); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 110eb2da8dbc..5924db6be3fe 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1788,28 +1788,28 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev, } static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, - struct ifreq *ifr) + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { struct am65_cpsw_port *port = am65_ndev_to_port(ndev); u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype; - struct hwtstamp_config cfg; - if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) + if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) { + NL_SET_ERR_MSG(extack, "Time stamping is not supported"); return -EOPNOTSUPP; - - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; + } /* TX HW timestamp */ - switch (cfg.tx_type) { + switch (cfg->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; default: + NL_SET_ERR_MSG(extack, "TX mode is not supported"); return -ERANGE; } - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: port->rx_ts_enabled = false; break; @@ -1826,17 +1826,19 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: port->rx_ts_enabled = true; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | 
HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_NTP_ALL: + NL_SET_ERR_MSG(extack, "RX filter is not supported"); return -EOPNOTSUPP; default: + NL_SET_ERR_MSG(extack, "RX filter is not supported"); return -ERANGE; } - port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON); + port->tx_ts_enabled = (cfg->tx_type == HWTSTAMP_TX_ON); /* cfg TX timestamp */ seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET << @@ -1872,25 +1874,24 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2); writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev, - struct ifreq *ifr) + struct kernel_hwtstamp_config *cfg) { struct am65_cpsw_port *port = am65_ndev_to_port(ndev); - struct hwtstamp_config cfg; if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) return -EOPNOTSUPP; - cfg.flags = 0; - cfg.tx_type = port->tx_ts_enabled ? + cfg->flags = 0; + cfg->tx_type = port->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg.rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT | + cfg->rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT : HWTSTAMP_FILTER_NONE; - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev, @@ -1901,13 +1902,6 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev, if (!netif_running(ndev)) return -EINVAL; - switch (cmd) { - case SIOCSHWTSTAMP: - return am65_cpsw_nuss_hwtstamp_set(ndev, req); - case SIOCGHWTSTAMP: - return am65_cpsw_nuss_hwtstamp_get(ndev, req); - } - return phylink_mii_ioctl(port->slave.phylink, req, cmd); } @@ -1991,6 +1985,8 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = { .ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate, .ndo_bpf = am65_cpsw_ndo_bpf, .ndo_xdp_xmit = am65_cpsw_ndo_xdp_xmit, + .ndo_hwtstamp_get = am65_cpsw_nuss_hwtstamp_get, + .ndo_hwtstamp_set = am65_cpsw_nuss_hwtstamp_set, }; static void am65_cpsw_disable_phy(struct phy *phy) @@ -3072,7 +3068,8 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common) } static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct am65_cpsw_devlink *dl_priv = devlink_priv(dl); struct am65_cpsw_common *common = dl_priv->common; diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 8b9e2078c602..ab88d4c02cbd 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1618,7 +1618,8 @@ static const struct devlink_ops cpsw_devlink_ops = { }; static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct cpsw_devlink *dl_priv = devlink_priv(dl); struct cpsw_common *cpsw = dl_priv->cpsw; @@ -1753,7 +1754,8 @@ exit: } static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct cpsw_devlink *dl_priv = devlink_priv(dl); struct cpsw_common *cpsw = dl_priv->cpsw; diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 
68507126be8e..48f85a3649b2 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -234,7 +234,6 @@ static int davinci_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg) ret = mdiobb_read_c22(bus, phy, reg); - pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); return ret; @@ -251,7 +250,6 @@ static int davinci_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, ret = mdiobb_write_c22(bus, phy, reg, val); - pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); return ret; @@ -268,7 +266,6 @@ static int davinci_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, ret = mdiobb_read_c45(bus, phy, devad, reg); - pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); return ret; @@ -285,7 +282,6 @@ static int davinci_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad, ret = mdiobb_write_c45(bus, phy, devad, reg, val); - pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); return ret; @@ -332,7 +328,6 @@ static int davinci_mdio_common_reset(struct davinci_mdio_data *data) data->bus->phy_mask = phy_mask; done: - pm_runtime_mark_last_busy(data->dev); pm_runtime_put_autosuspend(data->dev); return 0; @@ -441,7 +436,6 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) break; } - pm_runtime_mark_last_busy(data->dev); pm_runtime_put_autosuspend(data->dev); return ret; } @@ -478,7 +472,6 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, break; } - pm_runtime_mark_last_busy(data->dev); pm_runtime_put_autosuspend(data->dev); return ret; @@ -548,8 +541,8 @@ static int davinci_mdio_probe(struct platform_device *pdev) struct davinci_mdio_data *data; struct resource *res; struct phy_device *phy; - int ret, addr; int autosuspend_delay_ms = -1; + int ret; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -652,14 +645,10 @@ static int davinci_mdio_probe(struct platform_device *pdev) goto bail_out; /* scan and dump the bus */ - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - phy = mdiobus_get_phy(data->bus, addr); - if (phy) { - dev_info(dev, "phy[%d]: device %s, driver %s\n", - phy->mdio.addr, phydev_name(phy), - phy->drv ? phy->drv->name : "unknown"); - } - } + mdiobus_for_each_phy(data->bus, phy) + dev_info(dev, "phy[%d]: device %s, driver %s\n", + phy->mdio.addr, phydev_name(phy), + phy->drv ? 
phy->drv->name : "unknown"); return 0; diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index 57e5f1c88f50..090aa74d3ce7 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -93,15 +93,91 @@ void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num) } EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi); +static int emac_xsk_xmit_zc(struct prueth_emac *emac, + unsigned int q_idx) +{ + struct prueth_tx_chn *tx_chn = &emac->tx_chns[q_idx]; + struct xsk_buff_pool *pool = tx_chn->xsk_pool; + struct net_device *ndev = emac->ndev; + struct cppi5_host_desc_t *host_desc; + dma_addr_t dma_desc, dma_buf; + struct prueth_swdata *swdata; + struct xdp_desc xdp_desc; + int num_tx = 0, pkt_len; + int descs_avail, ret; + u32 *epib; + int i; + + descs_avail = k3_cppi_desc_pool_avail(tx_chn->desc_pool); + /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS + * will be available for normal TX path and queue is stopped there if + * necessary + */ + if (descs_avail <= MAX_SKB_FRAGS) + return 0; + + descs_avail -= MAX_SKB_FRAGS; + + for (i = 0; i < descs_avail; i++) { + if (!xsk_tx_peek_desc(pool, &xdp_desc)) + break; + + dma_buf = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + pkt_len = xdp_desc.len; + xsk_buff_raw_dma_sync_for_device(pool, dma_buf, pkt_len); + + host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); + if (unlikely(!host_desc)) + break; + + cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, + PRUETH_NAV_PS_DATA_SIZE); + cppi5_hdesc_set_pkttype(host_desc, 0); + epib = host_desc->epib; + epib[0] = 0; + epib[1] = 0; + cppi5_hdesc_set_pktlen(host_desc, pkt_len); + cppi5_desc_set_tags_ids(&host_desc->hdr, 0, + (emac->port_id | (q_idx << 8))); + + k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf); + cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, + pkt_len); + + swdata = cppi5_hdesc_get_swdata(host_desc); + swdata->type = PRUETH_SWDATA_XSK; + + dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, + host_desc); + ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, + host_desc, dma_desc); + + if (ret) { + ndev->stats.tx_errors++; + k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc); + break; + } + + num_tx++; + } + + xsk_tx_release(tx_chn->xsk_pool); + return num_tx; +} + void prueth_xmit_free(struct prueth_tx_chn *tx_chn, struct cppi5_host_desc_t *desc) { struct cppi5_host_desc_t *first_desc, *next_desc; dma_addr_t buf_dma, next_desc_dma; + struct prueth_swdata *swdata; u32 buf_dma_len; first_desc = desc; next_desc = first_desc; + swdata = cppi5_hdesc_get_swdata(first_desc); + if (swdata->type == PRUETH_SWDATA_XSK) + goto free_pool; cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len); k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); @@ -126,6 +202,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn, k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); } +free_pool: k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc); } EXPORT_SYMBOL_GPL(prueth_xmit_free); @@ -139,7 +216,9 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn, struct prueth_swdata *swdata; struct prueth_tx_chn *tx_chn; unsigned int total_bytes = 0; + int xsk_frames_done = 0; struct xdp_frame *xdpf; + unsigned int pkt_len; struct sk_buff *skb; dma_addr_t desc_dma; int res, num_tx = 0; @@ -176,6 +255,11 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn, total_bytes += xdpf->len; xdp_return_frame(xdpf); break; + case 
PRUETH_SWDATA_XSK: + pkt_len = cppi5_hdesc_get_pktlen(desc_tx); + dev_sw_netstats_tx_add(ndev, 1, pkt_len); + xsk_frames_done++; + break; default: prueth_xmit_free(tx_chn, desc_tx); ndev->stats.tx_dropped++; @@ -204,6 +288,18 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn, __netif_tx_unlock(netif_txq); } + if (tx_chn->xsk_pool) { + if (xsk_frames_done) + xsk_tx_completed(tx_chn->xsk_pool, xsk_frames_done); + + if (xsk_uses_need_wakeup(tx_chn->xsk_pool)) + xsk_set_tx_need_wakeup(tx_chn->xsk_pool); + + netif_txq = netdev_get_tx_queue(ndev, chn); + txq_trans_cond_update(netif_txq); + emac_xsk_xmit_zc(emac, chn); + } + return num_tx; } @@ -212,7 +308,10 @@ static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer) struct prueth_tx_chn *tx_chns = container_of(timer, struct prueth_tx_chn, tx_hrtimer); - enable_irq(tx_chns->irq); + if (tx_chns->irq_disabled) { + tx_chns->irq_disabled = false; + enable_irq(tx_chns->irq); + } return HRTIMER_NORESTART; } @@ -235,7 +334,10 @@ static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget) ns_to_ktime(tx_chn->tx_pace_timeout_ns), HRTIMER_MODE_REL_PINNED); } else { - enable_irq(tx_chn->irq); + if (tx_chn->irq_disabled) { + tx_chn->irq_disabled = false; + enable_irq(tx_chn->irq); + } } } @@ -246,6 +348,7 @@ static irqreturn_t prueth_tx_irq(int irq, void *dev_id) { struct prueth_tx_chn *tx_chn = dev_id; + tx_chn->irq_disabled = true; disable_irq_nosync(irq); napi_schedule(&tx_chn->napi_tx); @@ -362,6 +465,29 @@ fail: } EXPORT_SYMBOL_GPL(prueth_init_tx_chns); +static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac, + struct device *dma_dev, + int size) +{ + struct page_pool_params pp_params = { 0 }; + struct page_pool *pool; + + pp_params.order = 0; + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.pool_size = size; + pp_params.nid = dev_to_node(emac->prueth->dev); + pp_params.dma_dir = DMA_BIDIRECTIONAL; + pp_params.dev = dma_dev; + pp_params.napi = &emac->napi_rx; + pp_params.max_len = PAGE_SIZE; + + pool = page_pool_create(&pp_params); + if (IS_ERR(pool)) + netdev_err(emac->ndev, "cannot create rx page pool\n"); + + return pool; +} + int prueth_init_rx_chns(struct prueth_emac *emac, struct prueth_rx_chn *rx_chn, char *name, u32 max_rflows, @@ -371,6 +497,7 @@ int prueth_init_rx_chns(struct prueth_emac *emac, struct device *dev = emac->prueth->dev; struct net_device *ndev = emac->ndev; u32 fdqring_id, hdesc_size; + struct page_pool *pool; int i, ret = 0, slice; int flow_id_base; @@ -413,6 +540,14 @@ int prueth_init_rx_chns(struct prueth_emac *emac, goto fail; } + pool = prueth_create_page_pool(emac, rx_chn->dma_dev, rx_chn->descs_num); + if (IS_ERR(pool)) { + ret = PTR_ERR(pool); + goto fail; + } + + rx_chn->pg_pool = pool; + flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn); if (emac->is_sr1 && !strcmp(name, "rxmgm")) { emac->rx_mgm_flow_id_base = flow_id_base; @@ -544,15 +679,15 @@ void emac_rx_timestamp(struct prueth_emac *emac, * emac_xmit_xdp_frame - transmits an XDP frame * @emac: emac device * @xdpf: data to transmit - * @page: page from page pool if already DMA mapped * @q_idx: queue id + * @buff_type: Type of buffer to be transmitted * * Return: XDP state */ u32 emac_xmit_xdp_frame(struct prueth_emac *emac, struct xdp_frame *xdpf, - struct page *page, - unsigned int q_idx) + unsigned int q_idx, + enum prueth_tx_buff_type buff_type) { struct cppi5_host_desc_t *first_desc; struct net_device *ndev = emac->ndev; @@ -560,6 +695,7 @@ u32 
emac_xmit_xdp_frame(struct prueth_emac *emac, struct prueth_tx_chn *tx_chn; dma_addr_t desc_dma, buf_dma; struct prueth_swdata *swdata; + struct page *page; u32 *epib; int ret; @@ -576,7 +712,12 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac, return ICSSG_XDP_CONSUMED; /* drop */ } - if (page) { /* already DMA mapped by page_pool */ + if (buff_type == PRUETH_TX_BUFF_TYPE_XDP_TX) { /* already DMA mapped by page_pool */ + page = virt_to_head_page(xdpf->data); + if (unlikely(!page)) { + netdev_err(ndev, "xdp tx: failed to get page from xdpf\n"); + goto drop_free_descs; + } buf_dma = page_pool_get_dma_addr(page); buf_dma += xdpf->headroom + sizeof(struct xdp_frame); } else { /* Map the linear buffer */ @@ -631,13 +772,11 @@ EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame); * emac_run_xdp - run an XDP program * @emac: emac device * @xdp: XDP buffer containing the frame - * @page: page with RX data if already DMA mapped * @len: Rx descriptor packet length * * Return: XDP state */ -static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, - struct page *page, u32 *len) +static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *len) { struct net_device *ndev = emac->ndev; struct netdev_queue *netif_txq; @@ -664,7 +803,8 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, q_idx = cpu % emac->tx_ch_num; netif_txq = netdev_get_tx_queue(ndev, q_idx); __netif_tx_lock(netif_txq, cpu); - result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx); + result = emac_xmit_xdp_frame(emac, xdpf, q_idx, + PRUETH_TX_BUFF_TYPE_XDP_TX); __netif_tx_unlock(netif_txq); if (result == ICSSG_XDP_CONSUMED) { ndev->stats.tx_dropped++; @@ -689,11 +829,188 @@ drop: fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: ndev->stats.rx_dropped++; - page_pool_recycle_direct(emac->rx_chns.pg_pool, page); return ICSSG_XDP_CONSUMED; } } +static int prueth_dma_rx_push_mapped_zc(struct prueth_emac *emac, + struct prueth_rx_chn *rx_chn, + struct xdp_buff *xdp) +{ + struct net_device *ndev = emac->ndev; + struct cppi5_host_desc_t *desc_rx; + struct prueth_swdata *swdata; + dma_addr_t desc_dma; + dma_addr_t buf_dma; + int buf_len; + + buf_dma = xsk_buff_xdp_get_dma(xdp); + desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool); + if (!desc_rx) { + netdev_err(ndev, "rx push: failed to allocate descriptor\n"); + return -ENOMEM; + } + desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx); + + cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT, + PRUETH_NAV_PS_DATA_SIZE); + k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma); + buf_len = xsk_pool_get_rx_frame_size(rx_chn->xsk_pool); + cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len); + swdata = cppi5_hdesc_get_swdata(desc_rx); + swdata->type = PRUETH_SWDATA_XSK; + swdata->data.xdp = xdp; + + return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA, + desc_rx, desc_dma); +} + +static int prueth_rx_alloc_zc(struct prueth_emac *emac, int budget) +{ + struct prueth_rx_chn *rx_chn = &emac->rx_chns; + struct xdp_buff *xdp; + int i, ret; + + for (i = 0; i < budget; i++) { + xdp = xsk_buff_alloc(rx_chn->xsk_pool); + if (!xdp) + break; + + ret = prueth_dma_rx_push_mapped_zc(emac, rx_chn, xdp); + if (ret) { + netdev_err(emac->ndev, "rx alloc: failed to map descriptors to xdp buff\n"); + xsk_buff_free(xdp); + break; + } + } + + return i; +} + +static void emac_dispatch_skb_zc(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *psdata) +{ + unsigned int headroom = xdp->data - 
xdp->data_hard_start; + unsigned int pkt_len = xdp->data_end - xdp->data; + struct net_device *ndev = emac->ndev; + struct sk_buff *skb; + + skb = napi_alloc_skb(&emac->napi_rx, xdp->data_end - xdp->data_hard_start); + if (unlikely(!skb)) { + ndev->stats.rx_dropped++; + return; + } + + skb_reserve(skb, headroom); + skb_put(skb, pkt_len); + skb->dev = ndev; + + /* RX HW timestamp */ + if (emac->rx_ts_enabled) + emac_rx_timestamp(emac, skb, psdata); + + if (emac->prueth->is_switch_mode) + skb->offload_fwd_mark = emac->offload_fwd_mark; + skb->protocol = eth_type_trans(skb, ndev); + + skb_mark_for_recycle(skb); + napi_gro_receive(&emac->napi_rx, skb); + ndev->stats.rx_bytes += pkt_len; + ndev->stats.rx_packets++; +} + +static int emac_rx_packet_zc(struct prueth_emac *emac, u32 flow_id, + int budget) +{ + struct prueth_rx_chn *rx_chn = &emac->rx_chns; + u32 buf_dma_len, pkt_len, port_id = 0; + struct net_device *ndev = emac->ndev; + struct cppi5_host_desc_t *desc_rx; + struct prueth_swdata *swdata; + dma_addr_t desc_dma, buf_dma; + struct xdp_buff *xdp; + int xdp_status = 0; + int count = 0; + u32 *psdata; + int ret; + + while (count < budget) { + ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma); + if (ret) { + if (ret != -ENODATA) + netdev_err(ndev, "rx pop: failed: %d\n", ret); + break; + } + + if (cppi5_desc_is_tdcm(desc_dma)) { + complete(&emac->tdown_complete); + break; + } + + desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); + swdata = cppi5_hdesc_get_swdata(desc_rx); + if (swdata->type != PRUETH_SWDATA_XSK) { + netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type); + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); + break; + } + + xdp = swdata->data.xdp; + cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); + k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); + pkt_len = cppi5_hdesc_get_pktlen(desc_rx); + /* firmware adds 4 CRC bytes, strip them */ + pkt_len -= 4; + cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); + psdata = cppi5_hdesc_get_psdata(desc_rx); + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); + count++; + xsk_buff_set_size(xdp, pkt_len); + xsk_buff_dma_sync_for_cpu(xdp); + + if (prueth_xdp_is_enabled(emac)) { + ret = emac_run_xdp(emac, xdp, &pkt_len); + switch (ret) { + case ICSSG_XDP_PASS: + /* prepare skb and send to n/w stack */ + emac_dispatch_skb_zc(emac, xdp, psdata); + xsk_buff_free(xdp); + break; + case ICSSG_XDP_CONSUMED: + xsk_buff_free(xdp); + break; + case ICSSG_XDP_TX: + case ICSSG_XDP_REDIR: + xdp_status |= ret; + break; + } + } else { + /* prepare skb and send to n/w stack */ + emac_dispatch_skb_zc(emac, xdp, psdata); + xsk_buff_free(xdp); + } + } + + if (xdp_status & ICSSG_XDP_REDIR) + xdp_do_flush(); + + /* Allocate xsk buffers from the pool for the "count" number of + * packets processed in order to be able to receive more packets. 
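For context, a minimal userspace counterpart to the emac_rx_packet_zc() path above, assuming the libxdp xsk_* helpers; the socket, ring, and umem names are illustrative, not part of this patch:

    #include <sys/socket.h>
    #include <xdp/xsk.h>

    static void rx_poll_once(struct xsk_socket *xsk, struct xsk_ring_cons *rx,
                             struct xsk_ring_prod *fq, void *umem_area)
    {
            __u32 idx_rx = 0, idx_fq = 0;
            unsigned int i, rcvd;

            rcvd = xsk_ring_cons__peek(rx, 64, &idx_rx);
            if (!rcvd) {
                    /* driver set need_wakeup: kick its RX NAPI */
                    if (xsk_ring_prod__needs_wakeup(fq))
                            recvfrom(xsk_socket__fd(xsk), NULL, 0,
                                     MSG_DONTWAIT, NULL, NULL);
                    return;
            }

            if (xsk_ring_prod__reserve(fq, rcvd, &idx_fq) != rcvd)
                    return;         /* a real loop would retry here */

            for (i = 0; i < rcvd; i++) {
                    const struct xdp_desc *desc =
                            xsk_ring_cons__rx_desc(rx, idx_rx + i);
                    void *pkt = xsk_umem__get_data(umem_area, desc->addr);

                    /* ... consume desc->len bytes at pkt ... */
                    *xsk_ring_prod__fill_addr(fq, idx_fq + i) = desc->addr;
            }
            xsk_ring_cons__release(rx, rcvd);
            xsk_ring_prod__submit(fq, rcvd);
    }

The TX half is symmetric: when need_wakeup is set on the TX ring, a sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0) lands in the driver's prueth_xsk_wakeup(), which schedules the TX NAPI that drives emac_xsk_xmit_zc().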
+ */ + ret = prueth_rx_alloc_zc(emac, count); + + if (xsk_uses_need_wakeup(rx_chn->xsk_pool)) { + /* If the user space doesn't provide enough buffers then it must + * explicitly wake up the kernel when new buffers are available + */ + if (ret < count) + xsk_set_rx_need_wakeup(rx_chn->xsk_pool); + else + xsk_clear_rx_need_wakeup(rx_chn->xsk_pool); + } + + return count; +} + static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) { struct prueth_rx_chn *rx_chn = &emac->rx_chns; @@ -719,8 +1036,10 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) return ret; } - if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */ + if (cppi5_desc_is_tdcm(desc_dma)) { + complete(&emac->tdown_complete); return 0; + } desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_rx); @@ -738,7 +1057,6 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) /* firmware adds 4 CRC bytes, strip them */ pkt_len -= 4; cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); - k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); /* if allocation fails we drop the packet but push the @@ -752,11 +1070,11 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) } pa = page_address(page); - if (emac->xdp_prog) { + if (prueth_xdp_is_enabled(emac)) { xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq); xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false); - *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len); + *xdp_state = emac_run_xdp(emac, &xdp, &pkt_len); if (*xdp_state != ICSSG_XDP_PASS) goto requeue; headroom = xdp.data - xdp.data_hard_start; @@ -804,24 +1122,29 @@ requeue: return ret; } -static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma) +void prueth_rx_cleanup(void *data, dma_addr_t desc_dma) { struct prueth_rx_chn *rx_chn = data; struct cppi5_host_desc_t *desc_rx; struct prueth_swdata *swdata; struct page_pool *pool; + struct xdp_buff *xdp; struct page *page; pool = rx_chn->pg_pool; desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_rx); - if (swdata->type == PRUETH_SWDATA_PAGE) { + if (rx_chn->xsk_pool) { + xdp = swdata->data.xdp; + xsk_buff_free(xdp); + } else { page = swdata->data.page; page_pool_recycle_direct(pool, page); } k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); } +EXPORT_SYMBOL_GPL(prueth_rx_cleanup); static int prueth_tx_ts_cookie_get(struct prueth_emac *emac) { @@ -1025,10 +1348,11 @@ drop_stop_q_busy: } EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit); -static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma) +void prueth_tx_cleanup(void *data, dma_addr_t desc_dma) { struct prueth_tx_chn *tx_chn = data; struct cppi5_host_desc_t *desc_tx; + struct xsk_buff_pool *xsk_pool; struct prueth_swdata *swdata; struct xdp_frame *xdpf; struct sk_buff *skb; @@ -1045,17 +1369,23 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma) xdpf = swdata->data.xdpf; xdp_return_frame(xdpf); break; + case PRUETH_SWDATA_XSK: + xsk_pool = tx_chn->xsk_pool; + xsk_tx_completed(xsk_pool, 1); + break; default: break; } prueth_xmit_free(tx_chn, desc_tx); } +EXPORT_SYMBOL_GPL(prueth_tx_cleanup); irqreturn_t prueth_rx_irq(int irq, void *dev_id) { struct prueth_emac *emac = dev_id; + emac->rx_chns.irq_disabled = true; disable_irq_nosync(irq); napi_schedule(&emac->napi_rx); @@ -1083,6 +1413,7 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) PRUETH_RX_FLOW_DATA_SR1 : 
PRUETH_RX_FLOW_DATA; int flow = emac->is_sr1 ? PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS; + struct prueth_rx_chn *rx_chn = &emac->rx_chns; int xdp_state_or = 0; int num_rx = 0; int cur_budget; @@ -1090,14 +1421,18 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) int ret; while (flow--) { - cur_budget = budget - num_rx; - - while (cur_budget--) { - ret = emac_rx_packet(emac, flow, &xdp_state); - xdp_state_or |= xdp_state; - if (ret) - break; - num_rx++; + if (rx_chn->xsk_pool) { + num_rx = emac_rx_packet_zc(emac, flow, budget); + } else { + cur_budget = budget - num_rx; + + while (cur_budget--) { + ret = emac_rx_packet(emac, flow, &xdp_state); + xdp_state_or |= xdp_state; + if (ret) + break; + num_rx++; + } } if (num_rx >= budget) @@ -1113,7 +1448,11 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) ns_to_ktime(emac->rx_pace_timeout_ns), HRTIMER_MODE_REL_PINNED); } else { - enable_irq(emac->rx_chns.irq[rx_flow]); + if (emac->rx_chns.irq_disabled) { + /* re-enable the RX IRQ */ + emac->rx_chns.irq_disabled = false; + enable_irq(emac->rx_chns.irq[rx_flow]); + } } } @@ -1121,62 +1460,48 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) } EXPORT_SYMBOL_GPL(icssg_napi_rx_poll); -static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac, - struct device *dma_dev, - int size) -{ - struct page_pool_params pp_params = { 0 }; - struct page_pool *pool; - - pp_params.order = 0; - pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; - pp_params.pool_size = size; - pp_params.nid = dev_to_node(emac->prueth->dev); - pp_params.dma_dir = DMA_BIDIRECTIONAL; - pp_params.dev = dma_dev; - pp_params.napi = &emac->napi_rx; - pp_params.max_len = PAGE_SIZE; - - pool = page_pool_create(&pp_params); - if (IS_ERR(pool)) - netdev_err(emac->ndev, "cannot create rx page pool\n"); - - return pool; -} - int prueth_prepare_rx_chan(struct prueth_emac *emac, struct prueth_rx_chn *chn, int buf_size) { - struct page_pool *pool; struct page *page; + int desc_avail; int i, ret; - pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num); - if (IS_ERR(pool)) - return PTR_ERR(pool); - - chn->pg_pool = pool; + desc_avail = k3_cppi_desc_pool_avail(chn->desc_pool); + if (desc_avail < chn->descs_num) + netdev_warn(emac->ndev, + "not enough RX descriptors available %d < %d\n", + desc_avail, chn->descs_num); - for (i = 0; i < chn->descs_num; i++) { - /* NOTE: we're not using memory efficiently here. - * 1 full page (4KB?) used here instead of - * PRUETH_MAX_PKT_SIZE (~1.5KB?) + if (chn->xsk_pool) { + /* get pages from xsk_pool and push to RX ring + * queue as much as possible */ - page = page_pool_dev_alloc_pages(pool); - if (!page) { - netdev_err(emac->ndev, "couldn't allocate rx page\n"); - ret = -ENOMEM; + ret = prueth_rx_alloc_zc(emac, desc_avail); + if (!ret) goto recycle_alloc_pg; - } + } else { + for (i = 0; i < desc_avail; i++) { + /* NOTE: we're not using memory efficiently here. + * 1 full page (4KB?) used here instead of + * PRUETH_MAX_PKT_SIZE (~1.5KB?) 
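The irq_disabled guard that keeps reappearing in these hunks exists because two paths can re-enable a line the hard IRQ handler masked: NAPI completion and the rx/tx pacing hrtimer. Without the flag, both firing would unbalance the enable_irq()/disable_irq() depth. A condensed sketch of the pattern (the foo_* names and foo_process() helper are illustrative):

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    struct foo_chn {
            struct napi_struct napi;
            unsigned int irq;
            bool irq_disabled;
    };

    /* hard IRQ: mask the line and defer all work to NAPI */
    static irqreturn_t foo_irq(int irq, void *dev_id)
    {
            struct foo_chn *chn = dev_id;

            chn->irq_disabled = true;
            disable_irq_nosync(irq);
            napi_schedule(&chn->napi);

            return IRQ_HANDLED;
    }

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            struct foo_chn *chn = container_of(napi, struct foo_chn, napi);
            int done = foo_process(chn, budget);    /* hypothetical ring walk */

            if (done < budget && napi_complete_done(napi, done)) {
                    /* the pacing hrtimer may already have re-armed the
                     * line; the flag keeps enable/disable balanced
                     */
                    if (chn->irq_disabled) {
                            chn->irq_disabled = false;
                            enable_irq(chn->irq);
                    }
            }

            return done;
    }

The flag is only written with the IRQ masked (hard handler, NAPI poll, hrtimer callback), so no extra locking is needed.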
+ */ + page = page_pool_dev_alloc_pages(chn->pg_pool); + if (!page) { + netdev_err(emac->ndev, "couldn't allocate rx page\n"); + ret = -ENOMEM; + goto recycle_alloc_pg; + } - ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size); - if (ret < 0) { - netdev_err(emac->ndev, - "cannot submit page for rx chan %s ret %d\n", - chn->name, ret); - page_pool_recycle_direct(pool, page); - goto recycle_alloc_pg; + ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size); + if (ret < 0) { + netdev_err(emac->ndev, + "cannot submit page for rx chan %s ret %d\n", + chn->name, ret); + page_pool_recycle_direct(chn->pg_pool, page); + goto recycle_alloc_pg; + } } } @@ -1223,15 +1548,13 @@ void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue) } EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout); -static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) +int icssg_ndo_set_ts_config(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct prueth_emac *emac = netdev_priv(ndev); - struct hwtstamp_config config; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: emac->tx_ts_enabled = 0; break; @@ -1242,7 +1565,7 @@ static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: emac->rx_ts_enabled = 0; break; @@ -1262,43 +1585,28 @@ static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: emac->rx_ts_enabled = 1; - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; } +EXPORT_SYMBOL_GPL(icssg_ndo_set_ts_config); -static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr) +int icssg_ndo_get_ts_config(struct net_device *ndev, + struct kernel_hwtstamp_config *config) { struct prueth_emac *emac = netdev_priv(ndev); - struct hwtstamp_config config; - - config.flags = 0; - config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} + config->flags = 0; + config->tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config->rx_filter = emac->rx_ts_enabled ? 
HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; -int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGHWTSTAMP: - return emac_get_ts_config(ndev, ifr); - case SIOCSHWTSTAMP: - return emac_set_ts_config(ndev, ifr); - default: - break; - } - - return phy_do_ioctl(ndev, ifr, cmd); + return 0; } -EXPORT_SYMBOL_GPL(icssg_ndo_ioctl); +EXPORT_SYMBOL_GPL(icssg_ndo_get_ts_config); void icssg_ndo_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index e42d0fdefee1..f65041662173 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -47,6 +47,9 @@ NETIF_F_HW_HSR_TAG_INS | \ NETIF_F_HW_HSR_TAG_RM) +#define PRUETH_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC |\ + DMA_ATTR_WEAK_ORDERING) + /* CTRLMMR_ICSSG_RGMII_CTRL register bits */ #define ICSSG_CTRL_RGMII_ID_MODE BIT(24) @@ -392,7 +395,11 @@ static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer) container_of(timer, struct prueth_emac, rx_hrtimer); int rx_flow = PRUETH_RX_FLOW_DATA; - enable_irq(emac->rx_chns.irq[rx_flow]); + if (emac->rx_chns.irq_disabled) { + /* re-enable the RX IRQ */ + emac->rx_chns.irq_disabled = false; + enable_irq(emac->rx_chns.irq[rx_flow]); + } return HRTIMER_NORESTART; } @@ -566,31 +573,41 @@ const struct icss_iep_clockops prueth_iep_clockops = { .perout_enable = prueth_perout_enable, }; +static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac) +{ + struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq; + + if (xdp_rxq_info_is_reg(rxq)) + xdp_rxq_info_unreg(rxq); +} + static int prueth_create_xdp_rxqs(struct prueth_emac *emac) { struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq; struct page_pool *pool = emac->rx_chns.pg_pool; + struct prueth_rx_chn *rx_chn = &emac->rx_chns; int ret; ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id); if (ret) return ret; - ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); - if (ret) - xdp_rxq_info_unreg(rxq); - - return ret; -} - -static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac) -{ - struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq; + if (rx_chn->xsk_pool) { + ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_XSK_BUFF_POOL, NULL); + if (ret) + goto xdp_unreg; + xsk_pool_set_rxq_info(rx_chn->xsk_pool, rxq); + } else { + ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); + if (ret) + goto xdp_unreg; + } - if (!xdp_rxq_info_is_reg(rxq)) - return; + return 0; - xdp_rxq_info_unreg(rxq); +xdp_unreg: + prueth_destroy_xdp_rxqs(emac); + return ret; } static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr) @@ -735,6 +752,128 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid, return 0; } +static void prueth_set_xsk_pool(struct prueth_emac *emac, u16 queue_id) +{ + struct prueth_tx_chn *tx_chn = &emac->tx_chns[queue_id]; + struct prueth_rx_chn *rx_chn = &emac->rx_chns; + + if (emac->xsk_qid != queue_id) { + rx_chn->xsk_pool = NULL; + tx_chn->xsk_pool = NULL; + } else { + rx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id); + tx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id); + } +} + +static void prueth_destroy_txq(struct prueth_emac *emac) +{ + int ret, i; + + atomic_set(&emac->tdown_cnt, emac->tx_ch_num); + /* ensure new tdown_cnt value is visible */ + smp_mb__after_atomic(); + /* tear down and disable UDMA channels */ + 
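The prueth_create_xdp_rxqs() changes above register the queue's memory model for both the page-pool and XSK-pool cases, which is what lets verdicts from an attached program be honored via prueth_xdp_is_enabled(). For reference, the smallest program that exercises this path; an illustrative sketch, assuming the usual libbpf build flow:

    /* xdp_pass.c - pass every frame to the regular stack */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_pass(struct xdp_md *ctx)
    {
            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";

Attaching it (for example with iproute2: ip link set dev eth0 xdp obj xdp_pass.o sec xdp, interface name illustrative) reaches the driver through ndo_bpf(XDP_SETUP_PROG) and emac_xdp_setup().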
reinit_completion(&emac->tdown_complete); + for (i = 0; i < emac->tx_ch_num; i++) + k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false); + + ret = wait_for_completion_timeout(&emac->tdown_complete, + msecs_to_jiffies(1000)); + if (!ret) + netdev_err(emac->ndev, "tx teardown timeout\n"); + + for (i = 0; i < emac->tx_ch_num; i++) { + napi_disable(&emac->tx_chns[i].napi_tx); + hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer); + k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn, + &emac->tx_chns[i], + prueth_tx_cleanup); + k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn); + } +} + +static void prueth_destroy_rxq(struct prueth_emac *emac) +{ + int i, ret; + + /* tear down and disable UDMA channels */ + reinit_completion(&emac->tdown_complete); + k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true); + + /* When RX DMA Channel Teardown is initiated, it will result in an + * interrupt and a Teardown Completion Marker (TDCM) is queued into + * the RX Completion queue. Acknowledging the interrupt involves + * popping the TDCM descriptor from the RX Completion queue via the + * RX NAPI Handler. To avoid timing out when waiting for the TDCM to + * be popped, schedule the RX NAPI handler to run immediately. + */ + if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) { + if (napi_schedule_prep(&emac->napi_rx)) + __napi_schedule(&emac->napi_rx); + } + + ret = wait_for_completion_timeout(&emac->tdown_complete, + msecs_to_jiffies(1000)); + if (!ret) + netdev_err(emac->ndev, "rx teardown timeout\n"); + + for (i = 0; i < PRUETH_MAX_RX_FLOWS; i++) { + napi_disable(&emac->napi_rx); + hrtimer_cancel(&emac->rx_hrtimer); + k3_udma_glue_reset_rx_chn(emac->rx_chns.rx_chn, i, + &emac->rx_chns, + prueth_rx_cleanup); + } + + prueth_destroy_xdp_rxqs(emac); + k3_udma_glue_disable_rx_chn(emac->rx_chns.rx_chn); +} + +static int prueth_create_txq(struct prueth_emac *emac) +{ + int ret, i; + + for (i = 0; i < emac->tx_ch_num; i++) { + ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn); + if (ret) + goto reset_tx_chan; + napi_enable(&emac->tx_chns[i].napi_tx); + } + return 0; + +reset_tx_chan: + /* Since interface is not yet up, there is wouldn't be + * any SKB for completion. 
So set false to free_skb + */ + prueth_reset_tx_chan(emac, i, false); + return ret; +} + +static int prueth_create_rxq(struct prueth_emac *emac) +{ + int ret; + + ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE); + if (ret) + return ret; + + ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn); + if (ret) + goto reset_rx_chn; + + ret = prueth_create_xdp_rxqs(emac); + if (ret) + goto reset_rx_chn; + + napi_enable(&emac->napi_rx); + return 0; + +reset_rx_chn: + prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false); + return ret; +} + /** * emac_ndo_open - EMAC device open * @ndev: network adapter device @@ -746,7 +885,7 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid, static int emac_ndo_open(struct net_device *ndev) { struct prueth_emac *emac = netdev_priv(ndev); - int ret, i, num_data_chn = emac->tx_ch_num; + int ret, num_data_chn = emac->tx_ch_num; struct icssg_flow_cfg __iomem *flow_cfg; struct prueth *prueth = emac->prueth; int slice = prueth_emac_slice(emac); @@ -767,6 +906,7 @@ static int emac_ndo_open(struct net_device *ndev) return ret; } + emac->xsk_qid = -EINVAL; init_completion(&emac->cmd_complete); ret = prueth_init_tx_chns(emac); if (ret) { @@ -819,28 +959,13 @@ static int emac_ndo_open(struct net_device *ndev) goto stop; /* Prepare RX */ - ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE); + ret = prueth_create_rxq(emac); if (ret) goto free_tx_ts_irq; - ret = prueth_create_xdp_rxqs(emac); - if (ret) - goto reset_rx_chn; - - ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn); + ret = prueth_create_txq(emac); if (ret) - goto destroy_xdp_rxqs; - - for (i = 0; i < emac->tx_ch_num; i++) { - ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn); - if (ret) - goto reset_tx_chan; - } - - /* Enable NAPI in Tx and Rx direction */ - for (i = 0; i < emac->tx_ch_num; i++) - napi_enable(&emac->tx_chns[i].napi_tx); - napi_enable(&emac->napi_rx); + goto destroy_rxq; /* start PHY */ phy_start(ndev->phydev); @@ -851,15 +976,8 @@ static int emac_ndo_open(struct net_device *ndev) return 0; -reset_tx_chan: - /* Since interface is not yet up, there is wouldn't be - * any SKB for completion. So set false to free_skb - */ - prueth_reset_tx_chan(emac, i, false); -destroy_xdp_rxqs: - prueth_destroy_xdp_rxqs(emac); -reset_rx_chn: - prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false); +destroy_rxq: + prueth_destroy_rxq(emac); free_tx_ts_irq: free_irq(emac->tx_ts_irq, emac); stop: @@ -889,9 +1007,6 @@ static int emac_ndo_stop(struct net_device *ndev) { struct prueth_emac *emac = netdev_priv(ndev); struct prueth *prueth = emac->prueth; - int rx_flow = PRUETH_RX_FLOW_DATA; - int max_rx_flows; - int ret, i; /* inform the upper layers. 
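The XSK pool enable/disable callbacks in the hunks that follow are reached via ndo_bpf(XDP_SETUP_XSK_POOL) when userspace binds an AF_XDP socket to a queue. A minimal sketch of that trigger, assuming libxdp with a pre-built umem and rings (xsk, umem, rx, tx, and the interface name are illustrative):

    struct xsk_socket_config cfg = {
            .rx_size      = XSK_RING_CONS__DEFAULT_NUM_DESCS,
            .tx_size      = XSK_RING_PROD__DEFAULT_NUM_DESCS,
            .libxdp_flags = XSK_LIBXDP_FLAGS__INHIBIT_PROG_LOAD,
            .bind_flags   = XDP_ZEROCOPY | XDP_USE_NEED_WAKEUP,
    };
    struct xsk_socket *xsk;
    int err;

    /* Binding to (ifname, queue_id) reaches the driver as
     * ndo_bpf(XDP_SETUP_XSK_POOL) with a non-NULL pool; closing
     * the socket repeats the call with pool == NULL.
     */
    err = xsk_socket__create(&xsk, "eth1", /*queue_id=*/0,
                             umem, &rx, &tx, &cfg);
    if (err)
            exit(1);

Note the driver-side constraints visible below: the queue id must be valid for both RX flows and TX channels, and the umem frame size must satisfy the PRUETH_MAX_PKT_SIZE check or the bind fails with -EOPNOTSUPP.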
*/ netif_tx_stop_all_queues(ndev); @@ -905,32 +1020,8 @@ static int emac_ndo_stop(struct net_device *ndev) else __dev_mc_unsync(ndev, icssg_prueth_del_mcast); - atomic_set(&emac->tdown_cnt, emac->tx_ch_num); - /* ensure new tdown_cnt value is visible */ - smp_mb__after_atomic(); - /* tear down and disable UDMA channels */ - reinit_completion(&emac->tdown_complete); - for (i = 0; i < emac->tx_ch_num; i++) - k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false); - - ret = wait_for_completion_timeout(&emac->tdown_complete, - msecs_to_jiffies(1000)); - if (!ret) - netdev_err(ndev, "tx teardown timeout\n"); - - prueth_reset_tx_chan(emac, emac->tx_ch_num, true); - for (i = 0; i < emac->tx_ch_num; i++) { - napi_disable(&emac->tx_chns[i].napi_tx); - hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer); - } - - max_rx_flows = PRUETH_MAX_RX_FLOWS; - k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true); - - prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true); - prueth_destroy_xdp_rxqs(emac); - napi_disable(&emac->napi_rx); - hrtimer_cancel(&emac->rx_hrtimer); + prueth_destroy_txq(emac); + prueth_destroy_rxq(emac); cancel_work_sync(&emac->rx_mode_work); @@ -943,10 +1034,10 @@ static int emac_ndo_stop(struct net_device *ndev) free_irq(emac->tx_ts_irq, emac); - free_irq(emac->rx_chns.irq[rx_flow], emac); + free_irq(emac->rx_chns.irq[PRUETH_RX_FLOW_DATA], emac); prueth_ndev_del_tx_napi(emac, emac->tx_ch_num); - prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows); + prueth_cleanup_rx_chns(emac, &emac->rx_chns, PRUETH_MAX_RX_FLOWS); prueth_cleanup_tx_chns(emac); prueth->emacs_initialized--; @@ -1108,7 +1199,8 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame __netif_tx_lock(netif_txq, cpu); for (i = 0; i < n; i++) { xdpf = frames[i]; - err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx); + err = emac_xmit_xdp_frame(emac, xdpf, q_idx, + PRUETH_TX_BUFF_TYPE_XDP_NDO); if (err != ICSSG_XDP_TX) { ndev->stats.tx_dropped++; break; @@ -1141,6 +1233,109 @@ static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf) return 0; } +static int prueth_xsk_pool_enable(struct prueth_emac *emac, + struct xsk_buff_pool *pool, u16 queue_id) +{ + struct prueth_rx_chn *rx_chn = &emac->rx_chns; + u32 frame_size; + int ret; + + if (queue_id >= PRUETH_MAX_RX_FLOWS || + queue_id >= emac->tx_ch_num) { + netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id); + return -EINVAL; + } + + frame_size = xsk_pool_get_rx_frame_size(pool); + if (frame_size < PRUETH_MAX_PKT_SIZE) + return -EOPNOTSUPP; + + ret = xsk_pool_dma_map(pool, rx_chn->dma_dev, PRUETH_RX_DMA_ATTR); + if (ret) { + netdev_err(emac->ndev, "Failed to map XSK pool: %d\n", ret); + return ret; + } + + if (netif_running(emac->ndev)) { + /* stop packets from wire for graceful teardown */ + ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE); + if (ret) + return ret; + prueth_destroy_rxq(emac); + } + + emac->xsk_qid = queue_id; + prueth_set_xsk_pool(emac, queue_id); + + if (netif_running(emac->ndev)) { + ret = prueth_create_rxq(emac); + if (ret) { + netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret); + return ret; + } + ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD); + if (ret) { + prueth_destroy_rxq(emac); + return ret; + } + ret = prueth_xsk_wakeup(emac->ndev, queue_id, XDP_WAKEUP_RX); + if (ret) + return ret; + } + + return 0; +} + +static int prueth_xsk_pool_disable(struct prueth_emac *emac, u16 queue_id) +{ + struct xsk_buff_pool *pool; + int ret; + + if (queue_id >= 
PRUETH_MAX_RX_FLOWS || + queue_id >= emac->tx_ch_num) { + netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id); + return -EINVAL; + } + + if (emac->xsk_qid != queue_id) { + netdev_err(emac->ndev, "XSK queue ID %d not registered\n", queue_id); + return -EINVAL; + } + + pool = xsk_get_pool_from_qid(emac->ndev, queue_id); + if (!pool) { + netdev_err(emac->ndev, "No XSK pool registered for queue %d\n", queue_id); + return -EINVAL; + } + + if (netif_running(emac->ndev)) { + /* stop packets from wire for graceful teardown */ + ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE); + if (ret) + return ret; + prueth_destroy_rxq(emac); + } + + xsk_pool_dma_unmap(pool, PRUETH_RX_DMA_ATTR); + emac->xsk_qid = -EINVAL; + prueth_set_xsk_pool(emac, queue_id); + + if (netif_running(emac->ndev)) { + ret = prueth_create_rxq(emac); + if (ret) { + netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret); + return ret; + } + ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD); + if (ret) { + prueth_destroy_rxq(emac); + return ret; + } + } + + return 0; +} + /** * emac_ndo_bpf - implements ndo_bpf for icssg_prueth * @ndev: network adapter device @@ -1155,11 +1350,58 @@ static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) switch (bpf->command) { case XDP_SETUP_PROG: return emac_xdp_setup(emac, bpf); + case XDP_SETUP_XSK_POOL: + return bpf->xsk.pool ? + prueth_xsk_pool_enable(emac, bpf->xsk.pool, bpf->xsk.queue_id) : + prueth_xsk_pool_disable(emac, bpf->xsk.queue_id); default: return -EINVAL; } } +int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth_tx_chn *tx_chn = &emac->tx_chns[qid]; + struct prueth_rx_chn *rx_chn = &emac->rx_chns; + + if (emac->xsk_qid != qid) { + netdev_err(ndev, "XSK queue %d not registered\n", qid); + return -EINVAL; + } + + if (qid >= PRUETH_MAX_RX_FLOWS || qid >= emac->tx_ch_num) { + netdev_err(ndev, "Invalid XSK queue ID %d\n", qid); + return -EINVAL; + } + + if (!tx_chn->xsk_pool) { + netdev_err(ndev, "XSK pool not registered for queue %d\n", qid); + return -EINVAL; + } + + if (!rx_chn->xsk_pool) { + netdev_err(ndev, "XSK pool not registered for RX queue %d\n", qid); + return -EINVAL; + } + + if (flags & XDP_WAKEUP_TX) { + if (!napi_if_scheduled_mark_missed(&tx_chn->napi_tx)) { + if (likely(napi_schedule_prep(&tx_chn->napi_tx))) + __napi_schedule(&tx_chn->napi_tx); + } + } + + if (flags & XDP_WAKEUP_RX) { + if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) { + if (likely(napi_schedule_prep(&emac->napi_rx))) + __napi_schedule(&emac->napi_rx); + } + } + + return 0; +} + static const struct net_device_ops emac_netdev_ops = { .ndo_open = emac_ndo_open, .ndo_stop = emac_ndo_stop, @@ -1168,7 +1410,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = icssg_ndo_tx_timeout, .ndo_set_rx_mode = emac_ndo_set_rx_mode, - .ndo_eth_ioctl = icssg_ndo_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_get_stats64 = icssg_ndo_get_stats64, .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name, .ndo_fix_features = emac_ndo_fix_features, @@ -1176,6 +1418,9 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid, .ndo_bpf = emac_ndo_bpf, .ndo_xdp_xmit = emac_xdp_xmit, + .ndo_hwtstamp_get = icssg_ndo_get_ts_config, + .ndo_hwtstamp_set = icssg_ndo_set_ts_config, + .ndo_xsk_wakeup = prueth_xsk_wakeup, }; static int prueth_netdev_init(struct prueth *prueth, @@ -1248,8 +1493,7 
@@ static int prueth_netdev_init(struct prueth *prueth, } else if (of_phy_is_fixed_link(eth_node)) { ret = of_phy_register_fixed_link(eth_node); if (ret) { - ret = dev_err_probe(prueth->dev, ret, - "failed to register fixed-link phy\n"); + dev_err_probe(prueth->dev, ret, "failed to register fixed-link phy\n"); goto free; } @@ -1310,7 +1554,8 @@ static int prueth_netdev_init(struct prueth *prueth, xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_NDO_XMIT); + NETDEV_XDP_ACT_NDO_XMIT | + NETDEV_XDP_ACT_XSK_ZEROCOPY); netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll); hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC, diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index ca8a22a4a5da..10eadd356650 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -38,6 +38,8 @@ #include <net/devlink.h> #include <net/xdp.h> #include <net/page_pool/helpers.h> +#include <net/xsk_buff_pool.h> +#include <net/xdp_sock_drv.h> #include "icssg_config.h" #include "icss_iep.h" @@ -126,6 +128,8 @@ struct prueth_tx_chn { char name[32]; struct hrtimer tx_hrtimer; unsigned long tx_pace_timeout_ns; + struct xsk_buff_pool *xsk_pool; + bool irq_disabled; }; struct prueth_rx_chn { @@ -138,6 +142,8 @@ struct prueth_rx_chn { char name[32]; struct page_pool *pg_pool; struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; + bool irq_disabled; }; enum prueth_swdata_type { @@ -146,6 +152,12 @@ enum prueth_swdata_type { PRUETH_SWDATA_PAGE, PRUETH_SWDATA_CMD, PRUETH_SWDATA_XDPF, + PRUETH_SWDATA_XSK, +}; + +enum prueth_tx_buff_type { + PRUETH_TX_BUFF_TYPE_XDP_TX, + PRUETH_TX_BUFF_TYPE_XDP_NDO, }; struct prueth_swdata { @@ -155,6 +167,7 @@ struct prueth_swdata { struct page *page; u32 cmd; struct xdp_frame *xdpf; + struct xdp_buff *xdp; } data; }; @@ -241,6 +254,7 @@ struct prueth_emac { struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID]; struct bpf_prog *xdp_prog; struct xdp_attachment_info xdpi; + int xsk_qid; }; /* The buf includes headroom compatible with both skb and xdpf */ @@ -479,7 +493,11 @@ void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, void prueth_reset_rx_chan(struct prueth_rx_chn *chn, int num_flows, bool disable); void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue); -int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd); +int icssg_ndo_get_ts_config(struct net_device *ndev, + struct kernel_hwtstamp_config *config); +int icssg_ndo_set_ts_config(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); void icssg_ndo_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats); int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name, @@ -495,7 +513,14 @@ void prueth_put_cores(struct prueth *prueth, int slice); u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns); u32 emac_xmit_xdp_frame(struct prueth_emac *emac, struct xdp_frame *xdpf, - struct page *page, - unsigned int q_idx); + unsigned int q_idx, + enum prueth_tx_buff_type buff_type); +void prueth_rx_cleanup(void *data, dma_addr_t desc_dma); +void prueth_tx_cleanup(void *data, dma_addr_t desc_dma); +int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags); +static inline bool prueth_xdp_is_enabled(struct prueth_emac *emac) +{ + return !!READ_ONCE(emac->xdp_prog); +} #endif /* __NET_TI_ICSSG_PRUETH_H */ diff --git 
a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c index 5e225310c9de..7bb4f0d850cc 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c @@ -747,9 +747,11 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = icssg_ndo_tx_timeout, .ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1, - .ndo_eth_ioctl = icssg_ndo_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_get_stats64 = icssg_ndo_get_stats64, .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name, + .ndo_hwtstamp_get = icssg_ndo_get_ts_config, + .ndo_hwtstamp_set = icssg_ndo_set_ts_config, }; static int prueth_netdev_init(struct prueth *prueth, @@ -816,8 +818,7 @@ static int prueth_netdev_init(struct prueth *prueth, } else if (of_phy_is_fixed_link(eth_node)) { ret = of_phy_register_fixed_link(eth_node); if (ret) { - ret = dev_err_probe(prueth->dev, ret, - "failed to register fixed-link phy\n"); + dev_err_probe(prueth->dev, ret, "failed to register fixed-link phy\n"); goto free; } diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index 7007eb8bed36..b9cbd3b4a8a2 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -207,6 +207,11 @@ struct netcp_module { int (*del_vid)(void *intf_priv, int vid); int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd); int (*set_rx_mode)(void *intf_priv, bool promisc); + int (*hwtstamp_get)(void *intf_priv, + struct kernel_hwtstamp_config *cfg); + int (*hwtstamp_set)(void *intf_priv, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack); /* used internally */ struct list_head module_list; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 5ee13db568f0..5ed1c46bbcb1 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1781,6 +1781,62 @@ static int netcp_ndo_stop(struct net_device *ndev) return 0; } +static int netcp_ndo_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_intf_modpriv *intf_modpriv; + struct netcp_module *module; + int err = -EOPNOTSUPP; + + if (!netif_running(ndev)) + return -EINVAL; + + for_each_module(netcp, intf_modpriv) { + module = intf_modpriv->netcp_module; + if (!module->hwtstamp_get) + continue; + + err = module->hwtstamp_get(intf_modpriv->module_priv, config); + break; + } + + return err; +} + +static int netcp_ndo_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_intf_modpriv *intf_modpriv; + struct netcp_module *module; + int ret = -1, err = -EOPNOTSUPP; + + if (!netif_running(ndev)) + return -EINVAL; + + for_each_module(netcp, intf_modpriv) { + module = intf_modpriv->netcp_module; + if (!module->hwtstamp_set) + continue; + + err = module->hwtstamp_set(intf_modpriv->module_priv, config, + extack); + if ((err < 0) && (err != -EOPNOTSUPP)) { + NL_SET_ERR_MSG_WEAK_MOD(extack, + "At least one module failed to setup HW timestamps"); + ret = err; + goto out; + } + if (err == 0) + ret = err; + } + +out: + return (ret == 0) ? 
0 : err; +} + static int netcp_ndo_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) { @@ -1952,6 +2008,8 @@ static const struct net_device_ops netcp_netdev_ops = { .ndo_tx_timeout = netcp_ndo_tx_timeout, .ndo_select_queue = dev_pick_tx_zero, .ndo_setup_tc = netcp_setup_tc, + .ndo_hwtstamp_get = netcp_ndo_hwtstamp_get, + .ndo_hwtstamp_set = netcp_ndo_hwtstamp_set, }; static int netcp_create_interface(struct netcp_device *netcp_device, diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 55a1a96cd834..8f46e9be76b1 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2591,20 +2591,26 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info) return 0; } -static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr) +static int gbe_hwtstamp_get(void *intf_priv, struct kernel_hwtstamp_config *cfg) { - struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; - struct cpts *cpts = gbe_dev->cpts; - struct hwtstamp_config cfg; + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev; + struct phy_device *phy; + + gbe_dev = gbe_intf->gbe_dev; - if (!cpts) + if (!gbe_dev->cpts) + return -EOPNOTSUPP; + + phy = gbe_intf->slave->phy; + if (phy_has_hwtstamp(phy)) return -EOPNOTSUPP; - cfg.flags = 0; - cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg.rx_filter = gbe_dev->rx_ts_enabled; + cfg->flags = 0; + cfg->tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + cfg->rx_filter = gbe_dev->rx_ts_enabled; - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } static void gbe_hwtstamp(struct gbe_intf *gbe_intf) @@ -2637,19 +2643,23 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf) writel(ctl, GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2)); } -static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) +static int gbe_hwtstamp_set(void *intf_priv, struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { - struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; - struct cpts *cpts = gbe_dev->cpts; - struct hwtstamp_config cfg; + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev; + struct phy_device *phy; - if (!cpts) + gbe_dev = gbe_intf->gbe_dev; + + if (!gbe_dev->cpts) return -EOPNOTSUPP; - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; + phy = gbe_intf->slave->phy; + if (phy_has_hwtstamp(phy)) + return phy->mii_ts->hwtstamp_set(phy->mii_ts, cfg, extack); - switch (cfg.tx_type) { + switch (cfg->tx_type) { case HWTSTAMP_TX_OFF: gbe_dev->tx_ts_enabled = 0; break; @@ -2660,7 +2670,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) return -ERANGE; } - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE; break; @@ -2668,7 +2678,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: @@ -2680,7 +2690,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT; 
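These conversions move netcp/gbe (like icssg above) from raw ifreq copy_from_user()/copy_to_user() handling to the core's kernel_hwtstamp_config plumbing, but the userspace ABI is unchanged. A minimal sketch of the unchanged caller side, using only the standard uapi:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int enable_rx_timestamps(int sock, const char *ifname)
    {
            struct hwtstamp_config cfg = {
                    .tx_type   = HWTSTAMP_TX_OFF,
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&cfg;

            /* The core copies cfg in/out and calls ndo_hwtstamp_set();
             * drivers may upgrade rx_filter on the way back (icssg,
             * for instance, reports HWTSTAMP_FILTER_ALL).
             */
            return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }

The one behavioral difference is the fallback: with .ndo_eth_ioctl now set to phy_do_ioctl, non-timestamping ioctls go straight to the PHY instead of through the removed driver wrappers.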
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; default: return -ERANGE; @@ -2688,7 +2698,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) gbe_hwtstamp(gbe_intf); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } static void gbe_register_cpts(struct gbe_priv *gbe_dev) @@ -2745,12 +2755,15 @@ static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev) { } -static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req) +static inline int gbe_hwtstamp_get(void *intf_priv, + struct kernel_hwtstamp_config *cfg) { return -EOPNOTSUPP; } -static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req) +static inline int gbe_hwtstamp_set(void *intf_priv, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } @@ -2816,15 +2829,6 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd) struct gbe_intf *gbe_intf = intf_priv; struct phy_device *phy = gbe_intf->slave->phy; - if (!phy_has_hwtstamp(phy)) { - switch (cmd) { - case SIOCGHWTSTAMP: - return gbe_hwtstamp_get(gbe_intf, req); - case SIOCSHWTSTAMP: - return gbe_hwtstamp_set(gbe_intf, req); - } - } - if (phy) return phy_mii_ioctl(phy, req, cmd); @@ -3824,6 +3828,8 @@ static struct netcp_module gbe_module = { .add_vid = gbe_add_vid, .del_vid = gbe_del_vid, .ioctl = gbe_ioctl, + .hwtstamp_get = gbe_hwtstamp_get, + .hwtstamp_set = gbe_hwtstamp_set, }; static struct netcp_module xgbe_module = { @@ -3841,6 +3847,8 @@ static struct netcp_module xgbe_module = { .add_vid = gbe_add_vid, .del_vid = gbe_del_vid, .ioctl = gbe_ioctl, + .hwtstamp_get = gbe_hwtstamp_get, + .hwtstamp_set = gbe_hwtstamp_set, }; static int __init keystone_gbe_init(void) diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 591866fc9055..d35d1f3c10a1 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -364,6 +364,7 @@ static int gelic_card_init_chain(struct gelic_card *card, * gelic_descr_prepare_rx - reinitializes a rx descriptor * @card: card structure * @descr: descriptor to re-init + * @napi_mode: is it running in napi poll * * return 0 on success, <0 on failure * @@ -374,7 +375,8 @@ static int gelic_card_init_chain(struct gelic_card *card, * must be a multiple of GELIC_NET_RXBUF_ALIGN. 
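The gelic hunks that follow move RX skb allocation and delivery into NAPI context. A condensed sketch of the general pattern they adopt (the foo_* names are illustrative): napi_alloc_skb() draws from a per-CPU page-frag cache, so it is cheaper than netdev_alloc_skb() when the caller is already in softirq, and napi_gro_receive() lets GRO coalesce TCP segments before the stack sees them, which netif_receive_skb() bypasses.

    static void foo_deliver(struct foo_card *card, void *data, unsigned int len)
    {
            struct sk_buff *skb = napi_alloc_skb(&card->napi, len);

            if (unlikely(!skb))
                    return;

            skb_put_data(skb, data, len);
            skb->protocol = eth_type_trans(skb, card->netdev);
            napi_gro_receive(&card->napi, skb);
    }

Hence the new napi_mode flag on gelic_descr_prepare_rx(): refill from the poll loop may use the NAPI allocator, while refill from open/setup paths must not.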
*/ static int gelic_descr_prepare_rx(struct gelic_card *card, - struct gelic_descr *descr) + struct gelic_descr *descr, + bool napi_mode) { static const unsigned int rx_skb_size = ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) + @@ -392,7 +394,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card, descr->hw_regs.payload.dev_addr = 0; descr->hw_regs.payload.size = 0; - descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size); + if (napi_mode) + descr->skb = napi_alloc_skb(&card->napi, rx_skb_size); + else + descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size); if (!descr->skb) { descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */ return -ENOMEM; @@ -464,7 +469,7 @@ static int gelic_card_fill_rx_chain(struct gelic_card *card) do { if (!descr->skb) { - ret = gelic_descr_prepare_rx(card, descr); + ret = gelic_descr_prepare_rx(card, descr, false); if (ret) goto rewind; } @@ -964,7 +969,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr, netdev->stats.rx_bytes += skb->len; /* pass skb up to stack */ - netif_receive_skb(skb); + napi_gro_receive(&card->napi, skb); } /** @@ -1069,7 +1074,7 @@ refill: /* * this call can fail, propagate the error */ - prepare_rx_ret = gelic_descr_prepare_rx(card, descr); + prepare_rx_ret = gelic_descr_prepare_rx(card, descr, true); if (prepare_rx_ret) return prepare_rx_ret; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 06f401bd975c..f362e51c73ee 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -51,6 +51,11 @@ static const struct wx_stats wx_gstrings_fdir_stats[] = { WX_STAT("fdir_miss", stats.fdirmiss), }; +static const struct wx_stats wx_gstrings_rsc_stats[] = { + WX_STAT("rsc_aggregated", rsc_count), + WX_STAT("rsc_flushed", rsc_flush), +}; + /* drivers allocates num_tx_queues and num_rx_queues symmetrically so * we set the num_rx_queues to evaluate to num_tx_queues. This is * used because we do not have a good way to get the max number of @@ -64,16 +69,21 @@ static const struct wx_stats wx_gstrings_fdir_stats[] = { (sizeof(struct wx_queue_stats) / sizeof(u64))) #define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats) #define WX_FDIR_STATS_LEN ARRAY_SIZE(wx_gstrings_fdir_stats) +#define WX_RSC_STATS_LEN ARRAY_SIZE(wx_gstrings_rsc_stats) #define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN) int wx_get_sset_count(struct net_device *netdev, int sset) { struct wx *wx = netdev_priv(netdev); + int len = WX_STATS_LEN; switch (sset) { case ETH_SS_STATS: - return (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) ? 
- WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN; + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) + len += WX_FDIR_STATS_LEN; + if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) + len += WX_RSC_STATS_LEN; + return len; default: return -EOPNOTSUPP; } @@ -94,6 +104,10 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) for (i = 0; i < WX_FDIR_STATS_LEN; i++) ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string); } + if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) { + for (i = 0; i < WX_RSC_STATS_LEN; i++) + ethtool_puts(&p, wx_gstrings_rsc_stats[i].stat_string); + } for (i = 0; i < netdev->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -131,6 +145,13 @@ void wx_get_ethtool_stats(struct net_device *netdev, } } + if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) { + for (k = 0; k < WX_RSC_STATS_LEN; k++) { + p = (char *)wx + wx_gstrings_rsc_stats[k].stat_offset; + data[i++] = *(u64 *)p; + } + } + for (j = 0; j < netdev->num_tx_queues; j++) { ring = wx->tx_ring[j]; if (!ring) { @@ -219,9 +240,6 @@ int wx_nway_reset(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml40) - return -EOPNOTSUPP; - return phylink_ethtool_nway_reset(wx->phylink); } EXPORT_SYMBOL(wx_nway_reset); @@ -240,9 +258,6 @@ int wx_set_link_ksettings(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml40) - return -EOPNOTSUPP; - return phylink_ethtool_ksettings_set(wx->phylink, cmd); } EXPORT_SYMBOL(wx_set_link_ksettings); @@ -252,9 +267,6 @@ void wx_get_pauseparam(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml40) - return; - phylink_ethtool_get_pauseparam(wx->phylink, pause); } EXPORT_SYMBOL(wx_get_pauseparam); @@ -264,9 +276,6 @@ int wx_set_pauseparam(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_aml40) - return -EOPNOTSUPP; - return phylink_ethtool_set_pauseparam(wx->phylink, pause); } EXPORT_SYMBOL(wx_set_pauseparam); @@ -322,6 +331,40 @@ int wx_get_coalesce(struct net_device *netdev, } EXPORT_SYMBOL(wx_get_coalesce); +static void wx_update_rsc(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + bool need_reset = false; + + /* nothing to do if LRO or RSC are not enabled */ + if (!test_bit(WX_FLAG_RSC_CAPABLE, wx->flags) || + !(netdev->features & NETIF_F_LRO)) + return; + + /* check the feature flag value and enable RSC if necessary */ + if (wx->rx_itr_setting == 1 || + wx->rx_itr_setting > WX_MIN_RSC_ITR) { + if (!test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) { + set_bit(WX_FLAG_RSC_ENABLED, wx->flags); + dev_info(&wx->pdev->dev, + "rx-usecs value high enough to re-enable RSC\n"); + + need_reset = true; + } + /* if interrupt rate is too high then disable RSC */ + } else if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) { + clear_bit(WX_FLAG_RSC_ENABLED, wx->flags); + dev_info(&wx->pdev->dev, + "rx-usecs set too low, disabling RSC\n"); + + need_reset = true; + } + + /* reset the device to apply the new RSC setting */ + if (need_reset && wx->do_reset) + wx->do_reset(netdev); +} + int wx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, @@ -414,6 +457,8 @@ int wx_set_coalesce(struct net_device *netdev, wx_write_eitr(q_vector); } + wx_update_rsc(wx); + return 0; } EXPORT_SYMBOL(wx_set_coalesce); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c 
b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index b37d6cfbfbe9..58b8300e3d2c 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1779,7 +1779,9 @@ EXPORT_SYMBOL(wx_set_rx_mode); static void wx_set_rx_buffer_len(struct wx *wx) { struct net_device *netdev = wx->netdev; + struct wx_ring *rx_ring; u32 mhadd, max_frame; + int i; max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; /* adjust max frame to be at least the size of a standard frame */ @@ -1789,6 +1791,19 @@ static void wx_set_rx_buffer_len(struct wx *wx) mhadd = rd32(wx, WX_PSR_MAX_SZ); if (max_frame != mhadd) wr32(wx, WX_PSR_MAX_SZ, max_frame); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < wx->num_rx_queues; i++) { + rx_ring = wx->rx_ring[i]; + rx_ring->rx_buf_len = WX_RXBUFFER_2K; +#if (PAGE_SIZE < 8192) + if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) + rx_ring->rx_buf_len = WX_RXBUFFER_3K; +#endif + } } /** @@ -1865,11 +1880,27 @@ static void wx_configure_srrctl(struct wx *wx, srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT; /* configure the packet buffer length */ - srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT; + srrctl |= rx_ring->rx_buf_len >> WX_PX_RR_CFG_BSIZEPKT_SHIFT; wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); } +static void wx_configure_rscctl(struct wx *wx, + struct wx_ring *ring) +{ + u8 reg_idx = ring->reg_idx; + u32 rscctrl; + + if (!test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) + return; + + rscctrl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + rscctrl |= WX_PX_RR_CFG_RSC; + rscctrl |= WX_PX_RR_CFG_MAX_RSCBUF_16; + + wr32(wx, WX_PX_RR_CFG(reg_idx), rscctrl); +} + static void wx_configure_tx_ring(struct wx *wx, struct wx_ring *ring) { @@ -1905,6 +1936,15 @@ static void wx_configure_tx_ring(struct wx *wx, memset(ring->tx_buffer_info, 0, sizeof(struct wx_tx_buffer) * ring->count); + if (ring->headwb_mem) { + wr32(wx, WX_PX_TR_HEAD_ADDRL(reg_idx), + ring->headwb_dma & DMA_BIT_MASK(32)); + wr32(wx, WX_PX_TR_HEAD_ADDRH(reg_idx), + upper_32_bits(ring->headwb_dma)); + + txdctl |= WX_PX_TR_CFG_HEAD_WB; + } + /* enable queue */ wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl); @@ -1935,6 +1975,10 @@ static void wx_configure_rx_ring(struct wx *wx, rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT; rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT; + + if (test_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags)) + rxdctl |= WX_PX_RR_CFG_DESC_MERGE; + wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl); /* reset head and tail pointers */ @@ -1943,6 +1987,7 @@ static void wx_configure_rx_ring(struct wx *wx, ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx); wx_configure_srrctl(wx, ring); + wx_configure_rscctl(wx, ring); /* initialize rx_buffer_info */ memset(ring->rx_buffer_info, 0, @@ -2181,7 +2226,9 @@ void wx_configure_rx(struct wx *wx) /* RSC Setup */ psrctl = rd32(wx, WX_PSR_CTL); psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */ - psrctl |= WX_PSR_CTL_RSC_DIS; + psrctl &= ~WX_PSR_CTL_RSC_DIS; + if (!test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) + psrctl |= WX_PSR_CTL_RSC_DIS; wr32(wx, WX_PSR_CTL, psrctl); } @@ -2190,6 +2237,12 @@ void wx_configure_rx(struct wx *wx) /* set_rx_buffer_len must be called before ring initialization */ wx_set_rx_buffer_len(wx); + if (test_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags)) { + wr32(wx, WX_RDM_DCACHE_CTL, WX_RDM_DCACHE_CTL_EN); + wr32m(wx, WX_RDM_RSC_CTL, + WX_RDM_RSC_CTL_FREE_CTL | WX_RDM_RSC_CTL_FREE_CNT_DIS, + WX_RDM_RSC_CTL_FREE_CTL); + } /* Setup 
the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ @@ -2806,6 +2859,18 @@ void wx_update_stats(struct wx *wx) wx->hw_csum_rx_error = hw_csum_rx_error; wx->hw_csum_rx_good = hw_csum_rx_good; + if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) { + u64 rsc_count = 0; + u64 rsc_flush = 0; + + for (i = 0; i < wx->num_rx_queues; i++) { + rsc_count += wx->rx_ring[i]->rx_stats.rsc_count; + rsc_flush += wx->rx_ring[i]->rx_stats.rsc_flush; + } + wx->rsc_count = rsc_count; + wx->rsc_flush = rsc_flush; + } + for (i = 0; i < wx->num_tx_queues; i++) { struct wx_ring *tx_ring = wx->tx_ring[i]; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 3adf7048320a..32cadafa4b3b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -235,7 +235,7 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, { unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) - unsigned int truesize = WX_RX_BUFSZ; + unsigned int truesize = wx_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); #endif @@ -341,7 +341,7 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count) /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, - WX_RX_BUFSZ, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); rx_desc->read.pkt_addr = @@ -404,6 +404,7 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring, union wx_rx_desc *rx_desc, struct sk_buff *skb) { + struct wx *wx = rx_ring->q_vector->wx; u32 ntc = rx_ring->next_to_clean + 1; /* fetch, update, and store next to clean */ @@ -412,6 +413,24 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring, prefetch(WX_RX_DESC(rx_ring, ntc)); + /* update RSC append count if present */ + if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) { + __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(WX_RXD_RSCCNT_MASK); + + if (unlikely(rsc_enabled)) { + u32 rsc_cnt = le32_to_cpu(rsc_enabled); + + rsc_cnt >>= WX_RXD_RSCCNT_SHIFT; + WX_CB(skb)->append_cnt += rsc_cnt - 1; + + /* update ntc based on RSC value */ + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= WX_RXD_NEXTP_MASK; + ntc >>= WX_RXD_NEXTP_SHIFT; + } + } + /* if we are the last buffer then there is nothing else to do */ if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))) return false; @@ -582,6 +601,33 @@ static void wx_rx_vlan(struct wx_ring *ring, union wx_rx_desc *rx_desc, } } +static void wx_set_rsc_gso_size(struct wx_ring *ring, + struct sk_buff *skb) +{ + u16 hdr_len = skb_headlen(skb); + + /* set gso_size to avoid messing up TCP MSS */ + skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), + WX_CB(skb)->append_cnt); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +} + +static void wx_update_rsc_stats(struct wx_ring *rx_ring, + struct sk_buff *skb) +{ + /* if append_cnt is 0 then frame is not RSC */ + if (!WX_CB(skb)->append_cnt) + return; + + rx_ring->rx_stats.rsc_count += WX_CB(skb)->append_cnt; + rx_ring->rx_stats.rsc_flush++; + + wx_set_rsc_gso_size(rx_ring, skb); + + /* gso_size is computed using append_cnt so always clear it last */ + WX_CB(skb)->append_cnt = 0; +} + /** * wx_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on @@ -598,6 +644,9 @@ static void wx_process_skb_fields(struct wx_ring *rx_ring, { struct wx *wx = netdev_priv(rx_ring->netdev); + 
if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) + wx_update_rsc_stats(rx_ring, skb); + wx_rx_hash(rx_ring, rx_desc, skb); wx_rx_checksum(rx_ring, rx_desc, skb); @@ -735,9 +784,22 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, /* prevent any other reads prior to eop_desc */ smp_rmb(); - /* if DD is not set pending work has not been completed */ - if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD))) + if (tx_ring->headwb_mem) { + u32 head = *tx_ring->headwb_mem; + + if (head == tx_ring->next_to_clean) + break; + else if (head > tx_ring->next_to_clean && + !(tx_buffer->next_eop >= tx_ring->next_to_clean && + tx_buffer->next_eop < head)) + break; + else if (!(tx_buffer->next_eop >= tx_ring->next_to_clean || + tx_buffer->next_eop < head)) + break; + } else if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD))) { + /* if DD is not set pending work has not been completed */ break; + } /* clear next_to_watch to prevent false hangs */ tx_buffer->next_to_watch = NULL; @@ -1075,6 +1137,10 @@ static int wx_tx_map(struct wx_ring *tx_ring, /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; + /* set next_eop for amlite tx head wb */ + if (tx_ring->headwb_mem) + first->next_eop = i; + i++; if (i == tx_ring->count) i = 0; @@ -2532,7 +2598,7 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring) dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - WX_RX_BUFSZ, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); /* free resources associated with mapping */ @@ -2683,6 +2749,16 @@ void wx_clean_all_tx_rings(struct wx *wx) } EXPORT_SYMBOL(wx_clean_all_tx_rings); +static void wx_free_headwb_resources(struct wx_ring *tx_ring) +{ + if (!tx_ring->headwb_mem) + return; + + dma_free_coherent(tx_ring->dev, sizeof(u32), + tx_ring->headwb_mem, tx_ring->headwb_dma); + tx_ring->headwb_mem = NULL; +} + /** * wx_free_tx_resources - Free Tx Resources per Queue * @tx_ring: Tx descriptor ring for a specific queue @@ -2702,6 +2778,8 @@ static void wx_free_tx_resources(struct wx_ring *tx_ring) dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; + + wx_free_headwb_resources(tx_ring); } /** @@ -2731,13 +2809,14 @@ static int wx_alloc_page_pool(struct wx_ring *rx_ring) struct page_pool_params pp_params = { .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, - .order = 0, - .pool_size = rx_ring->count, + .order = wx_rx_pg_order(rx_ring), + .pool_size = rx_ring->count * rx_ring->rx_buf_len / + wx_rx_pg_size(rx_ring), .nid = dev_to_node(rx_ring->dev), .dev = rx_ring->dev, .dma_dir = DMA_FROM_DEVICE, .offset = 0, - .max_len = PAGE_SIZE, + .max_len = wx_rx_pg_size(rx_ring), }; rx_ring->page_pool = page_pool_create(&pp_params); @@ -2840,6 +2919,24 @@ err_setup_rx: return err; } +static void wx_setup_headwb_resources(struct wx_ring *tx_ring) +{ + struct wx *wx = netdev_priv(tx_ring->netdev); + + if (!test_bit(WX_FLAG_TXHEAD_WB_ENABLED, wx->flags)) + return; + + if (!tx_ring->q_vector) + return; + + tx_ring->headwb_mem = dma_alloc_coherent(tx_ring->dev, + sizeof(u32), + &tx_ring->headwb_dma, + GFP_KERNEL); + if (!tx_ring->headwb_mem) + dev_info(tx_ring->dev, "Allocate headwb memory failed, disable it\n"); +} + /** * wx_setup_tx_resources - allocate Tx resources (Descriptors) * @tx_ring: tx descriptor ring (for a specific queue) to setup @@ -2880,6 +2977,8 @@ static int wx_setup_tx_resources(struct wx_ring *tx_ring) if (!tx_ring->desc) goto err; + wx_setup_headwb_resources(tx_ring); + tx_ring->next_to_use = 0; 
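A worked example for the wx_set_rsc_gso_size() computation above, assuming 15 RSC-merged segments of 1448 payload bytes behind a single 66-byte Ethernet+IP+TCP header (header size illustrative):

    /*
     *   skb->len   = 66 + 15 * 1448          = 21786
     *   hdr_len    = skb_headlen(skb)        = 66
     *   append_cnt                           = 15
     *   gso_size   = DIV_ROUND_UP(21786 - 66, 15) = 1448
     */

That is, the original MSS is recovered from the coalesced super-frame, so the stack's TCP accounting (and any later GSO resegmentation) stays correct; append_cnt is cleared only after this, since the gso_size math depends on it.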
tx_ring->next_to_clean = 0; @@ -3026,8 +3125,25 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER)) wx_set_rx_mode(netdev); + if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) { + if (!(features & NETIF_F_LRO)) { + if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) + need_reset = true; + clear_bit(WX_FLAG_RSC_ENABLED, wx->flags); + } else if (!(test_bit(WX_FLAG_RSC_ENABLED, wx->flags))) { + if (wx->rx_itr_setting == 1 || + wx->rx_itr_setting > WX_MIN_RSC_ITR) { + set_bit(WX_FLAG_RSC_ENABLED, wx->flags); + need_reset = true; + } else if (changed & NETIF_F_LRO) { + dev_info(&wx->pdev->dev, + "rx-usecs set too low, disable RSC\n"); + } + } + } + if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))) - return 0; + goto out; /* Check if Flow Director n-tuple support was enabled or disabled. If * the state changed, we need to reset. @@ -3053,6 +3169,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) break; } +out: if (need_reset && wx->do_reset) wx->do_reset(netdev); @@ -3102,6 +3219,14 @@ netdev_features_t wx_fix_features(struct net_device *netdev, } } + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + /* Turn off LRO if not RSC capable */ + if (!test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) + features &= ~NETIF_F_LRO; + return features; } EXPORT_SYMBOL(wx_fix_features); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c index c6d158cd70da..493da5fffdb6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c @@ -122,6 +122,10 @@ static int __wx_enable_sriov(struct wx *wx, u8 num_vfs) WX_CFG_PORT_CTL_NUM_VT_MASK, value); + /* Disable RSC when in SR-IOV mode */ + clear_bit(WX_FLAG_RSC_CAPABLE, wx->flags); + clear_bit(WX_FLAG_RSC_ENABLED, wx->flags); + return ret; } diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 2f8319e03182..29e5c5470c94 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -83,8 +83,13 @@ /*********************** Receive DMA registers **************************/ #define WX_RDM_VF_RE(_i) (0x12004 + ((_i) * 4)) +#define WX_RDM_RSC_CTL 0x1200C +#define WX_RDM_RSC_CTL_FREE_CNT_DIS BIT(8) +#define WX_RDM_RSC_CTL_FREE_CTL BIT(7) #define WX_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4)) #define WX_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4)) +#define WX_RDM_DCACHE_CTL 0x120A8 +#define WX_RDM_DCACHE_CTL_EN BIT(0) #define WX_RDM_DRP_PKT 0x12500 #define WX_RDM_PKT_CNT 0x12504 #define WX_RDM_BYTE_CNT_LSB 0x12508 @@ -421,6 +426,7 @@ enum WX_MSCA_CMD_value { #define WX_7K_ITR 595 #define WX_12K_ITR 336 #define WX_20K_ITR 200 +#define WX_MIN_RSC_ITR 24 #define WX_SP_MAX_EITR 0x00000FF8U #define WX_AML_MAX_EITR 0x00000FFFU #define WX_EM_MAX_EITR 0x00007FFCU @@ -431,12 +437,15 @@ enum WX_MSCA_CMD_value { #define WX_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40)) #define WX_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40)) #define WX_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) +#define WX_PX_TR_HEAD_ADDRL(_i) (0x03028 + ((_i) * 0x40)) +#define WX_PX_TR_HEAD_ADDRH(_i) (0x0302C + ((_i) * 0x40)) /* Transmit Config masks */ #define WX_PX_TR_CFG_ENABLE BIT(0) /* Ena specific Tx Queue */ #define WX_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ #define WX_PX_TR_CFG_SWFLSH BIT(26) /* Tx Desc. 
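[Editor's aside, not part of the patch: the three early "break" conditions in the wx_clean_tx_irq() head-writeback branch above encode one circular-buffer test: a descriptor at index next_eop is complete once it falls inside the half-open window [next_to_clean, head), taken modulo the ring size. A minimal standalone sketch of that test, with assumed ring values; the helper name is hypothetical.]

/* Standalone illustration of the wrap-around completion window. */
#include <stdbool.h>
#include <stdio.h>

static bool eop_done(unsigned int ntc, unsigned int head,
		     unsigned int next_eop)
{
	if (head == ntc)	/* hardware has completed nothing new */
		return false;
	if (head > ntc)		/* window does not wrap */
		return next_eop >= ntc && next_eop < head;
	/* window wraps past the end of the ring */
	return next_eop >= ntc || next_eop < head;
}

int main(void)
{
	/* assumed 512-entry ring where head wrapped from 510 to 5 */
	printf("%d\n", eop_done(508, 5, 511));	/* 1: inside the window */
	printf("%d\n", eop_done(508, 5, 6));	/* 0: not yet written back */
	return 0;
}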
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
index c6d158cd70da..493da5fffdb6 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -122,6 +122,10 @@ static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
 			WX_CFG_PORT_CTL_NUM_VT_MASK,
 			value);
 
+	/* Disable RSC when in SR-IOV mode */
+	clear_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
+	clear_bit(WX_FLAG_RSC_ENABLED, wx->flags);
+
 	return ret;
 }
 
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 2f8319e03182..29e5c5470c94 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -83,8 +83,13 @@
 /*********************** Receive DMA registers **************************/
 #define WX_RDM_VF_RE(_i)             (0x12004 + ((_i) * 4))
+#define WX_RDM_RSC_CTL               0x1200C
+#define WX_RDM_RSC_CTL_FREE_CNT_DIS  BIT(8)
+#define WX_RDM_RSC_CTL_FREE_CTL      BIT(7)
 #define WX_RDM_PF_QDE(_i)            (0x12080 + ((_i) * 4))
 #define WX_RDM_VFRE_CLR(_i)          (0x120A0 + ((_i) * 4))
+#define WX_RDM_DCACHE_CTL            0x120A8
+#define WX_RDM_DCACHE_CTL_EN         BIT(0)
 #define WX_RDM_DRP_PKT               0x12500
 #define WX_RDM_PKT_CNT               0x12504
 #define WX_RDM_BYTE_CNT_LSB          0x12508
@@ -421,6 +426,7 @@ enum WX_MSCA_CMD_value {
 #define WX_7K_ITR                    595
 #define WX_12K_ITR                   336
 #define WX_20K_ITR                   200
+#define WX_MIN_RSC_ITR               24
 #define WX_SP_MAX_EITR               0x00000FF8U
 #define WX_AML_MAX_EITR              0x00000FFFU
 #define WX_EM_MAX_EITR               0x00007FFCU
@@ -431,12 +437,15 @@ enum WX_MSCA_CMD_value {
 #define WX_PX_TR_WP(_i)              (0x03008 + ((_i) * 0x40))
 #define WX_PX_TR_RP(_i)              (0x0300C + ((_i) * 0x40))
 #define WX_PX_TR_CFG(_i)             (0x03010 + ((_i) * 0x40))
+#define WX_PX_TR_HEAD_ADDRL(_i)      (0x03028 + ((_i) * 0x40))
+#define WX_PX_TR_HEAD_ADDRH(_i)      (0x0302C + ((_i) * 0x40))
 /* Transmit Config masks */
 #define WX_PX_TR_CFG_ENABLE          BIT(0) /* Ena specific Tx Queue */
 #define WX_PX_TR_CFG_TR_SIZE_SHIFT   1 /* tx desc number per ring */
 #define WX_PX_TR_CFG_SWFLSH          BIT(26) /* Tx Desc. wr-bk flushing */
 #define WX_PX_TR_CFG_WTHRESH_SHIFT   16 /* shift to WTHRESH bits */
 #define WX_PX_TR_CFG_THRE_SHIFT      8
+#define WX_PX_TR_CFG_HEAD_WB         BIT(27)
 
 /* Receive DMA Registers */
 #define WX_PX_RR_BAL(_i)             (0x01000 + ((_i) * 0x40))
@@ -448,7 +457,10 @@ enum WX_MSCA_CMD_value {
 /* PX_RR_CFG bit definitions */
 #define WX_PX_RR_CFG_VLAN            BIT(31)
 #define WX_PX_RR_CFG_DROP_EN         BIT(30)
+#define WX_PX_RR_CFG_RSC             BIT(29)
 #define WX_PX_RR_CFG_SPLIT_MODE      BIT(26)
+#define WX_PX_RR_CFG_MAX_RSCBUF_16   FIELD_PREP(GENMASK(24, 23), 3)
+#define WX_PX_RR_CFG_DESC_MERGE      BIT(19)
 #define WX_PX_RR_CFG_RR_THER_SHIFT   16
 #define WX_PX_RR_CFG_RR_HDR_SZ       GENMASK(15, 12)
 #define WX_PX_RR_CFG_RR_BUF_SZ       GENMASK(11, 8)
@@ -544,14 +556,9 @@ enum WX_MSCA_CMD_value {
 /* Supported Rx Buffer Sizes */
 #define WX_RXBUFFER_256      256    /* Used for skb receive header */
 #define WX_RXBUFFER_2K       2048
+#define WX_RXBUFFER_3K       3072
 #define WX_MAX_RXBUFFER      16384  /* largest size for single descriptor */
 
-#if MAX_SKB_FRAGS < 8
-#define WX_RX_BUFSZ      ALIGN(WX_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024)
-#else
-#define WX_RX_BUFSZ      WX_RXBUFFER_2K
-#endif
-
 #define WX_RX_BUFFER_WRITE   16     /* Must be power of 2 */
 
 #define WX_MAX_DATA_PER_TXD  BIT(14)
@@ -643,6 +650,12 @@ enum wx_l2_ptypes {
 #define WX_RXD_PKTTYPE(_rxd) \
 	((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF)
+
+#define WX_RXD_RSCCNT_MASK   GENMASK(20, 17)
+#define WX_RXD_RSCCNT_SHIFT  17
+#define WX_RXD_NEXTP_MASK    GENMASK(19, 4)
+#define WX_RXD_NEXTP_SHIFT   4
+
 /*********************** Transmit Descriptor Config Masks ****************/
 #define WX_TXD_STAT_DD       BIT(0)  /* Descriptor Done */
 #define WX_TXD_DTYP_DATA     0       /* Adv Data Descriptor */
@@ -1005,6 +1018,7 @@ struct wx_tx_buffer {
 	DEFINE_DMA_UNMAP_LEN(len);
 	__be16 protocol;
 	u32 tx_flags;
+	u32 next_eop;
 };
 
 struct wx_rx_buffer {
@@ -1029,6 +1043,8 @@ struct wx_rx_queue_stats {
 	u64 csum_good_cnt;
 	u64 csum_err;
 	u64 alloc_rx_buff_failed;
+	u64 rsc_count;
+	u64 rsc_flush;
 };
 
 /* iterator for handling rings in ring container */
@@ -1056,6 +1072,8 @@ struct wx_ring {
 	};
 	u8 __iomem *tail;
 	dma_addr_t dma;		/* phys. address of descriptor ring */
+	dma_addr_t headwb_dma;
+	u32 *headwb_mem;
 	unsigned int size;	/* length in bytes */
 
 	u16 count;		/* amount of descriptors */
@@ -1069,6 +1087,7 @@ struct wx_ring {
 	 */
 	u16 next_to_use;
 	u16 next_to_clean;
+	u16 rx_buf_len;
 	union {
 		u16 next_to_alloc;
 		struct {
@@ -1225,13 +1244,16 @@ enum wx_pf_flags {
 	WX_FLAG_FDIR_HASH,
 	WX_FLAG_FDIR_PERFECT,
 	WX_FLAG_RSC_CAPABLE,
+	WX_FLAG_RSC_ENABLED,
 	WX_FLAG_RX_HWTSTAMP_ENABLED,
 	WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
 	WX_FLAG_PTP_PPS_ENABLED,
 	WX_FLAG_NEED_LINK_CONFIG,
-	WX_FLAG_NEED_SFP_RESET,
+	WX_FLAG_NEED_MODULE_RESET,
 	WX_FLAG_NEED_UPDATE_LINK,
 	WX_FLAG_NEED_DO_RESET,
+	WX_FLAG_RX_MERGE_ENABLED,
+	WX_FLAG_TXHEAD_WB_ENABLED,
 	WX_PF_FLAGS_NBITS /* must be last */
 };
@@ -1271,8 +1293,6 @@ struct wx {
 	/* PHY stuff */
 	bool notify_down;
-	int adv_speed;
-	int adv_duplex;
 	unsigned int link;
 	int speed;
 	int duplex;
@@ -1340,6 +1360,8 @@ struct wx {
 	u64 hw_csum_rx_good;
 	u64 hw_csum_rx_error;
 	u64 alloc_rx_buff_failed;
+	u64 rsc_count;
+	u64 rsc_flush;
 	unsigned int num_vfs;
 	struct vf_data_storage *vfinfo;
 	struct vf_macvlans vf_mvs;
@@ -1471,4 +1493,15 @@ static inline int wx_set_state_reset(struct wx *wx)
 	return 0;
 }
 
+static inline unsigned int wx_rx_pg_order(struct wx_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring->rx_buf_len == WX_RXBUFFER_3K)
+		return 1;
+#endif
+	return 0;
+}
+
+#define wx_rx_pg_size(_ring) (PAGE_SIZE << wx_rx_pg_order(_ring))
+
 #endif /* _WX_TYPE_H_ */
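[Editor's aside, not part of the patch: a worked example of how wx_rx_pg_order()/wx_rx_pg_size() above feed the page_pool sizing in wx_alloc_page_pool(). With 4 KB pages, a 3 KB rx_buf_len forces order-1 (8 KB) pages, and pool_size = count * rx_buf_len / page size shrinks in proportion to how many buffers share a page. The ring count is an assumed value.]

/* Standalone illustration of the RSC page-pool sizing math. */
#include <stdio.h>

#define PAGE_SIZE	4096u
#define WX_RXBUFFER_3K	3072u

static unsigned int rx_pg_order(unsigned int rx_buf_len)
{
	if (PAGE_SIZE < 8192 && rx_buf_len == WX_RXBUFFER_3K)
		return 1;	/* bump to order-1 pages for 3K buffers */
	return 0;
}

int main(void)
{
	unsigned int count = 512;	/* assumed descriptor ring size */
	unsigned int buf_len = WX_RXBUFFER_3K;
	unsigned int pg_size = PAGE_SIZE << rx_pg_order(buf_len);

	/* 512 * 3072 / 8192 = 192 pages backing the ring */
	printf("order=%u pg_size=%u pool_size=%u\n",
	       rx_pg_order(buf_len), pg_size, count * buf_len / pg_size);
	return 0;
}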
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf.h b/drivers/net/ethernet/wangxun/libwx/wx_vf.h
index 3f16de0fa427..eb6ca3fe4e97 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_vf.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf.h
@@ -74,6 +74,7 @@
 #define WX_VXRXDCTL_BUFSZ(f)     FIELD_PREP(GENMASK(11, 8), f)
 #define WX_VXRXDCTL_HDRSZ_MASK   GENMASK(15, 12)
 #define WX_VXRXDCTL_HDRSZ(f)     FIELD_PREP(GENMASK(15, 12), f)
+#define WX_VXRXDCTL_DESC_MERGE   BIT(19)
 #define WX_VXRXDCTL_RSCMAX_MASK  GENMASK(24, 23)
 #define WX_VXRXDCTL_RSCMAX(f)    FIELD_PREP(GENMASK(24, 23), f)
 #define WX_VXRXDCTL_RSCEN        BIT(29)
@@ -91,6 +92,9 @@
 #define WX_VXTXDCTL_PTHRESH(f)   FIELD_PREP(GENMASK(11, 8), f)
 #define WX_VXTXDCTL_WTHRESH(f)   FIELD_PREP(GENMASK(22, 16), f)
 #define WX_VXTXDCTL_FLUSH        BIT(26)
+#define WX_VXTXDCTL_HEAD_WB      BIT(27)
+#define WX_VXTXD_HEAD_ADDRL(r)   (0x3028 + (0x40 * (r)))
+#define WX_VXTXD_HEAD_ADDRH(r)   (0x302C + (0x40 * (r)))
 
 #define WX_PFLINK_STATUS(g)      FIELD_GET(BIT(0), g)
 #define WX_PFLINK_SPEED(g)       FIELD_GET(GENMASK(31, 1), g)
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
index a87887b9f8ee..aa8be036956c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
@@ -132,6 +132,15 @@ static void wx_configure_tx_ring_vf(struct wx *wx, struct wx_ring *ring)
 	txdctl |= WX_VXTXDCTL_BUFLEN(wx_buf_len(ring->count));
 	txdctl |= WX_VXTXDCTL_ENABLE;
 
+	if (ring->headwb_mem) {
+		wr32(wx, WX_VXTXD_HEAD_ADDRL(reg_idx),
+		     ring->headwb_dma & DMA_BIT_MASK(32));
+		wr32(wx, WX_VXTXD_HEAD_ADDRH(reg_idx),
+		     upper_32_bits(ring->headwb_dma));
+
+		txdctl |= WX_VXTXDCTL_HEAD_WB;
+	}
+
 	/* reinitialize tx_buffer_info */
 	memset(ring->tx_buffer_info, 0,
 	       sizeof(struct wx_tx_buffer) * ring->count);
@@ -272,6 +281,9 @@ void wx_configure_rx_ring_vf(struct wx *wx, struct wx_ring *ring)
 		rxdctl |= WX_VXRXDCTL_RSCMAX(0);
 	rxdctl |= WX_VXRXDCTL_RSCEN;
 
+	if (test_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags))
+		rxdctl |= WX_VXRXDCTL_DESC_MERGE;
+
 	wr32(wx, WX_VXRXDCTL(reg_idx), rxdctl);
 
 	/* pf/vf reuse */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
index dc87ccad9652..62d7f47d4f8d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
@@ -17,10 +17,15 @@
 
 void txgbe_gpio_init_aml(struct wx *wx)
 {
-	u32 status;
+	u32 status, mod_rst;
+
+	if (wx->mac.type == wx_mac_aml40)
+		mod_rst = TXGBE_GPIOBIT_4;
+	else
+		mod_rst = TXGBE_GPIOBIT_2;
 
-	wr32(wx, WX_GPIO_INTTYPE_LEVEL, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3);
-	wr32(wx, WX_GPIO_INTEN, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3);
+	wr32(wx, WX_GPIO_INTTYPE_LEVEL, mod_rst);
+	wr32(wx, WX_GPIO_INTEN, mod_rst);
 
 	status = rd32(wx, WX_GPIO_INTSTATUS);
 	for (int i = 0; i < 6; i++) {
@@ -33,20 +38,20 @@ irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data)
 {
 	struct txgbe *txgbe = data;
 	struct wx *wx = txgbe->wx;
-	u32 status;
+	u32 status, mod_rst;
+
+	if (wx->mac.type == wx_mac_aml40)
+		mod_rst = TXGBE_GPIOBIT_4;
+	else
+		mod_rst = TXGBE_GPIOBIT_2;
 
 	wr32(wx, WX_GPIO_INTMASK, 0xFF);
 	status = rd32(wx, WX_GPIO_INTSTATUS);
-	if (status & TXGBE_GPIOBIT_2) {
-		set_bit(WX_FLAG_NEED_SFP_RESET, wx->flags);
-		wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_2);
+	if (status & mod_rst) {
+		set_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
+		wr32(wx, WX_GPIO_EOI, mod_rst);
 		wx_service_event_schedule(wx);
 	}
-	if (status & TXGBE_GPIOBIT_3) {
-		set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
-		wx_service_event_schedule(wx);
-		wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_3);
-	}
 
 	wr32(wx, WX_GPIO_INTMASK, 0);
 	return IRQ_HANDLED;
@@ -56,7 +61,7 @@ int txgbe_test_hostif(struct wx *wx)
 {
 	struct txgbe_hic_ephy_getlink buffer;
 
-	if (wx->mac.type != wx_mac_aml)
+	if (wx->mac.type == wx_mac_sp)
 		return 0;
 
 	buffer.hdr.cmd = FW_PHY_GET_LINK_CMD;
@@ -68,15 +73,49 @@ int txgbe_test_hostif(struct wx *wx)
 					WX_HI_COMMAND_TIMEOUT, true);
 }
 
-static int txgbe_identify_sfp_hostif(struct wx *wx, struct txgbe_hic_i2c_read *buffer)
+int txgbe_read_eeprom_hostif(struct wx *wx,
+			     struct txgbe_hic_i2c_read *buffer,
+			     u32 length, u8 *data)
 {
-	buffer->hdr.cmd = FW_READ_SFP_INFO_CMD;
+	u32 dword_len, offset, value, i;
+	int err;
+
+	buffer->hdr.cmd = FW_READ_EEPROM_CMD;
 	buffer->hdr.buf_len = sizeof(struct txgbe_hic_i2c_read) -
 			      sizeof(struct wx_hic_hdr);
 	buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
 
+	err = wx_host_interface_command(wx, (u32 *)buffer,
+					sizeof(struct txgbe_hic_i2c_read),
+					WX_HI_COMMAND_TIMEOUT, false);
+	if (err != 0)
+		return err;
+
+	/* buffer length offset to read return data */
+	offset = sizeof(struct txgbe_hic_i2c_read) >> 2;
+	dword_len = round_up(length, 4) >> 2;
+
+	for (i = 0; i < dword_len; i++) {
+		value = rd32a(wx, WX_FW2SW_MBOX, i + offset);
+		le32_to_cpus(&value);
+
+		memcpy(data, &value, 4);
+		data += 4;
+	}
+
+	return 0;
+}
+
+static int txgbe_identify_module_hostif(struct wx *wx,
+					struct txgbe_hic_get_module_info *buffer)
+{
+	buffer->hdr.cmd = FW_GET_MODULE_INFO_CMD;
+	buffer->hdr.buf_len = sizeof(struct txgbe_hic_get_module_info) -
+			      sizeof(struct wx_hic_hdr);
+	buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
 	return wx_host_interface_command(wx, (u32 *)buffer,
-					 sizeof(struct txgbe_hic_i2c_read),
+					 sizeof(struct txgbe_hic_get_module_info),
 					 WX_HI_COMMAND_TIMEOUT, true);
 }
@@ -90,12 +129,18 @@ static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int
 	buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
 
 	switch (speed) {
+	case SPEED_40000:
+		buffer.speed = TXGBE_LINK_SPEED_40GB_FULL;
+		break;
 	case SPEED_25000:
 		buffer.speed = TXGBE_LINK_SPEED_25GB_FULL;
 		break;
 	case SPEED_10000:
 		buffer.speed = TXGBE_LINK_SPEED_10GB_FULL;
 		break;
+	default:
+		buffer.speed = TXGBE_LINK_SPEED_UNKNOWN;
+		break;
 	}
 
 	buffer.fec_mode = TXGBE_PHY_FEC_AUTO;
@@ -106,28 +151,33 @@ static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int
 					 WX_HI_COMMAND_TIMEOUT, true);
 }
 
-static void txgbe_get_link_capabilities(struct wx *wx)
+static void txgbe_get_link_capabilities(struct wx *wx, int *speed,
+					int *autoneg, int *duplex)
 {
 	struct txgbe *txgbe = wx->priv;
 
-	if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->sfp_interfaces))
-		wx->adv_speed = SPEED_25000;
-	else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->sfp_interfaces))
-		wx->adv_speed = SPEED_10000;
+	if (test_bit(PHY_INTERFACE_MODE_XLGMII, txgbe->link_interfaces))
+		*speed = SPEED_40000;
+	else if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->link_interfaces))
+		*speed = SPEED_25000;
+	else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->link_interfaces))
+		*speed = SPEED_10000;
 	else
-		wx->adv_speed = SPEED_UNKNOWN;
+		*speed = SPEED_UNKNOWN;
 
-	wx->adv_duplex = wx->adv_speed == SPEED_UNKNOWN ?
-			 DUPLEX_HALF : DUPLEX_FULL;
+	*autoneg = phylink_test(txgbe->advertising, Autoneg);
+	*duplex = *speed == SPEED_UNKNOWN ? DUPLEX_HALF : DUPLEX_FULL;
 }
 
-static void txgbe_get_phy_link(struct wx *wx, int *speed)
+static void txgbe_get_mac_link(struct wx *wx, int *speed)
 {
 	u32 status;
 
 	status = rd32(wx, TXGBE_CFG_PORT_ST);
 	if (!(status & TXGBE_CFG_PORT_ST_LINK_UP))
 		*speed = SPEED_UNKNOWN;
+	else if (status & TXGBE_CFG_PORT_ST_LINK_AML_40G)
+		*speed = SPEED_40000;
 	else if (status & TXGBE_CFG_PORT_ST_LINK_AML_25G)
 		*speed = SPEED_25000;
 	else if (status & TXGBE_CFG_PORT_ST_LINK_AML_10G)
@@ -138,23 +188,11 @@ static void txgbe_get_phy_link(struct wx *wx, int *speed)
 
 int txgbe_set_phy_link(struct wx *wx)
 {
-	int speed, err;
-	u32 gpio;
+	int speed, autoneg, duplex, err;
 
-	/* Check RX signal */
-	gpio = rd32(wx, WX_GPIO_EXT);
-	if (gpio & TXGBE_GPIOBIT_3)
-		return -ENODEV;
+	txgbe_get_link_capabilities(wx, &speed, &autoneg, &duplex);
 
-	txgbe_get_link_capabilities(wx);
-	if (wx->adv_speed == SPEED_UNKNOWN)
-		return -ENODEV;
-
-	txgbe_get_phy_link(wx, &speed);
-	if (speed == wx->adv_speed)
-		return 0;
-
-	err = txgbe_set_phy_link_hostif(wx, wx->adv_speed, 0, wx->adv_duplex);
+	err = txgbe_set_phy_link_hostif(wx, speed, autoneg, duplex);
 	if (err) {
 		wx_err(wx, "Failed to setup link\n");
 		return err;
@@ -163,40 +201,128 @@ int txgbe_set_phy_link(struct wx *wx)
 	return 0;
 }
 
-static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sfp_id *id)
+static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sff_id *id)
 {
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
 	DECLARE_PHY_INTERFACE_MASK(interfaces);
 	struct txgbe *txgbe = wx->priv;
 
-	if (id->com_25g_code & (TXGBE_SFF_25GBASESR_CAPABLE |
-				TXGBE_SFF_25GBASEER_CAPABLE |
-				TXGBE_SFF_25GBASELR_CAPABLE)) {
-		phylink_set(modes, 25000baseSR_Full);
+	if (id->cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) {
+		txgbe->link_port = PORT_DA;
+		phylink_set(modes, Autoneg);
+		if (id->com_25g_code == TXGBE_SFF_25GBASECR_91FEC ||
+		    id->com_25g_code == TXGBE_SFF_25GBASECR_74FEC ||
+		    id->com_25g_code == TXGBE_SFF_25GBASECR_NOFEC) {
+			phylink_set(modes, 25000baseCR_Full);
+			phylink_set(modes, 10000baseCR_Full);
+			__set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+			__set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+		} else {
+			phylink_set(modes, 10000baseCR_Full);
+			__set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+		}
+	} else if (id->cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) {
+		txgbe->link_port = PORT_DA;
+		phylink_set(modes, Autoneg);
+		phylink_set(modes, 25000baseCR_Full);
 		__set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+	} else {
+		if (id->com_25g_code == TXGBE_SFF_25GBASESR_CAPABLE ||
+		    id->com_25g_code == TXGBE_SFF_25GBASEER_CAPABLE ||
+		    id->com_25g_code == TXGBE_SFF_25GBASELR_CAPABLE) {
+			txgbe->link_port = PORT_FIBRE;
+			phylink_set(modes, 25000baseSR_Full);
+			__set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+		}
+		if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) {
+			txgbe->link_port = PORT_FIBRE;
+			phylink_set(modes, 10000baseSR_Full);
+			__set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+		}
+		if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) {
+			txgbe->link_port = PORT_FIBRE;
+			phylink_set(modes, 10000baseLR_Full);
+			__set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+		}
 	}
-	if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) {
-		phylink_set(modes, 10000baseSR_Full);
-		__set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+
+	if (phy_interface_empty(interfaces)) {
+		wx_err(wx, "unsupported SFP module\n");
+		return -EINVAL;
 	}
-	if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) {
-		phylink_set(modes, 10000baseLR_Full);
+
+	phylink_set(modes, Pause);
+	phylink_set(modes, Asym_Pause);
+	phylink_set(modes, FIBRE);
+
+	if (!linkmode_equal(txgbe->link_support, modes)) {
+		linkmode_copy(txgbe->link_support, modes);
+		phy_interface_and(txgbe->link_interfaces,
+				  wx->phylink_config.supported_interfaces,
+				  interfaces);
+		linkmode_copy(txgbe->advertising, modes);
+
+		set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+	}
+
+	return 0;
+}
+
+static int txgbe_qsfp_to_linkmodes(struct wx *wx, struct txgbe_sff_id *id)
+{
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
+	DECLARE_PHY_INTERFACE_MASK(interfaces);
+	struct txgbe *txgbe = wx->priv;
+
+	if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_CR4) {
+		txgbe->link_port = PORT_DA;
+		phylink_set(modes, Autoneg);
+		phylink_set(modes, 40000baseCR4_Full);
+		phylink_set(modes, 10000baseCR_Full);
+		__set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
 		__set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
 	}
+	if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_SR4) {
+		txgbe->link_port = PORT_FIBRE;
+		phylink_set(modes, 40000baseSR4_Full);
+		__set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+	}
+	if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_LR4) {
+		txgbe->link_port = PORT_FIBRE;
+		phylink_set(modes, 40000baseLR4_Full);
+		__set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+	}
+	if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_ACTIVE) {
+		txgbe->link_port = PORT_DA;
+		phylink_set(modes, Autoneg);
+		phylink_set(modes, 40000baseCR4_Full);
+		__set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+	}
+	if (id->transceiver_type & TXGBE_SFF_ETHERNET_RSRVD) {
+		if (id->sff_opt1 & TXGBE_SFF_ETHERNET_100G_CR4) {
+			txgbe->link_port = PORT_DA;
+			phylink_set(modes, Autoneg);
+			phylink_set(modes, 40000baseCR4_Full);
+			phylink_set(modes, 25000baseCR_Full);
+			phylink_set(modes, 10000baseCR_Full);
+			__set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+			__set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+			__set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+		}
+	}
 
 	if (phy_interface_empty(interfaces)) {
-		wx_err(wx, "unsupported SFP module\n");
+		wx_err(wx, "unsupported QSFP module\n");
 		return -EINVAL;
 	}
 
 	phylink_set(modes, Pause);
 	phylink_set(modes, Asym_Pause);
 	phylink_set(modes, FIBRE);
-	txgbe->link_port = PORT_FIBRE;
 
-	if (!linkmode_equal(txgbe->sfp_support, modes)) {
-		linkmode_copy(txgbe->sfp_support, modes);
-		phy_interface_and(txgbe->sfp_interfaces,
+	if (!linkmode_equal(txgbe->link_support, modes)) {
+		linkmode_copy(txgbe->link_support, modes);
+		phy_interface_and(txgbe->link_interfaces,
 				  wx->phylink_config.supported_interfaces,
 				  interfaces);
 		linkmode_copy(txgbe->advertising, modes);
@@ -207,47 +333,53 @@ static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sfp_id *id)
 	return 0;
 }
 
-int txgbe_identify_sfp(struct wx *wx)
+int txgbe_identify_module(struct wx *wx)
 {
-	struct txgbe_hic_i2c_read buffer;
-	struct txgbe_sfp_id *id;
+	struct txgbe_hic_get_module_info buffer;
+	struct txgbe_sff_id *id;
 	int err = 0;
+	u32 mod_abs;
 	u32 gpio;
 
+	if (wx->mac.type == wx_mac_aml40)
+		mod_abs = TXGBE_GPIOBIT_4;
+	else
+		mod_abs = TXGBE_GPIOBIT_2;
+
 	gpio = rd32(wx, WX_GPIO_EXT);
-	if (gpio & TXGBE_GPIOBIT_2)
+	if (gpio & mod_abs)
 		return -ENODEV;
 
-	err = txgbe_identify_sfp_hostif(wx, &buffer);
+	err = txgbe_identify_module_hostif(wx, &buffer);
 	if (err) {
-		wx_err(wx, "Failed to identify SFP module\n");
+		wx_err(wx, "Failed to identify module\n");
 		return err;
 	}
 
 	id = &buffer.id;
-	if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP) {
-		wx_err(wx, "Invalid SFP module\n");
+	if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP &&
+	    id->identifier != TXGBE_SFF_IDENTIFIER_QSFP &&
+	    id->identifier != TXGBE_SFF_IDENTIFIER_QSFP_PLUS &&
+	    id->identifier != TXGBE_SFF_IDENTIFIER_QSFP28) {
+		wx_err(wx, "Invalid module\n");
 		return -ENODEV;
 	}
 
-	err = txgbe_sfp_to_linkmodes(wx, id);
-	if (err)
-		return err;
-
-	if (gpio & TXGBE_GPIOBIT_3)
-		set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+	if (id->transceiver_type == 0xFF)
+		return txgbe_sfp_to_linkmodes(wx, id);
 
-	return 0;
+	return txgbe_qsfp_to_linkmodes(wx, id);
 }
 
 void txgbe_setup_link(struct wx *wx)
 {
 	struct txgbe *txgbe = wx->priv;
 
-	phy_interface_zero(txgbe->sfp_interfaces);
-	linkmode_zero(txgbe->sfp_support);
+	phy_interface_zero(txgbe->link_interfaces);
+	linkmode_zero(txgbe->link_support);
 
-	txgbe_identify_sfp(wx);
+	set_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
+	wx_service_event_schedule(wx);
 }
 
 static void txgbe_get_link_state(struct phylink_config *config,
@@ -256,7 +388,7 @@ static void txgbe_get_link_state(struct phylink_config *config,
 	struct wx *wx = phylink_to_wx(config);
 	int speed;
 
-	txgbe_get_phy_link(wx, &speed);
+	txgbe_get_mac_link(wx, &speed);
 	state->link = speed != SPEED_UNKNOWN;
 	state->speed = speed;
 	state->duplex = state->link ? DUPLEX_FULL : DUPLEX_UNKNOWN;
@@ -300,6 +432,9 @@ static void txgbe_mac_link_up_aml(struct phylink_config *config,
 	txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
 	switch (speed) {
+	case SPEED_40000:
+		txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G;
+		break;
 	case SPEED_25000:
 		txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
 		break;
@@ -364,7 +499,18 @@ int txgbe_phylink_init_aml(struct txgbe *txgbe)
 				   MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
 	config->get_fixed_state = txgbe_get_link_state;
 
-	phy_mode = PHY_INTERFACE_MODE_25GBASER;
+	if (wx->mac.type == wx_mac_aml40) {
+		config->mac_capabilities |= MAC_40000FD;
+		phy_mode = PHY_INTERFACE_MODE_XLGMII;
+		__set_bit(PHY_INTERFACE_MODE_XLGMII, config->supported_interfaces);
+		state.speed = SPEED_40000;
+		state.duplex = DUPLEX_FULL;
+	} else {
+		phy_mode = PHY_INTERFACE_MODE_25GBASER;
+		state.speed = SPEED_25000;
+		state.duplex = DUPLEX_FULL;
+	}
+
 	__set_bit(PHY_INTERFACE_MODE_25GBASER, config->supported_interfaces);
 	__set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces);
 
@@ -372,8 +518,6 @@ int txgbe_phylink_init_aml(struct txgbe *txgbe)
 	if (IS_ERR(phylink))
 		return PTR_ERR(phylink);
 
-	state.speed = SPEED_25000;
-	state.duplex = DUPLEX_FULL;
 	err = phylink_set_fixed_link(phylink, &state);
 	if (err) {
 		wx_err(wx, "Failed to set fixed link\n");
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
index 25d4971ca0d9..4f6df0ee860b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
@@ -7,8 +7,11 @@
 void txgbe_gpio_init_aml(struct wx *wx);
 irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data);
 int txgbe_test_hostif(struct wx *wx);
+int txgbe_read_eeprom_hostif(struct wx *wx,
+			     struct txgbe_hic_i2c_read *buffer,
+			     u32 length, u8 *data);
 int txgbe_set_phy_link(struct wx *wx);
-int txgbe_identify_sfp(struct wx *wx);
+int txgbe_identify_module(struct wx *wx);
 void txgbe_setup_link(struct wx *wx);
 int txgbe_phylink_init_aml(struct txgbe *txgbe);
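[Editor's aside, not part of the patch: a minimal sketch of the identifier check that txgbe_identify_module() above performs on the first byte of the module's ID block. The accepted values follow SFF-8024 table 4-1: 0x03 = SFP/SFP+, 0x0C = QSFP, 0x0D = QSFP+, 0x11 = QSFP28; the function name is hypothetical.]

/* Standalone illustration of the SFF-8024 identifier dispatch. */
#include <stdbool.h>
#include <stdio.h>

enum { ID_SFP = 0x3, ID_QSFP = 0xC, ID_QSFP_PLUS = 0xD, ID_QSFP28 = 0x11 };

static bool module_id_valid(unsigned char identifier)
{
	switch (identifier) {
	case ID_SFP:
	case ID_QSFP:
	case ID_QSFP_PLUS:
	case ID_QSFP28:
		return true;	/* parse as SFP or QSFP link modes */
	default:
		return false;	/* reject, as the driver does with -ENODEV */
	}
}

int main(void)
{
	printf("%d %d\n", module_id_valid(0x11), module_id_valid(0x06));
	return 0;
}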
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index e285b088c7b2..f3cb00109529 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -10,6 +10,7 @@
 #include "../libwx/wx_lib.h"
 #include "txgbe_type.h"
 #include "txgbe_fdir.h"
+#include "txgbe_aml.h"
 #include "txgbe_ethtool.h"
 
 int txgbe_get_link_ksettings(struct net_device *netdev,
@@ -19,9 +20,6 @@ int txgbe_get_link_ksettings(struct net_device *netdev,
 	struct txgbe *txgbe = wx->priv;
 	int err;
 
-	if (wx->mac.type == wx_mac_aml40)
-		return -EOPNOTSUPP;
-
 	err = wx_get_link_ksettings(netdev, cmd);
 	if (err)
 		return err;
@@ -30,8 +28,9 @@ int txgbe_get_link_ksettings(struct net_device *netdev,
 		return 0;
 
 	cmd->base.port = txgbe->link_port;
-	cmd->base.autoneg = AUTONEG_DISABLE;
-	linkmode_copy(cmd->link_modes.supported, txgbe->sfp_support);
+	cmd->base.autoneg = phylink_test(txgbe->advertising, Autoneg) ?
+			    AUTONEG_ENABLE : AUTONEG_DISABLE;
+	linkmode_copy(cmd->link_modes.supported, txgbe->link_support);
 	linkmode_copy(cmd->link_modes.advertising, txgbe->advertising);
 
 	return 0;
@@ -536,6 +535,34 @@ static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 	return ret;
 }
 
+static int
+txgbe_get_module_eeprom_by_page(struct net_device *netdev,
+				const struct ethtool_module_eeprom *page_data,
+				struct netlink_ext_ack *extack)
+{
+	struct wx *wx = netdev_priv(netdev);
+	struct txgbe_hic_i2c_read buffer;
+	int err;
+
+	if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+		return -EOPNOTSUPP;
+
+	buffer.length = cpu_to_be32(page_data->length);
+	buffer.offset = cpu_to_be32(page_data->offset);
+	buffer.page = page_data->page;
+	buffer.bank = page_data->bank;
+	buffer.i2c_address = page_data->i2c_address;
+
+	err = txgbe_read_eeprom_hostif(wx, &buffer, page_data->length,
+				       page_data->data);
+	if (err) {
+		wx_err(wx, "Failed to read module EEPROM\n");
+		return err;
+	}
+
+	return page_data->length;
+}
+
 static const struct ethtool_ops txgbe_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
 				     ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
@@ -570,6 +597,7 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
 	.set_msglevel = wx_set_msglevel,
 	.get_ts_info = wx_get_ts_info,
 	.get_ts_stats = wx_get_ptp_stats,
+	.get_module_eeprom_by_page = txgbe_get_module_eeprom_by_page,
 };
 
 void txgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index 3885283681ec..aa14958d439a 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -23,7 +23,7 @@ void txgbe_irq_enable(struct wx *wx, bool queues)
 {
 	u32 misc_ien = TXGBE_PX_MISC_IEN_MASK;
 
-	if (wx->mac.type == wx_mac_aml) {
+	if (wx->mac.type != wx_mac_sp) {
 		misc_ien |= TXGBE_PX_MISC_GPIO;
 		txgbe_gpio_init_aml(wx);
 	}
@@ -201,10 +201,7 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe)
 
 void txgbe_free_misc_irq(struct txgbe *txgbe)
 {
-	if (txgbe->wx->mac.type == wx_mac_aml40)
-		return;
-
-	if (txgbe->wx->mac.type == wx_mac_aml)
+	if (txgbe->wx->mac.type != wx_mac_sp)
 		free_irq(txgbe->gpio_irq, txgbe);
 
 	free_irq(txgbe->link_irq, txgbe);
@@ -219,9 +216,6 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
 	struct wx *wx = txgbe->wx;
 	int hwirq, err;
 
-	if (wx->mac.type == wx_mac_aml40)
-		goto skip_sp_irq;
-
 	txgbe->misc.nirqs = TXGBE_IRQ_MAX;
 	txgbe->misc.domain = irq_domain_create_simple(NULL, txgbe->misc.nirqs, 0,
 						      &txgbe_misc_irq_domain_ops, txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index c4c4d70d8466..0de051450a82 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -89,21 +89,21 @@ static int txgbe_enumerate_functions(struct wx *wx)
 	return physfns;
 }
 
-static void txgbe_sfp_detection_subtask(struct wx *wx)
+static void txgbe_module_detection_subtask(struct wx *wx)
 {
 	int err;
 
-	if (!test_bit(WX_FLAG_NEED_SFP_RESET, wx->flags))
+	if (!test_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags))
 		return;
 
-	/* wait for SFP module ready */
+	/* wait for SFF module ready */
 	msleep(200);
 
-	err = txgbe_identify_sfp(wx);
+	err = txgbe_identify_module(wx);
 	if (err)
 		return;
 
-	clear_bit(WX_FLAG_NEED_SFP_RESET, wx->flags);
+	clear_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
 }
 
 static void txgbe_link_config_subtask(struct wx *wx)
@@ -128,7 +128,7 @@ static void txgbe_service_task(struct work_struct *work)
 {
 	struct wx *wx = container_of(work, struct wx, service_task);
 
-	txgbe_sfp_detection_subtask(wx);
+	txgbe_module_detection_subtask(wx);
 	txgbe_link_config_subtask(wx);
 
 	wx_service_event_complete(wx);
@@ -144,7 +144,6 @@ static void txgbe_init_service(struct wx *wx)
 static void txgbe_up_complete(struct wx *wx)
 {
 	struct net_device *netdev = wx->netdev;
-	u32 reg;
 
 	wx_control_hw(wx, true);
 	wx_configure_vectors(wx);
@@ -155,12 +154,8 @@ static void txgbe_up_complete(struct wx *wx)
 
 	switch (wx->mac.type) {
 	case wx_mac_aml40:
-		reg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
-		reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
-		reg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G;
-		wr32(wx, WX_MAC_TX_CFG, reg);
-		txgbe_enable_sec_tx_path(wx);
-		netif_carrier_on(wx->netdev);
+		txgbe_setup_link(wx);
+		phylink_start(wx->phylink);
 		break;
 	case wx_mac_aml:
 		/* Enable TX laser */
@@ -276,7 +271,7 @@ void txgbe_down(struct wx *wx)
 
 	switch (wx->mac.type) {
 	case wx_mac_aml40:
-		netif_carrier_off(wx->netdev);
+		phylink_stop(wx->phylink);
 		break;
 	case wx_mac_aml:
 		phylink_stop(wx->phylink);
@@ -398,6 +393,7 @@ static int txgbe_sw_init(struct wx *wx)
 	wx->configure_fdir = txgbe_configure_fdir;
 
 	set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
+	set_bit(WX_FLAG_RSC_ENABLED, wx->flags);
 	set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags);
 
 	/* enable itr by default in dynamic mode */
@@ -423,6 +419,8 @@ static int txgbe_sw_init(struct wx *wx)
 		break;
 	case wx_mac_aml:
 	case wx_mac_aml40:
+		set_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags);
+		set_bit(WX_FLAG_TXHEAD_WB_ENABLED, wx->flags);
 		set_bit(WX_FLAG_SWFW_RING, wx->flags);
 		wx->swfw_index = 0;
 		break;
@@ -801,6 +799,8 @@ static int txgbe_probe(struct pci_dev *pdev,
 	netdev->features |= NETIF_F_HIGHDMA;
 	netdev->hw_features |= NETIF_F_GRO;
 	netdev->features |= NETIF_F_GRO;
+	netdev->hw_features |= NETIF_F_LRO;
+	netdev->features |= NETIF_F_LRO;
 	netdev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 03f1b9bc604d..8ea7aa07ae4e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -579,7 +579,6 @@ int txgbe_init_phy(struct txgbe *txgbe)
 
 	switch (wx->mac.type) {
 	case wx_mac_aml40:
-		return 0;
 	case wx_mac_aml:
 		return txgbe_phylink_init_aml(txgbe);
 	case wx_mac_sp:
@@ -653,7 +652,6 @@ void txgbe_remove_phy(struct txgbe *txgbe)
 {
 	switch (txgbe->wx->mac.type) {
 	case wx_mac_aml40:
-		return;
 	case wx_mac_aml:
 		phylink_destroy(txgbe->wx->phylink);
 		return;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 41915d7dd372..82433e9cb0e3 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -98,6 +98,7 @@
 /* Port cfg registers */
 #define TXGBE_CFG_PORT_ST               0x14404
 #define TXGBE_CFG_PORT_ST_LINK_UP       BIT(0)
+#define TXGBE_CFG_PORT_ST_LINK_AML_40G  BIT(2)
 #define TXGBE_CFG_PORT_ST_LINK_AML_25G  BIT(3)
 #define TXGBE_CFG_PORT_ST_LINK_AML_10G  BIT(4)
 #define TXGBE_CFG_VXLAN                 0x14410
@@ -314,10 +315,15 @@ void txgbe_up(struct wx *wx);
 int txgbe_setup_tc(struct net_device *dev, u8 tc);
 void txgbe_do_reset(struct net_device *netdev);
 
+#define TXGBE_LINK_SPEED_UNKNOWN        0
 #define TXGBE_LINK_SPEED_10GB_FULL      4
 #define TXGBE_LINK_SPEED_25GB_FULL      0x10
+#define TXGBE_LINK_SPEED_40GB_FULL      0x20
 
 #define TXGBE_SFF_IDENTIFIER_SFP        0x3
+#define TXGBE_SFF_IDENTIFIER_QSFP       0xC
+#define TXGBE_SFF_IDENTIFIER_QSFP_PLUS  0xD
+#define TXGBE_SFF_IDENTIFIER_QSFP28     0x11
 #define TXGBE_SFF_DA_PASSIVE_CABLE      0x4
 #define TXGBE_SFF_DA_ACTIVE_CABLE       0x8
 #define TXGBE_SFF_DA_SPEC_ACTIVE_LIMIT  0x4
@@ -330,6 +336,12 @@ void txgbe_do_reset(struct net_device *netdev);
 #define TXGBE_SFF_25GBASECR_91FEC       0xB
 #define TXGBE_SFF_25GBASECR_74FEC       0xC
 #define TXGBE_SFF_25GBASECR_NOFEC       0xD
+#define TXGBE_SFF_ETHERNET_RSRVD        BIT(7)
+#define TXGBE_SFF_ETHERNET_40G_CR4      BIT(3)
+#define TXGBE_SFF_ETHERNET_40G_SR4      BIT(2)
+#define TXGBE_SFF_ETHERNET_40G_LR4      BIT(1)
+#define TXGBE_SFF_ETHERNET_40G_ACTIVE   BIT(0)
+#define TXGBE_SFF_ETHERNET_100G_CR4     0xB
 
 #define TXGBE_PHY_FEC_RS                BIT(0)
 #define TXGBE_PHY_FEC_BASER             BIT(1)
@@ -340,9 +352,10 @@ void txgbe_do_reset(struct net_device *netdev);
 
 #define FW_PHY_GET_LINK_CMD             0xC0
 #define FW_PHY_SET_LINK_CMD             0xC1
-#define FW_READ_SFP_INFO_CMD            0xC5
+#define FW_GET_MODULE_INFO_CMD          0xC5
+#define FW_READ_EEPROM_CMD              0xC6
 
-struct txgbe_sfp_id {
+struct txgbe_sff_id {
 	u8 identifier;        /* A0H 0x00 */
 	u8 com_1g_code;       /* A0H 0x06 */
 	u8 com_10g_code;      /* A0H 0x03 */
@@ -352,12 +365,14 @@ struct txgbe_sff_id {
 	u8 vendor_oui0;       /* A0H 0x25 */
 	u8 vendor_oui1;       /* A0H 0x26 */
 	u8 vendor_oui2;       /* A0H 0x27 */
-	u8 reserved[3];
+	u8 transceiver_type;  /* A0H 0x83 */
+	u8 sff_opt1;          /* A0H 0xC0 */
+	u8 reserved[5];
 };
 
-struct txgbe_hic_i2c_read {
+struct txgbe_hic_get_module_info {
 	struct wx_hic_hdr hdr;
-	struct txgbe_sfp_id id;
+	struct txgbe_sff_id id;
 };
 
 struct txgbe_hic_ephy_setlink {
@@ -380,6 +395,16 @@ struct txgbe_hic_ephy_getlink {
 	u8 resv[6];
 };
 
+struct txgbe_hic_i2c_read {
+	struct wx_hic_hdr hdr;
+	__be32 offset;
+	__be32 length;
+	u8 page;
+	u8 bank;
+	u8 i2c_address;
+	u8 resv;
+};
+
 #define NODE_PROP(_NAME, _PROP) \
 	(const struct software_node) { \
 		.name = _NAME, \
@@ -448,8 +473,8 @@ struct txgbe {
 	int fdir_filter_count;
 	spinlock_t fdir_perfect_lock; /* spinlock for FDIR */
 
-	DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
-	__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+	DECLARE_PHY_INTERFACE_MASK(link_interfaces);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(link_support);
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
 	u8 link_port;
 };
diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
index 72663e3c4205..37e4ec487afd 100644
--- a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
+++ b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
@@ -157,6 +157,18 @@ static int txgbevf_sw_init(struct wx *wx)
 
 	wx->set_num_queues = txgbevf_set_num_queues;
 
+	switch (wx->mac.type) {
+	case wx_mac_sp:
+		break;
+	case wx_mac_aml:
+	case wx_mac_aml40:
+		set_bit(WX_FLAG_RX_MERGE_ENABLED, wx->flags);
+		set_bit(WX_FLAG_TXHEAD_WB_ENABLED, wx->flags);
+		break;
+	default:
+		break;
+	}
+
 	return 0;
 
 err_reset_hw:
 	kfree(wx->vfinfo);
