Diffstat (limited to 'drivers/net/ethernet/airoha')
-rw-r--r--  drivers/net/ethernet/airoha/airoha_eth.c          | 432
-rw-r--r--  drivers/net/ethernet/airoha/airoha_eth.h          |  72
-rw-r--r--  drivers/net/ethernet/airoha/airoha_npu.c          |  93
-rw-r--r--  drivers/net/ethernet/airoha/airoha_ppe.c          | 261
-rw-r--r--  drivers/net/ethernet/airoha/airoha_ppe_debugfs.c  |   3
-rw-r--r--  drivers/net/ethernet/airoha/airoha_regs.h         | 115
6 files changed, 623 insertions, 353 deletions
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 833dd911980b..75893c90a0a1 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -137,11 +137,11 @@ static void airoha_fe_maccr_init(struct airoha_eth *eth)
for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
- GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
- GDM_DROP_CRC_ERR);
+ GDM_TCP_CKSUM_MASK | GDM_UDP_CKSUM_MASK |
+ GDM_IP4_CKSUM_MASK | GDM_DROP_CRC_ERR_MASK);
- airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
- FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
+ airoha_fe_rmw(eth, REG_CDM_VLAN_CTRL(1), CDM_VLAN_MASK,
+ FIELD_PREP(CDM_VLAN_MASK, 0x8100));
airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
}
@@ -297,8 +297,11 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
int q;
all_rsv = airoha_fe_get_pse_all_rsv(eth);
- /* hw misses PPE2 oq rsv */
- all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
+ if (airoha_ppe_is_enabled(eth, 1)) {
+ /* hw misses PPE2 oq rsv */
+ all_rsv += PSE_RSV_PAGES *
+ pse_port_num_queues[FE_PSE_PORT_PPE2];
+ }
airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
/* CMD1 */
@@ -335,13 +338,17 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
PSE_QUEUE_RSV_PAGES);
- /* PPE2 */
- for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
- if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
- airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
- PSE_QUEUE_RSV_PAGES);
- else
- airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
+ if (airoha_ppe_is_enabled(eth, 1)) {
+ /* PPE2 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
+ if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
+ q,
+ PSE_QUEUE_RSV_PAGES);
+ else
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2,
+ q, 0);
+ }
}
/* GMD4 */
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
@@ -396,46 +403,46 @@ static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
{
/* CDM1_CRSN_QSEL */
- airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
- CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
- FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_22 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
CDM_CRSN_QSEL_Q1));
- airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
- CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
- FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_08 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
CDM_CRSN_QSEL_Q1));
- airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
- CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
- FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_21 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
CDM_CRSN_QSEL_Q1));
- airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
- CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
- FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_24 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
CDM_CRSN_QSEL_Q6));
- airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
- CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
- FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(1, CRSN_25 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
CDM_CRSN_QSEL_Q1));
/* CDM2_CRSN_QSEL */
- airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
- CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
- FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_08 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_08),
CDM_CRSN_QSEL_Q1));
- airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
- CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
- FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_21 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_21),
CDM_CRSN_QSEL_Q1));
- airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
- CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
- FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_22 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_22),
CDM_CRSN_QSEL_Q1));
- airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
- CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
- FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_24 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_24),
CDM_CRSN_QSEL_Q6));
- airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
- CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
- FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
+ airoha_fe_rmw(eth, REG_CDM_CRSN_QSEL(2, CRSN_25 >> 2),
+ CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
+ FIELD_PREP(CDM_CRSN_QSEL_REASON_MASK(CRSN_25),
CDM_CRSN_QSEL_Q1));
}
@@ -455,18 +462,18 @@ static int airoha_fe_init(struct airoha_eth *eth)
airoha_fe_wr(eth, REG_FE_PCE_CFG,
PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
/* set vip queue selection to ring 1 */
- airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
- FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
- airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
- FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
+ airoha_fe_rmw(eth, REG_CDM_FWD_CFG(1), CDM_VIP_QSEL_MASK,
+ FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
+ airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_VIP_QSEL_MASK,
+ FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
/* set GDM4 source interface offset to 8 */
- airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
- GDM4_SPORT_OFF2_MASK |
- GDM4_SPORT_OFF1_MASK |
- GDM4_SPORT_OFF0_MASK,
- FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
- FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
- FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
+ airoha_fe_rmw(eth, REG_GDM_SRC_PORT_SET(4),
+ GDM_SPORT_OFF2_MASK |
+ GDM_SPORT_OFF1_MASK |
+ GDM_SPORT_OFF0_MASK,
+ FIELD_PREP(GDM_SPORT_OFF2_MASK, 8) |
+ FIELD_PREP(GDM_SPORT_OFF1_MASK, 8) |
+ FIELD_PREP(GDM_SPORT_OFF0_MASK, 8));
/* set PSE Page as 128B */
airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
@@ -492,8 +499,8 @@ static int airoha_fe_init(struct airoha_eth *eth)
airoha_fe_set(eth, REG_GDM_MISC_CFG,
GDM2_RDM_ACK_WAIT_PREF_MASK |
GDM2_CHN_VLD_MODE_MASK);
- airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK,
- FIELD_PREP(CDM2_OAM_QSEL_MASK, 15));
+ airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_OAM_QSEL_MASK,
+ FIELD_PREP(CDM_OAM_QSEL_MASK, 15));
/* init fragment and assemble Force Port */
/* NPU Core-3, NPU Bridge Channel-3 */
@@ -507,8 +514,8 @@ static int airoha_fe_init(struct airoha_eth *eth)
FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
- airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
- airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(3), GDM_PAD_EN_MASK);
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(4), GDM_PAD_EN_MASK);
airoha_fe_crsn_qsel_init(eth);
@@ -516,7 +523,7 @@ static int airoha_fe_init(struct airoha_eth *eth)
airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
/* default aging mode for mbi unlock issue */
- airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
+ airoha_fe_rmw(eth, REG_GDM_CHN_RLS(2),
MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
@@ -524,25 +531,6 @@ static int airoha_fe_init(struct airoha_eth *eth)
/* disable IFC by default */
airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
- airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0),
- FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM1) |
- FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM1) |
- FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM1) |
- FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM1) |
- FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM1) |
- FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM1) |
- FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM1) |
- FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM1));
- airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(1),
- FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM2) |
- FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM2) |
- FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM2) |
- FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM2) |
- FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM2) |
- FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM2) |
- FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM2) |
- FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM2));
-
/* enable 1:N vlan action, init vlan table */
airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
@@ -904,19 +892,13 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
DMA_TO_DEVICE);
- memset(e, 0, sizeof(*e));
+ e->dma_addr = 0;
+ list_add_tail(&e->list, &q->tx_list);
+
WRITE_ONCE(desc->msg0, 0);
WRITE_ONCE(desc->msg1, 0);
q->queued--;
- /* completion ring can report out-of-order indexes if hw QoS
- * is enabled and packets with different priority are queued
- * to same DMA ring. Take into account possible out-of-order
- * reports incrementing DMA ring tail pointer
- */
- while (q->tail != q->head && !q->entry[q->tail].dma_addr)
- q->tail = (q->tail + 1) % q->ndesc;
-
if (skb) {
u16 queue = skb_get_queue_mapping(skb);
struct netdev_queue *txq;
@@ -961,6 +943,7 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
q->ndesc = size;
q->qdma = qdma;
q->free_thr = 1 + MAX_SKB_FRAGS;
+ INIT_LIST_HEAD(&q->tx_list);
q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
GFP_KERNEL);
@@ -973,9 +956,9 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
return -ENOMEM;
for (i = 0; i < q->ndesc; i++) {
- u32 val;
+ u32 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
- val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
+ list_add_tail(&q->entry[i].list, &q->tx_list);
WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
}
@@ -985,9 +968,9 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
- FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, 0));
airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
- FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
+ FIELD_PREP(TX_RING_DMA_IDX_MASK, 0));
return 0;
}
@@ -1043,17 +1026,21 @@ static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
struct airoha_eth *eth = q->qdma->eth;
+ int i;
spin_lock_bh(&q->lock);
- while (q->queued) {
- struct airoha_queue_entry *e = &q->entry[q->tail];
+ for (i = 0; i < q->ndesc; i++) {
+ struct airoha_queue_entry *e = &q->entry[i];
+
+ if (!e->dma_addr)
+ continue;
dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
DMA_TO_DEVICE);
dev_kfree_skb_any(e->skb);
+ e->dma_addr = 0;
e->skb = NULL;
-
- q->tail = (q->tail + 1) % q->ndesc;
+ list_add_tail(&e->list, &q->tx_list);
q->queued--;
}
spin_unlock_bh(&q->lock);
@@ -1387,8 +1374,7 @@ static int airoha_hw_init(struct platform_device *pdev,
int err, i;
/* disable xsi */
- err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
- eth->xsi_rsts);
+ err = reset_control_bulk_assert(eth->soc->num_xsi_rsts, eth->xsi_rsts);
if (err)
return err;
@@ -1695,19 +1681,23 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
return 0;
}
-static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
+static int airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
{
- u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4;
struct airoha_eth *eth = port->qdma->eth;
- u32 chan = port->id == 3 ? 4 : 0;
+ u32 val, pse_port, chan, nbq;
+ int src_port;
/* Forward the traffic to the proper GDM port */
+ pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
+ : FE_PSE_PORT_GDM4;
airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port);
- airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC);
+ airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC_MASK);
/* Enable GDM2 loopback */
airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff);
airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
+
+ chan = port->id == AIROHA_GDM3_IDX ? airoha_is_7581(eth) ? 4 : 3 : 0;
airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
FIELD_PREP(LPBK_CHAN_MASK, chan) |
@@ -1722,36 +1712,36 @@ static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2));
airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2));
- if (port->id == 3) {
- /* FIXME: handle XSI_PCE1_PORT */
- airoha_fe_rmw(eth, REG_FE_WAN_PORT,
- WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
- FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT));
- airoha_fe_rmw(eth,
- REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3),
- SP_CPORT_PCIE0_MASK,
- FIELD_PREP(SP_CPORT_PCIE0_MASK,
- FE_PSE_PORT_CDM2));
- } else {
- /* FIXME: handle XSI_USB_PORT */
+ /* XXX: handle XSI_USB_PORT and XSI_PCE1_PORT */
+ nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
+ src_port = eth->soc->ops.get_src_port_id(port, nbq);
+ if (src_port < 0)
+ return src_port;
+
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, src_port));
+ val = src_port & SP_CPORT_DFT_MASK;
+ airoha_fe_rmw(eth,
+ REG_SP_DFT_CPORT(src_port >> fls(SP_CPORT_DFT_MASK)),
+ SP_CPORT_MASK(val),
+ FE_PSE_PORT_CDM2 << __ffs(SP_CPORT_MASK(val)));
+
+ if (port->id != AIROHA_GDM3_IDX && airoha_is_7581(eth))
airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6,
FC_ID_OF_SRC_PORT24_MASK,
FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2));
- airoha_fe_rmw(eth, REG_FE_WAN_PORT,
- WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
- FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT));
- airoha_fe_rmw(eth,
- REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3),
- SP_CPORT_ETH_MASK,
- FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2));
- }
+
+ return 0;
}
static int airoha_dev_init(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- struct airoha_eth *eth = port->qdma->eth;
- u32 pse_port;
+ struct airoha_qdma *qdma = port->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ u32 pse_port, fe_cpu_port;
+ u8 ppe_id;
airoha_set_macaddr(port, dev->dev_addr);
@@ -1759,18 +1749,37 @@ static int airoha_dev_init(struct net_device *dev)
case 3:
case 4:
/* If GDM2 is active we can't enable loopback */
- if (!eth->ports[1])
- airhoha_set_gdm2_loopback(port);
+ if (!eth->ports[1]) {
+ int err;
+
+ err = airhoha_set_gdm2_loopback(port);
+ if (err)
+ return err;
+ }
fallthrough;
case 2:
- pse_port = FE_PSE_PORT_PPE2;
- break;
- default:
+ if (airoha_ppe_is_enabled(eth, 1)) {
+ /* For PPE2 always use secondary cpu port. */
+ fe_cpu_port = FE_PSE_PORT_CDM2;
+ pse_port = FE_PSE_PORT_PPE2;
+ break;
+ }
+ fallthrough;
+ default: {
+ u8 qdma_id = qdma - &eth->qdma[0];
+
+ /* For PPE1 select cpu port according to the running QDMA. */
+ fe_cpu_port = qdma_id ? FE_PSE_PORT_CDM2 : FE_PSE_PORT_CDM1;
pse_port = FE_PSE_PORT_PPE1;
break;
}
+ }
airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port);
+ ppe_id = pse_port == FE_PSE_PORT_PPE2 ? 1 : 0;
+ airoha_fe_rmw(eth, REG_PPE_DFT_CPORT0(ppe_id),
+ DFT_CPORT_MASK(port->id),
+ fe_cpu_port << __ffs(DFT_CPORT_MASK(port->id)));
return 0;
}
@@ -1873,14 +1882,32 @@ static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
#endif
}
+static int airoha_get_fe_port(struct airoha_gdm_port *port)
+{
+ struct airoha_qdma *qdma = port->qdma;
+ struct airoha_eth *eth = qdma->eth;
+
+ switch (eth->soc->version) {
+ case 0x7583:
+ return port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
+ : port->id;
+ case 0x7581:
+ default:
+ return port->id == AIROHA_GDM4_IDX ? FE_PSE_PORT_GDM4
+ : port->id;
+ }
+}
+
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
struct airoha_qdma *qdma = port->qdma;
u32 nr_frags, tag, msg0, msg1, len;
+ struct airoha_queue_entry *e;
struct netdev_queue *txq;
struct airoha_queue *q;
+ LIST_HEAD(tx_list);
void *data;
int i, qid;
u16 index;
@@ -1913,7 +1940,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
}
}
- fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
+ fport = airoha_get_fe_port(port);
msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
@@ -1926,7 +1953,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
txq = netdev_get_tx_queue(dev, qid);
nr_frags = 1 + skb_shinfo(skb)->nr_frags;
- if (q->queued + nr_frags > q->ndesc) {
+ if (q->queued + nr_frags >= q->ndesc) {
/* not enough space in the queue */
netif_tx_stop_queue(txq);
spin_unlock_bh(&q->lock);
@@ -1935,11 +1962,13 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
len = skb_headlen(skb);
data = skb->data;
- index = q->head;
+
+ e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
+ list);
+ index = e - q->entry;
for (i = 0; i < nr_frags; i++) {
struct airoha_qdma_desc *desc = &q->desc[index];
- struct airoha_queue_entry *e = &q->entry[index];
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t addr;
u32 val;
@@ -1949,7 +1978,14 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
goto error_unmap;
- index = (index + 1) % q->ndesc;
+ list_move_tail(&e->list, &tx_list);
+ e->skb = i ? NULL : skb;
+ e->dma_addr = addr;
+ e->dma_len = len;
+
+ e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
+ list);
+ index = e - q->entry;
val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
if (i < nr_frags - 1)
@@ -1962,15 +1998,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
- e->skb = i ? NULL : skb;
- e->dma_addr = addr;
- e->dma_len = len;
-
data = skb_frag_address(frag);
len = skb_frag_size(frag);
}
-
- q->head = index;
q->queued += i;
skb_tx_timestamp(skb);
@@ -1979,7 +2009,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
if (netif_xmit_stopped(txq) || !netdev_xmit_more())
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
TX_RING_CPU_IDX_MASK,
- FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
if (q->ndesc - q->queued < q->free_thr)
netif_tx_stop_queue(txq);
@@ -1989,10 +2019,13 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
error_unmap:
- for (i--; i >= 0; i--) {
- index = (q->head + i) % q->ndesc;
- dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
- q->entry[index].dma_len, DMA_TO_DEVICE);
+ while (!list_empty(&tx_list)) {
+ e = list_first_entry(&tx_list, struct airoha_queue_entry,
+ list);
+ dma_unmap_single(dev->dev.parent, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+ e->dma_addr = 0;
+ list_move_tail(&e->list, &q->tx_list);
}
spin_unlock_bh(&q->lock);
@@ -2022,8 +2055,12 @@ static void airoha_ethtool_get_mac_stats(struct net_device *dev,
airoha_update_hw_stats(port);
do {
start = u64_stats_fetch_begin(&port->stats.syncp);
+ stats->FramesTransmittedOK = port->stats.tx_ok_pkts;
+ stats->OctetsTransmittedOK = port->stats.tx_ok_bytes;
stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
+ stats->FramesReceivedOK = port->stats.rx_ok_pkts;
+ stats->OctetsReceivedOK = port->stats.rx_ok_bytes;
stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
@@ -2766,6 +2803,7 @@ static const struct ethtool_ops airoha_ethtool_ops = {
.get_drvinfo = airoha_ethtool_get_drvinfo,
.get_eth_mac_stats = airoha_ethtool_get_mac_stats,
.get_rmon_stats = airoha_ethtool_get_rmon_stats,
+ .get_link = ethtool_op_get_link,
};
static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
@@ -2903,6 +2941,7 @@ free_metadata_dst:
static int airoha_probe(struct platform_device *pdev)
{
+ struct reset_control_bulk_data *xsi_rsts;
struct device_node *np;
struct airoha_eth *eth;
int i, err;
@@ -2911,6 +2950,10 @@ static int airoha_probe(struct platform_device *pdev)
if (!eth)
return -ENOMEM;
+ eth->soc = of_device_get_match_data(&pdev->dev);
+ if (!eth->soc)
+ return -EINVAL;
+
eth->dev = &pdev->dev;
err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
@@ -2935,13 +2978,18 @@ static int airoha_probe(struct platform_device *pdev)
return err;
}
- eth->xsi_rsts[0].id = "xsi-mac";
- eth->xsi_rsts[1].id = "hsi0-mac";
- eth->xsi_rsts[2].id = "hsi1-mac";
- eth->xsi_rsts[3].id = "hsi-mac";
- eth->xsi_rsts[4].id = "xfp-mac";
+ xsi_rsts = devm_kcalloc(eth->dev,
+ eth->soc->num_xsi_rsts, sizeof(*xsi_rsts),
+ GFP_KERNEL);
+ if (!xsi_rsts)
+ return -ENOMEM;
+
+ eth->xsi_rsts = xsi_rsts;
+ for (i = 0; i < eth->soc->num_xsi_rsts; i++)
+ eth->xsi_rsts[i].id = eth->soc->xsi_rsts_names[i];
+
err = devm_reset_control_bulk_get_exclusive(eth->dev,
- ARRAY_SIZE(eth->xsi_rsts),
+ eth->soc->num_xsi_rsts,
eth->xsi_rsts);
if (err) {
dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
@@ -3029,8 +3077,90 @@ static void airoha_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
}
+static const char * const en7581_xsi_rsts_names[] = {
+ "xsi-mac",
+ "hsi0-mac",
+ "hsi1-mac",
+ "hsi-mac",
+ "xfp-mac",
+};
+
+static int airoha_en7581_get_src_port_id(struct airoha_gdm_port *port, int nbq)
+{
+ switch (port->id) {
+ case 3:
+ /* 7581 SoC supports PCIe serdes on GDM3 port */
+ if (nbq == 4)
+ return HSGMII_LAN_7581_PCIE0_SRCPORT;
+ if (nbq == 5)
+ return HSGMII_LAN_7581_PCIE1_SRCPORT;
+ break;
+ case 4:
+ /* 7581 SoC supports eth and usb serdes on GDM4 port */
+ if (!nbq)
+ return HSGMII_LAN_7581_ETH_SRCPORT;
+ if (nbq == 1)
+ return HSGMII_LAN_7581_USB_SRCPORT;
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const char * const an7583_xsi_rsts_names[] = {
+ "xsi-mac",
+ "hsi0-mac",
+ "hsi1-mac",
+ "xfp-mac",
+};
+
+static int airoha_an7583_get_src_port_id(struct airoha_gdm_port *port, int nbq)
+{
+ switch (port->id) {
+ case 3:
+ /* 7583 SoC supports eth serdes on GDM3 port */
+ if (!nbq)
+ return HSGMII_LAN_7583_ETH_SRCPORT;
+ break;
+ case 4:
+ /* 7583 SoC supports PCIe and USB serdes on GDM4 port */
+ if (!nbq)
+ return HSGMII_LAN_7583_PCIE_SRCPORT;
+ if (nbq == 1)
+ return HSGMII_LAN_7583_USB_SRCPORT;
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct airoha_eth_soc_data en7581_soc_data = {
+ .version = 0x7581,
+ .xsi_rsts_names = en7581_xsi_rsts_names,
+ .num_xsi_rsts = ARRAY_SIZE(en7581_xsi_rsts_names),
+ .num_ppe = 2,
+ .ops = {
+ .get_src_port_id = airoha_en7581_get_src_port_id,
+ },
+};
+
+static const struct airoha_eth_soc_data an7583_soc_data = {
+ .version = 0x7583,
+ .xsi_rsts_names = an7583_xsi_rsts_names,
+ .num_xsi_rsts = ARRAY_SIZE(an7583_xsi_rsts_names),
+ .num_ppe = 1,
+ .ops = {
+ .get_src_port_id = airoha_an7583_get_src_port_id,
+ },
+};
+
static const struct of_device_id of_airoha_match[] = {
- { .compatible = "airoha,en7581-eth" },
+ { .compatible = "airoha,en7581-eth", .data = &en7581_soc_data },
+ { .compatible = "airoha,an7583-eth", .data = &an7583_soc_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_airoha_match);
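The new of_airoha_match entries attach a per-SoC descriptor through the .data pointer, which probe() retrieves once with of_device_get_match_data() and caches in eth->soc. A minimal, self-contained sketch of that pattern follows; every name in it (my_soc_data, my_probe, "vendor,demo-soc") is hypothetical and not part of this driver.

    /* Sketch of the OF match-data pattern, with invented names. */
    #include <linux/of.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct my_soc_data {
        u16 version;
        int num_ppe;
    };

    static const struct my_soc_data my_demo_data = {
        .version = 0x7581,
        .num_ppe = 2,
    };

    static int my_probe(struct platform_device *pdev)
    {
        const struct my_soc_data *soc;

        /* returns the .data of the matched of_device_id entry */
        soc = of_device_get_match_data(&pdev->dev);
        if (!soc)
            return -EINVAL; /* compatible had no .data attached */

        dev_info(&pdev->dev, "SoC %04x, %d PPE(s)\n",
                 soc->version, soc->num_ppe);
        return 0;
    }

    static const struct of_device_id my_match[] = {
        { .compatible = "vendor,demo-soc", .data = &my_demo_data },
        { /* sentinel */ }
    };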
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index cd13c1c1224f..fbbc58133364 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -21,7 +21,6 @@
#define AIROHA_MAX_NUM_IRQ_BANKS 4
#define AIROHA_MAX_DSA_PORTS 7
#define AIROHA_MAX_NUM_RSTS 3
-#define AIROHA_MAX_NUM_XSI_RSTS 5
#define AIROHA_MAX_MTU 9216
#define AIROHA_MAX_PACKET_SIZE 2048
#define AIROHA_NUM_QOS_CHANNELS 4
@@ -48,20 +47,9 @@
#define QDMA_METER_IDX(_n) ((_n) & 0xff)
#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
-#define PPE_NUM 2
-#define PPE1_SRAM_NUM_ENTRIES (8 * 1024)
-#define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES)
-#ifdef CONFIG_NET_AIROHA_FLOW_STATS
-#define PPE1_STATS_NUM_ENTRIES (4 * 1024)
-#else
-#define PPE1_STATS_NUM_ENTRIES 0
-#endif /* CONFIG_NET_AIROHA_FLOW_STATS */
-#define PPE_STATS_NUM_ENTRIES (2 * PPE1_STATS_NUM_ENTRIES)
-#define PPE1_SRAM_NUM_DATA_ENTRIES (PPE1_SRAM_NUM_ENTRIES - PPE1_STATS_NUM_ENTRIES)
-#define PPE_SRAM_NUM_DATA_ENTRIES (2 * PPE1_SRAM_NUM_DATA_ENTRIES)
+#define PPE_SRAM_NUM_ENTRIES (8 * 1024)
+#define PPE_STATS_NUM_ENTRIES (4 * 1024)
#define PPE_DRAM_NUM_ENTRIES (16 * 1024)
-#define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES)
-#define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1)
#define PPE_ENTRY_SIZE 80
#define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10))
@@ -79,10 +67,16 @@ enum {
};
enum {
- HSGMII_LAN_PCIE0_SRCPORT = 0x16,
- HSGMII_LAN_PCIE1_SRCPORT,
- HSGMII_LAN_ETH_SRCPORT,
- HSGMII_LAN_USB_SRCPORT,
+ HSGMII_LAN_7581_PCIE0_SRCPORT = 0x16,
+ HSGMII_LAN_7581_PCIE1_SRCPORT,
+ HSGMII_LAN_7581_ETH_SRCPORT,
+ HSGMII_LAN_7581_USB_SRCPORT,
+};
+
+enum {
+ HSGMII_LAN_7583_ETH_SRCPORT = 0x16,
+ HSGMII_LAN_7583_PCIE_SRCPORT = 0x18,
+ HSGMII_LAN_7583_USB_SRCPORT,
};
enum {
@@ -111,6 +105,13 @@ enum {
CRSN_25 = 0x19,
};
+enum airoha_gdm_index {
+ AIROHA_GDM1_IDX = 1,
+ AIROHA_GDM2_IDX = 2,
+ AIROHA_GDM3_IDX = 3,
+ AIROHA_GDM4_IDX = 4,
+};
+
enum {
FE_PSE_PORT_CDM1,
FE_PSE_PORT_GDM1,
@@ -168,7 +169,10 @@ enum trtcm_param {
struct airoha_queue_entry {
union {
void *buf;
- struct sk_buff *skb;
+ struct {
+ struct list_head list;
+ struct sk_buff *skb;
+ };
};
dma_addr_t dma_addr;
u16 dma_len;
@@ -192,6 +196,8 @@ struct airoha_queue {
struct napi_struct napi;
struct page_pool *page_pool;
struct sk_buff *skb;
+
+ struct list_head tx_list;
};
struct airoha_tx_irq_queue {
@@ -554,7 +560,7 @@ struct airoha_ppe {
struct rhashtable l2_flows;
struct hlist_head *foe_flow;
- u16 foe_check_time[PPE_NUM_ENTRIES];
+ u16 *foe_check_time;
struct airoha_foe_stats *foe_stats;
dma_addr_t foe_stats_dma;
@@ -562,9 +568,21 @@ struct airoha_ppe {
struct dentry *debugfs_dir;
};
+struct airoha_eth_soc_data {
+ u16 version;
+ const char * const *xsi_rsts_names;
+ int num_xsi_rsts;
+ int num_ppe;
+ struct {
+ int (*get_src_port_id)(struct airoha_gdm_port *port, int nbq);
+ } ops;
+};
+
struct airoha_eth {
struct device *dev;
+ const struct airoha_eth_soc_data *soc;
+
unsigned long state;
void __iomem *fe_regs;
@@ -574,7 +592,7 @@ struct airoha_eth {
struct rhashtable flow_table;
struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
- struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
+ struct reset_control_bulk_data *xsi_rsts;
struct net_device *napi_dev;
@@ -617,15 +635,27 @@ static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
return port->id == 1;
}
+static inline bool airoha_is_7581(struct airoha_eth *eth)
+{
+ return eth->soc->version == 0x7581;
+}
+
+static inline bool airoha_is_7583(struct airoha_eth *eth)
+{
+ return eth->soc->version == 0x7583;
+}
+
bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
struct airoha_gdm_port *port);
+bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index);
void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
u16 hash, bool rx_wlan);
int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data);
int airoha_ppe_init(struct airoha_eth *eth);
void airoha_ppe_deinit(struct airoha_eth *eth);
void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
+u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe);
struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
u32 hash);
void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
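The reworked airoha_queue_entry union lets an idle TX entry sit on the new per-queue tx_list free list (the embedded list_head/skb pair) while still aliasing the RX buffer pointer. A hedged sketch of that free-list scheme under made-up names (demo_entry, demo_queue): entries start on the list, xmit pops the head, and the DMA descriptor slot is recovered by pointer arithmetic instead of head/tail counters.

    /* Free-list descriptor tracking sketch; names are hypothetical. */
    #include <linux/list.h>

    struct demo_entry {
        struct list_head list;
        void *skb;
    };

    struct demo_queue {
        struct demo_entry *entry;   /* ndesc-sized array */
        struct list_head tx_list;   /* free entries, FIFO order */
        int ndesc;
    };

    static void demo_queue_init(struct demo_queue *q)
    {
        int i;

        INIT_LIST_HEAD(&q->tx_list);
        for (i = 0; i < q->ndesc; i++)
            list_add_tail(&q->entry[i].list, &q->tx_list);
    }

    static int demo_pop_index(struct demo_queue *q)
    {
        struct demo_entry *e;

        if (list_empty(&q->tx_list))
            return -1; /* ring full */

        e = list_first_entry(&q->tx_list, struct demo_entry, list);
        list_del(&e->list);
        return e - q->entry; /* descriptor slot for this entry */
    }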
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index 8c883f2b2d36..68b7f9684dc7 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -16,6 +16,8 @@
#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
+#define NPU_AN7583_FIRMWARE_DATA "airoha/an7583_npu_data.bin"
+#define NPU_AN7583_FIRMWARE_RV32 "airoha/an7583_npu_rv32.bin"
#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000
#define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE 0x10000
#define NPU_DUMP_SIZE 512
@@ -103,6 +105,16 @@ enum {
QDMA_WAN_PON_XDSL,
};
+struct airoha_npu_fw {
+ const char *name;
+ int max_size;
+};
+
+struct airoha_npu_soc_data {
+ struct airoha_npu_fw fw_rv32;
+ struct airoha_npu_fw fw_data;
+};
+
#define MBOX_MSG_FUNC_ID GENMASK(14, 11)
#define MBOX_MSG_STATIC_BUF BIT(5)
#define MBOX_MSG_STATUS GENMASK(4, 2)
@@ -182,49 +194,53 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
return ret;
}
-static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
- struct resource *res)
+static int airoha_npu_load_firmware(struct device *dev, void __iomem *addr,
+ const struct airoha_npu_fw *fw_info)
{
const struct firmware *fw;
- void __iomem *addr;
int ret;
- ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_RV32, dev);
+ ret = request_firmware(&fw, fw_info->name, dev);
if (ret)
return ret == -ENOENT ? -EPROBE_DEFER : ret;
- if (fw->size > NPU_EN7581_FIRMWARE_RV32_MAX_SIZE) {
+ if (fw->size > fw_info->max_size) {
dev_err(dev, "%s: fw size too overlimit (%zu)\n",
- NPU_EN7581_FIRMWARE_RV32, fw->size);
+ fw_info->name, fw->size);
ret = -E2BIG;
goto out;
}
- addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- goto out;
- }
-
memcpy_toio(addr, fw->data, fw->size);
+out:
release_firmware(fw);
- ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_DATA, dev);
- if (ret)
- return ret == -ENOENT ? -EPROBE_DEFER : ret;
+ return ret;
+}
- if (fw->size > NPU_EN7581_FIRMWARE_DATA_MAX_SIZE) {
- dev_err(dev, "%s: fw size too overlimit (%zu)\n",
- NPU_EN7581_FIRMWARE_DATA, fw->size);
- ret = -E2BIG;
- goto out;
- }
+static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
+ struct resource *res)
+{
+ const struct airoha_npu_soc_data *soc;
+ void __iomem *addr;
+ int ret;
- memcpy_toio(base + REG_NPU_LOCAL_SRAM, fw->data, fw->size);
-out:
- release_firmware(fw);
+ soc = of_device_get_match_data(dev);
+ if (!soc)
+ return -EINVAL;
- return ret;
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+
+ /* Load rv32 npu firmware */
+ ret = airoha_npu_load_firmware(dev, addr, &soc->fw_rv32);
+ if (ret)
+ return ret;
+
+ /* Load data npu firmware */
+ return airoha_npu_load_firmware(dev, base + REG_NPU_LOCAL_SRAM,
+ &soc->fw_data);
}
static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
@@ -597,8 +613,31 @@ void airoha_npu_put(struct airoha_npu *npu)
}
EXPORT_SYMBOL_GPL(airoha_npu_put);
+static const struct airoha_npu_soc_data en7581_npu_soc_data = {
+ .fw_rv32 = {
+ .name = NPU_EN7581_FIRMWARE_RV32,
+ .max_size = NPU_EN7581_FIRMWARE_RV32_MAX_SIZE,
+ },
+ .fw_data = {
+ .name = NPU_EN7581_FIRMWARE_DATA,
+ .max_size = NPU_EN7581_FIRMWARE_DATA_MAX_SIZE,
+ },
+};
+
+static const struct airoha_npu_soc_data an7583_npu_soc_data = {
+ .fw_rv32 = {
+ .name = NPU_AN7583_FIRMWARE_RV32,
+ .max_size = NPU_EN7581_FIRMWARE_RV32_MAX_SIZE,
+ },
+ .fw_data = {
+ .name = NPU_AN7583_FIRMWARE_DATA,
+ .max_size = NPU_EN7581_FIRMWARE_DATA_MAX_SIZE,
+ },
+};
+
static const struct of_device_id of_airoha_npu_match[] = {
- { .compatible = "airoha,en7581-npu" },
+ { .compatible = "airoha,en7581-npu", .data = &en7581_npu_soc_data },
+ { .compatible = "airoha,an7583-npu", .data = &an7583_npu_soc_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_airoha_npu_match);
@@ -737,6 +776,8 @@ module_platform_driver(airoha_npu_driver);
MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA);
MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32);
+MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_AN7583_FIRMWARE_RV32);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 691361b25407..0caabb0c3aa0 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -32,9 +32,50 @@ static const struct rhashtable_params airoha_l2_flow_table_params = {
.automatic_shrinking = true,
};
-static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
+static int airoha_ppe_get_num_stats_entries(struct airoha_ppe *ppe)
{
- return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
+ if (!IS_ENABLED(CONFIG_NET_AIROHA_FLOW_STATS))
+ return -EOPNOTSUPP;
+
+ if (airoha_is_7583(ppe->eth))
+ return -EOPNOTSUPP;
+
+ return PPE_STATS_NUM_ENTRIES;
+}
+
+static int airoha_ppe_get_total_num_stats_entries(struct airoha_ppe *ppe)
+{
+ int num_stats = airoha_ppe_get_num_stats_entries(ppe);
+
+ if (num_stats > 0) {
+ struct airoha_eth *eth = ppe->eth;
+
+ num_stats = num_stats * eth->soc->num_ppe;
+ }
+
+ return num_stats;
+}
+
+static u32 airoha_ppe_get_total_sram_num_entries(struct airoha_ppe *ppe)
+{
+ struct airoha_eth *eth = ppe->eth;
+
+ return PPE_SRAM_NUM_ENTRIES * eth->soc->num_ppe;
+}
+
+u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe)
+{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+
+ return sram_num_entries + PPE_DRAM_NUM_ENTRIES;
+}
+
+bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index)
+{
+ if (index >= eth->soc->num_ppe)
+ return false;
+
+ return airoha_fe_rr(eth, REG_PPE_GLO_CFG(index)) & PPE_GLO_CFG_EN_MASK;
}
static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
@@ -46,14 +87,22 @@ static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
{
- u32 sram_tb_size, sram_num_entries, dram_num_entries;
+ u32 sram_ppe_num_data_entries = PPE_SRAM_NUM_ENTRIES, sram_num_entries;
+ u32 sram_tb_size, dram_num_entries;
struct airoha_eth *eth = ppe->eth;
- int i;
+ int i, sram_num_stats_entries;
- sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+ sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+ sram_tb_size = sram_num_entries * sizeof(struct airoha_foe_entry);
dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
- for (i = 0; i < PPE_NUM; i++) {
+ sram_num_stats_entries = airoha_ppe_get_num_stats_entries(ppe);
+ if (sram_num_stats_entries > 0)
+ sram_ppe_num_data_entries -= sram_num_stats_entries;
+ sram_ppe_num_data_entries =
+ PPE_RAM_NUM_ENTRIES_SHIFT(sram_ppe_num_data_entries);
+
+ for (i = 0; i < eth->soc->num_ppe; i++) {
int p;
airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
@@ -85,10 +134,16 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
PPE_TB_CFG_SEARCH_MISS_MASK |
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK |
PPE_TB_CFG_KEEPALIVE_MASK |
PPE_TB_ENTRY_SIZE_MASK,
FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
- FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
+ FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0) |
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_ppe_num_data_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
@@ -101,35 +156,6 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
FIELD_PREP(FP1_EGRESS_MTU_MASK,
AIROHA_MAX_MTU));
}
-
- if (airoha_ppe2_is_enabled(eth)) {
- sram_num_entries =
- PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
- airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
- PPE_SRAM_TB_NUM_ENTRY_MASK |
- PPE_DRAM_TB_NUM_ENTRY_MASK,
- FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
- sram_num_entries) |
- FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
- dram_num_entries));
- airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
- PPE_SRAM_TB_NUM_ENTRY_MASK |
- PPE_DRAM_TB_NUM_ENTRY_MASK,
- FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
- sram_num_entries) |
- FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
- dram_num_entries));
- } else {
- sram_num_entries =
- PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
- airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
- PPE_SRAM_TB_NUM_ENTRY_MASK |
- PPE_DRAM_TB_NUM_ENTRY_MASK,
- FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
- sram_num_entries) |
- FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
- dram_num_entries));
- }
}
static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
@@ -282,7 +308,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
if (!airoha_is_valid_gdm_port(eth, port))
return -EINVAL;
- if (dsa_port >= 0)
+ if (dsa_port >= 0 || eth->ports[1])
pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
: port->id;
else
@@ -428,9 +454,11 @@ static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
return 0;
}
-static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
+static u32 airoha_ppe_foe_get_entry_hash(struct airoha_ppe *ppe,
+ struct airoha_foe_entry *hwe)
{
int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
u32 hash, hv1, hv2, hv3;
switch (type) {
@@ -468,25 +496,31 @@ static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
case PPE_PKT_TYPE_IPV6_6RD:
default:
WARN_ON_ONCE(1);
- return PPE_HASH_MASK;
+ return ppe_hash_mask;
}
hash = (hv1 & hv2) | ((~hv1) & hv3);
hash = (hash >> 24) | ((hash & 0xffffff) << 8);
hash ^= hv1 ^ hv2 ^ hv3;
hash ^= hash >> 16;
- hash &= PPE_NUM_ENTRIES - 1;
+ hash &= ppe_hash_mask;
return hash;
}
-static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
+static int airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe,
+ u32 hash, u32 *index)
{
- if (!airoha_ppe2_is_enabled(ppe->eth))
- return hash;
+ int ppe_num_stats_entries;
- return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
- : hash;
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return ppe_num_stats_entries;
+
+ *index = hash >= ppe_num_stats_entries ? hash - PPE_STATS_NUM_ENTRIES
+ : hash;
+
+ return 0;
}
static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
@@ -500,9 +534,13 @@ static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
struct airoha_npu *npu)
{
- int i;
+ int i, ppe_num_stats_entries;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return;
- for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
+ for (i = 0; i < ppe_num_stats_entries; i++)
airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
}
@@ -513,10 +551,17 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
{
int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
u32 index, pse_port, val, *data, *ib2, *meter;
+ int ppe_num_stats_entries;
u8 nbq;
- index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
- if (index >= PPE_STATS_NUM_ENTRIES)
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return;
+
+ if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
+ return;
+
+ if (index >= ppe_num_stats_entries)
return;
if (type == PPE_PKT_TYPE_BRIDGE) {
@@ -557,17 +602,17 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
static struct airoha_foe_entry *
airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+
lockdep_assert_held(&ppe_lock);
- if (hash < PPE_SRAM_NUM_ENTRIES) {
+ if (hash < sram_num_entries) {
u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
+ bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
struct airoha_eth *eth = ppe->eth;
- bool ppe2;
u32 val;
int i;
- ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
- hash >= PPE1_SRAM_NUM_ENTRIES;
airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
PPE_SRAM_CTRL_REQ_MASK);
@@ -577,7 +622,8 @@ airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
REG_PPE_RAM_CTRL(ppe2)))
return NULL;
- for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
+ for (i = 0; i < sizeof(struct airoha_foe_entry) / sizeof(*hwe);
+ i++)
hwe[i] = airoha_fe_rr(eth,
REG_PPE_RAM_ENTRY(ppe2, i));
}
@@ -614,10 +660,32 @@ static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
}
+static int airoha_ppe_foe_commit_sram_entry(struct airoha_ppe *ppe, u32 hash)
+{
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
+ u32 *ptr = (u32 *)hwe, val;
+ int i;
+
+ for (i = 0; i < sizeof(*hwe) / sizeof(*ptr); i++)
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_ENTRY(ppe2, i), ptr[i]);
+
+ wmb();
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
+ FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+ PPE_SRAM_CTRL_WR_MASK | PPE_SRAM_CTRL_REQ_MASK);
+
+ return read_poll_timeout_atomic(airoha_fe_rr, val,
+ val & PPE_SRAM_CTRL_ACK_MASK,
+ 10, 100, false, ppe->eth,
+ REG_PPE_RAM_CTRL(ppe2));
+}
+
static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
struct airoha_foe_entry *e,
u32 hash, bool rx_wlan)
{
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
u32 ts = airoha_ppe_get_timestamp(ppe);
struct airoha_eth *eth = ppe->eth;
@@ -642,14 +710,8 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
if (!rx_wlan)
airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
- if (hash < PPE_SRAM_NUM_ENTRIES) {
- dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
- bool ppe2 = airoha_ppe2_is_enabled(eth) &&
- hash >= PPE1_SRAM_NUM_ENTRIES;
-
- err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
- hash, ppe2);
- }
+ if (hash < sram_num_entries)
+ err = airoha_ppe_foe_commit_sram_entry(ppe, hash);
unlock:
rcu_read_unlock();
@@ -772,7 +834,7 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
if (state == AIROHA_FOE_STATE_BIND)
goto unlock;
- index = airoha_ppe_foe_get_entry_hash(hwe);
+ index = airoha_ppe_foe_get_entry_hash(ppe, hwe);
hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
if (e->type == FLOW_TYPE_L2_SUBFLOW) {
state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
@@ -832,7 +894,7 @@ static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
if (type == PPE_PKT_TYPE_BRIDGE)
return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
- hash = airoha_ppe_foe_get_entry_hash(&e->data);
+ hash = airoha_ppe_foe_get_entry_hash(ppe, &e->data);
e->type = FLOW_TYPE_L4;
e->hash = 0xffff;
@@ -1158,11 +1220,19 @@ static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
struct airoha_foe_stats64 *stats)
{
- u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
struct airoha_eth *eth = ppe->eth;
+ int ppe_num_stats_entries;
struct airoha_npu *npu;
+ u32 index;
+
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries < 0)
+ return;
- if (index >= PPE_STATS_NUM_ENTRIES)
+ if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
+ return;
+
+ if (index >= ppe_num_stats_entries)
return;
rcu_read_lock();
@@ -1225,20 +1295,22 @@ static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
return -EOPNOTSUPP;
}
-static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
- struct airoha_npu *npu)
+static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe)
{
- int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
+ u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
struct airoha_foe_entry *hwe = ppe->foe;
+ int i, err = 0;
- if (airoha_ppe2_is_enabled(ppe->eth))
- sram_num_entries = sram_num_entries / 2;
+ for (i = 0; i < sram_num_entries; i++) {
+ int err;
- for (i = 0; i < sram_num_entries; i++)
memset(&hwe[i], 0, sizeof(*hwe));
+ err = airoha_ppe_foe_commit_sram_entry(ppe, i);
+ if (err)
+ break;
+ }
- return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
- PPE_SRAM_NUM_ENTRIES);
+ return err;
}
static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
@@ -1257,7 +1329,7 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
struct airoha_npu *npu = airoha_ppe_npu_get(eth);
struct airoha_ppe *ppe = eth->ppe;
- int err;
+ int err, ppe_num_stats_entries;
if (IS_ERR(npu))
return PTR_ERR(npu);
@@ -1266,18 +1338,15 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth)
if (err)
goto error_npu_put;
- if (PPE_STATS_NUM_ENTRIES) {
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries > 0) {
err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma,
- PPE_STATS_NUM_ENTRIES);
+ ppe_num_stats_entries);
if (err)
goto error_npu_put;
}
airoha_ppe_hw_init(ppe);
- err = airoha_ppe_flush_sram_entries(ppe, npu);
- if (err)
- goto error_npu_put;
-
airoha_ppe_foe_flow_stats_reset(ppe, npu);
rcu_assign_pointer(eth->npu, npu);
@@ -1313,9 +1382,10 @@ void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
u16 hash, bool rx_wlan)
{
struct airoha_ppe *ppe = dev->priv;
+ u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
u16 now, diff;
- if (hash > PPE_HASH_MASK)
+ if (hash > ppe_hash_mask)
return;
now = (u16)jiffies;
@@ -1405,8 +1475,9 @@ EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);
int airoha_ppe_init(struct airoha_eth *eth)
{
+ int foe_size, err, ppe_num_stats_entries;
+ u32 ppe_num_entries;
struct airoha_ppe *ppe;
- int foe_size, err;
ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
if (!ppe)
@@ -1415,24 +1486,25 @@ int airoha_ppe_init(struct airoha_eth *eth)
ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
ppe->dev.ops.check_skb = airoha_ppe_check_skb;
ppe->dev.priv = ppe;
+ ppe->eth = eth;
+ eth->ppe = ppe;
- foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+ ppe_num_entries = airoha_ppe_get_total_num_entries(ppe);
+ foe_size = ppe_num_entries * sizeof(struct airoha_foe_entry);
ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
GFP_KERNEL);
if (!ppe->foe)
return -ENOMEM;
- ppe->eth = eth;
- eth->ppe = ppe;
-
ppe->foe_flow = devm_kzalloc(eth->dev,
- PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
+ ppe_num_entries * sizeof(*ppe->foe_flow),
GFP_KERNEL);
if (!ppe->foe_flow)
return -ENOMEM;
- foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
- if (foe_size) {
+ ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+ if (ppe_num_stats_entries > 0) {
+ foe_size = ppe_num_stats_entries * sizeof(*ppe->foe_stats);
ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
&ppe->foe_stats_dma,
GFP_KERNEL);
@@ -1440,6 +1512,15 @@ int airoha_ppe_init(struct airoha_eth *eth)
return -ENOMEM;
}
+ ppe->foe_check_time = devm_kzalloc(eth->dev, ppe_num_entries,
+ GFP_KERNEL);
+ if (!ppe->foe_check_time)
+ return -ENOMEM;
+
+ err = airoha_ppe_flush_sram_entries(ppe);
+ if (err)
+ return err;
+
err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
if (err)
return err;
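The airoha_ppe_foe_commit_sram_entry() helper added above follows the usual MMIO write-then-poll handshake: write the payload words, order them with wmb(), set the request bit, then poll (bounded, in atomic context) for the hardware ACK. A generic sketch with an invented register layout (DEMO_REQ/DEMO_ACK at offset 0, payload at 0x10); the real offsets and bits are the REG_PPE_RAM_* definitions in airoha_regs.h:

    /* Write-then-poll MMIO handshake sketch; register layout is assumed. */
    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>

    #define DEMO_REQ    BIT(0)  /* hypothetical request bit */
    #define DEMO_ACK    BIT(1)  /* hypothetical ack bit */

    static int demo_commit(void __iomem *base, const u32 *data, int n)
    {
        u32 val;
        int i;

        for (i = 0; i < n; i++)
            writel(data[i], base + 0x10 + i * 4);

        wmb();  /* payload must land before the doorbell */
        writel(DEMO_REQ, base);

        /* poll every 10us, up to 100us, safe in atomic context */
        return readl_poll_timeout_atomic(base, val, val & DEMO_ACK,
                                         10, 100);
    }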
diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
index 05a756233f6a..0112c41150bb 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
@@ -53,9 +53,10 @@ static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
[AIROHA_FOE_STATE_FIN] = "FIN",
};
struct airoha_ppe *ppe = m->private;
+ u32 ppe_num_entries = airoha_ppe_get_total_num_entries(ppe);
int i;
- for (i = 0; i < PPE_NUM_ENTRIES; i++) {
+ for (i = 0; i < ppe_num_entries; i++) {
const char *state_str, *type_str = "UNKNOWN";
void *src_addr = NULL, *dest_addr = NULL;
u16 *src_port = NULL, *dest_port = NULL;
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
index 69c5a143db8c..ed4e3407f4a0 100644
--- a/drivers/net/ethernet/airoha/airoha_regs.h
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -23,6 +23,8 @@
#define GDM3_BASE 0x1100
#define GDM4_BASE 0x2500
+#define CDM_BASE(_n) \
+ ((_n) == 2 ? CDM2_BASE : CDM1_BASE)
#define GDM_BASE(_n) \
((_n) == 4 ? GDM4_BASE : \
(_n) == 3 ? GDM3_BASE : \
@@ -109,30 +111,24 @@
#define PATN_DP_MASK GENMASK(31, 16)
#define PATN_SP_MASK GENMASK(15, 0)
-#define REG_CDM1_VLAN_CTRL CDM1_BASE
-#define CDM1_VLAN_MASK GENMASK(31, 16)
+#define REG_CDM_VLAN_CTRL(_n) CDM_BASE(_n)
+#define CDM_VLAN_MASK GENMASK(31, 16)
-#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
-#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
+#define REG_CDM_FWD_CFG(_n) (CDM_BASE(_n) + 0x08)
+#define CDM_OAM_QSEL_MASK GENMASK(31, 27)
+#define CDM_VIP_QSEL_MASK GENMASK(24, 20)
-#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
-#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
- GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
-
-#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
-#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
-#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
-
-#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
-#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
+#define REG_CDM_CRSN_QSEL(_n, _m) (CDM_BASE(_n) + 0x10 + ((_m) << 2))
+#define CDM_CRSN_QSEL_REASON_MASK(_n) \
GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
-#define GDM_DROP_CRC_ERR BIT(23)
-#define GDM_IP4_CKSUM BIT(22)
-#define GDM_TCP_CKSUM BIT(21)
-#define GDM_UDP_CKSUM BIT(20)
-#define GDM_STRIP_CRC BIT(16)
+#define GDM_PAD_EN_MASK BIT(28)
+#define GDM_DROP_CRC_ERR_MASK BIT(23)
+#define GDM_IP4_CKSUM_MASK BIT(22)
+#define GDM_TCP_CKSUM_MASK BIT(21)
+#define GDM_UDP_CKSUM_MASK BIT(20)
+#define GDM_STRIP_CRC_MASK BIT(16)
#define GDM_UCFQ_MASK GENMASK(15, 12)
#define GDM_BCFQ_MASK GENMASK(11, 8)
#define GDM_MCFQ_MASK GENMASK(7, 4)
@@ -156,6 +152,10 @@
#define LBK_CHAN_MODE_MASK BIT(1)
#define LPBK_EN_MASK BIT(0)
+#define REG_GDM_CHN_RLS(_n) (GDM_BASE(_n) + 0x20)
+#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
+#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
+
#define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24)
#define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28)
@@ -168,10 +168,10 @@
#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
-#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
+#define REG_FE_GDM_MIB_CFG(_n) (GDM_BASE(_n) + 0xf4)
#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
-#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
-#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
+#define FE_GDM_TX_MIB_SPLIT_EN_MASK BIT(17)
+#define FE_GDM_RX_MIB_SPLIT_EN_MASK BIT(16)
#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
@@ -214,6 +214,33 @@
#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
+#define REG_GDM_SRC_PORT_SET(_n) (GDM_BASE(_n) + 0x23c)
+#define GDM_SPORT_OFF2_MASK GENMASK(19, 16)
+#define GDM_SPORT_OFF1_MASK GENMASK(15, 12)
+#define GDM_SPORT_OFF0_MASK GENMASK(11, 8)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
+
#define REG_PPE_GLO_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200)
#define PPE_GLO_CFG_BUSY_MASK BIT(31)
#define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9)
@@ -326,44 +353,6 @@
#define REG_UPDMEM_DATA(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x374)
-#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
-#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
-#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
-#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
-
-#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
-#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
-#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
-#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
-#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
-#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
-#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
-#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
-#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
-#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
-#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
-#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
-#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
-#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
-
-#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
-#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
-#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
-
-#define REG_GDM3_FWD_CFG GDM3_BASE
-#define GDM3_PAD_EN_MASK BIT(28)
-
-#define REG_GDM4_FWD_CFG GDM4_BASE
-#define GDM4_PAD_EN_MASK BIT(28)
-#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
-
-#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
-#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
-#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
-#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
-
#define REG_IP_FRAG_FP 0x2010
#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
@@ -383,10 +372,8 @@
#define REG_MC_VLAN_DATA 0x2108
#define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2))
-#define SP_CPORT_PCIE1_MASK GENMASK(31, 28)
-#define SP_CPORT_PCIE0_MASK GENMASK(27, 24)
-#define SP_CPORT_USB_MASK GENMASK(7, 4)
-#define SP_CPORT_ETH_MASK GENMASK(7, 4)
+#define SP_CPORT_DFT_MASK GENMASK(2, 0)
+#define SP_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2))
#define REG_SRC_PORT_FC_MAP6 0x2298
#define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24)
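All the *_MASK definitions above follow the kernel's GENMASK()/FIELD_PREP()/FIELD_GET() idiom: the mask encodes the field's bit span once, and the helpers shift values in and out without hand-written shift constants. A small standalone illustration with invented masks (DEMO_*), mirroring the 0x8100 VLAN and queue-select values used earlier in the diff:

    /* GENMASK/FIELD_PREP idiom sketch; DEMO_* masks are made up. */
    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define DEMO_VLAN_MASK  GENMASK(31, 16) /* bits 31..16 */
    #define DEMO_QSEL_MASK  GENMASK(24, 20) /* bits 24..20 */

    static u32 demo_pack(u32 reg)
    {
        /* clear both fields, then set VLAN proto 0x8100, queue 4 */
        reg &= ~(DEMO_VLAN_MASK | DEMO_QSEL_MASK);
        reg |= FIELD_PREP(DEMO_VLAN_MASK, 0x8100) |
               FIELD_PREP(DEMO_QSEL_MASK, 0x4);
        return reg;
    }

    static u32 demo_unpack_vlan(u32 reg)
    {
        return FIELD_GET(DEMO_VLAN_MASK, reg); /* yields 0x8100 */
    }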