Diffstat (limited to 'drivers/net/ethernet/airoha/airoha_ppe.c')
 -rw-r--r--   drivers/net/ethernet/airoha/airoha_ppe.c | 261
 1 file changed, 171 insertions(+), 90 deletions(-)
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 691361b25407..0caabb0c3aa0 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -32,9 +32,50 @@ static const struct rhashtable_params airoha_l2_flow_table_params = {
         .automatic_shrinking = true,
 };
 
-static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
+static int airoha_ppe_get_num_stats_entries(struct airoha_ppe *ppe)
 {
-        return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
+        if (!IS_ENABLED(CONFIG_NET_AIROHA_FLOW_STATS))
+                return -EOPNOTSUPP;
+
+        if (airoha_is_7583(ppe->eth))
+                return -EOPNOTSUPP;
+
+        return PPE_STATS_NUM_ENTRIES;
+}
+
+static int airoha_ppe_get_total_num_stats_entries(struct airoha_ppe *ppe)
+{
+        int num_stats = airoha_ppe_get_num_stats_entries(ppe);
+
+        if (num_stats > 0) {
+                struct airoha_eth *eth = ppe->eth;
+
+                num_stats = num_stats * eth->soc->num_ppe;
+        }
+
+        return num_stats;
+}
+
+static u32 airoha_ppe_get_total_sram_num_entries(struct airoha_ppe *ppe)
+{
+        struct airoha_eth *eth = ppe->eth;
+
+        return PPE_SRAM_NUM_ENTRIES * eth->soc->num_ppe;
+}
+
+u32 airoha_ppe_get_total_num_entries(struct airoha_ppe *ppe)
+{
+        u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+
+        return sram_num_entries + PPE_DRAM_NUM_ENTRIES;
+}
+
+bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index)
+{
+        if (index >= eth->soc->num_ppe)
+                return false;
+
+        return airoha_fe_rr(eth, REG_PPE_GLO_CFG(index)) & PPE_GLO_CFG_EN_MASK;
 }
 
 static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
@@ -46,14 +87,22 @@ static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
 
 static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
 {
-        u32 sram_tb_size, sram_num_entries, dram_num_entries;
+        u32 sram_ppe_num_data_entries = PPE_SRAM_NUM_ENTRIES, sram_num_entries;
+        u32 sram_tb_size, dram_num_entries;
         struct airoha_eth *eth = ppe->eth;
-        int i;
+        int i, sram_num_stats_entries;
 
-        sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+        sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+        sram_tb_size = sram_num_entries * sizeof(struct airoha_foe_entry);
         dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
 
-        for (i = 0; i < PPE_NUM; i++) {
+        sram_num_stats_entries = airoha_ppe_get_num_stats_entries(ppe);
+        if (sram_num_stats_entries > 0)
+                sram_ppe_num_data_entries -= sram_num_stats_entries;
+        sram_ppe_num_data_entries =
+                PPE_RAM_NUM_ENTRIES_SHIFT(sram_ppe_num_data_entries);
+
+        for (i = 0; i < eth->soc->num_ppe; i++) {
                 int p;
 
                 airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
@@ -85,10 +134,16 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
 
                 airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
                               PPE_TB_CFG_SEARCH_MISS_MASK |
+                              PPE_SRAM_TB_NUM_ENTRY_MASK |
+                              PPE_DRAM_TB_NUM_ENTRY_MASK |
                               PPE_TB_CFG_KEEPALIVE_MASK |
                               PPE_TB_ENTRY_SIZE_MASK,
                               FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
-                              FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
+                              FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0) |
+                              FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+                                         sram_ppe_num_data_entries) |
+                              FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+                                         dram_num_entries));
 
                 airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
 
@@ -101,35 +156,6 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
                                       FIELD_PREP(FP1_EGRESS_MTU_MASK,
                                                  AIROHA_MAX_MTU));
         }
-
-        if (airoha_ppe2_is_enabled(eth)) {
-                sram_num_entries =
-                        PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
-                airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
-                              PPE_SRAM_TB_NUM_ENTRY_MASK |
-                              PPE_DRAM_TB_NUM_ENTRY_MASK,
-                              FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
-                                         sram_num_entries) |
-                              FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
-                                         dram_num_entries));
-                airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
-                              PPE_SRAM_TB_NUM_ENTRY_MASK |
-                              PPE_DRAM_TB_NUM_ENTRY_MASK,
-                              FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
-                                         sram_num_entries) |
-                              FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
-                                         dram_num_entries));
-        } else {
-                sram_num_entries =
-                        PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
-                airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
-                              PPE_SRAM_TB_NUM_ENTRY_MASK |
-                              PPE_DRAM_TB_NUM_ENTRY_MASK,
-                              FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
-                                         sram_num_entries) |
-                              FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
-                                         dram_num_entries));
-        }
 }
 
 static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
@@ -282,7 +308,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
         if (!airoha_is_valid_gdm_port(eth, port))
                 return -EINVAL;
 
-        if (dsa_port >= 0)
+        if (dsa_port >= 0 || eth->ports[1])
                 pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
                                          : port->id;
         else
@@ -428,9 +454,11 @@ static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
         return 0;
 }
 
-static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
+static u32 airoha_ppe_foe_get_entry_hash(struct airoha_ppe *ppe,
+                                         struct airoha_foe_entry *hwe)
 {
         int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+        u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
         u32 hash, hv1, hv2, hv3;
 
         switch (type) {
@@ -468,25 +496,31 @@ static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
         case PPE_PKT_TYPE_IPV6_6RD:
         default:
                 WARN_ON_ONCE(1);
-                return PPE_HASH_MASK;
+                return ppe_hash_mask;
         }
 
         hash = (hv1 & hv2) | ((~hv1) & hv3);
         hash = (hash >> 24) | ((hash & 0xffffff) << 8);
         hash ^= hv1 ^ hv2 ^ hv3;
         hash ^= hash >> 16;
-        hash &= PPE_NUM_ENTRIES - 1;
+        hash &= ppe_hash_mask;
 
         return hash;
 }
 
-static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
+static int airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe,
+                                               u32 hash, u32 *index)
 {
-        if (!airoha_ppe2_is_enabled(ppe->eth))
-                return hash;
+        int ppe_num_stats_entries;
 
-        return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
-                                             : hash;
+        ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+        if (ppe_num_stats_entries < 0)
+                return ppe_num_stats_entries;
+
+        *index = hash >= ppe_num_stats_entries ? hash - PPE_STATS_NUM_ENTRIES
+                                               : hash;
+
+        return 0;
 }
 
 static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
@@ -500,9 +534,13 @@ static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
 static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
                                             struct airoha_npu *npu)
 {
-        int i;
+        int i, ppe_num_stats_entries;
+
+        ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+        if (ppe_num_stats_entries < 0)
+                return;
 
-        for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
+        for (i = 0; i < ppe_num_stats_entries; i++)
                 airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
 }
 
@@ -513,10 +551,17 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
 {
         int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
         u32 index, pse_port, val, *data, *ib2, *meter;
+        int ppe_num_stats_entries;
         u8 nbq;
 
-        index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
-        if (index >= PPE_STATS_NUM_ENTRIES)
+        ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+        if (ppe_num_stats_entries < 0)
+                return;
+
+        if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
+                return;
+
+        if (index >= ppe_num_stats_entries)
                 return;
 
         if (type == PPE_PKT_TYPE_BRIDGE) {
@@ -557,17 +602,17 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
 static struct airoha_foe_entry *
 airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
 {
+        u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
+
         lockdep_assert_held(&ppe_lock);
 
-        if (hash < PPE_SRAM_NUM_ENTRIES) {
+        if (hash < sram_num_entries) {
                 u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
+                bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
                 struct airoha_eth *eth = ppe->eth;
-                bool ppe2;
                 u32 val;
                 int i;
 
-                ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
-                       hash >= PPE1_SRAM_NUM_ENTRIES;
                 airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
                              FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
                              PPE_SRAM_CTRL_REQ_MASK);
@@ -577,7 +622,8 @@ airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
                                              REG_PPE_RAM_CTRL(ppe2)))
                         return NULL;
 
-                for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
+                for (i = 0; i < sizeof(struct airoha_foe_entry) / sizeof(*hwe);
+                     i++)
                         hwe[i] = airoha_fe_rr(eth,
                                               REG_PPE_RAM_ENTRY(ppe2, i));
         }
@@ -614,10 +660,32 @@ static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
         return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
 }
 
+static int airoha_ppe_foe_commit_sram_entry(struct airoha_ppe *ppe, u32 hash)
+{
+        struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+        bool ppe2 = hash >= PPE_SRAM_NUM_ENTRIES;
+        u32 *ptr = (u32 *)hwe, val;
+        int i;
+
+        for (i = 0; i < sizeof(*hwe) / sizeof(*ptr); i++)
+                airoha_fe_wr(ppe->eth, REG_PPE_RAM_ENTRY(ppe2, i), ptr[i]);
+
+        wmb();
+        airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
+                     FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+                     PPE_SRAM_CTRL_WR_MASK | PPE_SRAM_CTRL_REQ_MASK);
+
+        return read_poll_timeout_atomic(airoha_fe_rr, val,
+                                        val & PPE_SRAM_CTRL_ACK_MASK,
+                                        10, 100, false, ppe->eth,
+                                        REG_PPE_RAM_CTRL(ppe2));
+}
+
 static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
                                        struct airoha_foe_entry *e,
                                        u32 hash, bool rx_wlan)
 {
+        u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
         struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
         u32 ts = airoha_ppe_get_timestamp(ppe);
         struct airoha_eth *eth = ppe->eth;
@@ -642,14 +710,8 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
         if (!rx_wlan)
                 airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
 
-        if (hash < PPE_SRAM_NUM_ENTRIES) {
-                dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
-                bool ppe2 = airoha_ppe2_is_enabled(eth) &&
-                            hash >= PPE1_SRAM_NUM_ENTRIES;
-
-                err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
-                                                    hash, ppe2);
-        }
+        if (hash < sram_num_entries)
+                err = airoha_ppe_foe_commit_sram_entry(ppe, hash);
 
 unlock:
         rcu_read_unlock();
@@ -772,7 +834,7 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
         if (state == AIROHA_FOE_STATE_BIND)
                 goto unlock;
 
-        index = airoha_ppe_foe_get_entry_hash(hwe);
+        index = airoha_ppe_foe_get_entry_hash(ppe, hwe);
         hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
                 if (e->type == FLOW_TYPE_L2_SUBFLOW) {
                         state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
@@ -832,7 +894,7 @@ static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
         if (type == PPE_PKT_TYPE_BRIDGE)
                 return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
 
-        hash = airoha_ppe_foe_get_entry_hash(&e->data);
+        hash = airoha_ppe_foe_get_entry_hash(ppe, &e->data);
         e->type = FLOW_TYPE_L4;
         e->hash = 0xffff;
 
@@ -1158,11 +1220,19 @@ static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
 void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
                                     struct airoha_foe_stats64 *stats)
 {
-        u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
         struct airoha_eth *eth = ppe->eth;
+        int ppe_num_stats_entries;
         struct airoha_npu *npu;
+        u32 index;
+
+        ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+        if (ppe_num_stats_entries < 0)
+                return;
 
-        if (index >= PPE_STATS_NUM_ENTRIES)
+        if (airoha_ppe_foe_get_flow_stats_index(ppe, hash, &index))
+                return;
+
+        if (index >= ppe_num_stats_entries)
                 return;
 
         rcu_read_lock();
@@ -1225,20 +1295,22 @@ static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
         return -EOPNOTSUPP;
 }
 
-static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
-                                         struct airoha_npu *npu)
+static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe)
 {
-        int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
+        u32 sram_num_entries = airoha_ppe_get_total_sram_num_entries(ppe);
         struct airoha_foe_entry *hwe = ppe->foe;
+        int i, err = 0;
 
-        if (airoha_ppe2_is_enabled(ppe->eth))
-                sram_num_entries = sram_num_entries / 2;
+        for (i = 0; i < sram_num_entries; i++) {
+                int err;
 
-        for (i = 0; i < sram_num_entries; i++)
                 memset(&hwe[i], 0, sizeof(*hwe));
+                err = airoha_ppe_foe_commit_sram_entry(ppe, i);
+                if (err)
+                        break;
+        }
 
-        return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
-                                               PPE_SRAM_NUM_ENTRIES);
+        return err;
 }
 
 static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
@@ -1257,7 +1329,7 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth)
 {
         struct airoha_npu *npu = airoha_ppe_npu_get(eth);
         struct airoha_ppe *ppe = eth->ppe;
-        int err;
+        int err, ppe_num_stats_entries;
 
         if (IS_ERR(npu))
                 return PTR_ERR(npu);
@@ -1266,18 +1338,15 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth)
         if (err)
                 goto error_npu_put;
 
-        if (PPE_STATS_NUM_ENTRIES) {
+        ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+        if (ppe_num_stats_entries > 0) {
                 err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma,
-                                              PPE_STATS_NUM_ENTRIES);
+                                              ppe_num_stats_entries);
                 if (err)
                         goto error_npu_put;
         }
 
         airoha_ppe_hw_init(ppe);
-        err = airoha_ppe_flush_sram_entries(ppe, npu);
-        if (err)
-                goto error_npu_put;
-
         airoha_ppe_foe_flow_stats_reset(ppe, npu);
 
         rcu_assign_pointer(eth->npu, npu);
@@ -1313,9 +1382,10 @@ void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
                           u16 hash, bool rx_wlan)
 {
         struct airoha_ppe *ppe = dev->priv;
+        u32 ppe_hash_mask = airoha_ppe_get_total_num_entries(ppe) - 1;
         u16 now, diff;
 
-        if (hash > PPE_HASH_MASK)
+        if (hash > ppe_hash_mask)
                 return;
 
         now = (u16)jiffies;
@@ -1405,8 +1475,9 @@ EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);
 
 int airoha_ppe_init(struct airoha_eth *eth)
 {
+        int foe_size, err, ppe_num_stats_entries;
+        u32 ppe_num_entries;
         struct airoha_ppe *ppe;
-        int foe_size, err;
 
         ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
         if (!ppe)
@@ -1415,24 +1486,25 @@ int airoha_ppe_init(struct airoha_eth *eth)
         ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
         ppe->dev.ops.check_skb = airoha_ppe_check_skb;
         ppe->dev.priv = ppe;
+        ppe->eth = eth;
+        eth->ppe = ppe;
 
-        foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+        ppe_num_entries = airoha_ppe_get_total_num_entries(ppe);
+        foe_size = ppe_num_entries * sizeof(struct airoha_foe_entry);
         ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
                                        GFP_KERNEL);
         if (!ppe->foe)
                 return -ENOMEM;
 
-        ppe->eth = eth;
-        eth->ppe = ppe;
-
         ppe->foe_flow = devm_kzalloc(eth->dev,
-                                     PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
+                                     ppe_num_entries * sizeof(*ppe->foe_flow),
                                      GFP_KERNEL);
         if (!ppe->foe_flow)
                 return -ENOMEM;
 
-        foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
-        if (foe_size) {
+        ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
+        if (ppe_num_stats_entries > 0) {
+                foe_size = ppe_num_stats_entries * sizeof(*ppe->foe_stats);
                 ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
                                                      &ppe->foe_stats_dma,
                                                      GFP_KERNEL);
@@ -1440,6 +1512,15 @@ int airoha_ppe_init(struct airoha_eth *eth)
                         return -ENOMEM;
         }
 
+        ppe->foe_check_time = devm_kzalloc(eth->dev, ppe_num_entries,
+                                           GFP_KERNEL);
+        if (!ppe->foe_check_time)
+                return -ENOMEM;
+
+        err = airoha_ppe_flush_sram_entries(ppe);
+        if (err)
+                return err;
+
         err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
         if (err)
                 return err;
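
Editor's note on the table sizing: the patch replaces hard-coded PPE1/PPE2 splits with sizes derived from the per-SoC PPE count, and masks lookup hashes with the total entry count minus one. The standalone sketch below condenses that arithmetic; the constant values, names, and num_ppe figure in it are illustrative assumptions, not the driver's actual definitions.

/*
 * Standalone sketch (not driver code): the table-sizing and hash-folding
 * scheme used by the patch above. The two entry-count macros are
 * placeholder values chosen only so the example is runnable.
 */
#include <stdio.h>

#define SKETCH_SRAM_NUM_ENTRIES (8 * 1024)   /* assumed per-PPE SRAM slots */
#define SKETCH_DRAM_NUM_ENTRIES (16 * 1024)  /* assumed shared DRAM slots */

/* Mirrors the shape of airoha_ppe_get_total_num_entries(): the SRAM
 * portion scaled by the SoC's PPE count, plus the DRAM-backed portion.
 */
static unsigned int sketch_total_num_entries(unsigned int num_ppe)
{
        return SKETCH_SRAM_NUM_ENTRIES * num_ppe + SKETCH_DRAM_NUM_ENTRIES;
}

int main(void)
{
        unsigned int num_ppe = 2; /* e.g. a dual-PPE SoC */
        unsigned int total = sketch_total_num_entries(num_ppe);
        unsigned int hash = 0x12345678;

        /* hash & (total - 1) folds the hash into the table; this is only
         * equivalent to a modulo while "total" stays a power of two.
         */
        unsigned int index = hash & (total - 1);

        printf("total entries: %u, folded index: %u\n", total, index);
        return 0;
}

This is only meant to make the arithmetic in airoha_ppe_get_total_num_entries() and airoha_ppe_foe_get_entry_hash() easier to follow; the real entry counts come from the driver's headers and the SoC data's num_ppe field.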
