diff options
Diffstat (limited to 'drivers/net/dsa')
27 files changed, 6915 insertions, 1774 deletions
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 4d9af691b989..7eb301fd987d 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -154,4 +154,11 @@ config NET_DSA_VITESSE_VSC73XX_PLATFORM This enables support for the Vitesse VSC7385, VSC7388, VSC7395 and VSC7398 SparX integrated ethernet switches, connected over a CPU-attached address bus and work in memory-mapped I/O mode. + +config NET_DSA_YT921X + tristate "Motorcomm YT9215 ethernet switch chip support" + select NET_DSA_TAG_YT921X + help + This enables support for the Motorcomm YT9215 ethernet switch + chip. endmenu diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 0f8ff4a1a313..16de4ba3fa38 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx-core.o obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o +obj-$(CONFIG_NET_DSA_YT921X) += yt921x.o obj-y += b53/ obj-y += hirschmann/ obj-y += lantiq/ diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index eb767edc4c13..a1a177713d99 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -632,6 +632,25 @@ static void b53_port_set_learning(struct b53_device *dev, int port, b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); } +static void b53_port_set_isolated(struct b53_device *dev, int port, + bool isolated) +{ + u8 offset; + u16 reg; + + if (is5325(dev)) + offset = B53_PROTECTED_PORT_SEL_25; + else + offset = B53_PROTECTED_PORT_SEL; + + b53_read16(dev, B53_CTRL_PAGE, offset, ®); + if (isolated) + reg |= BIT(port); + else + reg &= ~BIT(port); + b53_write16(dev, B53_CTRL_PAGE, offset, reg); +} + static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable) { struct b53_device *dev = ds->priv; @@ 
-652,6 +671,7 @@ int b53_setup_port(struct dsa_switch *ds, int port) b53_port_set_ucast_flood(dev, port, true); b53_port_set_mcast_flood(dev, port, true); b53_port_set_learning(dev, port, false); + b53_port_set_isolated(dev, port, false); /* Force all traffic to go to the CPU port to prevent the ASIC from * trying to forward to bridged ports on matching FDB entries, then @@ -852,10 +872,7 @@ static void b53_enable_stp(struct b53_device *dev) static u16 b53_default_pvid(struct b53_device *dev) { - if (is5325(dev) || is5365(dev)) - return 1; - else - return 0; + return 0; } static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) @@ -1679,9 +1696,6 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port, { struct b53_device *dev = ds->priv; - if ((is5325(dev) || is5365(dev)) && vlan->vid == 0) - return -EOPNOTSUPP; - /* Port 7 on 7278 connects to the ASP's UniMAC which is not capable of * receiving VLAN tagged frames at all, we can still allow the port to * be configured for egress untagged. 
@@ -1830,49 +1844,83 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) return b53_arl_op_wait(dev); } -static int b53_arl_read(struct b53_device *dev, u64 mac, - u16 vid, struct b53_arl_entry *ent, u8 *idx) +static void b53_arl_read_entry_25(struct b53_device *dev, + struct b53_arl_entry *ent, u8 idx) { - DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); - unsigned int i; - int ret; + u8 vid_entry; + u64 mac_vid; - ret = b53_arl_op_wait(dev); - if (ret) - return ret; + b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx), + &vid_entry); + b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), + &mac_vid); + b53_arl_to_entry_25(ent, mac_vid, vid_entry); +} - bitmap_zero(free_bins, dev->num_arl_bins); +static void b53_arl_write_entry_25(struct b53_device *dev, + const struct b53_arl_entry *ent, u8 idx) +{ + u8 vid_entry; + u64 mac_vid; - /* Read the bins */ - for (i = 0; i < dev->num_arl_bins; i++) { - u64 mac_vid; - u32 fwd_entry; + b53_arl_from_entry_25(&mac_vid, &vid_entry, ent); + b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_VID_ENTRY_25(idx), vid_entry); + b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), + mac_vid); +} - b53_read64(dev, B53_ARLIO_PAGE, - B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); - b53_read32(dev, B53_ARLIO_PAGE, - B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); - b53_arl_to_entry(ent, mac_vid, fwd_entry); +static void b53_arl_read_entry_89(struct b53_device *dev, + struct b53_arl_entry *ent, u8 idx) +{ + u64 mac_vid; + u16 fwd_entry; - if (!(fwd_entry & ARLTBL_VALID)) { - set_bit(i, free_bins); - continue; - } - if ((mac_vid & ARLTBL_MAC_MASK) != mac) - continue; - if (dev->vlan_enabled && - ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid) - continue; - *idx = i; - return 0; - } + b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), + &mac_vid); + b53_read16(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry); + b53_arl_to_entry_89(ent, mac_vid, fwd_entry); +} - *idx = 
find_first_bit(free_bins, dev->num_arl_bins); - return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT; +static void b53_arl_write_entry_89(struct b53_device *dev, + const struct b53_arl_entry *ent, u8 idx) +{ + u32 fwd_entry; + u64 mac_vid; + + b53_arl_from_entry_89(&mac_vid, &fwd_entry, ent); + b53_write64(dev, B53_ARLIO_PAGE, + B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); + b53_write16(dev, B53_ARLIO_PAGE, + B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); } -static int b53_arl_read_25(struct b53_device *dev, u64 mac, - u16 vid, struct b53_arl_entry *ent, u8 *idx) +static void b53_arl_read_entry_95(struct b53_device *dev, + struct b53_arl_entry *ent, u8 idx) +{ + u32 fwd_entry; + u64 mac_vid; + + b53_read64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), + &mac_vid); + b53_read32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), &fwd_entry); + b53_arl_to_entry(ent, mac_vid, fwd_entry); +} + +static void b53_arl_write_entry_95(struct b53_device *dev, + const struct b53_arl_entry *ent, u8 idx) +{ + u32 fwd_entry; + u64 mac_vid; + + b53_arl_from_entry(&mac_vid, &fwd_entry, ent); + b53_write64(dev, B53_ARLIO_PAGE, B53_ARLTBL_MAC_VID_ENTRY(idx), + mac_vid); + b53_write32(dev, B53_ARLIO_PAGE, B53_ARLTBL_DATA_ENTRY(idx), + fwd_entry); +} + +static int b53_arl_read(struct b53_device *dev, const u8 *mac, + u16 vid, struct b53_arl_entry *ent, u8 *idx) { DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES); unsigned int i; @@ -1886,21 +1934,15 @@ static int b53_arl_read_25(struct b53_device *dev, u64 mac, /* Read the bins */ for (i = 0; i < dev->num_arl_bins; i++) { - u64 mac_vid; - - b53_read64(dev, B53_ARLIO_PAGE, - B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); - - b53_arl_to_entry_25(ent, mac_vid); + b53_arl_read_entry(dev, ent, i); - if (!(mac_vid & ARLTBL_VALID_25)) { + if (!ent->is_valid) { set_bit(i, free_bins); continue; } - if ((mac_vid & ARLTBL_MAC_MASK) != mac) + if (!ether_addr_equal(ent->mac, mac)) continue; - if (dev->vlan_enabled && - ((mac_vid >> ARLTBL_VID_S_65) & 
ARLTBL_VID_MASK_25) != vid) + if (dev->vlan_enabled && ent->vid != vid) continue; *idx = i; return 0; @@ -1914,9 +1956,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, const unsigned char *addr, u16 vid, bool is_valid) { struct b53_arl_entry ent; - u32 fwd_entry; - u64 mac, mac_vid = 0; u8 idx = 0; + u64 mac; int ret; /* Convert the array into a 64-bit MAC */ @@ -1924,18 +1965,19 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, /* Perform a read for the given MAC and VID */ b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); - if (!is5325m(dev)) - b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); + if (!is5325m(dev)) { + if (is5325(dev) || is5365(dev)) + b53_write8(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); + else + b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); + } /* Issue a read operation for this MAC */ ret = b53_arl_rw_op(dev, 1); if (ret) return ret; - if (is5325(dev) || is5365(dev)) - ret = b53_arl_read_25(dev, mac, vid, &ent, &idx); - else - ret = b53_arl_read(dev, mac, vid, &ent, &idx); + ret = b53_arl_read(dev, addr, vid, &ent, &idx); /* If this is a read, just finish now */ if (op) @@ -1952,7 +1994,6 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, /* We could not find a matching MAC, so reset to a new entry */ dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n", addr, vid, idx); - fwd_entry = 0; break; default: dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n", @@ -1979,17 +2020,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, ent.is_static = true; ent.is_age = false; memcpy(ent.mac, addr, ETH_ALEN); - if (is5325(dev) || is5365(dev)) - b53_arl_from_entry_25(&mac_vid, &ent); - else - b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); - - b53_write64(dev, B53_ARLIO_PAGE, - B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); - - if (!is5325(dev) && !is5365(dev)) - b53_write32(dev, B53_ARLIO_PAGE, - B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); + b53_arl_write_entry(dev, 
&ent, idx); return b53_arl_rw_op(dev, 0); } @@ -2024,18 +2055,53 @@ int b53_fdb_del(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_fdb_del); -static int b53_arl_search_wait(struct b53_device *dev) +static void b53_read_arl_srch_ctl(struct b53_device *dev, u8 *val) { - unsigned int timeout = 1000; - u8 reg, offset; + u8 offset; + + if (is5325(dev) || is5365(dev)) + offset = B53_ARL_SRCH_CTL_25; + else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) || + is63xx(dev)) + offset = B53_ARL_SRCH_CTL_89; + else + offset = B53_ARL_SRCH_CTL; + + if (is63xx(dev)) { + u16 val16; + + b53_read16(dev, B53_ARLIO_PAGE, offset, &val16); + *val = val16 & 0xff; + } else { + b53_read8(dev, B53_ARLIO_PAGE, offset, val); + } +} + +static void b53_write_arl_srch_ctl(struct b53_device *dev, u8 val) +{ + u8 offset; if (is5325(dev) || is5365(dev)) offset = B53_ARL_SRCH_CTL_25; + else if (dev->chip_id == BCM5389_DEVICE_ID || is5397_98(dev) || + is63xx(dev)) + offset = B53_ARL_SRCH_CTL_89; else offset = B53_ARL_SRCH_CTL; + if (is63xx(dev)) + b53_write16(dev, B53_ARLIO_PAGE, offset, val); + else + b53_write8(dev, B53_ARLIO_PAGE, offset, val); +} + +static int b53_arl_search_wait(struct b53_device *dev) +{ + unsigned int timeout = 1000; + u8 reg; + do { - b53_read8(dev, B53_ARLIO_PAGE, offset, ®); + b53_read_arl_srch_ctl(dev, ®); if (!(reg & ARL_SRCH_STDN)) return -ENOENT; @@ -2048,28 +2114,53 @@ static int b53_arl_search_wait(struct b53_device *dev) return -ETIMEDOUT; } -static void b53_arl_search_rd(struct b53_device *dev, u8 idx, - struct b53_arl_entry *ent) +static void b53_arl_search_read_25(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) { u64 mac_vid; + u8 ext; - if (is5325(dev)) { - b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25, - &mac_vid); - b53_arl_to_entry_25(ent, mac_vid); - } else if (is5365(dev)) { - b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_65, - &mac_vid); - b53_arl_to_entry_25(ent, mac_vid); - } else { - u32 fwd_entry; 
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_EXT_25, &ext); + b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25, + &mac_vid); + b53_arl_search_to_entry_25(ent, mac_vid, ext); +} - b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx), - &mac_vid); - b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx), - &fwd_entry); - b53_arl_to_entry(ent, mac_vid, fwd_entry); - } +static void b53_arl_search_read_89(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) +{ + u16 fwd_entry; + u64 mac_vid; + + b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_89, + &mac_vid); + b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_89, &fwd_entry); + b53_arl_to_entry_89(ent, mac_vid, fwd_entry); +} + +static void b53_arl_search_read_63xx(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) +{ + u16 fwd_entry; + u64 mac_vid; + + b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_MACVID_63XX, + &mac_vid); + b53_read16(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSLT_63XX, &fwd_entry); + b53_arl_search_to_entry_63xx(ent, mac_vid, fwd_entry); +} + +static void b53_arl_search_read_95(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) +{ + u32 fwd_entry; + u64 mac_vid; + + b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx), + &mac_vid); + b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx), + &fwd_entry); + b53_arl_to_entry(ent, mac_vid, fwd_entry); } static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, @@ -2090,36 +2181,28 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, unsigned int count = 0, results_per_hit = 1; struct b53_device *priv = ds->priv; struct b53_arl_entry results[2]; - u8 offset; int ret; - u8 reg; if (priv->num_arl_bins > 2) results_per_hit = 2; mutex_lock(&priv->arl_mutex); - if (is5325(priv) || is5365(priv)) - offset = B53_ARL_SRCH_CTL_25; - else - offset = B53_ARL_SRCH_CTL; - /* Start search operation */ - reg = ARL_SRCH_STDN; - b53_write8(priv, B53_ARLIO_PAGE, offset, reg); + 
b53_write_arl_srch_ctl(priv, ARL_SRCH_STDN); do { ret = b53_arl_search_wait(priv); if (ret) break; - b53_arl_search_rd(priv, 0, &results[0]); + b53_arl_search_read(priv, 0, &results[0]); ret = b53_fdb_copy(port, &results[0], cb, data); if (ret) break; if (results_per_hit == 2) { - b53_arl_search_rd(priv, 1, &results[1]); + b53_arl_search_read(priv, 1, &results[1]); ret = b53_fdb_copy(port, &results[1], cb, data); if (ret) break; @@ -2340,7 +2423,7 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack) { struct b53_device *dev = ds->priv; - unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD); + unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD | BR_ISOLATED); if (!is5325(dev)) mask |= BR_LEARNING; @@ -2365,6 +2448,9 @@ int b53_br_flags(struct dsa_switch *ds, int port, if (flags.mask & BR_LEARNING) b53_port_set_learning(ds->priv, port, !!(flags.val & BR_LEARNING)); + if (flags.mask & BR_ISOLATED) + b53_port_set_isolated(ds->priv, port, + !!(flags.val & BR_ISOLATED)); return 0; } @@ -2645,6 +2731,30 @@ static const struct dsa_switch_ops b53_switch_ops = { .port_change_mtu = b53_change_mtu, }; +static const struct b53_arl_ops b53_arl_ops_25 = { + .arl_read_entry = b53_arl_read_entry_25, + .arl_write_entry = b53_arl_write_entry_25, + .arl_search_read = b53_arl_search_read_25, +}; + +static const struct b53_arl_ops b53_arl_ops_89 = { + .arl_read_entry = b53_arl_read_entry_89, + .arl_write_entry = b53_arl_write_entry_89, + .arl_search_read = b53_arl_search_read_89, +}; + +static const struct b53_arl_ops b53_arl_ops_63xx = { + .arl_read_entry = b53_arl_read_entry_89, + .arl_write_entry = b53_arl_write_entry_89, + .arl_search_read = b53_arl_search_read_63xx, +}; + +static const struct b53_arl_ops b53_arl_ops_95 = { + .arl_read_entry = b53_arl_read_entry_95, + .arl_write_entry = b53_arl_write_entry_95, + .arl_search_read = b53_arl_search_read_95, +}; + struct b53_chip_data { u32 chip_id; const char *dev_name; @@ -2658,6 +2768,7 @@ struct 
b53_chip_data { u8 duplex_reg; u8 jumbo_pm_reg; u8 jumbo_size_reg; + const struct b53_arl_ops *arl_ops; }; #define B53_VTA_REGS \ @@ -2677,6 +2788,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_buckets = 1024, .imp_port = 5, .duplex_reg = B53_DUPLEX_STAT_FE, + .arl_ops = &b53_arl_ops_25, }, { .chip_id = BCM5365_DEVICE_ID, @@ -2687,6 +2799,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_buckets = 1024, .imp_port = 5, .duplex_reg = B53_DUPLEX_STAT_FE, + .arl_ops = &b53_arl_ops_25, }, { .chip_id = BCM5389_DEVICE_ID, @@ -2700,6 +2813,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_89, }, { .chip_id = BCM5395_DEVICE_ID, @@ -2713,6 +2827,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM5397_DEVICE_ID, @@ -2726,6 +2841,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_89, }, { .chip_id = BCM5398_DEVICE_ID, @@ -2739,6 +2855,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_89, }, { .chip_id = BCM53101_DEVICE_ID, @@ -2752,6 +2869,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53115_DEVICE_ID, @@ -2765,6 +2883,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = 
B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53125_DEVICE_ID, @@ -2778,6 +2897,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53128_DEVICE_ID, @@ -2791,19 +2911,21 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM63XX_DEVICE_ID, .dev_name = "BCM63xx", .vlans = 4096, .enabled_ports = 0, /* pdata must provide them */ - .arl_bins = 4, - .arl_buckets = 1024, + .arl_bins = 1, + .arl_buckets = 4096, .imp_port = 8, .vta_regs = B53_VTA_REGS_63XX, .duplex_reg = B53_DUPLEX_STAT_63XX, .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, + .arl_ops = &b53_arl_ops_63xx, }, { .chip_id = BCM53010_DEVICE_ID, @@ -2817,6 +2939,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53011_DEVICE_ID, @@ -2830,6 +2953,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53012_DEVICE_ID, @@ -2843,6 +2967,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53018_DEVICE_ID, @@ -2856,6 +2981,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = 
BCM53019_DEVICE_ID, @@ -2869,6 +2995,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM58XX_DEVICE_ID, @@ -2882,6 +3009,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM583XX_DEVICE_ID, @@ -2895,6 +3023,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, /* Starfighter 2 */ { @@ -2909,6 +3038,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM7445_DEVICE_ID, @@ -2922,6 +3052,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM7278_DEVICE_ID, @@ -2935,6 +3066,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, { .chip_id = BCM53134_DEVICE_ID, @@ -2949,6 +3081,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + .arl_ops = &b53_arl_ops_95, }, }; @@ -2977,6 +3110,7 @@ static int b53_switch_init(struct b53_device *dev) dev->num_vlans = chip->vlans; dev->num_arl_bins = chip->arl_bins; dev->num_arl_buckets = chip->arl_buckets; + dev->arl_ops = chip->arl_ops; break; } } diff --git 
a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 458775f95164..bd6849e5bb93 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -58,6 +58,17 @@ struct b53_io_ops { bool link_up); }; +struct b53_arl_entry; + +struct b53_arl_ops { + void (*arl_read_entry)(struct b53_device *dev, + struct b53_arl_entry *ent, u8 idx); + void (*arl_write_entry)(struct b53_device *dev, + const struct b53_arl_entry *ent, u8 idx); + void (*arl_search_read)(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent); +}; + #define B53_INVALID_LANE 0xff enum { @@ -127,6 +138,7 @@ struct b53_device { struct mutex stats_mutex; struct mutex arl_mutex; const struct b53_io_ops *ops; + const struct b53_arl_ops *arl_ops; /* chip specific data */ u32 chip_id; @@ -329,16 +341,30 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent, } static inline void b53_arl_to_entry_25(struct b53_arl_entry *ent, - u64 mac_vid) + u64 mac_vid, u8 vid_entry) { memset(ent, 0, sizeof(*ent)); - ent->port = (mac_vid >> ARLTBL_DATA_PORT_ID_S_25) & - ARLTBL_DATA_PORT_ID_MASK_25; ent->is_valid = !!(mac_vid & ARLTBL_VALID_25); ent->is_age = !!(mac_vid & ARLTBL_AGE_25); ent->is_static = !!(mac_vid & ARLTBL_STATIC_25); u64_to_ether_addr(mac_vid, ent->mac); - ent->vid = mac_vid >> ARLTBL_VID_S_65; + ent->port = (mac_vid & ARLTBL_DATA_PORT_ID_MASK_25) >> + ARLTBL_DATA_PORT_ID_S_25; + if (is_unicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT) + ent->port = B53_CPU_PORT_25; + ent->vid = vid_entry; +} + +static inline void b53_arl_to_entry_89(struct b53_arl_entry *ent, + u64 mac_vid, u16 fwd_entry) +{ + memset(ent, 0, sizeof(*ent)); + ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK_89; + ent->is_valid = !!(fwd_entry & ARLTBL_VALID_89); + ent->is_age = !!(fwd_entry & ARLTBL_AGE_89); + ent->is_static = !!(fwd_entry & ARLTBL_STATIC_89); + u64_to_ether_addr(mac_vid, ent->mac); + ent->vid = mac_vid >> ARLTBL_VID_S; } static inline void b53_arl_from_entry(u64 
*mac_vid, u32 *fwd_entry, @@ -355,20 +381,87 @@ static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry, *fwd_entry |= ARLTBL_AGE; } -static inline void b53_arl_from_entry_25(u64 *mac_vid, +static inline void b53_arl_from_entry_25(u64 *mac_vid, u8 *vid_entry, const struct b53_arl_entry *ent) { *mac_vid = ether_addr_to_u64(ent->mac); - *mac_vid |= (u64)(ent->port & ARLTBL_DATA_PORT_ID_MASK_25) << - ARLTBL_DATA_PORT_ID_S_25; - *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK_25) << - ARLTBL_VID_S_65; + if (is_unicast_ether_addr(ent->mac) && ent->port == B53_CPU_PORT_25) + *mac_vid |= (u64)B53_CPU_PORT << ARLTBL_DATA_PORT_ID_S_25; + else + *mac_vid |= ((u64)ent->port << ARLTBL_DATA_PORT_ID_S_25) & + ARLTBL_DATA_PORT_ID_MASK_25; if (ent->is_valid) *mac_vid |= ARLTBL_VALID_25; if (ent->is_static) *mac_vid |= ARLTBL_STATIC_25; if (ent->is_age) *mac_vid |= ARLTBL_AGE_25; + *vid_entry = ent->vid; +} + +static inline void b53_arl_from_entry_89(u64 *mac_vid, u32 *fwd_entry, + const struct b53_arl_entry *ent) +{ + *mac_vid = ether_addr_to_u64(ent->mac); + *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S; + *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK_89; + if (ent->is_valid) + *fwd_entry |= ARLTBL_VALID_89; + if (ent->is_static) + *fwd_entry |= ARLTBL_STATIC_89; + if (ent->is_age) + *fwd_entry |= ARLTBL_AGE_89; +} + +static inline void b53_arl_search_to_entry_25(struct b53_arl_entry *ent, + u64 mac_vid, u8 ext) +{ + memset(ent, 0, sizeof(*ent)); + ent->is_valid = !!(mac_vid & ARLTBL_VALID_25); + ent->is_age = !!(mac_vid & ARLTBL_AGE_25); + ent->is_static = !!(mac_vid & ARLTBL_STATIC_25); + u64_to_ether_addr(mac_vid, ent->mac); + ent->vid = (mac_vid & ARL_SRCH_RSLT_VID_MASK_25) >> + ARL_SRCH_RSLT_VID_S_25; + ent->port = (mac_vid & ARL_SRCH_RSLT_PORT_ID_MASK_25) >> + ARL_SRCH_RSLT_PORT_ID_S_25; + if (is_multicast_ether_addr(ent->mac) && (ext & ARL_SRCH_RSLT_EXT_MC_MII)) + ent->port |= BIT(B53_CPU_PORT_25); + else if (!is_multicast_ether_addr(ent->mac) 
&& ent->port == B53_CPU_PORT) + ent->port = B53_CPU_PORT_25; +} + +static inline void b53_arl_search_to_entry_63xx(struct b53_arl_entry *ent, + u64 mac_vid, u16 fwd_entry) +{ + memset(ent, 0, sizeof(*ent)); + u64_to_ether_addr(mac_vid, ent->mac); + ent->vid = mac_vid >> ARLTBL_VID_S; + + ent->port = fwd_entry & ARL_SRST_PORT_ID_MASK_63XX; + ent->port >>= 1; + + ent->is_age = !!(fwd_entry & ARL_SRST_AGE_63XX); + ent->is_static = !!(fwd_entry & ARL_SRST_STATIC_63XX); + ent->is_valid = 1; +} + +static inline void b53_arl_read_entry(struct b53_device *dev, + struct b53_arl_entry *ent, u8 idx) +{ + dev->arl_ops->arl_read_entry(dev, ent, idx); +} + +static inline void b53_arl_write_entry(struct b53_device *dev, + const struct b53_arl_entry *ent, u8 idx) +{ + dev->arl_ops->arl_write_entry(dev, ent, idx); +} + +static inline void b53_arl_search_read(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) +{ + dev->arl_ops->arl_search_read(dev, idx, ent); } #ifdef CONFIG_BCM47XX diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index 8ce1ce72e938..54a278db67c9 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -119,6 +119,10 @@ #define B53_SWITCH_CTRL 0x22 #define B53_MII_DUMB_FWDG_EN BIT(6) +/* Protected Port Selection (16 bit) */ +#define B53_PROTECTED_PORT_SEL 0x24 +#define B53_PROTECTED_PORT_SEL_25 0x26 + /* (16 bit) */ #define B53_UC_FLOOD_MASK 0x32 #define B53_MC_FLOOD_MASK 0x34 @@ -325,11 +329,9 @@ #define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10) #define ARLTBL_MAC_MASK 0xffffffffffffULL #define ARLTBL_VID_S 48 -#define ARLTBL_VID_MASK_25 0xff #define ARLTBL_VID_MASK 0xfff #define ARLTBL_DATA_PORT_ID_S_25 48 -#define ARLTBL_DATA_PORT_ID_MASK_25 0xf -#define ARLTBL_VID_S_65 53 +#define ARLTBL_DATA_PORT_ID_MASK_25 GENMASK_ULL(53, 48) #define ARLTBL_AGE_25 BIT_ULL(61) #define ARLTBL_STATIC_25 BIT_ULL(62) #define ARLTBL_VALID_25 BIT_ULL(63) @@ -342,12 +344,23 @@ #define ARLTBL_STATIC BIT(15) 
#define ARLTBL_VALID BIT(16) +/* BCM5389 ARL Table Data Entry N Register format (16 bit) */ +#define ARLTBL_DATA_PORT_ID_MASK_89 GENMASK(8, 0) +#define ARLTBL_TC_MASK_89 GENMASK(12, 10) +#define ARLTBL_AGE_89 BIT(13) +#define ARLTBL_STATIC_89 BIT(14) +#define ARLTBL_VALID_89 BIT(15) + +/* BCM5325/BCM565 ARL Table VID Entry N Registers (8 bit) */ +#define B53_ARLTBL_VID_ENTRY_25(n) ((0x2 * (n)) + 0x30) + /* Maximum number of bin entries in the ARL for all switches */ #define B53_ARLTBL_MAX_BIN_ENTRIES 4 /* ARL Search Control Register (8 bit) */ #define B53_ARL_SRCH_CTL 0x50 #define B53_ARL_SRCH_CTL_25 0x20 +#define B53_ARL_SRCH_CTL_89 0x30 #define ARL_SRCH_VLID BIT(0) #define ARL_SRCH_STDN BIT(7) @@ -355,22 +368,42 @@ #define B53_ARL_SRCH_ADDR 0x51 #define B53_ARL_SRCH_ADDR_25 0x22 #define B53_ARL_SRCH_ADDR_65 0x24 +#define B53_ARL_SRCH_ADDR_89 0x31 +#define B53_ARL_SRCH_ADDR_63XX 0x32 #define ARL_ADDR_MASK GENMASK(14, 0) /* ARL Search MAC/VID Result (64 bit) */ #define B53_ARL_SRCH_RSTL_0_MACVID 0x60 +#define B53_ARL_SRCH_RSLT_MACVID_89 0x33 +#define B53_ARL_SRCH_RSLT_MACVID_63XX 0x34 -/* Single register search result on 5325 */ +/* Single register search result on 5325/5365 */ #define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24 -/* Single register search result on 5365 */ -#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30 +#define ARL_SRCH_RSLT_PORT_ID_S_25 48 +#define ARL_SRCH_RSLT_PORT_ID_MASK_25 GENMASK_ULL(52, 48) +#define ARL_SRCH_RSLT_VID_S_25 53 +#define ARL_SRCH_RSLT_VID_MASK_25 GENMASK_ULL(60, 53) + +/* BCM5325/5365 Search result extend register (8 bit) */ +#define B53_ARL_SRCH_RSLT_EXT_25 0x2c +#define ARL_SRCH_RSLT_EXT_MC_MII BIT(2) /* ARL Search Data Result (32 bit) */ #define B53_ARL_SRCH_RSTL_0 0x68 +/* BCM5389 ARL Search Data Result (16 bit) */ +#define B53_ARL_SRCH_RSLT_89 0x3b + #define B53_ARL_SRCH_RSTL_MACVID(x) (B53_ARL_SRCH_RSTL_0_MACVID + ((x) * 0x10)) #define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10)) +/* 63XX ARL Search Data Result (16 bit) 
*/ +#define B53_ARL_SRCH_RSLT_63XX 0x3c +#define ARL_SRST_PORT_ID_MASK_63XX GENMASK(9, 1) +#define ARL_SRST_TC_MASK_63XX GENMASK(13, 11) +#define ARL_SRST_AGE_63XX BIT(14) +#define ARL_SRST_STATIC_63XX BIT(15) + /************************************************************************* * IEEE 802.1X Registers *************************************************************************/ diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 650d93226d9f..4a416f2717ba 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -441,11 +441,6 @@ out: static int __init dsa_loop_init(void) { - struct fixed_phy_status status = { - .link = 1, - .speed = SPEED_100, - .duplex = DUPLEX_FULL, - }; unsigned int i; int ret; @@ -454,7 +449,7 @@ static int __init dsa_loop_init(void) return ret; for (i = 0; i < NUM_FIXED_PHYS; i++) - phydevs[i] = fixed_phy_register(&status, NULL); + phydevs[i] = fixed_phy_register_100fd(); ret = mdio_driver_register(&dsa_loop_drv); if (ret) { diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index e0b4758ca583..dd5f263ab984 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -1926,6 +1926,8 @@ static const struct dsa_switch_ops hellcreek_ds_ops = { .port_vlan_filtering = hellcreek_vlan_filtering, .setup = hellcreek_setup, .teardown = hellcreek_teardown, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; static int hellcreek_probe(struct platform_device *pdev) diff --git a/drivers/net/dsa/ks8995.c b/drivers/net/dsa/ks8995.c index 5c4c83e00477..77d8b842693c 100644 --- a/drivers/net/dsa/ks8995.c +++ b/drivers/net/dsa/ks8995.c @@ -203,13 +203,13 @@ static const struct spi_device_id ks8995_id[] = { }; MODULE_DEVICE_TABLE(spi, ks8995_id); -static const struct of_device_id ks8895_spi_of_match[] = { +static const struct of_device_id ks8995_spi_of_match[] = { { .compatible = "micrel,ks8995" 
}, { .compatible = "micrel,ksz8864" }, { .compatible = "micrel,ksz8795" }, { }, }; -MODULE_DEVICE_TABLE(of, ks8895_spi_of_match); +MODULE_DEVICE_TABLE(of, ks8995_spi_of_match); static inline u8 get_chip_id(u8 val) { @@ -842,7 +842,7 @@ static void ks8995_remove(struct spi_device *spi) static struct spi_driver ks8995_driver = { .driver = { .name = "spi-ks8995", - .of_match_table = ks8895_spi_of_match, + .of_match_table = ks8995_spi_of_match, }, .probe = ks8995_probe, .remove = ks8995_remove, diff --git a/drivers/net/dsa/lantiq/Kconfig b/drivers/net/dsa/lantiq/Kconfig index 1cb053c823f7..4a9771be5d58 100644 --- a/drivers/net/dsa/lantiq/Kconfig +++ b/drivers/net/dsa/lantiq/Kconfig @@ -1,7 +1,24 @@ +config NET_DSA_LANTIQ_COMMON + tristate + select REGMAP + config NET_DSA_LANTIQ_GSWIP tristate "Lantiq / Intel GSWIP" depends on HAS_IOMEM select NET_DSA_TAG_GSWIP + select NET_DSA_LANTIQ_COMMON help This enables support for the Lantiq / Intel GSWIP 2.1 found in the xrx200 / VR9 SoC. + +config NET_DSA_MXL_GSW1XX + tristate "MaxLinear GSW1xx Ethernet switch support" + select NET_DSA_TAG_MXL_GSW1XX + select NET_DSA_LANTIQ_COMMON + help + This enables support for the MaxLinear GSW1xx family of 1GE switches + GSW120 4 port, 2 PHYs, RGMII & SGMII/2500Base-X + GSW125 4 port, 2 PHYs, RGMII & SGMII/2500Base-X, industrial temperature + GSW140 6 port, 4 PHYs, RGMII & SGMII/2500Base-X + GSW141 6 port, 4 PHYs, RGMII & SGMII + GSW145 6 port, 4 PHYs, RGMII & SGMII/2500Base-X, industrial temperature diff --git a/drivers/net/dsa/lantiq/Makefile b/drivers/net/dsa/lantiq/Makefile index 849f85ebebd6..85fce605310b 100644 --- a/drivers/net/dsa/lantiq/Makefile +++ b/drivers/net/dsa/lantiq/Makefile @@ -1 +1,3 @@ obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o +obj-$(CONFIG_NET_DSA_LANTIQ_COMMON) += lantiq_gswip_common.o +obj-$(CONFIG_NET_DSA_MXL_GSW1XX) += mxl-gsw1xx.o diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.c b/drivers/net/dsa/lantiq/lantiq_gswip.c index 2169c0814a48..57dd063c0740 
100644 --- a/drivers/net/dsa/lantiq/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq/lantiq_gswip.c @@ -2,1282 +2,33 @@ /* * Lantiq / Intel GSWIP switch driver for VRX200, xRX300 and xRX330 SoCs * - * Copyright (C) 2010 Lantiq Deutschland - * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org> * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de> - * - * The VLAN and bridge model the GSWIP hardware uses does not directly - * matches the model DSA uses. - * - * The hardware has 64 possible table entries for bridges with one VLAN - * ID, one flow id and a list of ports for each bridge. All entries which - * match the same flow ID are combined in the mac learning table, they - * act as one global bridge. - * The hardware does not support VLAN filter on the port, but on the - * bridge, this driver converts the DSA model to the hardware. - * - * The CPU gets all the exception frames which do not match any forwarding - * rule and the CPU port is also added to all bridges. This makes it possible - * to handle all the special cases easily in software. - * At the initialization the driver allocates one bridge table entry for - * each switch port which is used when the port is used without an - * explicit bridge. This prevents the frames from being forwarded - * between all LAN ports by default. 
+ * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2010 Lantiq Deutschland */ #include "lantiq_gswip.h" #include "lantiq_pce.h" +#include <linux/clk.h> #include <linux/delay.h> -#include <linux/etherdevice.h> #include <linux/firmware.h> -#include <linux/if_bridge.h> -#include <linux/if_vlan.h> -#include <linux/iopoll.h> #include <linux/mfd/syscon.h> #include <linux/module.h> -#include <linux/of_mdio.h> -#include <linux/of_net.h> #include <linux/of_platform.h> -#include <linux/phy.h> -#include <linux/phylink.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset.h> #include <dt-bindings/mips/lantiq_rcu_gphy.h> +#include <net/dsa.h> + struct xway_gphy_match_data { char *fe_firmware_name; char *ge_firmware_name; }; -struct gswip_pce_table_entry { - u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index - u16 table; // PCE_TBL_CTRL.ADDR = pData->table - u16 key[8]; - u16 val[5]; - u16 mask; - u8 gmap; - bool type; - bool valid; - bool key_mode; -}; - -struct gswip_rmon_cnt_desc { - unsigned int size; - unsigned int offset; - const char *name; -}; - -#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name} - -static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = { - /** Receive Packet Count (only packets that are accepted and not discarded). 
*/ - MIB_DESC(1, 0x1F, "RxGoodPkts"), - MIB_DESC(1, 0x23, "RxUnicastPkts"), - MIB_DESC(1, 0x22, "RxMulticastPkts"), - MIB_DESC(1, 0x21, "RxFCSErrorPkts"), - MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"), - MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"), - MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"), - MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"), - MIB_DESC(1, 0x20, "RxGoodPausePkts"), - MIB_DESC(1, 0x1A, "RxAlignErrorPkts"), - MIB_DESC(1, 0x12, "Rx64BytePkts"), - MIB_DESC(1, 0x13, "Rx127BytePkts"), - MIB_DESC(1, 0x14, "Rx255BytePkts"), - MIB_DESC(1, 0x15, "Rx511BytePkts"), - MIB_DESC(1, 0x16, "Rx1023BytePkts"), - /** Receive Size 1024-1522 (or more, if configured) Packet Count. */ - MIB_DESC(1, 0x17, "RxMaxBytePkts"), - MIB_DESC(1, 0x18, "RxDroppedPkts"), - MIB_DESC(1, 0x19, "RxFilteredPkts"), - MIB_DESC(2, 0x24, "RxGoodBytes"), - MIB_DESC(2, 0x26, "RxBadBytes"), - MIB_DESC(1, 0x11, "TxAcmDroppedPkts"), - MIB_DESC(1, 0x0C, "TxGoodPkts"), - MIB_DESC(1, 0x06, "TxUnicastPkts"), - MIB_DESC(1, 0x07, "TxMulticastPkts"), - MIB_DESC(1, 0x00, "Tx64BytePkts"), - MIB_DESC(1, 0x01, "Tx127BytePkts"), - MIB_DESC(1, 0x02, "Tx255BytePkts"), - MIB_DESC(1, 0x03, "Tx511BytePkts"), - MIB_DESC(1, 0x04, "Tx1023BytePkts"), - /** Transmit Size 1024-1522 (or more, if configured) Packet Count. 
*/ - MIB_DESC(1, 0x05, "TxMaxBytePkts"), - MIB_DESC(1, 0x08, "TxSingleCollCount"), - MIB_DESC(1, 0x09, "TxMultCollCount"), - MIB_DESC(1, 0x0A, "TxLateCollCount"), - MIB_DESC(1, 0x0B, "TxExcessCollCount"), - MIB_DESC(1, 0x0D, "TxPauseCount"), - MIB_DESC(1, 0x10, "TxDroppedPkts"), - MIB_DESC(2, 0x0E, "TxGoodBytes"), -}; - -static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset) -{ - return __raw_readl(priv->gswip + (offset * 4)); -} - -static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset) -{ - __raw_writel(val, priv->gswip + (offset * 4)); -} - -static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set, - u32 offset) -{ - u32 val = gswip_switch_r(priv, offset); - - val &= ~(clear); - val |= set; - gswip_switch_w(priv, val, offset); -} - -static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset, - u32 cleared) -{ - u32 val; - - return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val, - (val & cleared) == 0, 20, 50000); -} - -static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset) -{ - return __raw_readl(priv->mdio + (offset * 4)); -} - -static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset) -{ - __raw_writel(val, priv->mdio + (offset * 4)); -} - -static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set, - u32 offset) -{ - u32 val = gswip_mdio_r(priv, offset); - - val &= ~(clear); - val |= set; - gswip_mdio_w(priv, val, offset); -} - -static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset) -{ - return __raw_readl(priv->mii + (offset * 4)); -} - -static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset) -{ - __raw_writel(val, priv->mii + (offset * 4)); -} - -static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set, - u32 offset) -{ - u32 val = gswip_mii_r(priv, offset); - - val &= ~(clear); - val |= set; - gswip_mii_w(priv, val, offset); -} - -static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set, - int port) -{ - 
int reg_port; - - /* MII_CFG register only exists for MII ports */ - if (!(priv->hw_info->mii_ports & BIT(port))) - return; - - reg_port = port + priv->hw_info->mii_port_reg_offset; - - gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(reg_port)); -} - -static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set, - int port) -{ - int reg_port; - - /* MII_PCDU register only exists for MII ports */ - if (!(priv->hw_info->mii_ports & BIT(port))) - return; - - reg_port = port + priv->hw_info->mii_port_reg_offset; - - switch (reg_port) { - case 0: - gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0); - break; - case 1: - gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1); - break; - case 5: - gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5); - break; - } -} - -static int gswip_mdio_poll(struct gswip_priv *priv) -{ - int cnt = 100; - - while (likely(cnt--)) { - u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL); - - if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0) - return 0; - usleep_range(20, 40); - } - - return -ETIMEDOUT; -} - -static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val) -{ - struct gswip_priv *priv = bus->priv; - int err; - - err = gswip_mdio_poll(priv); - if (err) { - dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); - return err; - } - - gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE); - gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR | - ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | - (reg & GSWIP_MDIO_CTRL_REGAD_MASK), - GSWIP_MDIO_CTRL); - - return 0; -} - -static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg) -{ - struct gswip_priv *priv = bus->priv; - int err; - - err = gswip_mdio_poll(priv); - if (err) { - dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); - return err; - } - - gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD | - ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | - (reg & GSWIP_MDIO_CTRL_REGAD_MASK), - 
GSWIP_MDIO_CTRL); - - err = gswip_mdio_poll(priv); - if (err) { - dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); - return err; - } - - return gswip_mdio_r(priv, GSWIP_MDIO_READ); -} - -static int gswip_mdio(struct gswip_priv *priv) -{ - struct device_node *mdio_np, *switch_np = priv->dev->of_node; - struct device *dev = priv->dev; - struct mii_bus *bus; - int err = 0; - - mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio"); - if (!mdio_np) - mdio_np = of_get_child_by_name(switch_np, "mdio"); - - if (!of_device_is_available(mdio_np)) - goto out_put_node; - - bus = devm_mdiobus_alloc(dev); - if (!bus) { - err = -ENOMEM; - goto out_put_node; - } - - bus->priv = priv; - bus->read = gswip_mdio_rd; - bus->write = gswip_mdio_wr; - bus->name = "lantiq,xrx200-mdio"; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev)); - bus->parent = priv->dev; - - err = devm_of_mdiobus_register(dev, bus, mdio_np); - -out_put_node: - of_node_put(mdio_np); - - return err; -} - -static int gswip_pce_table_entry_read(struct gswip_priv *priv, - struct gswip_pce_table_entry *tbl) -{ - int i; - int err; - u16 crtl; - u16 addr_mode = tbl->key_mode ? 
GSWIP_PCE_TBL_CTRL_OPMOD_KSRD : - GSWIP_PCE_TBL_CTRL_OPMOD_ADRD; - - mutex_lock(&priv->pce_table_lock); - - err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, - GSWIP_PCE_TBL_CTRL_BAS); - if (err) { - mutex_unlock(&priv->pce_table_lock); - return err; - } - - gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR); - gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | - GSWIP_PCE_TBL_CTRL_OPMOD_MASK, - tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS, - GSWIP_PCE_TBL_CTRL); - - err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, - GSWIP_PCE_TBL_CTRL_BAS); - if (err) { - mutex_unlock(&priv->pce_table_lock); - return err; - } - - for (i = 0; i < ARRAY_SIZE(tbl->key); i++) - tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i)); - - for (i = 0; i < ARRAY_SIZE(tbl->val); i++) - tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i)); - - tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK); - - crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL); - - tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE); - tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD); - tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7; - - mutex_unlock(&priv->pce_table_lock); - - return 0; -} - -static int gswip_pce_table_entry_write(struct gswip_priv *priv, - struct gswip_pce_table_entry *tbl) -{ - int i; - int err; - u16 crtl; - u16 addr_mode = tbl->key_mode ? 
GSWIP_PCE_TBL_CTRL_OPMOD_KSWR : - GSWIP_PCE_TBL_CTRL_OPMOD_ADWR; - - mutex_lock(&priv->pce_table_lock); - - err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, - GSWIP_PCE_TBL_CTRL_BAS); - if (err) { - mutex_unlock(&priv->pce_table_lock); - return err; - } - - gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR); - gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | - GSWIP_PCE_TBL_CTRL_OPMOD_MASK, - tbl->table | addr_mode, - GSWIP_PCE_TBL_CTRL); - - for (i = 0; i < ARRAY_SIZE(tbl->key); i++) - gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i)); - - for (i = 0; i < ARRAY_SIZE(tbl->val); i++) - gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i)); - - gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | - GSWIP_PCE_TBL_CTRL_OPMOD_MASK, - tbl->table | addr_mode, - GSWIP_PCE_TBL_CTRL); - - gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK); - - crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL); - crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD | - GSWIP_PCE_TBL_CTRL_GMAP_MASK); - if (tbl->type) - crtl |= GSWIP_PCE_TBL_CTRL_TYPE; - if (tbl->valid) - crtl |= GSWIP_PCE_TBL_CTRL_VLD; - crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK; - crtl |= GSWIP_PCE_TBL_CTRL_BAS; - gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL); - - err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, - GSWIP_PCE_TBL_CTRL_BAS); - - mutex_unlock(&priv->pce_table_lock); - - return err; -} - -/* Add the LAN port into a bridge with the CPU port by - * default. This prevents automatic forwarding of - * packages between the LAN ports when no explicit - * bridge is configured. 
- */ -static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add) -{ - struct gswip_pce_table_entry vlan_active = {0,}; - struct gswip_pce_table_entry vlan_mapping = {0,}; - int err; - - vlan_active.index = port + 1; - vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; - vlan_active.key[0] = 0; /* vid */ - vlan_active.val[0] = port + 1 /* fid */; - vlan_active.valid = add; - err = gswip_pce_table_entry_write(priv, &vlan_active); - if (err) { - dev_err(priv->dev, "failed to write active VLAN: %d\n", err); - return err; - } - - if (!add) - return 0; - - vlan_mapping.index = port + 1; - vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; - vlan_mapping.val[0] = 0 /* vid */; - vlan_mapping.val[1] = BIT(port) | dsa_cpu_ports(priv->ds); - vlan_mapping.val[2] = 0; - err = gswip_pce_table_entry_write(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); - return err; - } - - return 0; -} - -static int gswip_port_setup(struct dsa_switch *ds, int port) -{ - struct gswip_priv *priv = ds->priv; - int err; - - if (!dsa_is_cpu_port(ds, port)) { - err = gswip_add_single_port_br(priv, port, true); - if (err) - return err; - } - - return 0; -} - -static int gswip_port_enable(struct dsa_switch *ds, int port, - struct phy_device *phydev) -{ - struct gswip_priv *priv = ds->priv; - - if (!dsa_is_cpu_port(ds, port)) { - u32 mdio_phy = 0; - - if (phydev) - mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK; - - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy, - GSWIP_MDIO_PHYp(port)); - } - - /* RMON Counter Enable for port */ - gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port)); - - /* enable port fetch/store dma & VLAN Modification */ - gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN | - GSWIP_FDMA_PCTRL_VLANMOD_BOTH, - GSWIP_FDMA_PCTRLp(port)); - gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN, - GSWIP_SDMA_PCTRLp(port)); - - return 0; -} - -static void gswip_port_disable(struct dsa_switch *ds, int port) 
-{ - struct gswip_priv *priv = ds->priv; - - gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0, - GSWIP_FDMA_PCTRLp(port)); - gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0, - GSWIP_SDMA_PCTRLp(port)); -} - -static int gswip_pce_load_microcode(struct gswip_priv *priv) -{ - int i; - int err; - - gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | - GSWIP_PCE_TBL_CTRL_OPMOD_MASK, - GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL); - gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK); - - for (i = 0; i < priv->hw_info->pce_microcode_size; i++) { - gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR); - gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_0, - GSWIP_PCE_TBL_VAL(0)); - gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_1, - GSWIP_PCE_TBL_VAL(1)); - gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_2, - GSWIP_PCE_TBL_VAL(2)); - gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_3, - GSWIP_PCE_TBL_VAL(3)); - - /* start the table access: */ - gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS, - GSWIP_PCE_TBL_CTRL); - err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, - GSWIP_PCE_TBL_CTRL_BAS); - if (err) - return err; - } - - /* tell the switch that the microcode is loaded */ - gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID, - GSWIP_PCE_GCTRL_0); - - return 0; -} - -static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port, - bool vlan_filtering, - struct netlink_ext_ack *extack) -{ - struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); - struct gswip_priv *priv = ds->priv; - - /* Do not allow changing the VLAN filtering options while in bridge */ - if (bridge && !!(priv->port_vlan_filter & BIT(port)) != vlan_filtering) { - NL_SET_ERR_MSG_MOD(extack, - "Dynamic toggling of vlan_filtering not supported"); - return -EIO; - } - - if (vlan_filtering) { - /* Use tag based VLAN */ - gswip_switch_mask(priv, - GSWIP_PCE_VCTRL_VSR, - GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR | - 
GSWIP_PCE_VCTRL_VEMR, - GSWIP_PCE_VCTRL(port)); - gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0, - GSWIP_PCE_PCTRL_0p(port)); - } else { - /* Use port based VLAN */ - gswip_switch_mask(priv, - GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR | - GSWIP_PCE_VCTRL_VEMR, - GSWIP_PCE_VCTRL_VSR, - GSWIP_PCE_VCTRL(port)); - gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM, - GSWIP_PCE_PCTRL_0p(port)); - } - - return 0; -} - -static int gswip_setup(struct dsa_switch *ds) -{ - unsigned int cpu_ports = dsa_cpu_ports(ds); - struct gswip_priv *priv = ds->priv; - struct dsa_port *cpu_dp; - int err, i; - - gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES); - usleep_range(5000, 10000); - gswip_switch_w(priv, 0, GSWIP_SWRES); - - /* disable port fetch/store dma on all ports */ - for (i = 0; i < priv->hw_info->max_ports; i++) { - gswip_port_disable(ds, i); - gswip_port_vlan_filtering(ds, i, false, NULL); - } - - /* enable Switch */ - gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB); - - err = gswip_pce_load_microcode(priv); - if (err) { - dev_err(priv->dev, "writing PCE microcode failed, %i\n", err); - return err; - } - - /* Default unknown Broadcast/Multicast/Unicast port maps */ - gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP1); - gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP2); - gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP3); - - /* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an - * interoperability problem with this auto polling mechanism because - * their status registers think that the link is in a different state - * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set - * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the - * auto polling state machine consider the link being negotiated with - * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads - * to the switch port being completely dead (RX and TX are both not - * working). 
- * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F - * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes - * it would work fine for a few minutes to hours and then stop, on - * other device it would no traffic could be sent or received at all. - * Testing shows that when PHY auto polling is disabled these problems - * go away. - */ - gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0); - - /* Configure the MDIO Clock 2.5 MHz */ - gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1); - - /* bring up the mdio bus */ - err = gswip_mdio(priv); - if (err) { - dev_err(priv->dev, "mdio bus setup failed\n"); - return err; - } - - /* Disable the xMII interface and clear it's isolation bit */ - for (i = 0; i < priv->hw_info->max_ports; i++) - gswip_mii_mask_cfg(priv, - GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE, - 0, i); - - dsa_switch_for_each_cpu_port(cpu_dp, ds) { - /* enable special tag insertion on cpu port */ - gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN, - GSWIP_FDMA_PCTRLp(cpu_dp->index)); - - /* accept special tag in ingress direction */ - gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS, - GSWIP_PCE_PCTRL_0p(cpu_dp->index)); - } - - gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD, - GSWIP_BM_QUEUE_GCTRL); - - /* VLAN aware Switching */ - gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0); - - /* Flush MAC Table */ - gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0); - - err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0, - GSWIP_PCE_GCTRL_0_MTFL); - if (err) { - dev_err(priv->dev, "MAC flushing didn't finish\n"); - return err; - } - - ds->mtu_enforcement_ingress = true; - - ds->configure_vlan_while_not_filtering = false; - - return 0; -} - -static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds, - int port, - enum dsa_tag_protocol mp) -{ - struct gswip_priv *priv = ds->priv; - - return priv->hw_info->tag_protocol; -} - -static int 
gswip_vlan_active_create(struct gswip_priv *priv, - struct net_device *bridge, - int fid, u16 vid) -{ - struct gswip_pce_table_entry vlan_active = {0,}; - unsigned int max_ports = priv->hw_info->max_ports; - int idx = -1; - int err; - int i; - - /* Look for a free slot */ - for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { - if (!priv->vlans[i].bridge) { - idx = i; - break; - } - } - - if (idx == -1) - return -ENOSPC; - - if (fid == -1) - fid = idx; - - vlan_active.index = idx; - vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; - vlan_active.key[0] = vid; - vlan_active.val[0] = fid; - vlan_active.valid = true; - - err = gswip_pce_table_entry_write(priv, &vlan_active); - if (err) { - dev_err(priv->dev, "failed to write active VLAN: %d\n", err); - return err; - } - - priv->vlans[idx].bridge = bridge; - priv->vlans[idx].vid = vid; - priv->vlans[idx].fid = fid; - - return idx; -} - -static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx) -{ - struct gswip_pce_table_entry vlan_active = {0,}; - int err; - - vlan_active.index = idx; - vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; - vlan_active.valid = false; - err = gswip_pce_table_entry_write(priv, &vlan_active); - if (err) - dev_err(priv->dev, "failed to delete active VLAN: %d\n", err); - priv->vlans[idx].bridge = NULL; - - return err; -} - -static int gswip_vlan_add_unaware(struct gswip_priv *priv, - struct net_device *bridge, int port) -{ - struct gswip_pce_table_entry vlan_mapping = {0,}; - unsigned int max_ports = priv->hw_info->max_ports; - bool active_vlan_created = false; - int idx = -1; - int i; - int err; - - /* Check if there is already a page for this bridge */ - for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { - if (priv->vlans[i].bridge == bridge) { - idx = i; - break; - } - } - - /* If this bridge is not programmed yet, add a Active VLAN table - * entry in a free slot and prepare the VLAN mapping table entry. 
- */ - if (idx == -1) { - idx = gswip_vlan_active_create(priv, bridge, -1, 0); - if (idx < 0) - return idx; - active_vlan_created = true; - - vlan_mapping.index = idx; - vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; - /* VLAN ID byte, maps to the VLAN ID of vlan active table */ - vlan_mapping.val[0] = 0; - } else { - /* Read the existing VLAN mapping entry from the switch */ - vlan_mapping.index = idx; - vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; - err = gswip_pce_table_entry_read(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to read VLAN mapping: %d\n", - err); - return err; - } - } - - /* Update the VLAN mapping entry and write it to the switch */ - vlan_mapping.val[1] |= dsa_cpu_ports(priv->ds); - vlan_mapping.val[1] |= BIT(port); - err = gswip_pce_table_entry_write(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); - /* In case an Active VLAN was creaetd delete it again */ - if (active_vlan_created) - gswip_vlan_active_remove(priv, idx); - return err; - } - - gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port)); - return 0; -} - -static int gswip_vlan_add_aware(struct gswip_priv *priv, - struct net_device *bridge, int port, - u16 vid, bool untagged, - bool pvid) -{ - struct gswip_pce_table_entry vlan_mapping = {0,}; - unsigned int max_ports = priv->hw_info->max_ports; - unsigned int cpu_ports = dsa_cpu_ports(priv->ds); - bool active_vlan_created = false; - int idx = -1; - int fid = -1; - int i; - int err; - - /* Check if there is already a page for this bridge */ - for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { - if (priv->vlans[i].bridge == bridge) { - if (fid != -1 && fid != priv->vlans[i].fid) - dev_err(priv->dev, "one bridge with multiple flow ids\n"); - fid = priv->vlans[i].fid; - if (priv->vlans[i].vid == vid) { - idx = i; - break; - } - } - } - - /* If this bridge is not programmed yet, add a Active VLAN table - * entry in a free slot and prepare the VLAN mapping table entry. 
- */ - if (idx == -1) { - idx = gswip_vlan_active_create(priv, bridge, fid, vid); - if (idx < 0) - return idx; - active_vlan_created = true; - - vlan_mapping.index = idx; - vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; - /* VLAN ID byte, maps to the VLAN ID of vlan active table */ - vlan_mapping.val[0] = vid; - } else { - /* Read the existing VLAN mapping entry from the switch */ - vlan_mapping.index = idx; - vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; - err = gswip_pce_table_entry_read(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to read VLAN mapping: %d\n", - err); - return err; - } - } - - vlan_mapping.val[0] = vid; - /* Update the VLAN mapping entry and write it to the switch */ - vlan_mapping.val[1] |= cpu_ports; - vlan_mapping.val[2] |= cpu_ports; - vlan_mapping.val[1] |= BIT(port); - if (untagged) - vlan_mapping.val[2] &= ~BIT(port); - else - vlan_mapping.val[2] |= BIT(port); - err = gswip_pce_table_entry_write(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); - /* In case an Active VLAN was creaetd delete it again */ - if (active_vlan_created) - gswip_vlan_active_remove(priv, idx); - return err; - } - - if (pvid) - gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port)); - - return 0; -} - -static int gswip_vlan_remove(struct gswip_priv *priv, - struct net_device *bridge, int port, - u16 vid, bool pvid, bool vlan_aware) -{ - struct gswip_pce_table_entry vlan_mapping = {0,}; - unsigned int max_ports = priv->hw_info->max_ports; - int idx = -1; - int i; - int err; - - /* Check if there is already a page for this bridge */ - for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { - if (priv->vlans[i].bridge == bridge && - (!vlan_aware || priv->vlans[i].vid == vid)) { - idx = i; - break; - } - } - - if (idx == -1) { - dev_err(priv->dev, "bridge to leave does not exists\n"); - return -ENOENT; - } - - vlan_mapping.index = idx; - vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; - err = 
gswip_pce_table_entry_read(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err); - return err; - } - - vlan_mapping.val[1] &= ~BIT(port); - vlan_mapping.val[2] &= ~BIT(port); - err = gswip_pce_table_entry_write(priv, &vlan_mapping); - if (err) { - dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); - return err; - } - - /* In case all ports are removed from the bridge, remove the VLAN */ - if (!(vlan_mapping.val[1] & ~dsa_cpu_ports(priv->ds))) { - err = gswip_vlan_active_remove(priv, idx); - if (err) { - dev_err(priv->dev, "failed to write active VLAN: %d\n", - err); - return err; - } - } - - /* GSWIP 2.2 (GRX300) and later program here the VID directly. */ - if (pvid) - gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port)); - - return 0; -} - -static int gswip_port_bridge_join(struct dsa_switch *ds, int port, - struct dsa_bridge bridge, - bool *tx_fwd_offload, - struct netlink_ext_ack *extack) -{ - struct net_device *br = bridge.dev; - struct gswip_priv *priv = ds->priv; - int err; - - /* When the bridge uses VLAN filtering we have to configure VLAN - * specific bridges. No bridge is configured here. - */ - if (!br_vlan_enabled(br)) { - err = gswip_vlan_add_unaware(priv, br, port); - if (err) - return err; - priv->port_vlan_filter &= ~BIT(port); - } else { - priv->port_vlan_filter |= BIT(port); - } - return gswip_add_single_port_br(priv, port, false); -} - -static void gswip_port_bridge_leave(struct dsa_switch *ds, int port, - struct dsa_bridge bridge) -{ - struct net_device *br = bridge.dev; - struct gswip_priv *priv = ds->priv; - - gswip_add_single_port_br(priv, port, true); - - /* When the bridge uses VLAN filtering we have to configure VLAN - * specific bridges. No bridge is configured here. 
- */ - if (!br_vlan_enabled(br)) - gswip_vlan_remove(priv, br, port, 0, true, false); -} - -static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct netlink_ext_ack *extack) -{ - struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); - struct gswip_priv *priv = ds->priv; - unsigned int max_ports = priv->hw_info->max_ports; - int pos = max_ports; - int i, idx = -1; - - /* We only support VLAN filtering on bridges */ - if (!dsa_is_cpu_port(ds, port) && !bridge) - return -EOPNOTSUPP; - - /* Check if there is already a page for this VLAN */ - for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { - if (priv->vlans[i].bridge == bridge && - priv->vlans[i].vid == vlan->vid) { - idx = i; - break; - } - } - - /* If this VLAN is not programmed yet, we have to reserve - * one entry in the VLAN table. Make sure we start at the - * next position round. - */ - if (idx == -1) { - /* Look for a free slot */ - for (; pos < ARRAY_SIZE(priv->vlans); pos++) { - if (!priv->vlans[pos].bridge) { - idx = pos; - pos++; - break; - } - } - - if (idx == -1) { - NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table"); - return -ENOSPC; - } - } - - return 0; -} - -static int gswip_port_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct netlink_ext_ack *extack) -{ - struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); - struct gswip_priv *priv = ds->priv; - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; - int err; - - err = gswip_port_vlan_prepare(ds, port, vlan, extack); - if (err) - return err; - - /* We have to receive all packets on the CPU port and should not - * do any VLAN filtering here. This is also called with bridge - * NULL and then we do not know for which bridge to configure - * this. 
- */ - if (dsa_is_cpu_port(ds, port)) - return 0; - - return gswip_vlan_add_aware(priv, bridge, port, vlan->vid, - untagged, pvid); -} - -static int gswip_port_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) -{ - struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); - struct gswip_priv *priv = ds->priv; - bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; - - /* We have to receive all packets on the CPU port and should not - * do any VLAN filtering here. This is also called with bridge - * NULL and then we do not know for which bridge to configure - * this. - */ - if (dsa_is_cpu_port(ds, port)) - return 0; - - return gswip_vlan_remove(priv, bridge, port, vlan->vid, pvid, true); -} - -static void gswip_port_fast_age(struct dsa_switch *ds, int port) -{ - struct gswip_priv *priv = ds->priv; - struct gswip_pce_table_entry mac_bridge = {0,}; - int i; - int err; - - for (i = 0; i < 2048; i++) { - mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; - mac_bridge.index = i; - - err = gswip_pce_table_entry_read(priv, &mac_bridge); - if (err) { - dev_err(priv->dev, "failed to read mac bridge: %d\n", - err); - return; - } - - if (!mac_bridge.valid) - continue; - - if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) - continue; - - if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT, - mac_bridge.val[0])) - continue; - - mac_bridge.valid = false; - err = gswip_pce_table_entry_write(priv, &mac_bridge); - if (err) { - dev_err(priv->dev, "failed to write mac bridge: %d\n", - err); - return; - } - } -} - -static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) -{ - struct gswip_priv *priv = ds->priv; - u32 stp_state; - - switch (state) { - case BR_STATE_DISABLED: - gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0, - GSWIP_SDMA_PCTRLp(port)); - return; - case BR_STATE_BLOCKING: - case BR_STATE_LISTENING: - stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN; - break; - case BR_STATE_LEARNING: - stp_state = 
GSWIP_PCE_PCTRL_0_PSTATE_LEARNING; - break; - case BR_STATE_FORWARDING: - stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING; - break; - default: - dev_err(priv->dev, "invalid STP state: %d\n", state); - return; - } - - gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN, - GSWIP_SDMA_PCTRLp(port)); - gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state, - GSWIP_PCE_PCTRL_0p(port)); -} - -static int gswip_port_fdb(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, bool add) -{ - struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); - struct gswip_priv *priv = ds->priv; - struct gswip_pce_table_entry mac_bridge = {0,}; - unsigned int max_ports = priv->hw_info->max_ports; - int fid = -1; - int i; - int err; - - /* Operation not supported on the CPU port, don't throw errors */ - if (!bridge) - return 0; - - for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { - if (priv->vlans[i].bridge == bridge) { - fid = priv->vlans[i].fid; - break; - } - } - - if (fid == -1) { - dev_err(priv->dev, "no FID found for bridge %s\n", - bridge->name); - return -EINVAL; - } - - mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; - mac_bridge.key_mode = true; - mac_bridge.key[0] = addr[5] | (addr[4] << 8); - mac_bridge.key[1] = addr[3] | (addr[2] << 8); - mac_bridge.key[2] = addr[1] | (addr[0] << 8); - mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid); - mac_bridge.val[0] = add ? 
BIT(port) : 0; /* port map */ - mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC; - mac_bridge.valid = add; - - err = gswip_pce_table_entry_write(priv, &mac_bridge); - if (err) - dev_err(priv->dev, "failed to write mac bridge: %d\n", err); - - return err; -} - -static int gswip_port_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) -{ - return gswip_port_fdb(ds, port, addr, vid, true); -} - -static int gswip_port_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) -{ - return gswip_port_fdb(ds, port, addr, vid, false); -} - -static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, - dsa_fdb_dump_cb_t *cb, void *data) -{ - struct gswip_priv *priv = ds->priv; - struct gswip_pce_table_entry mac_bridge = {0,}; - unsigned char addr[ETH_ALEN]; - int i; - int err; - - for (i = 0; i < 2048; i++) { - mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; - mac_bridge.index = i; - - err = gswip_pce_table_entry_read(priv, &mac_bridge); - if (err) { - dev_err(priv->dev, - "failed to read mac bridge entry %d: %d\n", - i, err); - return err; - } - - if (!mac_bridge.valid) - continue; - - addr[5] = mac_bridge.key[0] & 0xff; - addr[4] = (mac_bridge.key[0] >> 8) & 0xff; - addr[3] = mac_bridge.key[1] & 0xff; - addr[2] = (mac_bridge.key[1] >> 8) & 0xff; - addr[1] = mac_bridge.key[2] & 0xff; - addr[0] = (mac_bridge.key[2] >> 8) & 0xff; - if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) { - if (mac_bridge.val[0] & BIT(port)) { - err = cb(addr, 0, true, data); - if (err) - return err; - } - } else { - if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT, - mac_bridge.val[0])) { - err = cb(addr, 0, false, data); - if (err) - return err; - } - } - } - return 0; -} - -static int gswip_port_max_mtu(struct dsa_switch *ds, int port) -{ - /* Includes 8 bytes for special header. 
*/ - return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN; -} - -static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) -{ - struct gswip_priv *priv = ds->priv; - - /* CPU port always has maximum mtu of user ports, so use it to set - * switch frame size, including 8 byte special header. - */ - if (dsa_is_cpu_port(ds, port)) { - new_mtu += 8; - gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN, - GSWIP_MAC_FLEN); - } - - /* Enable MLEN for ports with non-standard MTUs, including the special - * header on the CPU port added above. - */ - if (new_mtu != ETH_DATA_LEN) - gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, - GSWIP_MAC_CTRL_2p(port)); - else - gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0, - GSWIP_MAC_CTRL_2p(port)); - - return 0; -} - static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port, struct phylink_config *config) { @@ -1346,327 +97,6 @@ static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port, MAC_10 | MAC_100 | MAC_1000; } -static void gswip_phylink_get_caps(struct dsa_switch *ds, int port, - struct phylink_config *config) -{ - struct gswip_priv *priv = ds->priv; - - priv->hw_info->phylink_get_caps(ds, port, config); -} - -static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link) -{ - u32 mdio_phy; - - if (link) - mdio_phy = GSWIP_MDIO_PHY_LINK_UP; - else - mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN; - - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy, - GSWIP_MDIO_PHYp(port)); -} - -static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed, - phy_interface_t interface) -{ - u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0; - - switch (speed) { - case SPEED_10: - mdio_phy = GSWIP_MDIO_PHY_SPEED_M10; - - if (interface == PHY_INTERFACE_MODE_RMII) - mii_cfg = GSWIP_MII_CFG_RATE_M50; - else - mii_cfg = GSWIP_MII_CFG_RATE_M2P5; - - mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; - break; - - case SPEED_100: - mdio_phy = 
GSWIP_MDIO_PHY_SPEED_M100; - - if (interface == PHY_INTERFACE_MODE_RMII) - mii_cfg = GSWIP_MII_CFG_RATE_M50; - else - mii_cfg = GSWIP_MII_CFG_RATE_M25; - - mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; - break; - - case SPEED_1000: - mdio_phy = GSWIP_MDIO_PHY_SPEED_G1; - - mii_cfg = GSWIP_MII_CFG_RATE_M125; - - mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII; - break; - } - - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy, - GSWIP_MDIO_PHYp(port)); - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port); - gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0, - GSWIP_MAC_CTRL_0p(port)); -} - -static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex) -{ - u32 mac_ctrl_0, mdio_phy; - - if (duplex == DUPLEX_FULL) { - mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN; - mdio_phy = GSWIP_MDIO_PHY_FDUP_EN; - } else { - mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS; - mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS; - } - - gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0, - GSWIP_MAC_CTRL_0p(port)); - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy, - GSWIP_MDIO_PHYp(port)); -} - -static void gswip_port_set_pause(struct gswip_priv *priv, int port, - bool tx_pause, bool rx_pause) -{ - u32 mac_ctrl_0, mdio_phy; - - if (tx_pause && rx_pause) { - mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX; - mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | - GSWIP_MDIO_PHY_FCONRX_EN; - } else if (tx_pause) { - mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX; - mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | - GSWIP_MDIO_PHY_FCONRX_DIS; - } else if (rx_pause) { - mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX; - mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | - GSWIP_MDIO_PHY_FCONRX_EN; - } else { - mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE; - mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | - GSWIP_MDIO_PHY_FCONRX_DIS; - } - - gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK, - mac_ctrl_0, GSWIP_MAC_CTRL_0p(port)); - gswip_mdio_mask(priv, - GSWIP_MDIO_PHY_FCONTX_MASK | - GSWIP_MDIO_PHY_FCONRX_MASK, - mdio_phy, 
GSWIP_MDIO_PHYp(port)); -} - -static void gswip_phylink_mac_config(struct phylink_config *config, - unsigned int mode, - const struct phylink_link_state *state) -{ - struct dsa_port *dp = dsa_phylink_to_port(config); - struct gswip_priv *priv = dp->ds->priv; - int port = dp->index; - u32 miicfg = 0; - - miicfg |= GSWIP_MII_CFG_LDCLKDIS; - - switch (state->interface) { - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_1000BASEX: - case PHY_INTERFACE_MODE_2500BASEX: - return; - case PHY_INTERFACE_MODE_MII: - case PHY_INTERFACE_MODE_INTERNAL: - miicfg |= GSWIP_MII_CFG_MODE_MIIM; - break; - case PHY_INTERFACE_MODE_REVMII: - miicfg |= GSWIP_MII_CFG_MODE_MIIP; - break; - case PHY_INTERFACE_MODE_RMII: - miicfg |= GSWIP_MII_CFG_MODE_RMIIM; - break; - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - miicfg |= GSWIP_MII_CFG_MODE_RGMII; - break; - case PHY_INTERFACE_MODE_GMII: - miicfg |= GSWIP_MII_CFG_MODE_GMII; - break; - default: - dev_err(dp->ds->dev, - "Unsupported interface: %d\n", state->interface); - return; - } - - gswip_mii_mask_cfg(priv, - GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK | - GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS, - miicfg, port); - - switch (state->interface) { - case PHY_INTERFACE_MODE_RGMII_ID: - gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK | - GSWIP_MII_PCDU_RXDLY_MASK, 0, port); - break; - case PHY_INTERFACE_MODE_RGMII_RXID: - gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port); - break; - case PHY_INTERFACE_MODE_RGMII_TXID: - gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port); - break; - default: - break; - } -} - -static void gswip_phylink_mac_link_down(struct phylink_config *config, - unsigned int mode, - phy_interface_t interface) -{ - struct dsa_port *dp = dsa_phylink_to_port(config); - struct gswip_priv *priv = dp->ds->priv; - int port = dp->index; - - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 
port); - - if (!dsa_port_is_cpu(dp)) - gswip_port_set_link(priv, port, false); -} - -static void gswip_phylink_mac_link_up(struct phylink_config *config, - struct phy_device *phydev, - unsigned int mode, - phy_interface_t interface, - int speed, int duplex, - bool tx_pause, bool rx_pause) -{ - struct dsa_port *dp = dsa_phylink_to_port(config); - struct gswip_priv *priv = dp->ds->priv; - int port = dp->index; - - if (!dsa_port_is_cpu(dp)) { - gswip_port_set_link(priv, port, true); - gswip_port_set_speed(priv, port, speed, interface); - gswip_port_set_duplex(priv, port, duplex); - gswip_port_set_pause(priv, port, tx_pause, rx_pause); - } - - gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port); -} - -static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset, - uint8_t *data) -{ - int i; - - if (stringset != ETH_SS_STATS) - return; - - for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) - ethtool_puts(&data, gswip_rmon_cnt[i].name); -} - -static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table, - u32 index) -{ - u32 result; - int err; - - gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR); - gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK | - GSWIP_BM_RAM_CTRL_OPMOD, - table | GSWIP_BM_RAM_CTRL_BAS, - GSWIP_BM_RAM_CTRL); - - err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL, - GSWIP_BM_RAM_CTRL_BAS); - if (err) { - dev_err(priv->dev, "timeout while reading table: %u, index: %u\n", - table, index); - return 0; - } - - result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0)); - result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16; - - return result; -} - -static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port, - uint64_t *data) -{ - struct gswip_priv *priv = ds->priv; - const struct gswip_rmon_cnt_desc *rmon_cnt; - int i; - u64 high; - - for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) { - rmon_cnt = &gswip_rmon_cnt[i]; - - data[i] = gswip_bcm_ram_entry_read(priv, port, - rmon_cnt->offset); - if (rmon_cnt->size == 2) { - high 
= gswip_bcm_ram_entry_read(priv, port, - rmon_cnt->offset + 1); - data[i] |= high << 32; - } - } -} - -static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset) -{ - if (sset != ETH_SS_STATS) - return 0; - - return ARRAY_SIZE(gswip_rmon_cnt); -} - -static struct phylink_pcs *gswip_phylink_mac_select_pcs(struct phylink_config *config, - phy_interface_t interface) -{ - struct dsa_port *dp = dsa_phylink_to_port(config); - struct gswip_priv *priv = dp->ds->priv; - - if (priv->hw_info->mac_select_pcs) - return priv->hw_info->mac_select_pcs(config, interface); - - return NULL; -} - -static const struct phylink_mac_ops gswip_phylink_mac_ops = { - .mac_config = gswip_phylink_mac_config, - .mac_link_down = gswip_phylink_mac_link_down, - .mac_link_up = gswip_phylink_mac_link_up, - .mac_select_pcs = gswip_phylink_mac_select_pcs, -}; - -static const struct dsa_switch_ops gswip_switch_ops = { - .get_tag_protocol = gswip_get_tag_protocol, - .setup = gswip_setup, - .port_setup = gswip_port_setup, - .port_enable = gswip_port_enable, - .port_disable = gswip_port_disable, - .port_bridge_join = gswip_port_bridge_join, - .port_bridge_leave = gswip_port_bridge_leave, - .port_fast_age = gswip_port_fast_age, - .port_vlan_filtering = gswip_port_vlan_filtering, - .port_vlan_add = gswip_port_vlan_add, - .port_vlan_del = gswip_port_vlan_del, - .port_stp_state_set = gswip_port_stp_state_set, - .port_fdb_add = gswip_port_fdb_add, - .port_fdb_del = gswip_port_fdb_del, - .port_fdb_dump = gswip_port_fdb_dump, - .port_change_mtu = gswip_port_change_mtu, - .port_max_mtu = gswip_port_max_mtu, - .phylink_get_caps = gswip_phylink_get_caps, - .get_strings = gswip_get_strings, - .get_ethtool_stats = gswip_get_ethtool_stats, - .get_sset_count = gswip_get_sset_count, -}; - static const struct xway_gphy_match_data xrx200a1x_gphy_data = { .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin", .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin", @@ -1887,33 +317,37 @@ remove_gphy: return err; } 
-static int gswip_validate_cpu_port(struct dsa_switch *ds) -{ - struct gswip_priv *priv = ds->priv; - struct dsa_port *cpu_dp; - int cpu_port = -1; - - dsa_switch_for_each_cpu_port(cpu_dp, ds) { - if (cpu_port != -1) - return dev_err_probe(ds->dev, -EINVAL, - "only a single CPU port is supported\n"); - - cpu_port = cpu_dp->index; - } - - if (cpu_port == -1) - return dev_err_probe(ds->dev, -EINVAL, "no CPU port defined\n"); +static const struct regmap_config sw_regmap_config = { + .name = "switch", + .reg_bits = 32, + .val_bits = 32, + .reg_shift = REGMAP_UPSHIFT(2), + .val_format_endian = REGMAP_ENDIAN_NATIVE, + .max_register = GSWIP_SDMA_PCTRLp(6), +}; - if (BIT(cpu_port) & ~priv->hw_info->allowed_cpu_ports) - return dev_err_probe(ds->dev, -EINVAL, - "unsupported CPU port defined\n"); +static const struct regmap_config mdio_regmap_config = { + .name = "mdio", + .reg_bits = 32, + .val_bits = 32, + .reg_shift = REGMAP_UPSHIFT(2), + .val_format_endian = REGMAP_ENDIAN_NATIVE, + .max_register = GSWIP_MDIO_PHYp(0), +}; - return 0; -} +static const struct regmap_config mii_regmap_config = { + .name = "mii", + .reg_bits = 32, + .val_bits = 32, + .reg_shift = REGMAP_UPSHIFT(2), + .val_format_endian = REGMAP_ENDIAN_NATIVE, + .max_register = GSWIP_MII_CFGp(6), +}; static int gswip_probe(struct platform_device *pdev) { struct device_node *np, *gphy_fw_np; + __iomem void *gswip, *mdio, *mii; struct device *dev = &pdev->dev; struct gswip_priv *priv; int err; @@ -1924,15 +358,27 @@ static int gswip_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->gswip = devm_platform_ioremap_resource(pdev, 0); + gswip = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(gswip)) + return PTR_ERR(gswip); + + mdio = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(mdio)) + return PTR_ERR(mdio); + + mii = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(mii)) + return PTR_ERR(mii); + + priv->gswip = devm_regmap_init_mmio(dev, gswip, &sw_regmap_config); if 
(IS_ERR(priv->gswip)) return PTR_ERR(priv->gswip); - priv->mdio = devm_platform_ioremap_resource(pdev, 1); + priv->mdio = devm_regmap_init_mmio(dev, mdio, &mdio_regmap_config); if (IS_ERR(priv->mdio)) return PTR_ERR(priv->mdio); - priv->mii = devm_platform_ioremap_resource(pdev, 2); + priv->mii = devm_regmap_init_mmio(dev, mii, &mii_regmap_config); if (IS_ERR(priv->mii)) return PTR_ERR(priv->mii); @@ -1944,24 +390,9 @@ static int gswip_probe(struct platform_device *pdev) if (!priv->ds) return -ENOMEM; - priv->ds->dev = dev; - priv->ds->num_ports = priv->hw_info->max_ports; - priv->ds->priv = priv; - priv->ds->ops = &gswip_switch_ops; - priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops; priv->dev = dev; - mutex_init(&priv->pce_table_lock); - version = gswip_switch_r(priv, GSWIP_VERSION); - - /* The hardware has the 'major/minor' version bytes in the wrong order - * preventing numerical comparisons. Construct a 16-bit unsigned integer - * having the REV field as most significant byte and the MOD field as - * least significant byte. This is effectively swapping the two bytes of - * the version variable, but other than using swab16 it doesn't affect - * the source variable. 
- */ - priv->version = GSWIP_VERSION_REV(version) << 8 | - GSWIP_VERSION_MOD(version); + + regmap_read(priv->gswip, GSWIP_VERSION, &version); np = dev->of_node; switch (version) { @@ -1991,25 +422,14 @@ static int gswip_probe(struct platform_device *pdev) "gphy fw probe failed\n"); } - err = dsa_register_switch(priv->ds); - if (err) { - dev_err_probe(dev, err, "dsa switch registration failed\n"); - goto gphy_fw_remove; - } - - err = gswip_validate_cpu_port(priv->ds); + err = gswip_probe_common(priv, version); if (err) - goto disable_switch; + goto gphy_fw_remove; platform_set_drvdata(pdev, priv); - dev_info(dev, "probed GSWIP version %lx mod %lx\n", - GSWIP_VERSION_REV(version), GSWIP_VERSION_MOD(version)); return 0; -disable_switch: - gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); - dsa_unregister_switch(priv->ds); gphy_fw_remove: for (i = 0; i < priv->num_gphy_fw; i++) gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); @@ -2025,7 +445,7 @@ static void gswip_remove(struct platform_device *pdev) return; /* disable the switch */ - gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); + gswip_disable_switch(priv); dsa_unregister_switch(priv->ds); diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.h b/drivers/net/dsa/lantiq/lantiq_gswip.h index 2df9c8e8cfd0..9c38e51a75e8 100644 --- a/drivers/net/dsa/lantiq/lantiq_gswip.h +++ b/drivers/net/dsa/lantiq/lantiq_gswip.h @@ -2,6 +2,7 @@ #ifndef __LANTIQ_GSWIP_H #define __LANTIQ_GSWIP_H +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/mutex.h> #include <linux/phylink.h> @@ -81,6 +82,10 @@ #define GSWIP_MII_PCDU5 0x05 #define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0) #define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7) +#define GSWIP_MII_PCDU_TXDLY(x) u16_encode_bits(((x) / 500), GSWIP_MII_PCDU_TXDLY_MASK) +#define GSWIP_MII_PCDU_RXDLY(x) u16_encode_bits(((x) / 500), GSWIP_MII_PCDU_RXDLY_MASK) +#define GSWIP_MII_PCDU_RXDLY_DEFAULT 2000 /* picoseconds */ +#define 
GSWIP_MII_PCDU_TXDLY_DEFAULT 2000 /* picoseconds */ /* GSWIP Core Registers */ #define GSWIP_SWRES 0x000 @@ -157,8 +162,15 @@ #define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3 #define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7 #define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0) +/* Ethernet Switch PCE Port Control Register 3 */ +#define GSWIP_PCE_PCTRL_3p(p) (0x483 + ((p) * 0xA)) +#define GSWIP_PCE_PCTRL_3_LNDIS BIT(15) /* Learning Disable */ #define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA)) #define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */ +#define GSWIP_PCE_VCTRL_VINR GENMASK(2, 1) /* VLAN Ingress Tag Rule */ +#define GSWIP_PCE_VCTRL_VINR_ALL 0 /* Admit tagged and untagged packets */ +#define GSWIP_PCE_VCTRL_VINR_TAGGED 1 /* Admit only tagged packets */ +#define GSWIP_PCE_VCTRL_VINR_UNTAGGED 2 /* Admit only untagged packets */ #define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */ #define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */ #define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */ @@ -186,6 +198,12 @@ #define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC)) #define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */ #define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Lnegth */ +#define GSWIP_MAC_CTRL_4p(p) (0x907 + ((p) * 0xC)) +#define GSWIP_MAC_CTRL_4_LPIEN BIT(7) /* LPI Mode Enable */ +#define GSWIP_MAC_CTRL_4_GWAIT_MASK GENMASK(14, 8) /* LPI Wait Time 1G */ +#define GSWIP_MAC_CTRL_4_GWAIT(t) u16_encode_bits((t), GSWIP_MAC_CTRL_4_GWAIT_MASK) +#define GSWIP_MAC_CTRL_4_WAIT_MASK GENMASK(6, 0) /* LPI Wait Time 100M */ +#define GSWIP_MAC_CTRL_4_WAIT(t) u16_encode_bits((t), GSWIP_MAC_CTRL_4_WAIT_MASK) /* Ethernet Switch Fetch DMA Port Control Register */ #define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6)) @@ -210,6 +228,7 @@ #define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID GENMASK(5, 0) /* Filtering identifier */ #define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT GENMASK(7, 4) /* Port on learned entries */ #define 
GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC BIT(0) /* Static, non-aging entry */ +#define GSWIP_TABLE_MAC_BRIDGE_VAL1_VALID BIT(1) /* Valid bit */ #define XRX200_GPHY_FW_ALIGN (16 * 1024) @@ -222,6 +241,8 @@ */ #define GSWIP_MAX_PACKET_LENGTH 2400 +#define GSWIP_VLAN_UNAWARE_PVID 0 + struct gswip_pce_microcode { u16 val_3; u16 val_2; @@ -234,6 +255,7 @@ struct gswip_hw_info { unsigned int allowed_cpu_ports; unsigned int mii_ports; int mii_port_reg_offset; + bool supports_2500m; const struct gswip_pce_microcode (*pce_microcode)[]; size_t pce_microcode_size; enum dsa_tag_protocol tag_protocol; @@ -257,9 +279,9 @@ struct gswip_vlan { }; struct gswip_priv { - __iomem void *gswip; - __iomem void *mdio; - __iomem void *mii; + struct regmap *gswip; + struct regmap *mdio; + struct regmap *mii; const struct gswip_hw_info *hw_info; const struct xway_gphy_match_data *gphy_fw_name_cfg; struct dsa_switch *ds; @@ -268,9 +290,12 @@ struct gswip_priv { struct gswip_vlan vlans[64]; int num_gphy_fw; struct gswip_gphy_fw *gphy_fw; - u32 port_vlan_filter; struct mutex pce_table_lock; u16 version; }; +void gswip_disable_switch(struct gswip_priv *priv); + +int gswip_probe_common(struct gswip_priv *priv, u32 version); + #endif /* __LANTIQ_GSWIP_H */ diff --git a/drivers/net/dsa/lantiq/lantiq_gswip_common.c b/drivers/net/dsa/lantiq/lantiq_gswip_common.c new file mode 100644 index 000000000000..9da39edf8f57 --- /dev/null +++ b/drivers/net/dsa/lantiq/lantiq_gswip_common.c @@ -0,0 +1,1739 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lantiq / Intel / MaxLinear GSWIP common function library + * + * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org> + * Copyright (C) 2023 - 2024 MaxLinear Inc. + * Copyright (C) 2022 Snap One, LLC. All rights reserved. 
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de> + * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2010 Lantiq Deutschland + * + * The VLAN and bridge model the GSWIP hardware uses does not directly + * match the model DSA uses. + * + * The hardware has 64 possible table entries for bridges with one VLAN + * ID, one flow id and a list of ports for each bridge. All entries which + * match the same flow ID are combined in the mac learning table, they + * act as one global bridge. + * The hardware does not support VLAN filter on the port, but on the + * bridge, this driver converts the DSA model to the hardware. + * + * The CPU gets all the exception frames which do not match any forwarding + * rule and the CPU port is also added to all bridges. This makes it possible + * to handle all the special cases easily in software. + * At the initialization the driver allocates one bridge table entry for + * each switch port which is used when the port is used without an + * explicit bridge. This prevents the frames from being forwarded + * between all LAN ports by default. 
+ */ + +#include "lantiq_gswip.h" + +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <linux/regmap.h> +#include <net/dsa.h> + +struct gswip_pce_table_entry { + u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index + u16 table; // PCE_TBL_CTRL.ADDR = pData->table + u16 key[8]; + u16 val[5]; + u16 mask; + u8 gmap; + bool type; + bool valid; + bool key_mode; +}; + +struct gswip_rmon_cnt_desc { + unsigned int size; + unsigned int offset; + const char *name; +}; + +#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name} + +static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = { + /** Receive Packet Count (only packets that are accepted and not discarded). */ + MIB_DESC(1, 0x1F, "RxGoodPkts"), + MIB_DESC(1, 0x23, "RxUnicastPkts"), + MIB_DESC(1, 0x22, "RxMulticastPkts"), + MIB_DESC(1, 0x21, "RxFCSErrorPkts"), + MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"), + MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"), + MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"), + MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"), + MIB_DESC(1, 0x20, "RxGoodPausePkts"), + MIB_DESC(1, 0x1A, "RxAlignErrorPkts"), + MIB_DESC(1, 0x12, "Rx64BytePkts"), + MIB_DESC(1, 0x13, "Rx127BytePkts"), + MIB_DESC(1, 0x14, "Rx255BytePkts"), + MIB_DESC(1, 0x15, "Rx511BytePkts"), + MIB_DESC(1, 0x16, "Rx1023BytePkts"), + /** Receive Size 1024-1522 (or more, if configured) Packet Count. 
*/ + MIB_DESC(1, 0x17, "RxMaxBytePkts"), + MIB_DESC(1, 0x18, "RxDroppedPkts"), + MIB_DESC(1, 0x19, "RxFilteredPkts"), + MIB_DESC(2, 0x24, "RxGoodBytes"), + MIB_DESC(2, 0x26, "RxBadBytes"), + MIB_DESC(1, 0x11, "TxAcmDroppedPkts"), + MIB_DESC(1, 0x0C, "TxGoodPkts"), + MIB_DESC(1, 0x06, "TxUnicastPkts"), + MIB_DESC(1, 0x07, "TxMulticastPkts"), + MIB_DESC(1, 0x00, "Tx64BytePkts"), + MIB_DESC(1, 0x01, "Tx127BytePkts"), + MIB_DESC(1, 0x02, "Tx255BytePkts"), + MIB_DESC(1, 0x03, "Tx511BytePkts"), + MIB_DESC(1, 0x04, "Tx1023BytePkts"), + /** Transmit Size 1024-1522 (or more, if configured) Packet Count. */ + MIB_DESC(1, 0x05, "TxMaxBytePkts"), + MIB_DESC(1, 0x08, "TxSingleCollCount"), + MIB_DESC(1, 0x09, "TxMultCollCount"), + MIB_DESC(1, 0x0A, "TxLateCollCount"), + MIB_DESC(1, 0x0B, "TxExcessCollCount"), + MIB_DESC(1, 0x0D, "TxPauseCount"), + MIB_DESC(1, 0x10, "TxDroppedPkts"), + MIB_DESC(2, 0x0E, "TxGoodBytes"), +}; + +static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset, + u32 cleared) +{ + u32 val; + + return regmap_read_poll_timeout(priv->gswip, offset, val, + !(val & cleared), 20, 50000); +} + +static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 mask, u32 set, + int port) +{ + int reg_port; + + /* MII_CFG register only exists for MII ports */ + if (!(priv->hw_info->mii_ports & BIT(port))) + return; + + reg_port = port + priv->hw_info->mii_port_reg_offset; + + regmap_write_bits(priv->mii, GSWIP_MII_CFGp(reg_port), mask, + set); +} + +static int gswip_mdio_poll(struct gswip_priv *priv) +{ + u32 ctrl; + + return regmap_read_poll_timeout(priv->mdio, GSWIP_MDIO_CTRL, ctrl, + !(ctrl & GSWIP_MDIO_CTRL_BUSY), 40, 4000); +} + +static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val) +{ + struct gswip_priv *priv = bus->priv; + int err; + + err = gswip_mdio_poll(priv); + if (err) { + dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); + return err; + } + + regmap_write(priv->mdio, GSWIP_MDIO_WRITE, val); + 
regmap_write(priv->mdio, GSWIP_MDIO_CTRL, + GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR | + ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | + (reg & GSWIP_MDIO_CTRL_REGAD_MASK)); + + return 0; +} + +static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg) +{ + struct gswip_priv *priv = bus->priv; + u32 val; + int err; + + err = gswip_mdio_poll(priv); + if (err) { + dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); + return err; + } + + regmap_write(priv->mdio, GSWIP_MDIO_CTRL, + GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD | + ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | + (reg & GSWIP_MDIO_CTRL_REGAD_MASK)); + + err = gswip_mdio_poll(priv); + if (err) { + dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); + return err; + } + + err = regmap_read(priv->mdio, GSWIP_MDIO_READ, &val); + if (err) + return err; + + return val; +} + +static int gswip_mdio(struct gswip_priv *priv) +{ + struct device_node *mdio_np, *switch_np = priv->dev->of_node; + struct device *dev = priv->dev; + struct mii_bus *bus; + int err = 0; + + mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio"); + if (!mdio_np) + mdio_np = of_get_child_by_name(switch_np, "mdio"); + + if (!of_device_is_available(mdio_np)) + goto out_put_node; + + bus = devm_mdiobus_alloc(dev); + if (!bus) { + err = -ENOMEM; + goto out_put_node; + } + + bus->priv = priv; + bus->read = gswip_mdio_rd; + bus->write = gswip_mdio_wr; + bus->name = "lantiq,xrx200-mdio"; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev)); + bus->parent = priv->dev; + + err = devm_of_mdiobus_register(dev, bus, mdio_np); + +out_put_node: + of_node_put(mdio_np); + + return err; +} + +static int gswip_pce_table_entry_read(struct gswip_priv *priv, + struct gswip_pce_table_entry *tbl) +{ + int i; + int err; + u32 crtl; + u32 tmp; + u16 addr_mode = tbl->key_mode ? 
GSWIP_PCE_TBL_CTRL_OPMOD_KSRD : + GSWIP_PCE_TBL_CTRL_OPMOD_ADRD; + + mutex_lock(&priv->pce_table_lock); + + err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_BAS); + if (err) + goto out_unlock; + + regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, tbl->index); + regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_ADDR_MASK | + GSWIP_PCE_TBL_CTRL_OPMOD_MASK | + GSWIP_PCE_TBL_CTRL_BAS, + tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS); + + err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_BAS); + if (err) + goto out_unlock; + + for (i = 0; i < ARRAY_SIZE(tbl->key); i++) { + err = regmap_read(priv->gswip, GSWIP_PCE_TBL_KEY(i), &tmp); + if (err) + goto out_unlock; + tbl->key[i] = tmp; + } + for (i = 0; i < ARRAY_SIZE(tbl->val); i++) { + err = regmap_read(priv->gswip, GSWIP_PCE_TBL_VAL(i), &tmp); + if (err) + goto out_unlock; + tbl->val[i] = tmp; + } + + err = regmap_read(priv->gswip, GSWIP_PCE_TBL_MASK, &tmp); + if (err) + goto out_unlock; + + tbl->mask = tmp; + err = regmap_read(priv->gswip, GSWIP_PCE_TBL_CTRL, &crtl); + if (err) + goto out_unlock; + + tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE); + tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD); + tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7; + +out_unlock: + mutex_unlock(&priv->pce_table_lock); + + return err; +} + +static int gswip_pce_table_entry_write(struct gswip_priv *priv, + struct gswip_pce_table_entry *tbl) +{ + int i; + int err; + u32 crtl; + u16 addr_mode = tbl->key_mode ? 
GSWIP_PCE_TBL_CTRL_OPMOD_KSWR : + GSWIP_PCE_TBL_CTRL_OPMOD_ADWR; + + mutex_lock(&priv->pce_table_lock); + + err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_BAS); + if (err) { + mutex_unlock(&priv->pce_table_lock); + return err; + } + + regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, tbl->index); + regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_ADDR_MASK | + GSWIP_PCE_TBL_CTRL_OPMOD_MASK, + tbl->table | addr_mode); + + for (i = 0; i < ARRAY_SIZE(tbl->key); i++) + regmap_write(priv->gswip, GSWIP_PCE_TBL_KEY(i), tbl->key[i]); + + for (i = 0; i < ARRAY_SIZE(tbl->val); i++) + regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(i), tbl->val[i]); + + regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_ADDR_MASK | + GSWIP_PCE_TBL_CTRL_OPMOD_MASK, + tbl->table | addr_mode); + + regmap_write(priv->gswip, GSWIP_PCE_TBL_MASK, tbl->mask); + + regmap_read(priv->gswip, GSWIP_PCE_TBL_CTRL, &crtl); + crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD | + GSWIP_PCE_TBL_CTRL_GMAP_MASK); + if (tbl->type) + crtl |= GSWIP_PCE_TBL_CTRL_TYPE; + if (tbl->valid) + crtl |= GSWIP_PCE_TBL_CTRL_VLD; + crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK; + crtl |= GSWIP_PCE_TBL_CTRL_BAS; + regmap_write(priv->gswip, GSWIP_PCE_TBL_CTRL, crtl); + + err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_BAS); + + mutex_unlock(&priv->pce_table_lock); + + return err; +} + +/* Add the LAN port into a bridge with the CPU port by + * default. This prevents automatic forwarding of + * packages between the LAN ports when no explicit + * bridge is configured. 
+ */ +static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add) +{ + struct gswip_pce_table_entry vlan_active = {0,}; + struct gswip_pce_table_entry vlan_mapping = {0,}; + int err; + + vlan_active.index = port + 1; + vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; + vlan_active.key[0] = GSWIP_VLAN_UNAWARE_PVID; + vlan_active.val[0] = port + 1 /* fid */; + vlan_active.valid = add; + err = gswip_pce_table_entry_write(priv, &vlan_active); + if (err) { + dev_err(priv->dev, "failed to write active VLAN: %d\n", err); + return err; + } + + if (!add) + return 0; + + vlan_mapping.index = port + 1; + vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; + vlan_mapping.val[0] = GSWIP_VLAN_UNAWARE_PVID; + vlan_mapping.val[1] = BIT(port) | dsa_cpu_ports(priv->ds); + vlan_mapping.val[2] = 0; + err = gswip_pce_table_entry_write(priv, &vlan_mapping); + if (err) { + dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); + return err; + } + + return 0; +} + +static int gswip_port_set_learning(struct gswip_priv *priv, int port, + bool enable) +{ + if (!GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2)) + return -EOPNOTSUPP; + + /* learning disable bit */ + return regmap_update_bits(priv->gswip, GSWIP_PCE_PCTRL_3p(port), + GSWIP_PCE_PCTRL_3_LNDIS, + enable ? 
0 : GSWIP_PCE_PCTRL_3_LNDIS); +} + +static int gswip_port_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct gswip_priv *priv = ds->priv; + unsigned long supported = 0; + + if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2)) + supported |= BR_LEARNING; + + if (flags.mask & ~supported) + return -EINVAL; + + return 0; +} + +static int gswip_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct gswip_priv *priv = ds->priv; + + if (flags.mask & BR_LEARNING) + return gswip_port_set_learning(priv, port, + !!(flags.val & BR_LEARNING)); + + return 0; +} + +static int gswip_port_setup(struct dsa_switch *ds, int port) +{ + struct gswip_priv *priv = ds->priv; + int err; + + if (!dsa_is_cpu_port(ds, port)) { + err = gswip_add_single_port_br(priv, port, true); + if (err) + return err; + } + + return 0; +} + +static int gswip_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct gswip_priv *priv = ds->priv; + + if (!dsa_is_cpu_port(ds, port)) { + u32 mdio_phy = 0; + + if (phydev) + mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK; + + regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port), + GSWIP_MDIO_PHY_ADDR_MASK, + mdio_phy); + } + + /* RMON Counter Enable for port */ + regmap_write(priv->gswip, GSWIP_BM_PCFGp(port), GSWIP_BM_PCFG_CNTEN); + + /* enable port fetch/store dma & VLAN Modification */ + regmap_set_bits(priv->gswip, GSWIP_FDMA_PCTRLp(port), + GSWIP_FDMA_PCTRL_EN | GSWIP_FDMA_PCTRL_VLANMOD_BOTH); + regmap_set_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port), + GSWIP_SDMA_PCTRL_EN); + + return 0; +} + +static void gswip_port_disable(struct dsa_switch *ds, int port) +{ + struct gswip_priv *priv = ds->priv; + + regmap_clear_bits(priv->gswip, GSWIP_FDMA_PCTRLp(port), + GSWIP_FDMA_PCTRL_EN); + regmap_clear_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port), + GSWIP_SDMA_PCTRL_EN); +} + +static 
int gswip_pce_load_microcode(struct gswip_priv *priv) +{ + int i; + int err; + + regmap_write_bits(priv->gswip, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_ADDR_MASK | + GSWIP_PCE_TBL_CTRL_OPMOD_MASK | + GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, + GSWIP_PCE_TBL_CTRL_OPMOD_ADWR); + regmap_write(priv->gswip, GSWIP_PCE_TBL_MASK, 0); + + for (i = 0; i < priv->hw_info->pce_microcode_size; i++) { + regmap_write(priv->gswip, GSWIP_PCE_TBL_ADDR, i); + regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(0), + (*priv->hw_info->pce_microcode)[i].val_0); + regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(1), + (*priv->hw_info->pce_microcode)[i].val_1); + regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(2), + (*priv->hw_info->pce_microcode)[i].val_2); + regmap_write(priv->gswip, GSWIP_PCE_TBL_VAL(3), + (*priv->hw_info->pce_microcode)[i].val_3); + + /* start the table access: */ + regmap_set_bits(priv->gswip, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_BAS); + err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_BAS); + if (err) + return err; + } + + /* tell the switch that the microcode is loaded */ + regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0, + GSWIP_PCE_GCTRL_0_MC_VALID); + + return 0; +} + +static void gswip_port_commit_pvid(struct gswip_priv *priv, int port) +{ + struct dsa_port *dp = dsa_to_port(priv->ds, port); + struct net_device *br = dsa_port_bridge_dev_get(dp); + u32 vinr; + int idx; + + if (!dsa_port_is_user(dp)) + return; + + if (br) { + u16 pvid = GSWIP_VLAN_UNAWARE_PVID; + + if (br_vlan_enabled(br)) + br_vlan_get_pvid(br, &pvid); + + /* VLAN-aware bridge ports with no PVID will use Active VLAN + * index 0. The expectation is that this drops all untagged and + * VID-0 tagged ingress traffic. 
+ */ + idx = 0; + for (int i = priv->hw_info->max_ports; + i < ARRAY_SIZE(priv->vlans); i++) { + if (priv->vlans[i].bridge == br && + priv->vlans[i].vid == pvid) { + idx = i; + break; + } + } + } else { + /* The Active VLAN table index as configured by + * gswip_add_single_port_br() + */ + idx = port + 1; + } + + vinr = idx ? GSWIP_PCE_VCTRL_VINR_ALL : GSWIP_PCE_VCTRL_VINR_TAGGED; + regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port), + GSWIP_PCE_VCTRL_VINR, + FIELD_PREP(GSWIP_PCE_VCTRL_VINR, vinr)); + + /* Note that in GSWIP 2.2 VLAN mode the VID needs to be programmed + * directly instead of referencing the index in the Active VLAN Tablet. + * However, without the VLANMD bit (9) in PCE_GCTRL_1 (0x457) even + * GSWIP 2.2 and newer hardware maintain the GSWIP 2.1 behavior. + */ + regmap_write(priv->gswip, GSWIP_PCE_DEFPVID(port), idx); +} + +static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering, + struct netlink_ext_ack *extack) +{ + struct gswip_priv *priv = ds->priv; + + if (vlan_filtering) { + /* Use tag based VLAN */ + regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port), + GSWIP_PCE_VCTRL_VSR | + GSWIP_PCE_VCTRL_UVR | + GSWIP_PCE_VCTRL_VIMR | + GSWIP_PCE_VCTRL_VEMR | + GSWIP_PCE_VCTRL_VID0, + GSWIP_PCE_VCTRL_UVR | + GSWIP_PCE_VCTRL_VIMR | + GSWIP_PCE_VCTRL_VEMR | + GSWIP_PCE_VCTRL_VID0); + regmap_clear_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port), + GSWIP_PCE_PCTRL_0_TVM); + } else { + /* Use port based VLAN */ + regmap_write_bits(priv->gswip, GSWIP_PCE_VCTRL(port), + GSWIP_PCE_VCTRL_UVR | + GSWIP_PCE_VCTRL_VIMR | + GSWIP_PCE_VCTRL_VEMR | + GSWIP_PCE_VCTRL_VID0 | + GSWIP_PCE_VCTRL_VSR, + GSWIP_PCE_VCTRL_VSR); + regmap_set_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port), + GSWIP_PCE_PCTRL_0_TVM); + } + + gswip_port_commit_pvid(priv, port); + + return 0; +} + +static void gswip_mii_delay_setup(struct gswip_priv *priv, struct dsa_port *dp, + phy_interface_t interface) +{ + u32 tx_delay = GSWIP_MII_PCDU_TXDLY_DEFAULT; + u32 rx_delay 
= GSWIP_MII_PCDU_RXDLY_DEFAULT; + struct device_node *port_dn = dp->dn; + u16 mii_pcdu_reg; + + /* As MII_PCDU registers only exist for MII ports, silently return + * unless the port is an MII port + */ + if (!(priv->hw_info->mii_ports & BIT(dp->index))) + return; + + switch (dp->index + priv->hw_info->mii_port_reg_offset) { + case 0: + mii_pcdu_reg = GSWIP_MII_PCDU0; + break; + case 1: + mii_pcdu_reg = GSWIP_MII_PCDU1; + break; + case 5: + mii_pcdu_reg = GSWIP_MII_PCDU5; + break; + default: + return; + } + + /* legacy code to set default delays according to the interface mode */ + switch (interface) { + case PHY_INTERFACE_MODE_RGMII_ID: + tx_delay = 0; + rx_delay = 0; + break; + case PHY_INTERFACE_MODE_RGMII_RXID: + rx_delay = 0; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + tx_delay = 0; + break; + default: + break; + } + + /* allow settings delays using device tree properties */ + of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay); + of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay); + + regmap_write_bits(priv->mii, mii_pcdu_reg, + GSWIP_MII_PCDU_TXDLY_MASK | + GSWIP_MII_PCDU_RXDLY_MASK, + GSWIP_MII_PCDU_TXDLY(tx_delay) | + GSWIP_MII_PCDU_RXDLY(rx_delay)); +} + +static int gswip_setup(struct dsa_switch *ds) +{ + unsigned int cpu_ports = dsa_cpu_ports(ds); + struct gswip_priv *priv = ds->priv; + struct dsa_port *cpu_dp; + int err, i; + + regmap_write(priv->gswip, GSWIP_SWRES, GSWIP_SWRES_R0); + usleep_range(5000, 10000); + regmap_write(priv->gswip, GSWIP_SWRES, 0); + + /* disable port fetch/store dma on all ports */ + for (i = 0; i < priv->hw_info->max_ports; i++) { + gswip_port_disable(ds, i); + gswip_port_vlan_filtering(ds, i, false, NULL); + } + + /* enable Switch */ + regmap_set_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE); + + err = gswip_pce_load_microcode(priv); + if (err) { + dev_err(priv->dev, "writing PCE microcode failed, %i\n", err); + return err; + } + + /* Default unknown Broadcast/Multicast/Unicast port 
maps */ + regmap_write(priv->gswip, GSWIP_PCE_PMAP1, cpu_ports); + regmap_write(priv->gswip, GSWIP_PCE_PMAP2, cpu_ports); + regmap_write(priv->gswip, GSWIP_PCE_PMAP3, cpu_ports); + + /* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an + * interoperability problem with this auto polling mechanism because + * their status registers think that the link is in a different state + * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set + * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the + * auto polling state machine consider the link being negotiated with + * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads + * to the switch port being completely dead (RX and TX are both not + * working). + * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F + * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes + * it would work fine for a few minutes to hours and then stop, on + * other device it would no traffic could be sent or received at all. + * Testing shows that when PHY auto polling is disabled these problems + * go away. 
+ */ + regmap_write(priv->mdio, GSWIP_MDIO_MDC_CFG0, 0x0); + + /* Configure the MDIO Clock 2.5 MHz */ + regmap_write_bits(priv->mdio, GSWIP_MDIO_MDC_CFG1, 0xff, 0x09); + + /* bring up the mdio bus */ + err = gswip_mdio(priv); + if (err) { + dev_err(priv->dev, "mdio bus setup failed\n"); + return err; + } + + /* Disable the xMII interface and clear it's isolation bit */ + for (i = 0; i < priv->hw_info->max_ports; i++) + gswip_mii_mask_cfg(priv, + GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE, + 0, i); + + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + /* enable special tag insertion on cpu port */ + regmap_set_bits(priv->gswip, GSWIP_FDMA_PCTRLp(cpu_dp->index), + GSWIP_FDMA_PCTRL_STEN); + + /* accept special tag in ingress direction */ + regmap_set_bits(priv->gswip, + GSWIP_PCE_PCTRL_0p(cpu_dp->index), + GSWIP_PCE_PCTRL_0_INGRESS); + } + + regmap_set_bits(priv->gswip, GSWIP_BM_QUEUE_GCTRL, + GSWIP_BM_QUEUE_GCTRL_GL_MOD); + + /* VLAN aware Switching */ + regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0, + GSWIP_PCE_GCTRL_0_VLAN); + + /* Flush MAC Table */ + regmap_set_bits(priv->gswip, GSWIP_PCE_GCTRL_0, + GSWIP_PCE_GCTRL_0_MTFL); + + err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0, + GSWIP_PCE_GCTRL_0_MTFL); + if (err) { + dev_err(priv->dev, "MAC flushing didn't finish\n"); + return err; + } + + ds->mtu_enforcement_ingress = true; + + return 0; +} + +static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds, + int port, + enum dsa_tag_protocol mp) +{ + struct gswip_priv *priv = ds->priv; + + return priv->hw_info->tag_protocol; +} + +static int gswip_vlan_active_create(struct gswip_priv *priv, + struct net_device *bridge, + int fid, u16 vid) +{ + struct gswip_pce_table_entry vlan_active = {0,}; + unsigned int max_ports = priv->hw_info->max_ports; + int idx = -1; + int err; + int i; + + /* Look for a free slot */ + for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { + if (!priv->vlans[i].bridge) { + idx = i; + break; + } + } + + if (idx == -1) + return 
-ENOSPC; + + if (fid == -1) + fid = idx; + + vlan_active.index = idx; + vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; + vlan_active.key[0] = vid; + vlan_active.val[0] = fid; + vlan_active.valid = true; + + err = gswip_pce_table_entry_write(priv, &vlan_active); + if (err) { + dev_err(priv->dev, "failed to write active VLAN: %d\n", err); + return err; + } + + priv->vlans[idx].bridge = bridge; + priv->vlans[idx].vid = vid; + priv->vlans[idx].fid = fid; + + return idx; +} + +static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx) +{ + struct gswip_pce_table_entry vlan_active = {0,}; + int err; + + vlan_active.index = idx; + vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; + vlan_active.valid = false; + err = gswip_pce_table_entry_write(priv, &vlan_active); + if (err) + dev_err(priv->dev, "failed to delete active VLAN: %d\n", err); + priv->vlans[idx].bridge = NULL; + + return err; +} + +static int gswip_vlan_add(struct gswip_priv *priv, struct net_device *bridge, + int port, u16 vid, bool untagged, bool pvid, + bool vlan_aware) +{ + struct gswip_pce_table_entry vlan_mapping = {0,}; + unsigned int max_ports = priv->hw_info->max_ports; + unsigned int cpu_ports = dsa_cpu_ports(priv->ds); + bool active_vlan_created = false; + int fid = -1, idx = -1; + int i, err; + + /* Check if there is already a page for this bridge */ + for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { + if (priv->vlans[i].bridge == bridge) { + if (vlan_aware) { + if (fid != -1 && fid != priv->vlans[i].fid) + dev_err(priv->dev, "one bridge with multiple flow ids\n"); + fid = priv->vlans[i].fid; + } + if (priv->vlans[i].vid == vid) { + idx = i; + break; + } + } + } + + /* If this bridge is not programmed yet, add a Active VLAN table + * entry in a free slot and prepare the VLAN mapping table entry. 
+ */ + if (idx == -1) { + idx = gswip_vlan_active_create(priv, bridge, fid, vid); + if (idx < 0) + return idx; + active_vlan_created = true; + + vlan_mapping.index = idx; + vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; + } else { + /* Read the existing VLAN mapping entry from the switch */ + vlan_mapping.index = idx; + vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; + err = gswip_pce_table_entry_read(priv, &vlan_mapping); + if (err) { + dev_err(priv->dev, "failed to read VLAN mapping: %d\n", + err); + return err; + } + } + + /* VLAN ID byte, maps to the VLAN ID of vlan active table */ + vlan_mapping.val[0] = vid; + /* Update the VLAN mapping entry and write it to the switch */ + vlan_mapping.val[1] |= cpu_ports; + vlan_mapping.val[1] |= BIT(port); + if (vlan_aware) + vlan_mapping.val[2] |= cpu_ports; + if (untagged) + vlan_mapping.val[2] &= ~BIT(port); + else + vlan_mapping.val[2] |= BIT(port); + err = gswip_pce_table_entry_write(priv, &vlan_mapping); + if (err) { + dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); + /* In case an Active VLAN was creaetd delete it again */ + if (active_vlan_created) + gswip_vlan_active_remove(priv, idx); + return err; + } + + gswip_port_commit_pvid(priv, port); + + return 0; +} + +static int gswip_vlan_remove(struct gswip_priv *priv, + struct net_device *bridge, int port, + u16 vid) +{ + struct gswip_pce_table_entry vlan_mapping = {0,}; + unsigned int max_ports = priv->hw_info->max_ports; + int idx = -1; + int i; + int err; + + /* Check if there is already a page for this bridge */ + for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { + if (priv->vlans[i].bridge == bridge && + priv->vlans[i].vid == vid) { + idx = i; + break; + } + } + + if (idx == -1) { + dev_err(priv->dev, "Port %d cannot find VID %u of bridge %s\n", + port, vid, bridge ? 
bridge->name : "(null)"); + return -ENOENT; + } + + vlan_mapping.index = idx; + vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING; + err = gswip_pce_table_entry_read(priv, &vlan_mapping); + if (err) { + dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err); + return err; + } + + vlan_mapping.val[1] &= ~BIT(port); + vlan_mapping.val[2] &= ~BIT(port); + err = gswip_pce_table_entry_write(priv, &vlan_mapping); + if (err) { + dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err); + return err; + } + + /* In case all ports are removed from the bridge, remove the VLAN */ + if (!(vlan_mapping.val[1] & ~dsa_cpu_ports(priv->ds))) { + err = gswip_vlan_active_remove(priv, idx); + if (err) { + dev_err(priv->dev, "failed to write active VLAN: %d\n", + err); + return err; + } + } + + gswip_port_commit_pvid(priv, port); + + return 0; +} + +static int gswip_port_bridge_join(struct dsa_switch *ds, int port, + struct dsa_bridge bridge, + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) +{ + struct net_device *br = bridge.dev; + struct gswip_priv *priv = ds->priv; + int err; + + /* Set up the VLAN for VLAN-unaware bridging for this port, and remove + * it from the "single-port bridge" through which it was operating as + * standalone. + */ + err = gswip_vlan_add(priv, br, port, GSWIP_VLAN_UNAWARE_PVID, + true, true, false); + if (err) + return err; + + return gswip_add_single_port_br(priv, port, false); +} + +static void gswip_port_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_bridge bridge) +{ + struct net_device *br = bridge.dev; + struct gswip_priv *priv = ds->priv; + + /* Add the port back to the "single-port bridge", and remove it from + * the VLAN-unaware PVID created for this bridge. 
+ */ + gswip_add_single_port_br(priv, port, true); + gswip_vlan_remove(priv, br, port, GSWIP_VLAN_UNAWARE_PVID); +} + +static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); + struct gswip_priv *priv = ds->priv; + unsigned int max_ports = priv->hw_info->max_ports; + int pos = max_ports; + int i, idx = -1; + + /* We only support VLAN filtering on bridges */ + if (!dsa_is_cpu_port(ds, port) && !bridge) + return -EOPNOTSUPP; + + /* Check if there is already a page for this VLAN */ + for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { + if (priv->vlans[i].bridge == bridge && + priv->vlans[i].vid == vlan->vid) { + idx = i; + break; + } + } + + /* If this VLAN is not programmed yet, we have to reserve + * one entry in the VLAN table. Make sure we start at the + * next position round. + */ + if (idx == -1) { + /* Look for a free slot */ + for (; pos < ARRAY_SIZE(priv->vlans); pos++) { + if (!priv->vlans[pos].bridge) { + idx = pos; + pos++; + break; + } + } + + if (idx == -1) { + NL_SET_ERR_MSG_MOD(extack, "No slot in VLAN table"); + return -ENOSPC; + } + } + + return 0; +} + +static int gswip_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); + struct gswip_priv *priv = ds->priv; + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + int err; + + if (vlan->vid == GSWIP_VLAN_UNAWARE_PVID) + return 0; + + err = gswip_port_vlan_prepare(ds, port, vlan, extack); + if (err) + return err; + + /* We have to receive all packets on the CPU port and should not + * do any VLAN filtering here. This is also called with bridge + * NULL and then we do not know for which bridge to configure + * this. 
+ */ + if (dsa_is_cpu_port(ds, port)) + return 0; + + return gswip_vlan_add(priv, bridge, port, vlan->vid, untagged, pvid, + true); +} + +static int gswip_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); + struct gswip_priv *priv = ds->priv; + + if (vlan->vid == GSWIP_VLAN_UNAWARE_PVID) + return 0; + + /* We have to receive all packets on the CPU port and should not + * do any VLAN filtering here. This is also called with bridge + * NULL and then we do not know for which bridge to configure + * this. + */ + if (dsa_is_cpu_port(ds, port)) + return 0; + + return gswip_vlan_remove(priv, bridge, port, vlan->vid); +} + +static void gswip_port_fast_age(struct dsa_switch *ds, int port) +{ + struct gswip_priv *priv = ds->priv; + struct gswip_pce_table_entry mac_bridge = {0,}; + int i; + int err; + + for (i = 0; i < 2048; i++) { + mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; + mac_bridge.index = i; + + err = gswip_pce_table_entry_read(priv, &mac_bridge); + if (err) { + dev_err(priv->dev, "failed to read mac bridge: %d\n", + err); + return; + } + + if (!mac_bridge.valid) + continue; + + if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) + continue; + + if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT, + mac_bridge.val[0])) + continue; + + mac_bridge.valid = false; + err = gswip_pce_table_entry_write(priv, &mac_bridge); + if (err) { + dev_err(priv->dev, "failed to write mac bridge: %d\n", + err); + return; + } + } +} + +static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) +{ + struct gswip_priv *priv = ds->priv; + u32 stp_state; + + switch (state) { + case BR_STATE_DISABLED: + regmap_clear_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port), + GSWIP_SDMA_PCTRL_EN); + return; + case BR_STATE_BLOCKING: + case BR_STATE_LISTENING: + stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN; + break; + case BR_STATE_LEARNING: + stp_state = 
GSWIP_PCE_PCTRL_0_PSTATE_LEARNING; + break; + case BR_STATE_FORWARDING: + stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING; + break; + default: + dev_err(priv->dev, "invalid STP state: %d\n", state); + return; + } + + regmap_set_bits(priv->gswip, GSWIP_SDMA_PCTRLp(port), + GSWIP_SDMA_PCTRL_EN); + regmap_write_bits(priv->gswip, GSWIP_PCE_PCTRL_0p(port), + GSWIP_PCE_PCTRL_0_PSTATE_MASK, + stp_state); +} + +static int gswip_port_fdb(struct dsa_switch *ds, int port, + struct net_device *bridge, const unsigned char *addr, + u16 vid, bool add) +{ + struct gswip_priv *priv = ds->priv; + struct gswip_pce_table_entry mac_bridge = {0,}; + unsigned int max_ports = priv->hw_info->max_ports; + int fid = -1; + int i; + int err; + + for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { + if (priv->vlans[i].bridge == bridge) { + fid = priv->vlans[i].fid; + break; + } + } + + if (fid == -1) { + dev_err(priv->dev, "no FID found for bridge %s\n", + bridge->name); + return -EINVAL; + } + + mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; + mac_bridge.key_mode = true; + mac_bridge.key[0] = addr[5] | (addr[4] << 8); + mac_bridge.key[1] = addr[3] | (addr[2] << 8); + mac_bridge.key[2] = addr[1] | (addr[0] << 8); + mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid); + mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */ + if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2_ETC)) + mac_bridge.val[1] = add ? 
(GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC | + GSWIP_TABLE_MAC_BRIDGE_VAL1_VALID) : 0; + else + mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC; + + mac_bridge.valid = add; + + err = gswip_pce_table_entry_write(priv, &mac_bridge); + if (err) + dev_err(priv->dev, "failed to write mac bridge: %d\n", err); + + return err; +} + +static int gswip_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + if (db.type != DSA_DB_BRIDGE) + return -EOPNOTSUPP; + + return gswip_port_fdb(ds, port, db.bridge.dev, addr, vid, true); +} + +static int gswip_port_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + if (db.type != DSA_DB_BRIDGE) + return -EOPNOTSUPP; + + return gswip_port_fdb(ds, port, db.bridge.dev, addr, vid, false); +} + +static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + struct gswip_priv *priv = ds->priv; + struct gswip_pce_table_entry mac_bridge = {0,}; + unsigned char addr[ETH_ALEN]; + int i; + int err; + + for (i = 0; i < 2048; i++) { + mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE; + mac_bridge.index = i; + + err = gswip_pce_table_entry_read(priv, &mac_bridge); + if (err) { + dev_err(priv->dev, + "failed to read mac bridge entry %d: %d\n", + i, err); + return err; + } + + if (!mac_bridge.valid) + continue; + + addr[5] = mac_bridge.key[0] & 0xff; + addr[4] = (mac_bridge.key[0] >> 8) & 0xff; + addr[3] = mac_bridge.key[1] & 0xff; + addr[2] = (mac_bridge.key[1] >> 8) & 0xff; + addr[1] = mac_bridge.key[2] & 0xff; + addr[0] = (mac_bridge.key[2] >> 8) & 0xff; + if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) { + if (mac_bridge.val[0] & BIT(port)) { + err = cb(addr, 0, true, data); + if (err) + return err; + } + } else { + if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT, + mac_bridge.val[0])) { + err = cb(addr, 0, false, data); + if (err) + return err; + } + } + } + return 0; +} + +static int 
gswip_port_max_mtu(struct dsa_switch *ds, int port) +{ + /* Includes 8 bytes for special header. */ + return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN; +} + +static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) +{ + struct gswip_priv *priv = ds->priv; + + /* CPU port always has maximum mtu of user ports, so use it to set + * switch frame size, including 8 byte special header. + */ + if (dsa_is_cpu_port(ds, port)) { + new_mtu += 8; + regmap_write(priv->gswip, GSWIP_MAC_FLEN, + VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN); + } + + /* Enable MLEN for ports with non-standard MTUs, including the special + * header on the CPU port added above. + */ + if (new_mtu != ETH_DATA_LEN) + regmap_set_bits(priv->gswip, GSWIP_MAC_CTRL_2p(port), + GSWIP_MAC_CTRL_2_MLEN); + else + regmap_clear_bits(priv->gswip, GSWIP_MAC_CTRL_2p(port), + GSWIP_MAC_CTRL_2_MLEN); + + return 0; +} + +static void gswip_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct gswip_priv *priv = ds->priv; + + priv->hw_info->phylink_get_caps(ds, port, config); +} + +static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link) +{ + u32 mdio_phy; + + if (link) + mdio_phy = GSWIP_MDIO_PHY_LINK_UP; + else + mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN; + + regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port), + GSWIP_MDIO_PHY_LINK_MASK, mdio_phy); +} + +static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed, + phy_interface_t interface) +{ + u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0; + + switch (speed) { + case SPEED_10: + mdio_phy = GSWIP_MDIO_PHY_SPEED_M10; + + if (interface == PHY_INTERFACE_MODE_RMII) + mii_cfg = GSWIP_MII_CFG_RATE_M50; + else + mii_cfg = GSWIP_MII_CFG_RATE_M2P5; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; + break; + + case SPEED_100: + mdio_phy = GSWIP_MDIO_PHY_SPEED_M100; + + if (interface == PHY_INTERFACE_MODE_RMII) + mii_cfg = GSWIP_MII_CFG_RATE_M50; + else + mii_cfg = 
GSWIP_MII_CFG_RATE_M25; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII; + break; + + case SPEED_1000: + mdio_phy = GSWIP_MDIO_PHY_SPEED_G1; + + mii_cfg = GSWIP_MII_CFG_RATE_M125; + + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII; + break; + } + + regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port), + GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy); + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port); + regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port), + GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0); +} + +static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex) +{ + u32 mac_ctrl_0, mdio_phy; + + if (duplex == DUPLEX_FULL) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN; + mdio_phy = GSWIP_MDIO_PHY_FDUP_EN; + } else { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS; + mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS; + } + + regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port), + GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0); + regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port), + GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy); +} + +static void gswip_port_set_pause(struct gswip_priv *priv, int port, + bool tx_pause, bool rx_pause) +{ + u32 mac_ctrl_0, mdio_phy; + + if (tx_pause && rx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | + GSWIP_MDIO_PHY_FCONRX_EN; + } else if (tx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN | + GSWIP_MDIO_PHY_FCONRX_DIS; + } else if (rx_pause) { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | + GSWIP_MDIO_PHY_FCONRX_EN; + } else { + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE; + mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS | + GSWIP_MDIO_PHY_FCONRX_DIS; + } + + regmap_write_bits(priv->gswip, GSWIP_MAC_CTRL_0p(port), + GSWIP_MAC_CTRL_0_FCON_MASK, mac_ctrl_0); + regmap_write_bits(priv->mdio, GSWIP_MDIO_PHYp(port), + GSWIP_MDIO_PHY_FCONTX_MASK | GSWIP_MDIO_PHY_FCONRX_MASK, + mdio_phy); +} + +static void gswip_phylink_mac_config(struct phylink_config 
*config, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + int port = dp->index; + u32 miicfg = 0; + + miicfg |= GSWIP_MII_CFG_LDCLKDIS; + + switch (state->interface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + return; + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_INTERNAL: + miicfg |= GSWIP_MII_CFG_MODE_MIIM; + break; + case PHY_INTERFACE_MODE_REVMII: + miicfg |= GSWIP_MII_CFG_MODE_MIIP; + break; + case PHY_INTERFACE_MODE_RMII: + miicfg |= GSWIP_MII_CFG_MODE_RMIIM; + if (of_property_read_bool(dp->dn, "maxlinear,rmii-refclk-out")) + miicfg |= GSWIP_MII_CFG_RMII_CLK; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + miicfg |= GSWIP_MII_CFG_MODE_RGMII; + break; + case PHY_INTERFACE_MODE_GMII: + miicfg |= GSWIP_MII_CFG_MODE_GMII; + break; + default: + dev_err(dp->ds->dev, + "Unsupported interface: %d\n", state->interface); + return; + } + + gswip_mii_mask_cfg(priv, + GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK | + GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS, + miicfg, port); + + gswip_mii_delay_setup(priv, dp, state->interface); +} + +static void gswip_phylink_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + int port = dp->index; + + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port); + + if (!dsa_port_is_cpu(dp)) + gswip_port_set_link(priv, port, false); +} + +static void gswip_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + 
struct gswip_priv *priv = dp->ds->priv; + int port = dp->index; + + if (!dsa_port_is_cpu(dp) || interface != PHY_INTERFACE_MODE_INTERNAL) { + gswip_port_set_link(priv, port, true); + gswip_port_set_speed(priv, port, speed, interface); + gswip_port_set_duplex(priv, port, duplex); + gswip_port_set_pause(priv, port, tx_pause, rx_pause); + } + + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, GSWIP_MII_CFG_EN, port); +} + +static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) + ethtool_puts(&data, gswip_rmon_cnt[i].name); +} + +static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table, + u32 index) +{ + u32 result, val; + int err; + + regmap_write(priv->gswip, GSWIP_BM_RAM_ADDR, index); + regmap_write_bits(priv->gswip, GSWIP_BM_RAM_CTRL, + GSWIP_BM_RAM_CTRL_ADDR_MASK | GSWIP_BM_RAM_CTRL_OPMOD | + GSWIP_BM_RAM_CTRL_BAS, + table | GSWIP_BM_RAM_CTRL_BAS); + + err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL, + GSWIP_BM_RAM_CTRL_BAS); + if (err) { + dev_err(priv->dev, "timeout while reading table: %u, index: %u\n", + table, index); + return 0; + } + + regmap_read(priv->gswip, GSWIP_BM_RAM_VAL(0), &result); + regmap_read(priv->gswip, GSWIP_BM_RAM_VAL(1), &val); + result |= val << 16; + + return result; +} + +static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct gswip_priv *priv = ds->priv; + const struct gswip_rmon_cnt_desc *rmon_cnt; + int i; + u64 high; + + for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) { + rmon_cnt = &gswip_rmon_cnt[i]; + + data[i] = gswip_bcm_ram_entry_read(priv, port, + rmon_cnt->offset); + if (rmon_cnt->size == 2) { + high = gswip_bcm_ram_entry_read(priv, port, + rmon_cnt->offset + 1); + data[i] |= high << 32; + } + } +} + +static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + if (sset != ETH_SS_STATS) + return 0; + + 
return ARRAY_SIZE(gswip_rmon_cnt); +} + +static int gswip_set_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_keee *e) +{ + if (e->tx_lpi_timer > 0x7f) + return -EINVAL; + + return 0; +} + +static void gswip_phylink_mac_disable_tx_lpi(struct phylink_config *config) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + + regmap_clear_bits(priv->gswip, GSWIP_MAC_CTRL_4p(dp->index), + GSWIP_MAC_CTRL_4_LPIEN); +} + +static int gswip_phylink_mac_enable_tx_lpi(struct phylink_config *config, + u32 timer, bool tx_clock_stop) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + + return regmap_update_bits(priv->gswip, GSWIP_MAC_CTRL_4p(dp->index), + GSWIP_MAC_CTRL_4_LPIEN | + GSWIP_MAC_CTRL_4_GWAIT_MASK | + GSWIP_MAC_CTRL_4_WAIT_MASK, + GSWIP_MAC_CTRL_4_LPIEN | + GSWIP_MAC_CTRL_4_GWAIT(timer) | + GSWIP_MAC_CTRL_4_WAIT(timer)); +} + +static bool gswip_support_eee(struct dsa_switch *ds, int port) +{ + struct gswip_priv *priv = ds->priv; + + if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_2)) + return true; + + return false; +} + +static struct phylink_pcs *gswip_phylink_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + + if (priv->hw_info->mac_select_pcs) + return priv->hw_info->mac_select_pcs(config, interface); + + return NULL; +} + +static const struct phylink_mac_ops gswip_phylink_mac_ops = { + .mac_config = gswip_phylink_mac_config, + .mac_link_down = gswip_phylink_mac_link_down, + .mac_link_up = gswip_phylink_mac_link_up, + .mac_disable_tx_lpi = gswip_phylink_mac_disable_tx_lpi, + .mac_enable_tx_lpi = gswip_phylink_mac_enable_tx_lpi, + .mac_select_pcs = gswip_phylink_mac_select_pcs, +}; + +static const struct dsa_switch_ops gswip_switch_ops = { + .get_tag_protocol = gswip_get_tag_protocol, + .setup = gswip_setup, + .port_setup = gswip_port_setup, + 
.port_enable = gswip_port_enable, + .port_disable = gswip_port_disable, + .port_pre_bridge_flags = gswip_port_pre_bridge_flags, + .port_bridge_flags = gswip_port_bridge_flags, + .port_bridge_join = gswip_port_bridge_join, + .port_bridge_leave = gswip_port_bridge_leave, + .port_fast_age = gswip_port_fast_age, + .port_vlan_filtering = gswip_port_vlan_filtering, + .port_vlan_add = gswip_port_vlan_add, + .port_vlan_del = gswip_port_vlan_del, + .port_stp_state_set = gswip_port_stp_state_set, + .port_fdb_add = gswip_port_fdb_add, + .port_fdb_del = gswip_port_fdb_del, + .port_fdb_dump = gswip_port_fdb_dump, + .port_change_mtu = gswip_port_change_mtu, + .port_max_mtu = gswip_port_max_mtu, + .phylink_get_caps = gswip_phylink_get_caps, + .get_strings = gswip_get_strings, + .get_ethtool_stats = gswip_get_ethtool_stats, + .get_sset_count = gswip_get_sset_count, + .set_mac_eee = gswip_set_mac_eee, + .support_eee = gswip_support_eee, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, +}; + +void gswip_disable_switch(struct gswip_priv *priv) +{ + regmap_clear_bits(priv->mdio, GSWIP_MDIO_GLOB, GSWIP_MDIO_GLOB_ENABLE); +} +EXPORT_SYMBOL_GPL(gswip_disable_switch); + +static int gswip_validate_cpu_port(struct dsa_switch *ds) +{ + struct gswip_priv *priv = ds->priv; + struct dsa_port *cpu_dp; + int cpu_port = -1; + + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + if (cpu_port != -1) + return dev_err_probe(ds->dev, -EINVAL, + "only a single CPU port is supported\n"); + + cpu_port = cpu_dp->index; + } + + if (cpu_port == -1) + return dev_err_probe(ds->dev, -EINVAL, "no CPU port defined\n"); + + if (BIT(cpu_port) & ~priv->hw_info->allowed_cpu_ports) + return dev_err_probe(ds->dev, -EINVAL, + "unsupported CPU port defined\n"); + + return 0; +} + +int gswip_probe_common(struct gswip_priv *priv, u32 version) +{ + int err; + + mutex_init(&priv->pce_table_lock); + + priv->ds = devm_kzalloc(priv->dev, sizeof(*priv->ds), GFP_KERNEL); + if (!priv->ds) + 
return -ENOMEM; + + priv->ds->dev = priv->dev; + priv->ds->num_ports = priv->hw_info->max_ports; + priv->ds->ops = &gswip_switch_ops; + priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops; + priv->ds->priv = priv; + + /* The hardware has the 'major/minor' version bytes in the wrong order + * preventing numerical comparisons. Construct a 16-bit unsigned integer + * having the REV field as most significant byte and the MOD field as + * least significant byte. This is effectively swapping the two bytes of + * the version variable, but other than using swab16 it doesn't affect + * the source variable. + */ + priv->version = GSWIP_VERSION_REV(version) << 8 | + GSWIP_VERSION_MOD(version); + + err = dsa_register_switch(priv->ds); + if (err) + return dev_err_probe(priv->dev, err, "dsa switch registration failed\n"); + + err = gswip_validate_cpu_port(priv->ds); + if (err) + goto disable_switch; + + dev_info(priv->dev, "probed GSWIP version %lx mod %lx\n", + GSWIP_VERSION_REV(version), GSWIP_VERSION_MOD(version)); + + return 0; + +disable_switch: + gswip_disable_switch(priv); + dsa_unregister_switch(priv->ds); + + return err; +} +EXPORT_SYMBOL_GPL(gswip_probe_common); + +MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>"); +MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>"); +MODULE_DESCRIPTION("Lantiq / Intel / MaxLinear GSWIP common functions"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.c b/drivers/net/dsa/lantiq/mxl-gsw1xx.c new file mode 100644 index 000000000000..0816c61a47f1 --- /dev/null +++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.c @@ -0,0 +1,733 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* DSA Driver for MaxLinear GSW1xx switch devices + * + * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org> + * Copyright (C) 2023 - 2024 MaxLinear Inc. + * Copyright (C) 2022 Snap One, LLC. All rights reserved. 
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de> + * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2010 Lantiq Deutschland + */ + +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/regmap.h> +#include <net/dsa.h> + +#include "lantiq_gswip.h" +#include "mxl-gsw1xx.h" +#include "mxl-gsw1xx_pce.h" + +struct gsw1xx_priv { + struct mdio_device *mdio_dev; + int smdio_badr; + struct regmap *sgmii; + struct regmap *gpio; + struct regmap *clk; + struct regmap *shell; + struct phylink_pcs pcs; + phy_interface_t tbi_interface; + struct gswip_priv gswip; +}; + +static int gsw1xx_config_smdio_badr(struct gsw1xx_priv *priv, + unsigned int reg) +{ + struct mii_bus *bus = priv->mdio_dev->bus; + int sw_addr = priv->mdio_dev->addr; + int smdio_badr = priv->smdio_badr; + int res; + + if (smdio_badr == GSW1XX_SMDIO_BADR_UNKNOWN || + reg - smdio_badr >= GSW1XX_SMDIO_BADR || + smdio_badr > reg) { + /* Configure the Switch Base Address */ + smdio_badr = reg & ~GENMASK(3, 0); + res = __mdiobus_write(bus, sw_addr, GSW1XX_SMDIO_BADR, smdio_badr); + if (res < 0) { + dev_err(&priv->mdio_dev->dev, + "%s: Error %d, configuring switch base\n", + __func__, res); + return res; + } + priv->smdio_badr = smdio_badr; + } + + return smdio_badr; +} + +static int gsw1xx_regmap_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct gsw1xx_priv *priv = context; + struct mii_bus *bus = priv->mdio_dev->bus; + int sw_addr = priv->mdio_dev->addr; + int smdio_badr; + int res; + + smdio_badr = gsw1xx_config_smdio_badr(priv, reg); + if (smdio_badr < 0) + return smdio_badr; + + res = __mdiobus_read(bus, sw_addr, reg - smdio_badr); + if (res < 0) { + dev_err(&priv->mdio_dev->dev, "%s: Error %d reading 0x%x\n", + __func__, res, reg); + return res; + } + + *val = res; + + return 0; +} + +static int gsw1xx_regmap_write(void *context, unsigned int reg, + unsigned 
int val) +{ + struct gsw1xx_priv *priv = context; + struct mii_bus *bus = priv->mdio_dev->bus; + int sw_addr = priv->mdio_dev->addr; + int smdio_badr; + int res; + + smdio_badr = gsw1xx_config_smdio_badr(priv, reg); + if (smdio_badr < 0) + return smdio_badr; + + res = __mdiobus_write(bus, sw_addr, reg - smdio_badr, val); + if (res < 0) + dev_err(&priv->mdio_dev->dev, + "%s: Error %d, writing 0x%x:0x%x\n", __func__, res, reg, + val); + + return res; +} + +static const struct regmap_bus gsw1xx_regmap_bus = { + .reg_write = gsw1xx_regmap_write, + .reg_read = gsw1xx_regmap_read, +}; + +static void gsw1xx_mdio_regmap_lock(void *mdio_lock) +{ + mutex_lock_nested(mdio_lock, MDIO_MUTEX_NESTED); +} + +static void gsw1xx_mdio_regmap_unlock(void *mdio_lock) +{ + mutex_unlock(mdio_lock); +} + +static unsigned int gsw1xx_pcs_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface) +{ + return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; +} + +static struct gsw1xx_priv *pcs_to_gsw1xx(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct gsw1xx_priv, pcs); +} + +static int gsw1xx_pcs_enable(struct phylink_pcs *pcs) +{ + struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs); + + /* Deassert SGMII shell reset */ + return regmap_clear_bits(priv->shell, GSW1XX_SHELL_RST_REQ, + GSW1XX_RST_REQ_SGMII_SHELL); +} + +static void gsw1xx_pcs_disable(struct phylink_pcs *pcs) +{ + struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs); + + /* Assert SGMII shell reset */ + regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ, + GSW1XX_RST_REQ_SGMII_SHELL); + + priv->tbi_interface = PHY_INTERFACE_MODE_NA; +} + +static void gsw1xx_pcs_get_state(struct phylink_pcs *pcs, + unsigned int neg_mode, + struct phylink_link_state *state) +{ + struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs); + int ret; + u32 val; + + ret = regmap_read(priv->sgmii, GSW1XX_SGMII_TBI_TBISTAT, &val); + if (ret < 0) + return; + + state->link = !!(val & GSW1XX_SGMII_TBI_TBISTAT_LINK); + state->an_complete = !!(val & 
GSW1XX_SGMII_TBI_TBISTAT_AN_COMPLETE); + + ret = regmap_read(priv->sgmii, GSW1XX_SGMII_TBI_LPSTAT, &val); + if (ret < 0) + return; + + state->duplex = (val & GSW1XX_SGMII_TBI_LPSTAT_DUPLEX) ? + DUPLEX_FULL : DUPLEX_HALF; + if (val & GSW1XX_SGMII_TBI_LPSTAT_PAUSE_RX) + state->pause |= MLO_PAUSE_RX; + + if (val & GSW1XX_SGMII_TBI_LPSTAT_PAUSE_TX) + state->pause |= MLO_PAUSE_TX; + + switch (FIELD_GET(GSW1XX_SGMII_TBI_LPSTAT_SPEED, val)) { + case GSW1XX_SGMII_TBI_LPSTAT_SPEED_10: + state->speed = SPEED_10; + break; + case GSW1XX_SGMII_TBI_LPSTAT_SPEED_100: + state->speed = SPEED_100; + break; + case GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000: + state->speed = SPEED_1000; + break; + case GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII: + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + state->speed = SPEED_1000; + else if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + state->speed = SPEED_2500; + else + state->speed = SPEED_UNKNOWN; + break; + } +} + +static int gsw1xx_pcs_phy_xaui_write(struct gsw1xx_priv *priv, u16 addr, + u16 data) +{ + int ret, val; + + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_D, data); + if (ret < 0) + return ret; + + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_A, addr); + if (ret < 0) + return ret; + + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_C, + GSW1XX_SGMII_PHY_WRITE | + GSW1XX_SGMII_PHY_RESET_N); + if (ret < 0) + return ret; + + return regmap_read_poll_timeout(priv->sgmii, GSW1XX_SGMII_PHY_C, + val, val & GSW1XX_SGMII_PHY_STATUS, + 1000, 100000); +} + +static int gsw1xx_pcs_reset(struct gsw1xx_priv *priv) +{ + int ret; + u16 val; + + /* Assert and deassert SGMII shell reset */ + ret = regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ, + GSW1XX_RST_REQ_SGMII_SHELL); + if (ret < 0) + return ret; + + ret = regmap_clear_bits(priv->shell, GSW1XX_SHELL_RST_REQ, + GSW1XX_RST_REQ_SGMII_SHELL); + if (ret < 0) + return ret; + + /* Hardware Bringup FSM Enable */ + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_HWBU_CTRL, + 
GSW1XX_SGMII_PHY_HWBU_CTRL_EN_HWBU_FSM | + GSW1XX_SGMII_PHY_HWBU_CTRL_HW_FSM_EN); + if (ret < 0) + return ret; + + /* Configure SGMII PHY Receiver */ + val = FIELD_PREP(GSW1XX_SGMII_PHY_RX0_CFG2_EQ, + GSW1XX_SGMII_PHY_RX0_CFG2_EQ_DEF) | + GSW1XX_SGMII_PHY_RX0_CFG2_LOS_EN | + GSW1XX_SGMII_PHY_RX0_CFG2_TERM_EN | + FIELD_PREP(GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT, + GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT_DEF); + + /* TODO: Take care of inverted RX pair once generic property is + * available + */ + + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_RX0_CFG2, val); + if (ret < 0) + return ret; + + val = FIELD_PREP(GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL, + GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL_DEF); + + /* TODO: Take care of inverted TX pair once generic property is + * available + */ + + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_PHY_TX0_CFG3, val); + if (ret < 0) + return ret; + + /* Reset and Release TBI */ + val = GSW1XX_SGMII_TBI_TBICTL_INITTBI | GSW1XX_SGMII_TBI_TBICTL_ENTBI | + GSW1XX_SGMII_TBI_TBICTL_CRSTRR | GSW1XX_SGMII_TBI_TBICTL_CRSOFF; + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TBICTL, val); + if (ret < 0) + return ret; + val &= ~GSW1XX_SGMII_TBI_TBICTL_INITTBI; + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TBICTL, val); + if (ret < 0) + return ret; + + /* Release Tx Data Buffers */ + ret = regmap_set_bits(priv->sgmii, GSW1XX_SGMII_PCS_TXB_CTL, + GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB); + if (ret < 0) + return ret; + ret = regmap_clear_bits(priv->sgmii, GSW1XX_SGMII_PCS_TXB_CTL, + GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB); + if (ret < 0) + return ret; + + /* Release Rx Data Buffers */ + ret = regmap_set_bits(priv->sgmii, GSW1XX_SGMII_PCS_RXB_CTL, + GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB); + if (ret < 0) + return ret; + return regmap_clear_bits(priv->sgmii, GSW1XX_SGMII_PCS_RXB_CTL, + GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB); +} + +static int gsw1xx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, + phy_interface_t interface, + const unsigned long 
*advertising, + bool permit_pause_to_mac) +{ + struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs); + u16 txaneg, anegctl, nco_ctrl; + bool reconf = false; + int ret = 0; + + /* do not unnecessarily disrupt link and skip resetting the hardware in + * case the PCS has previously been successfully configured for this + * interface mode + */ + if (priv->tbi_interface == interface) + reconf = true; + + /* mark PCS configuration as incomplete */ + priv->tbi_interface = PHY_INTERFACE_MODE_NA; + + if (!reconf) + ret = gsw1xx_pcs_reset(priv); + + if (ret) + return ret; + + /* override bootstrap pin settings + * OVRANEG sets ANEG Mode, Enable ANEG and restart ANEG to be + * taken from bits ANMODE, ANEGEN, RANEG of the ANEGCTL register. + * OVERABL sets ability bits in tx_config_reg to be taken from + * the TXANEGH and TXANEGL registers. + */ + anegctl = GSW1XX_SGMII_TBI_ANEGCTL_OVRANEG | + GSW1XX_SGMII_TBI_ANEGCTL_OVRABL; + + switch (phylink_get_link_timer_ns(interface)) { + case 10000: + anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT, + GSW1XX_SGMII_TBI_ANEGCTL_LT_10US); + break; + case 1600000: + anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT, + GSW1XX_SGMII_TBI_ANEGCTL_LT_1_6MS); + break; + case 5000000: + anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT, + GSW1XX_SGMII_TBI_ANEGCTL_LT_5MS); + break; + case 10000000: + anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_LT, + GSW1XX_SGMII_TBI_ANEGCTL_LT_10MS); + break; + default: + return -EINVAL; + } + + if (neg_mode & PHYLINK_PCS_NEG_INBAND) + anegctl |= GSW1XX_SGMII_TBI_ANEGCTL_ANEGEN; + + txaneg = phylink_mii_c22_pcs_encode_advertisement(interface, advertising); + + if (interface == PHY_INTERFACE_MODE_SGMII) { + /* lacking a defined reverse-SGMII interface mode this + * driver only supports SGMII (MAC side) for now + */ + anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_ANMODE, + GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_MAC); + txaneg |= ADVERTISE_LPACK; + } else if (interface == PHY_INTERFACE_MODE_1000BASEX || + interface == 
PHY_INTERFACE_MODE_2500BASEX) { + anegctl |= FIELD_PREP(GSW1XX_SGMII_TBI_ANEGCTL_ANMODE, + GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_1000BASEX); + } else { + dev_err(priv->gswip.dev, "%s: wrong interface mode %s\n", + __func__, phy_modes(interface)); + return -EINVAL; + } + + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TXANEGH, + FIELD_GET(GENMASK(15, 8), txaneg)); + if (ret < 0) + return ret; + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_TXANEGL, + FIELD_GET(GENMASK(7, 0), txaneg)); + if (ret < 0) + return ret; + ret = regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_ANEGCTL, anegctl); + if (ret < 0) + return ret; + + if (!reconf) { + /* setup SerDes clock speed */ + if (interface == PHY_INTERFACE_MODE_2500BASEX) + nco_ctrl = GSW1XX_SGMII_2G5 | GSW1XX_SGMII_2G5_NCO2; + else + nco_ctrl = GSW1XX_SGMII_1G | GSW1XX_SGMII_1G_NCO1; + + ret = regmap_update_bits(priv->clk, GSW1XX_CLK_NCO_CTRL, + GSW1XX_SGMII_HSP_MASK | + GSW1XX_SGMII_SEL, + nco_ctrl); + if (ret) + return ret; + + ret = gsw1xx_pcs_phy_xaui_write(priv, 0x30, 0x80); + if (ret) + return ret; + } + + /* PCS configuration has now been completed, store mode to prevent + * disrupting the link in case of future calls of this function for the + * same interface mode. 
+ */ + priv->tbi_interface = interface; + + return 0; +} + +static void gsw1xx_pcs_an_restart(struct phylink_pcs *pcs) +{ + struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs); + + regmap_set_bits(priv->sgmii, GSW1XX_SGMII_TBI_ANEGCTL, + GSW1XX_SGMII_TBI_ANEGCTL_RANEG); +} + +static void gsw1xx_pcs_link_up(struct phylink_pcs *pcs, + unsigned int neg_mode, + phy_interface_t interface, int speed, + int duplex) +{ + struct gsw1xx_priv *priv = pcs_to_gsw1xx(pcs); + u16 lpstat; + + /* When in-band AN is enabled hardware will set lpstat */ + if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) + return; + + /* Force speed and duplex settings */ + if (interface == PHY_INTERFACE_MODE_SGMII) { + if (speed == SPEED_10) + lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED, + GSW1XX_SGMII_TBI_LPSTAT_SPEED_10); + else if (speed == SPEED_100) + lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED, + GSW1XX_SGMII_TBI_LPSTAT_SPEED_100); + else + lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED, + GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000); + } else { + lpstat = FIELD_PREP(GSW1XX_SGMII_TBI_LPSTAT_SPEED, + GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII); + } + + if (duplex == DUPLEX_FULL) + lpstat |= GSW1XX_SGMII_TBI_LPSTAT_DUPLEX; + + regmap_write(priv->sgmii, GSW1XX_SGMII_TBI_LPSTAT, lpstat); +} + +static const struct phylink_pcs_ops gsw1xx_pcs_ops = { + .pcs_inband_caps = gsw1xx_pcs_inband_caps, + .pcs_enable = gsw1xx_pcs_enable, + .pcs_disable = gsw1xx_pcs_disable, + .pcs_get_state = gsw1xx_pcs_get_state, + .pcs_config = gsw1xx_pcs_config, + .pcs_an_restart = gsw1xx_pcs_an_restart, + .pcs_link_up = gsw1xx_pcs_link_up, +}; + +static void gsw1xx_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct gswip_priv *priv = ds->priv; + + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000; + + switch (port) { + case 0: + case 1: + case 2: + case 3: + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); + break; + case 4: 
/* port 4: SGMII */ + __set_bit(PHY_INTERFACE_MODE_SGMII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + config->supported_interfaces); + if (priv->hw_info->supports_2500m) { + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + config->supported_interfaces); + config->mac_capabilities |= MAC_2500FD; + } + return; /* no support for EEE on SGMII port */ + case 5: /* port 5: RGMII or RMII */ + __set_bit(PHY_INTERFACE_MODE_RMII, + config->supported_interfaces); + phy_interface_set_rgmii(config->supported_interfaces); + break; + } + + config->lpi_capabilities = MAC_100FD | MAC_1000FD; + config->lpi_timer_default = 20; + memcpy(config->lpi_interfaces, config->supported_interfaces, + sizeof(config->lpi_interfaces)); +} + +static struct phylink_pcs *gsw1xx_phylink_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *gswip_priv = dp->ds->priv; + struct gsw1xx_priv *gsw1xx_priv = container_of(gswip_priv, + struct gsw1xx_priv, + gswip); + + switch (dp->index) { + case GSW1XX_SGMII_PORT: + return &gsw1xx_priv->pcs; + default: + return NULL; + } +} + +static struct regmap *gsw1xx_regmap_init(struct gsw1xx_priv *priv, + const char *name, + unsigned int reg_base, + unsigned int max_register) +{ + const struct regmap_config config = { + .name = name, + .reg_bits = 16, + .val_bits = 16, + .reg_base = reg_base, + .max_register = max_register, + .lock = gsw1xx_mdio_regmap_lock, + .unlock = gsw1xx_mdio_regmap_unlock, + .lock_arg = &priv->mdio_dev->bus->mdio_lock, + }; + + return devm_regmap_init(&priv->mdio_dev->dev, &gsw1xx_regmap_bus, + priv, &config); +} + +static int gsw1xx_probe(struct mdio_device *mdiodev) +{ + struct device *dev = &mdiodev->dev; + struct gsw1xx_priv *priv; + u32 version; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->mdio_dev = mdiodev; + priv->smdio_badr = 
GSW1XX_SMDIO_BADR_UNKNOWN; + + priv->gswip.dev = dev; + priv->gswip.hw_info = of_device_get_match_data(dev); + if (!priv->gswip.hw_info) + return -EINVAL; + + priv->gswip.gswip = gsw1xx_regmap_init(priv, "switch", + GSW1XX_SWITCH_BASE, 0xfff); + if (IS_ERR(priv->gswip.gswip)) + return PTR_ERR(priv->gswip.gswip); + + priv->gswip.mdio = gsw1xx_regmap_init(priv, "mdio", GSW1XX_MMDIO_BASE, + 0xff); + if (IS_ERR(priv->gswip.mdio)) + return PTR_ERR(priv->gswip.mdio); + + priv->gswip.mii = gsw1xx_regmap_init(priv, "mii", GSW1XX_RGMII_BASE, + 0xff); + if (IS_ERR(priv->gswip.mii)) + return PTR_ERR(priv->gswip.mii); + + priv->sgmii = gsw1xx_regmap_init(priv, "sgmii", GSW1XX_SGMII_BASE, + 0xfff); + if (IS_ERR(priv->sgmii)) + return PTR_ERR(priv->sgmii); + + priv->gpio = gsw1xx_regmap_init(priv, "gpio", GSW1XX_GPIO_BASE, 0xff); + if (IS_ERR(priv->gpio)) + return PTR_ERR(priv->gpio); + + priv->clk = gsw1xx_regmap_init(priv, "clk", GSW1XX_CLK_BASE, 0xff); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->shell = gsw1xx_regmap_init(priv, "shell", GSW1XX_SHELL_BASE, + 0xff); + if (IS_ERR(priv->shell)) + return PTR_ERR(priv->shell); + + priv->pcs.ops = &gsw1xx_pcs_ops; + priv->pcs.poll = true; + __set_bit(PHY_INTERFACE_MODE_SGMII, + priv->pcs.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + priv->pcs.supported_interfaces); + if (priv->gswip.hw_info->supports_2500m) + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + priv->pcs.supported_interfaces); + priv->tbi_interface = PHY_INTERFACE_MODE_NA; + + /* assert SGMII reset to power down SGMII unit */ + ret = regmap_set_bits(priv->shell, GSW1XX_SHELL_RST_REQ, + GSW1XX_RST_REQ_SGMII_SHELL); + if (ret < 0) + return ret; + + /* configure GPIO pin-mux for MMDIO in case of external PHY connected to + * SGMII or RGMII as slave interface + */ + regmap_set_bits(priv->gpio, GPIO_ALTSEL0, 3); + regmap_set_bits(priv->gpio, GPIO_ALTSEL1, 3); + + ret = regmap_read(priv->gswip.gswip, GSWIP_VERSION, &version); + if (ret) + 
return ret; + + ret = gswip_probe_common(&priv->gswip, version); + if (ret) + return ret; + + dev_set_drvdata(dev, &priv->gswip); + + return 0; +} + +static void gsw1xx_remove(struct mdio_device *mdiodev) +{ + struct gswip_priv *priv = dev_get_drvdata(&mdiodev->dev); + + if (!priv) + return; + + gswip_disable_switch(priv); + + dsa_unregister_switch(priv->ds); +} + +static void gsw1xx_shutdown(struct mdio_device *mdiodev) +{ + struct gswip_priv *priv = dev_get_drvdata(&mdiodev->dev); + + if (!priv) + return; + + dev_set_drvdata(&mdiodev->dev, NULL); + + gswip_disable_switch(priv); +} + +static const struct gswip_hw_info gsw12x_data = { + .max_ports = GSW1XX_PORTS, + .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT), + .mii_ports = BIT(GSW1XX_MII_PORT), + .mii_port_reg_offset = -GSW1XX_MII_PORT, + .mac_select_pcs = gsw1xx_phylink_mac_select_pcs, + .phylink_get_caps = &gsw1xx_phylink_get_caps, + .supports_2500m = true, + .pce_microcode = &gsw1xx_pce_microcode, + .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode), + .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX, +}; + +static const struct gswip_hw_info gsw140_data = { + .max_ports = GSW1XX_PORTS, + .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT), + .mii_ports = BIT(GSW1XX_MII_PORT), + .mii_port_reg_offset = -GSW1XX_MII_PORT, + .mac_select_pcs = gsw1xx_phylink_mac_select_pcs, + .phylink_get_caps = &gsw1xx_phylink_get_caps, + .supports_2500m = true, + .pce_microcode = &gsw1xx_pce_microcode, + .pce_microcode_size = ARRAY_SIZE(gsw1xx_pce_microcode), + .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX, +}; + +static const struct gswip_hw_info gsw141_data = { + .max_ports = GSW1XX_PORTS, + .allowed_cpu_ports = BIT(GSW1XX_MII_PORT) | BIT(GSW1XX_SGMII_PORT), + .mii_ports = BIT(GSW1XX_MII_PORT), + .mii_port_reg_offset = -GSW1XX_MII_PORT, + .mac_select_pcs = gsw1xx_phylink_mac_select_pcs, + .phylink_get_caps = gsw1xx_phylink_get_caps, + .pce_microcode = &gsw1xx_pce_microcode, + .pce_microcode_size = 
ARRAY_SIZE(gsw1xx_pce_microcode), + .tag_protocol = DSA_TAG_PROTO_MXL_GSW1XX, +}; + +/* + * GSW125 is the industrial temperature version of GSW120. + * GSW145 is the industrial temperature version of GSW140. + */ +static const struct of_device_id gsw1xx_of_match[] = { + { .compatible = "maxlinear,gsw120", .data = &gsw12x_data }, + { .compatible = "maxlinear,gsw125", .data = &gsw12x_data }, + { .compatible = "maxlinear,gsw140", .data = &gsw140_data }, + { .compatible = "maxlinear,gsw141", .data = &gsw141_data }, + { .compatible = "maxlinear,gsw145", .data = &gsw140_data }, + { /* sentinel */ }, +}; + +MODULE_DEVICE_TABLE(of, gsw1xx_of_match); + +static struct mdio_driver gsw1xx_driver = { + .probe = gsw1xx_probe, + .remove = gsw1xx_remove, + .shutdown = gsw1xx_shutdown, + .mdiodrv.driver = { + .name = "mxl-gsw1xx", + .of_match_table = gsw1xx_of_match, + }, +}; + +mdio_module_driver(gsw1xx_driver); + +MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>"); +MODULE_DESCRIPTION("Driver for MaxLinear GSW1xx ethernet switch"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx.h b/drivers/net/dsa/lantiq/mxl-gsw1xx.h new file mode 100644 index 000000000000..38e03c048a26 --- /dev/null +++ b/drivers/net/dsa/lantiq/mxl-gsw1xx.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Register definitions for MaxLinear GSW1xx series switches + * + * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org> + * Copyright (C) 2023 - 2024 MaxLinear Inc. 
+ */ +#ifndef __MXL_GSW1XX_H +#define __MXL_GSW1XX_H + +#include <linux/bitfield.h> + +#define GSW1XX_PORTS 6 +/* Port used for RGMII or optional RMII */ +#define GSW1XX_MII_PORT 5 +/* Port used for SGMII */ +#define GSW1XX_SGMII_PORT 4 + +#define GSW1XX_SYS_CLK_FREQ 340000000 + +/* SMDIO switch register base address */ +#define GSW1XX_SMDIO_BADR 0x1f +#define GSW1XX_SMDIO_BADR_UNKNOWN -1 + +/* GSW1XX SGMII PCS */ +#define GSW1XX_SGMII_BASE 0xd000 +#define GSW1XX_SGMII_PHY_HWBU_CTRL 0x009 +#define GSW1XX_SGMII_PHY_HWBU_CTRL_EN_HWBU_FSM BIT(0) +#define GSW1XX_SGMII_PHY_HWBU_CTRL_HW_FSM_EN BIT(3) +#define GSW1XX_SGMII_TBI_TXANEGH 0x300 +#define GSW1XX_SGMII_TBI_TXANEGL 0x301 +#define GSW1XX_SGMII_TBI_ANEGCTL 0x304 +#define GSW1XX_SGMII_TBI_ANEGCTL_LT GENMASK(1, 0) +#define GSW1XX_SGMII_TBI_ANEGCTL_LT_10US 0 +#define GSW1XX_SGMII_TBI_ANEGCTL_LT_1_6MS 1 +#define GSW1XX_SGMII_TBI_ANEGCTL_LT_5MS 2 +#define GSW1XX_SGMII_TBI_ANEGCTL_LT_10MS 3 +#define GSW1XX_SGMII_TBI_ANEGCTL_ANEGEN BIT(2) +#define GSW1XX_SGMII_TBI_ANEGCTL_RANEG BIT(3) +#define GSW1XX_SGMII_TBI_ANEGCTL_OVRABL BIT(4) +#define GSW1XX_SGMII_TBI_ANEGCTL_OVRANEG BIT(5) +#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE GENMASK(7, 6) +#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_1000BASEX 1 +#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_PHY 2 +#define GSW1XX_SGMII_TBI_ANEGCTL_ANMODE_SGMII_MAC 3 +#define GSW1XX_SGMII_TBI_ANEGCTL_BCOMP BIT(15) + +#define GSW1XX_SGMII_TBI_TBICTL 0x305 +#define GSW1XX_SGMII_TBI_TBICTL_INITTBI BIT(0) +#define GSW1XX_SGMII_TBI_TBICTL_ENTBI BIT(1) +#define GSW1XX_SGMII_TBI_TBICTL_CRSTRR BIT(4) +#define GSW1XX_SGMII_TBI_TBICTL_CRSOFF BIT(5) +#define GSW1XX_SGMII_TBI_TBISTAT 0x309 +#define GSW1XX_SGMII_TBI_TBISTAT_LINK BIT(0) +#define GSW1XX_SGMII_TBI_TBISTAT_AN_COMPLETE BIT(1) +#define GSW1XX_SGMII_TBI_LPSTAT 0x30a +#define GSW1XX_SGMII_TBI_LPSTAT_DUPLEX BIT(0) +#define GSW1XX_SGMII_TBI_LPSTAT_PAUSE_RX BIT(1) +#define GSW1XX_SGMII_TBI_LPSTAT_PAUSE_TX BIT(2) +#define GSW1XX_SGMII_TBI_LPSTAT_SPEED 
GENMASK(6, 5) +#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_10 0 +#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_100 1 +#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_1000 2 +#define GSW1XX_SGMII_TBI_LPSTAT_SPEED_NOSGMII 3 +#define GSW1XX_SGMII_PHY_D 0x100 +#define GSW1XX_SGMII_PHY_A 0x101 +#define GSW1XX_SGMII_PHY_C 0x102 +#define GSW1XX_SGMII_PHY_STATUS BIT(0) +#define GSW1XX_SGMII_PHY_READ BIT(4) +#define GSW1XX_SGMII_PHY_WRITE BIT(8) +#define GSW1XX_SGMII_PHY_RESET_N BIT(12) +#define GSW1XX_SGMII_PCS_RXB_CTL 0x401 +#define GSW1XX_SGMII_PCS_RXB_CTL_INIT_RX_RXB BIT(1) +#define GSW1XX_SGMII_PCS_TXB_CTL 0x404 +#define GSW1XX_SGMII_PCS_TXB_CTL_INIT_TX_TXB BIT(1) + +#define GSW1XX_SGMII_PHY_RX0_CFG2 0x004 +#define GSW1XX_SGMII_PHY_RX0_CFG2_EQ GENMASK(2, 0) +#define GSW1XX_SGMII_PHY_RX0_CFG2_EQ_DEF 2 +#define GSW1XX_SGMII_PHY_RX0_CFG2_INVERT BIT(3) +#define GSW1XX_SGMII_PHY_RX0_CFG2_LOS_EN BIT(4) +#define GSW1XX_SGMII_PHY_RX0_CFG2_TERM_EN BIT(5) +#define GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT GENMASK(12, 6) +#define GSW1XX_SGMII_PHY_RX0_CFG2_FILT_CNT_DEF 20 + +#define GSW1XX_SGMII_PHY_TX0_CFG3 0x007 +#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_EN BIT(12) +#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL GENMASK(11, 9) +#define GSW1XX_SGMII_PHY_TX0_CFG3_VBOOST_LEVEL_DEF 4 +#define GSW1XX_SGMII_PHY_TX0_CFG3_INVERT BIT(8) + +/* GSW1XX PDI Registers */ +#define GSW1XX_SWITCH_BASE 0xe000 + +/* GSW1XX MII Registers */ +#define GSW1XX_RGMII_BASE 0xf100 + +/* GSW1XX GPIO Registers */ +#define GSW1XX_GPIO_BASE 0xf300 +#define GPIO_ALTSEL0 0x83 +#define GPIO_ALTSEL0_EXTPHY_MUX_VAL 0x03c3 +#define GPIO_ALTSEL1 0x84 +#define GPIO_ALTSEL1_EXTPHY_MUX_VAL 0x003f + +/* MDIO bus controller */ +#define GSW1XX_MMDIO_BASE 0xf400 + +/* generic IC registers */ +#define GSW1XX_SHELL_BASE 0xfa00 +#define GSW1XX_SHELL_RST_REQ 0x01 +#define GSW1XX_RST_REQ_SGMII_SHELL BIT(5) +/* RGMII PAD Slew Control Register */ +#define GSW1XX_SHELL_RGMII_SLEW_CFG 0x78 +#define RGMII_SLEW_CFG_RX_2_5_V BIT(4) +#define RGMII_SLEW_CFG_TX_2_5_V 
BIT(5) + +/* SGMII clock related settings */ +#define GSW1XX_CLK_BASE 0xf900 +#define GSW1XX_CLK_NCO_CTRL 0x68 +#define GSW1XX_SGMII_HSP_MASK GENMASK(3, 2) +#define GSW1XX_SGMII_SEL BIT(1) +#define GSW1XX_SGMII_1G 0x0 +#define GSW1XX_SGMII_2G5 0xc +#define GSW1XX_SGMII_1G_NCO1 0x0 +#define GSW1XX_SGMII_2G5_NCO2 0x2 + +#endif /* __MXL_GSW1XX_H */ diff --git a/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h b/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h new file mode 100644 index 000000000000..eefcd411a340 --- /dev/null +++ b/drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PCE microcode code update for driver for MaxLinear GSW1xx switch chips + * + * Copyright (C) 2023 - 2024 MaxLinear Inc. + * Copyright (C) 2022 Snap One, LLC. All rights reserved. + * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de> + * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2010 Lantiq Deutschland + */ + +#include "lantiq_gswip.h" + +#define INSTR 0 +#define IPV6 1 +#define LENACCU 2 + +/* GSWIP_2.X */ +enum { + OUT_MAC0 = 0, + OUT_MAC1, + OUT_MAC2, + OUT_MAC3, + OUT_MAC4, + OUT_MAC5, + OUT_ETHTYP, + OUT_VTAG0, + OUT_VTAG1, + OUT_ITAG0, + OUT_ITAG1, /* 10 */ + OUT_ITAG2, + OUT_ITAG3, + OUT_IP0, + OUT_IP1, + OUT_IP2, + OUT_IP3, + OUT_SIP0, + OUT_SIP1, + OUT_SIP2, + OUT_SIP3, /* 20 */ + OUT_SIP4, + OUT_SIP5, + OUT_SIP6, + OUT_SIP7, + OUT_DIP0, + OUT_DIP1, + OUT_DIP2, + OUT_DIP3, + OUT_DIP4, + OUT_DIP5, /* 30 */ + OUT_DIP6, + OUT_DIP7, + OUT_SESID, + OUT_PROT, + OUT_APP0, + OUT_APP1, + OUT_IGMP0, + OUT_IGMP1, + OUT_STAG0 = 61, + OUT_STAG1 = 62, + OUT_NONE = 63, +}; + +/* parser's microcode flag type */ +enum { + FLAG_ITAG = 0, + FLAG_VLAN, + FLAG_SNAP, + FLAG_PPPOE, + FLAG_IPV6, + FLAG_IPV6FL, + FLAG_IPV4, + FLAG_IGMP, + FLAG_TU, + FLAG_HOP, + FLAG_NN1, /* 10 */ + FLAG_NN2, + FLAG_END, + FLAG_NO, /* 13 */ + FLAG_SVLAN, /* 14 */ +}; + +#define PCE_MC_M(val, msk, ns, out, len, type, flags, ipv4_len) \ + { (val), (msk), 
((ns) << 10 | (out) << 4 | (len) >> 1),\ + ((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 } + +/* V22_2X (IPv6 issue fixed) */ +static const struct gswip_pce_microcode gsw1xx_pce_microcode[] = { + /* value mask ns fields L type flags ipv4_len */ + PCE_MC_M(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0), + PCE_MC_M(0x8100, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0), + PCE_MC_M(0x88A8, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0), + PCE_MC_M(0x9100, 0xFFFF, 4, OUT_STAG0, 2, INSTR, FLAG_SVLAN, 0), + PCE_MC_M(0x8100, 0xFFFF, 5, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0), + PCE_MC_M(0x88A8, 0xFFFF, 6, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0), + PCE_MC_M(0x9100, 0xFFFF, 4, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0), + PCE_MC_M(0x8864, 0xFFFF, 20, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0800, 0xFFFF, 24, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x86DD, 0xFFFF, 25, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x8863, 0xFFFF, 19, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0xF800, 13, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 44, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0600, 0x0600, 44, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 15, OUT_NONE, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0xAAAA, 0xFFFF, 17, OUT_NONE, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0300, 0xFF00, 45, OUT_NONE, 0, INSTR, FLAG_SNAP, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_DIP7, 3, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 21, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0), + PCE_MC_M(0x0021, 0xFFFF, 24, OUT_NONE, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0057, 0xFFFF, 25, OUT_NONE, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x4000, 0xF000, 27, OUT_IP0, 4, INSTR, FLAG_IPV4, 1), + PCE_MC_M(0x6000, 0xF000, 30, OUT_IP0, 3, INSTR, FLAG_IPV6, 0), + PCE_MC_M(0x0000, 0x0000, 45, 
OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 28, OUT_IP3, 2, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 29, OUT_SIP0, 4, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, LENACCU, FLAG_NO, 0), + PCE_MC_M(0x1100, 0xFF00, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0600, 0xFF00, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_HOP, 0), + PCE_MC_M(0x2B00, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_NN1, 0), + PCE_MC_M(0x3C00, 0xFF00, 36, OUT_IP3, 17, INSTR, FLAG_NN2, 0), + PCE_MC_M(0x0000, 0x0000, 43, OUT_PROT, 1, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x00F0, 38, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_HOP, 0), + PCE_MC_M(0x2B00, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_NN1, 0), + PCE_MC_M(0x3C00, 0xFF00, 36, OUT_NONE, 0, IPV6, FLAG_NN2, 0), + PCE_MC_M(0x0000, 0x00FC, 44, OUT_PROT, 0, IPV6, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 44, OUT_NONE, 0, IPV6, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 44, OUT_SIP0, 16, INSTR, FLAG_NO, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_APP0, 4, INSTR, FLAG_IGMP, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 
45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), + PCE_MC_M(0x0000, 0x0000, 45, OUT_NONE, 0, INSTR, FLAG_END, 0), +}; diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 5df8f153d511..5facffbb9c9a 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -244,7 +244,7 @@ static int ksz9477_pcs_read(struct mii_bus *bus, int phy, int mmd, int reg) p->phydev.link = 0; } } else if (reg == MII_BMSR) { - p->phydev.link = (val & BMSR_LSTATUS); + p->phydev.link = !!(val & BMSR_LSTATUS); } } diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 548b85befbf4..b9423389c2ef 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -3254,7 +3254,7 @@ static int mt7988_setup(struct dsa_switch *ds) return mt7531_setup_common(ds); } -const struct dsa_switch_ops mt7530_switch_ops = { +static const struct dsa_switch_ops mt7530_switch_ops = { .get_tag_protocol = mtk_get_tag_protocol, .setup = mt753x_setup, .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port, @@ -3290,8 +3290,9 @@ const struct dsa_switch_ops mt7530_switch_ops = { .set_mac_eee = mt753x_set_mac_eee, .conduit_state_change = mt753x_conduit_state_change, .port_setup_tc = mt753x_setup_tc, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; -EXPORT_SYMBOL_GPL(mt7530_switch_ops); static const struct phylink_mac_ops mt753x_phylink_mac_ops = { .mac_select_pcs = mt753x_phylink_mac_select_pcs, diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 7e47cd9af256..3e0090bed298 100644 --- a/drivers/net/dsa/mt7530.h +++ 
b/drivers/net/dsa/mt7530.h @@ -939,7 +939,6 @@ static inline void INIT_MT7530_DUMMY_POLL(struct mt7530_dummy_poll *p, int mt7530_probe_common(struct mt7530_priv *priv); void mt7530_remove_common(struct mt7530_priv *priv); -extern const struct dsa_switch_ops mt7530_switch_ops; extern const struct mt753x_info mt753x_table[]; #endif /* __MT7530_H */ diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 294312b58e4f..9c8ac14cd4f5 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -297,6 +297,8 @@ static const struct dsa_switch_ops mv88e6060_switch_ops = { .phy_read = mv88e6060_phy_read, .phy_write = mv88e6060_phy_write, .phylink_get_caps = mv88e6060_phylink_get_caps, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; static int mv88e6060_probe(struct mdio_device *mdiodev) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 20ab558fde24..9e5ede932b42 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -1233,6 +1233,7 @@ static int felix_port_enable(struct dsa_switch *ds, int port, { struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); if (!dsa_port_is_user(dp)) return 0; @@ -1246,7 +1247,25 @@ static int felix_port_enable(struct dsa_switch *ds, int port, } } - return 0; + if (!dp->hsr_dev || felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) + return 0; + + return dsa_port_simple_hsr_join(ds, port, dp->hsr_dev, NULL); +} + +static void felix_port_disable(struct dsa_switch *ds, int port) +{ + struct dsa_port *dp = dsa_to_port(ds, port); + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + if (!dsa_port_is_user(dp)) + return; + + if (!dp->hsr_dev || felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) + return; + + dsa_port_simple_hsr_leave(ds, port, dp->hsr_dev); } static void felix_port_qos_map_init(struct ocelot 
*ocelot, int port) @@ -2232,6 +2251,52 @@ static void felix_get_mm_stats(struct dsa_switch *ds, int port, ocelot_port_get_mm_stats(ocelot, port, stats); } +/* Depending on port type, we may be able to support the offload later (with + * the "ocelot"/"seville" tagging protocols), or never. + * If we return 0, the dp->hsr_dev reference is kept for later; if we return + * -EOPNOTSUPP, it is cleared (which helps to not bother + * dsa_port_simple_hsr_leave() with an offload that didn't pass validation). + */ +static int felix_port_hsr_join(struct dsa_switch *ds, int port, + struct net_device *hsr, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) { + int err; + + err = dsa_port_simple_hsr_validate(ds, port, hsr, extack); + if (err) + return err; + + NL_SET_ERR_MSG_MOD(extack, + "Offloading not supported with \"ocelot-8021q\""); + return 0; + } + + if (!(dsa_to_port(ds, port)->user->flags & IFF_UP)) + return 0; + + return dsa_port_simple_hsr_join(ds, port, hsr, extack); +} + +static int felix_port_hsr_leave(struct dsa_switch *ds, int port, + struct net_device *hsr) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) + return 0; + + if (!(dsa_to_port(ds, port)->user->flags & IFF_UP)) + return 0; + + return dsa_port_simple_hsr_leave(ds, port, hsr); +} + static const struct phylink_mac_ops felix_phylink_mac_ops = { .mac_select_pcs = felix_phylink_mac_select_pcs, .mac_config = felix_phylink_mac_config, @@ -2262,6 +2327,7 @@ static const struct dsa_switch_ops felix_switch_ops = { .get_ts_info = felix_get_ts_info, .phylink_get_caps = felix_phylink_get_caps, .port_enable = felix_port_enable, + .port_disable = felix_port_disable, .port_fast_age = felix_port_fast_age, .port_fdb_dump = felix_fdb_dump, .port_fdb_add = felix_fdb_add, @@ -2318,6 +2384,8 @@ static 
const struct dsa_switch_ops felix_switch_ops = { .port_del_dscp_prio = felix_port_del_dscp_prio, .port_set_host_flood = felix_port_set_host_flood, .port_change_conduit = felix_port_change_conduit, + .port_hsr_join = felix_port_hsr_join, + .port_hsr_leave = felix_port_hsr_leave, }; int felix_register_switch(struct device *dev, resource_size_t switch_base, diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index 964a56ee16cc..c575e164368c 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -2134,6 +2134,8 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops = { .get_stats64 = rtl8365mb_get_stats64, .port_change_mtu = rtl8365mb_port_change_mtu, .port_max_mtu = rtl8365mb_port_max_mtu, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; static const struct realtek_ops rtl8365mb_ops = { diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index 8bdb52b5fdcb..d96ae72b0a5c 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -1815,6 +1815,8 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = { .port_fast_age = rtl8366rb_port_fast_age, .port_change_mtu = rtl8366rb_change_mtu, .port_max_mtu = rtl8366rb_max_mtu, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; static const struct realtek_ops rtl8366rb_ops = { diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c index 1635255f58e4..4d857e3be10b 100644 --- a/drivers/net/dsa/rzn1_a5psw.c +++ b/drivers/net/dsa/rzn1_a5psw.c @@ -1035,6 +1035,8 @@ static const struct dsa_switch_ops a5psw_switch_ops = { .port_fdb_add = a5psw_port_fdb_add, .port_fdb_del = a5psw_port_fdb_del, .port_fdb_dump = a5psw_port_fdb_dump, + .port_hsr_join = dsa_port_simple_hsr_join, + .port_hsr_leave = dsa_port_simple_hsr_leave, }; static int a5psw_mdio_wait_busy(struct a5psw *a5psw) diff 
--git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c index 4dbcc49a9e52..0a05f4156ef4 100644 --- a/drivers/net/dsa/xrs700x/xrs700x.c +++ b/drivers/net/dsa/xrs700x/xrs700x.c @@ -566,6 +566,7 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port, struct xrs700x *priv = ds->priv; struct net_device *user; int ret, i, hsr_pair[2]; + enum hsr_port_type type; enum hsr_version ver; bool fwd = false; @@ -589,6 +590,16 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port, return -EOPNOTSUPP; } + ret = hsr_get_port_type(hsr, dsa_to_port(ds, port)->user, &type); + if (ret) + return ret; + + if (type != HSR_PT_SLAVE_A && type != HSR_PT_SLAVE_B) { + NL_SET_ERR_MSG_MOD(extack, + "Only HSR slave ports can be offloaded"); + return -EOPNOTSUPP; + } + dsa_hsr_foreach_port(dp, ds, hsr) { if (dp->index != port) { partner = dp; diff --git a/drivers/net/dsa/yt921x.c b/drivers/net/dsa/yt921x.c new file mode 100644 index 000000000000..1c511f5dc6ab --- /dev/null +++ b/drivers/net/dsa/yt921x.c @@ -0,0 +1,3006 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for Motorcomm YT921x Switch + * + * Should work on YT9213/YT9214/YT9215/YT9218, but only tested on YT9215+SGMII, + * be sure to do your own checks before porting to another chip. 
+ * + * Copyright (c) 2025 David Yang + */ + +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <linux/if_hsr.h> +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <linux/mdio.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> + +#include <net/dsa.h> + +#include "yt921x.h" + +struct yt921x_mib_desc { + unsigned int size; + unsigned int offset; + const char *name; +}; + +#define MIB_DESC(_size, _offset, _name) \ + {_size, _offset, _name} + +/* Must agree with yt921x_mib + * + * Unstructured fields (name != NULL) will appear in get_ethtool_stats(), + * structured go to their *_stats() methods, but we need their sizes and offsets + * to perform 32bit MIB overflow wraparound. + */ +static const struct yt921x_mib_desc yt921x_mib_descs[] = { + MIB_DESC(1, YT921X_MIB_DATA_RX_BROADCAST, NULL), + MIB_DESC(1, YT921X_MIB_DATA_RX_PAUSE, NULL), + MIB_DESC(1, YT921X_MIB_DATA_RX_MULTICAST, NULL), + MIB_DESC(1, YT921X_MIB_DATA_RX_CRC_ERR, NULL), + + MIB_DESC(1, YT921X_MIB_DATA_RX_ALIGN_ERR, NULL), + MIB_DESC(1, YT921X_MIB_DATA_RX_UNDERSIZE_ERR, NULL), + MIB_DESC(1, YT921X_MIB_DATA_RX_FRAG_ERR, NULL), + MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_64, NULL), + + MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_65_TO_127, NULL), + MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_128_TO_255, NULL), + MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_256_TO_511, NULL), + MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_512_TO_1023, NULL), + + MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1024_TO_1518, NULL), + MIB_DESC(1, YT921X_MIB_DATA_RX_PKT_SZ_1519_TO_MAX, NULL), + MIB_DESC(2, YT921X_MIB_DATA_RX_GOOD_BYTES, NULL), + + MIB_DESC(2, YT921X_MIB_DATA_RX_BAD_BYTES, "RxBadBytes"), + MIB_DESC(1, YT921X_MIB_DATA_RX_OVERSIZE_ERR, NULL), + + MIB_DESC(1, YT921X_MIB_DATA_RX_DROPPED, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_BROADCAST, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_PAUSE, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_MULTICAST, NULL), + + MIB_DESC(1, 
YT921X_MIB_DATA_TX_UNDERSIZE_ERR, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_64, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_65_TO_127, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_128_TO_255, NULL), + + MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_256_TO_511, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_512_TO_1023, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1024_TO_1518, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_PKT_SZ_1519_TO_MAX, NULL), + + MIB_DESC(2, YT921X_MIB_DATA_TX_GOOD_BYTES, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_COLLISION, NULL), + + MIB_DESC(1, YT921X_MIB_DATA_TX_EXCESSIVE_COLLISION, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_MULTIPLE_COLLISION, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_SINGLE_COLLISION, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_PKT, NULL), + + MIB_DESC(1, YT921X_MIB_DATA_TX_DEFERRED, NULL), + MIB_DESC(1, YT921X_MIB_DATA_TX_LATE_COLLISION, NULL), + MIB_DESC(1, YT921X_MIB_DATA_RX_OAM, "RxOAM"), + MIB_DESC(1, YT921X_MIB_DATA_TX_OAM, "TxOAM"), +}; + +struct yt921x_info { + const char *name; + u16 major; + /* Unknown, seems to be plain enumeration */ + u8 mode; + u8 extmode; + /* Ports with integral GbE PHYs, not including MCU Port 10 */ + u16 internal_mask; + /* TODO: see comments in yt921x_dsa_phylink_get_caps() */ + u16 external_mask; +}; + +#define YT921X_PORT_MASK_INTn(port) BIT(port) +#define YT921X_PORT_MASK_INT0_n(n) GENMASK((n) - 1, 0) +#define YT921X_PORT_MASK_EXT0 BIT(8) +#define YT921X_PORT_MASK_EXT1 BIT(9) + +static const struct yt921x_info yt921x_infos[] = { + { + "YT9215SC", YT9215_MAJOR, 1, 0, + YT921X_PORT_MASK_INT0_n(5), + YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1, + }, + { + "YT9215S", YT9215_MAJOR, 2, 0, + YT921X_PORT_MASK_INT0_n(5), + YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1, + }, + { + "YT9215RB", YT9215_MAJOR, 3, 0, + YT921X_PORT_MASK_INT0_n(5), + YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1, + }, + { + "YT9214NB", YT9215_MAJOR, 3, 2, + YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3), + 
YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1, + }, + { + "YT9213NB", YT9215_MAJOR, 3, 3, + YT921X_PORT_MASK_INTn(1) | YT921X_PORT_MASK_INTn(3), + YT921X_PORT_MASK_EXT1, + }, + { + "YT9218N", YT9218_MAJOR, 0, 0, + YT921X_PORT_MASK_INT0_n(8), + 0, + }, + { + "YT9218MB", YT9218_MAJOR, 1, 0, + YT921X_PORT_MASK_INT0_n(8), + YT921X_PORT_MASK_EXT0 | YT921X_PORT_MASK_EXT1, + }, + {} +}; + +#define YT921X_NAME "yt921x" + +#define YT921X_VID_UNWARE 4095 + +#define YT921X_POLL_SLEEP_US 10000 +#define YT921X_POLL_TIMEOUT_US 100000 + +/* The interval should be small enough to avoid overflow of 32bit MIBs. + * + * Until we can read MIBs from stats64 call directly (i.e. sleep + * there), we have to poll stats more frequently then it is actually needed. + * For overflow protection, normally, 100 sec interval should have been OK. + */ +#define YT921X_STATS_INTERVAL_JIFFIES (3 * HZ) + +struct yt921x_reg_mdio { + struct mii_bus *bus; + int addr; + /* SWITCH_ID_1 / SWITCH_ID_0 of the device + * + * This is a way to multiplex multiple devices on the same MII phyaddr + * and should be configurable in DT. However, MDIO core simply doesn't + * allow multiple devices over one reg addr, so this is a fixed value + * for now until a solution is found. + * + * Keep this because we need switchid to form MII regaddrs anyway. 
+ */ + unsigned char switchid; +}; + +/* TODO: SPI/I2C */ + +#define to_yt921x_priv(_ds) container_of_const(_ds, struct yt921x_priv, ds) +#define to_device(priv) ((priv)->ds.dev) + +static int yt921x_reg_read(struct yt921x_priv *priv, u32 reg, u32 *valp) +{ + WARN_ON(!mutex_is_locked(&priv->reg_lock)); + + return priv->reg_ops->read(priv->reg_ctx, reg, valp); +} + +static int yt921x_reg_write(struct yt921x_priv *priv, u32 reg, u32 val) +{ + WARN_ON(!mutex_is_locked(&priv->reg_lock)); + + return priv->reg_ops->write(priv->reg_ctx, reg, val); +} + +static int +yt921x_reg_wait(struct yt921x_priv *priv, u32 reg, u32 mask, u32 *valp) +{ + u32 val; + int res; + int ret; + + ret = read_poll_timeout(yt921x_reg_read, res, + res || (val & mask) == *valp, + YT921X_POLL_SLEEP_US, YT921X_POLL_TIMEOUT_US, + false, priv, reg, &val); + if (ret) + return ret; + if (res) + return res; + + *valp = val; + return 0; +} + +static int +yt921x_reg_update_bits(struct yt921x_priv *priv, u32 reg, u32 mask, u32 val) +{ + int res; + u32 v; + u32 u; + + res = yt921x_reg_read(priv, reg, &v); + if (res) + return res; + + u = v; + u &= ~mask; + u |= val; + if (u == v) + return 0; + + return yt921x_reg_write(priv, reg, u); +} + +static int yt921x_reg_set_bits(struct yt921x_priv *priv, u32 reg, u32 mask) +{ + return yt921x_reg_update_bits(priv, reg, 0, mask); +} + +static int yt921x_reg_clear_bits(struct yt921x_priv *priv, u32 reg, u32 mask) +{ + return yt921x_reg_update_bits(priv, reg, mask, 0); +} + +static int +yt921x_reg_toggle_bits(struct yt921x_priv *priv, u32 reg, u32 mask, bool set) +{ + return yt921x_reg_update_bits(priv, reg, mask, !set ? 0 : mask); +} + +/* Some registers, like VLANn_CTRL, should always be written in 64-bit, even if + * you are to write only the lower / upper 32 bits. + * + * There is no such restriction for reading, but we still provide 64-bit read + * wrappers so that we always handle u64 values. 
+ */ + +static int yt921x_reg64_read(struct yt921x_priv *priv, u32 reg, u64 *valp) +{ + u32 lo; + u32 hi; + int res; + + res = yt921x_reg_read(priv, reg, &lo); + if (res) + return res; + res = yt921x_reg_read(priv, reg + 4, &hi); + if (res) + return res; + + *valp = ((u64)hi << 32) | lo; + return 0; +} + +static int yt921x_reg64_write(struct yt921x_priv *priv, u32 reg, u64 val) +{ + int res; + + res = yt921x_reg_write(priv, reg, (u32)val); + if (res) + return res; + return yt921x_reg_write(priv, reg + 4, (u32)(val >> 32)); +} + +static int +yt921x_reg64_update_bits(struct yt921x_priv *priv, u32 reg, u64 mask, u64 val) +{ + int res; + u64 v; + u64 u; + + res = yt921x_reg64_read(priv, reg, &v); + if (res) + return res; + + u = v; + u &= ~mask; + u |= val; + if (u == v) + return 0; + + return yt921x_reg64_write(priv, reg, u); +} + +static int yt921x_reg64_clear_bits(struct yt921x_priv *priv, u32 reg, u64 mask) +{ + return yt921x_reg64_update_bits(priv, reg, mask, 0); +} + +static int yt921x_reg_mdio_read(void *context, u32 reg, u32 *valp) +{ + struct yt921x_reg_mdio *mdio = context; + struct mii_bus *bus = mdio->bus; + int addr = mdio->addr; + u32 reg_addr; + u32 reg_data; + u32 val; + int res; + + /* Hold the mdio bus lock to avoid (un)locking for 4 times */ + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + + reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR | + YT921X_SMI_READ; + res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16)); + if (res) + goto end; + res = __mdiobus_write(bus, addr, reg_addr, (u16)reg); + if (res) + goto end; + + reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA | + YT921X_SMI_READ; + res = __mdiobus_read(bus, addr, reg_data); + if (res < 0) + goto end; + val = (u16)res; + res = __mdiobus_read(bus, addr, reg_data); + if (res < 0) + goto end; + val = (val << 16) | (u16)res; + + *valp = val; + res = 0; + +end: + mutex_unlock(&bus->mdio_lock); + return res; +} + +static int 
yt921x_reg_mdio_write(void *context, u32 reg, u32 val) +{ + struct yt921x_reg_mdio *mdio = context; + struct mii_bus *bus = mdio->bus; + int addr = mdio->addr; + u32 reg_addr; + u32 reg_data; + int res; + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + + reg_addr = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_ADDR | + YT921X_SMI_WRITE; + res = __mdiobus_write(bus, addr, reg_addr, (u16)(reg >> 16)); + if (res) + goto end; + res = __mdiobus_write(bus, addr, reg_addr, (u16)reg); + if (res) + goto end; + + reg_data = YT921X_SMI_SWITCHID(mdio->switchid) | YT921X_SMI_DATA | + YT921X_SMI_WRITE; + res = __mdiobus_write(bus, addr, reg_data, (u16)(val >> 16)); + if (res) + goto end; + res = __mdiobus_write(bus, addr, reg_data, (u16)val); + if (res) + goto end; + + res = 0; + +end: + mutex_unlock(&bus->mdio_lock); + return res; +} + +static const struct yt921x_reg_ops yt921x_reg_ops_mdio = { + .read = yt921x_reg_mdio_read, + .write = yt921x_reg_mdio_write, +}; + +/* TODO: SPI/I2C */ + +static int yt921x_intif_wait(struct yt921x_priv *priv) +{ + u32 val = 0; + + return yt921x_reg_wait(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START, + &val); +} + +static int +yt921x_intif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp) +{ + struct device *dev = to_device(priv); + u32 mask; + u32 ctrl; + u32 val; + int res; + + res = yt921x_intif_wait(priv); + if (res) + return res; + + mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M | + YT921X_MBUS_CTRL_OP_M; + ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) | + YT921X_MBUS_CTRL_READ; + res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START); + if (res) + return res; + + res = yt921x_intif_wait(priv); + if (res) + return res; + res = yt921x_reg_read(priv, YT921X_INT_MBUS_DIN, &val); + if (res) + return res; + + if ((u16)val != val) + dev_info(dev, + "%s: port %d, reg 0x%x: Expected 
u16, got 0x%08x\n", + __func__, port, reg, val); + *valp = (u16)val; + return 0; +} + +static int +yt921x_intif_write(struct yt921x_priv *priv, int port, int reg, u16 val) +{ + u32 mask; + u32 ctrl; + int res; + + res = yt921x_intif_wait(priv); + if (res) + return res; + + mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M | + YT921X_MBUS_CTRL_OP_M; + ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) | + YT921X_MBUS_CTRL_WRITE; + res = yt921x_reg_update_bits(priv, YT921X_INT_MBUS_CTRL, mask, ctrl); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_INT_MBUS_DOUT, val); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_INT_MBUS_OP, YT921X_MBUS_OP_START); + if (res) + return res; + + return yt921x_intif_wait(priv); +} + +static int yt921x_mbus_int_read(struct mii_bus *mbus, int port, int reg) +{ + struct yt921x_priv *priv = mbus->priv; + u16 val; + int res; + + if (port >= YT921X_PORT_NUM) + return U16_MAX; + + mutex_lock(&priv->reg_lock); + res = yt921x_intif_read(priv, port, reg, &val); + mutex_unlock(&priv->reg_lock); + + if (res) + return res; + return val; +} + +static int +yt921x_mbus_int_write(struct mii_bus *mbus, int port, int reg, u16 data) +{ + struct yt921x_priv *priv = mbus->priv; + int res; + + if (port >= YT921X_PORT_NUM) + return -ENODEV; + + mutex_lock(&priv->reg_lock); + res = yt921x_intif_write(priv, port, reg, data); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_mbus_int_init(struct yt921x_priv *priv, struct device_node *mnp) +{ + struct device *dev = to_device(priv); + struct mii_bus *mbus; + int res; + + mbus = devm_mdiobus_alloc(dev); + if (!mbus) + return -ENOMEM; + + mbus->name = "YT921x internal MDIO bus"; + snprintf(mbus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + mbus->priv = priv; + mbus->read = yt921x_mbus_int_read; + mbus->write = yt921x_mbus_int_write; + mbus->parent = dev; + mbus->phy_mask = (u32)~GENMASK(YT921X_PORT_NUM - 1, 0); + + res = 
devm_of_mdiobus_register(dev, mbus, mnp); + if (res) + return res; + + priv->mbus_int = mbus; + + return 0; +} + +static int yt921x_extif_wait(struct yt921x_priv *priv) +{ + u32 val = 0; + + return yt921x_reg_wait(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START, + &val); +} + +static int +yt921x_extif_read(struct yt921x_priv *priv, int port, int reg, u16 *valp) +{ + struct device *dev = to_device(priv); + u32 mask; + u32 ctrl; + u32 val; + int res; + + res = yt921x_extif_wait(priv); + if (res) + return res; + + mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M | + YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M; + ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) | + YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_READ; + res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START); + if (res) + return res; + + res = yt921x_extif_wait(priv); + if (res) + return res; + res = yt921x_reg_read(priv, YT921X_EXT_MBUS_DIN, &val); + if (res) + return res; + + if ((u16)val != val) + dev_info(dev, + "%s: port %d, reg 0x%x: Expected u16, got 0x%08x\n", + __func__, port, reg, val); + *valp = (u16)val; + return 0; +} + +static int +yt921x_extif_write(struct yt921x_priv *priv, int port, int reg, u16 val) +{ + u32 mask; + u32 ctrl; + int res; + + res = yt921x_extif_wait(priv); + if (res) + return res; + + mask = YT921X_MBUS_CTRL_PORT_M | YT921X_MBUS_CTRL_REG_M | + YT921X_MBUS_CTRL_TYPE_M | YT921X_MBUS_CTRL_OP_M; + ctrl = YT921X_MBUS_CTRL_PORT(port) | YT921X_MBUS_CTRL_REG(reg) | + YT921X_MBUS_CTRL_TYPE_C22 | YT921X_MBUS_CTRL_WRITE; + res = yt921x_reg_update_bits(priv, YT921X_EXT_MBUS_CTRL, mask, ctrl); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_EXT_MBUS_DOUT, val); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_EXT_MBUS_OP, YT921X_MBUS_OP_START); + if (res) + return res; + + return yt921x_extif_wait(priv); +} + +static int 
yt921x_mbus_ext_read(struct mii_bus *mbus, int port, int reg) +{ + struct yt921x_priv *priv = mbus->priv; + u16 val; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_extif_read(priv, port, reg, &val); + mutex_unlock(&priv->reg_lock); + + if (res) + return res; + return val; +} + +static int +yt921x_mbus_ext_write(struct mii_bus *mbus, int port, int reg, u16 data) +{ + struct yt921x_priv *priv = mbus->priv; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_extif_write(priv, port, reg, data); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_mbus_ext_init(struct yt921x_priv *priv, struct device_node *mnp) +{ + struct device *dev = to_device(priv); + struct mii_bus *mbus; + int res; + + mbus = devm_mdiobus_alloc(dev); + if (!mbus) + return -ENOMEM; + + mbus->name = "YT921x external MDIO bus"; + snprintf(mbus->id, MII_BUS_ID_SIZE, "%s@ext", dev_name(dev)); + mbus->priv = priv; + /* TODO: c45? */ + mbus->read = yt921x_mbus_ext_read; + mbus->write = yt921x_mbus_ext_write; + mbus->parent = dev; + + res = devm_of_mdiobus_register(dev, mbus, mnp); + if (res) + return res; + + priv->mbus_ext = mbus; + + return 0; +} + +/* Read and handle overflow of 32bit MIBs. MIB buffer must be zeroed before. */ +static int yt921x_read_mib(struct yt921x_priv *priv, int port) +{ + struct yt921x_port *pp = &priv->ports[port]; + struct device *dev = to_device(priv); + struct yt921x_mib *mib = &pp->mib; + int res = 0; + + /* Reading of yt921x_port::mib is not protected by a lock and it's vain + * to keep its consistency, since we have to read registers one by one + * and there is no way to make a snapshot of MIB stats. + * + * Writing (by this function only) is and should be protected by + * reg_lock. 
+ */ + + for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) { + const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i]; + u32 reg = YT921X_MIBn_DATA0(port) + desc->offset; + u64 *valp = &((u64 *)mib)[i]; + u64 val = *valp; + u32 val0; + u32 val1; + + res = yt921x_reg_read(priv, reg, &val0); + if (res) + break; + + if (desc->size <= 1) { + if (val < (u32)val) + /* overflow */ + val += (u64)U32_MAX + 1; + val &= ~U32_MAX; + val |= val0; + } else { + res = yt921x_reg_read(priv, reg + 4, &val1); + if (res) + break; + val = ((u64)val1 << 32) | val0; + } + + WRITE_ONCE(*valp, val); + } + + pp->rx_frames = mib->rx_64byte + mib->rx_65_127byte + + mib->rx_128_255byte + mib->rx_256_511byte + + mib->rx_512_1023byte + mib->rx_1024_1518byte + + mib->rx_jumbo; + pp->tx_frames = mib->tx_64byte + mib->tx_65_127byte + + mib->tx_128_255byte + mib->tx_256_511byte + + mib->tx_512_1023byte + mib->tx_1024_1518byte + + mib->tx_jumbo; + + if (res) + dev_err(dev, "Failed to %s port %d: %i\n", "read stats for", + port, res); + return res; +} + +static void yt921x_poll_mib(struct work_struct *work) +{ + struct yt921x_port *pp = container_of_const(work, struct yt921x_port, + mib_read.work); + struct yt921x_priv *priv = (void *)(pp - pp->index) - + offsetof(struct yt921x_priv, ports); + unsigned long delay = YT921X_STATS_INTERVAL_JIFFIES; + int port = pp->index; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_read_mib(priv, port); + mutex_unlock(&priv->reg_lock); + if (res) + delay *= 4; + + schedule_delayed_work(&pp->mib_read, delay); +} + +static void +yt921x_dsa_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + if (stringset != ETH_SS_STATS) + return; + + for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) { + const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i]; + + if (desc->name) + ethtool_puts(&data, desc->name); + } +} + +static void +yt921x_dsa_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) +{ + struct 
yt921x_priv *priv = to_yt921x_priv(ds); + struct yt921x_port *pp = &priv->ports[port]; + struct yt921x_mib *mib = &pp->mib; + size_t j; + + mutex_lock(&priv->reg_lock); + yt921x_read_mib(priv, port); + mutex_unlock(&priv->reg_lock); + + j = 0; + for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) { + const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i]; + + if (!desc->name) + continue; + + data[j] = ((u64 *)mib)[i]; + j++; + } +} + +static int yt921x_dsa_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + int cnt = 0; + + if (sset != ETH_SS_STATS) + return 0; + + for (size_t i = 0; i < ARRAY_SIZE(yt921x_mib_descs); i++) { + const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i]; + + if (desc->name) + cnt++; + } + + return cnt; +} + +static void +yt921x_dsa_get_eth_mac_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct yt921x_port *pp = &priv->ports[port]; + struct yt921x_mib *mib = &pp->mib; + + mutex_lock(&priv->reg_lock); + yt921x_read_mib(priv, port); + mutex_unlock(&priv->reg_lock); + + mac_stats->FramesTransmittedOK = pp->tx_frames; + mac_stats->SingleCollisionFrames = mib->tx_single_collisions; + mac_stats->MultipleCollisionFrames = mib->tx_multiple_collisions; + mac_stats->FramesReceivedOK = pp->rx_frames; + mac_stats->FrameCheckSequenceErrors = mib->rx_crc_errors; + mac_stats->AlignmentErrors = mib->rx_alignment_errors; + mac_stats->OctetsTransmittedOK = mib->tx_good_bytes; + mac_stats->FramesWithDeferredXmissions = mib->tx_deferred; + mac_stats->LateCollisions = mib->tx_late_collisions; + mac_stats->FramesAbortedDueToXSColls = mib->tx_aborted_errors; + /* mac_stats->FramesLostDueToIntMACXmitError */ + /* mac_stats->CarrierSenseErrors */ + mac_stats->OctetsReceivedOK = mib->rx_good_bytes; + /* mac_stats->FramesLostDueToIntMACRcvError */ + mac_stats->MulticastFramesXmittedOK = mib->tx_multicast; + mac_stats->BroadcastFramesXmittedOK = 
mib->tx_broadcast; + /* mac_stats->FramesWithExcessiveDeferral */ + mac_stats->MulticastFramesReceivedOK = mib->rx_multicast; + mac_stats->BroadcastFramesReceivedOK = mib->rx_broadcast; + /* mac_stats->InRangeLengthErrors */ + /* mac_stats->OutOfRangeLengthField */ + mac_stats->FrameTooLongErrors = mib->rx_oversize_errors; +} + +static void +yt921x_dsa_get_eth_ctrl_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct yt921x_port *pp = &priv->ports[port]; + struct yt921x_mib *mib = &pp->mib; + + mutex_lock(&priv->reg_lock); + yt921x_read_mib(priv, port); + mutex_unlock(&priv->reg_lock); + + ctrl_stats->MACControlFramesTransmitted = mib->tx_pause; + ctrl_stats->MACControlFramesReceived = mib->rx_pause; + /* ctrl_stats->UnsupportedOpcodesReceived */ +} + +static const struct ethtool_rmon_hist_range yt921x_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, YT921X_FRAME_SIZE_MAX }, + {} +}; + +static void +yt921x_dsa_get_rmon_stats(struct dsa_switch *ds, int port, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct yt921x_port *pp = &priv->ports[port]; + struct yt921x_mib *mib = &pp->mib; + + mutex_lock(&priv->reg_lock); + yt921x_read_mib(priv, port); + mutex_unlock(&priv->reg_lock); + + *ranges = yt921x_rmon_ranges; + + rmon_stats->undersize_pkts = mib->rx_undersize_errors; + rmon_stats->oversize_pkts = mib->rx_oversize_errors; + rmon_stats->fragments = mib->rx_alignment_errors; + /* rmon_stats->jabbers */ + + rmon_stats->hist[0] = mib->rx_64byte; + rmon_stats->hist[1] = mib->rx_65_127byte; + rmon_stats->hist[2] = mib->rx_128_255byte; + rmon_stats->hist[3] = mib->rx_256_511byte; + rmon_stats->hist[4] = mib->rx_512_1023byte; + rmon_stats->hist[5] = mib->rx_1024_1518byte; + rmon_stats->hist[6] = mib->rx_jumbo; + 
+ rmon_stats->hist_tx[0] = mib->tx_64byte; + rmon_stats->hist_tx[1] = mib->tx_65_127byte; + rmon_stats->hist_tx[2] = mib->tx_128_255byte; + rmon_stats->hist_tx[3] = mib->tx_256_511byte; + rmon_stats->hist_tx[4] = mib->tx_512_1023byte; + rmon_stats->hist_tx[5] = mib->tx_1024_1518byte; + rmon_stats->hist_tx[6] = mib->tx_jumbo; +} + +static void +yt921x_dsa_get_stats64(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *stats) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct yt921x_port *pp = &priv->ports[port]; + struct yt921x_mib *mib = &pp->mib; + + stats->rx_length_errors = mib->rx_undersize_errors + + mib->rx_fragment_errors; + stats->rx_over_errors = mib->rx_oversize_errors; + stats->rx_crc_errors = mib->rx_crc_errors; + stats->rx_frame_errors = mib->rx_alignment_errors; + /* stats->rx_fifo_errors */ + /* stats->rx_missed_errors */ + + stats->tx_aborted_errors = mib->tx_aborted_errors; + /* stats->tx_carrier_errors */ + stats->tx_fifo_errors = mib->tx_undersize_errors; + /* stats->tx_heartbeat_errors */ + stats->tx_window_errors = mib->tx_late_collisions; + + stats->rx_packets = pp->rx_frames; + stats->tx_packets = pp->tx_frames; + stats->rx_bytes = mib->rx_good_bytes - ETH_FCS_LEN * stats->rx_packets; + stats->tx_bytes = mib->tx_good_bytes - ETH_FCS_LEN * stats->tx_packets; + stats->rx_errors = stats->rx_length_errors + stats->rx_over_errors + + stats->rx_crc_errors + stats->rx_frame_errors; + stats->tx_errors = stats->tx_aborted_errors + stats->tx_fifo_errors + + stats->tx_window_errors; + stats->rx_dropped = mib->rx_dropped; + /* stats->tx_dropped */ + stats->multicast = mib->rx_multicast; + stats->collisions = mib->tx_collisions; +} + +static void +yt921x_dsa_get_pause_stats(struct dsa_switch *ds, int port, + struct ethtool_pause_stats *pause_stats) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct yt921x_port *pp = &priv->ports[port]; + struct yt921x_mib *mib = &pp->mib; + + mutex_lock(&priv->reg_lock); + 
yt921x_read_mib(priv, port); + mutex_unlock(&priv->reg_lock); + + pause_stats->tx_pause_frames = mib->tx_pause; + pause_stats->rx_pause_frames = mib->rx_pause; +} + +static int +yt921x_set_eee(struct yt921x_priv *priv, int port, struct ethtool_keee *e) +{ + /* Poor datasheet for EEE operations; don't ask if you are confused */ + + bool enable = e->eee_enabled; + u16 new_mask; + int res; + + /* Enable / disable global EEE */ + new_mask = priv->eee_ports_mask; + new_mask &= ~BIT(port); + new_mask |= !enable ? 0 : BIT(port); + + if (!!new_mask != !!priv->eee_ports_mask) { + res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_FUNC, + YT921X_PON_STRAP_EEE, !!new_mask); + if (res) + return res; + res = yt921x_reg_toggle_bits(priv, YT921X_PON_STRAP_VAL, + YT921X_PON_STRAP_EEE, !!new_mask); + if (res) + return res; + } + + priv->eee_ports_mask = new_mask; + + /* Enable / disable port EEE */ + res = yt921x_reg_toggle_bits(priv, YT921X_EEE_CTRL, + YT921X_EEE_CTRL_ENn(port), enable); + if (res) + return res; + res = yt921x_reg_toggle_bits(priv, YT921X_EEEn_VAL(port), + YT921X_EEE_VAL_DATA, enable); + if (res) + return res; + + return 0; +} + +static int +yt921x_dsa_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_set_eee(priv, port, e); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) +{ + /* Only serves as packet filter, since the frame size is always set to + * maximum after reset + */ + + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct dsa_port *dp = dsa_to_port(ds, port); + int frame_size; + int res; + + frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN; + if (dsa_port_is_cpu(dp)) + frame_size += YT921X_TAG_LEN; + + mutex_lock(&priv->reg_lock); + res = yt921x_reg_update_bits(priv, YT921X_MACn_FRAME(port), + YT921X_MAC_FRAME_SIZE_M, + 
YT921X_MAC_FRAME_SIZE(frame_size)); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int yt921x_dsa_port_max_mtu(struct dsa_switch *ds, int port) +{ + /* Only called for user ports, exclude tag len here */ + return YT921X_FRAME_SIZE_MAX - ETH_HLEN - ETH_FCS_LEN - YT921X_TAG_LEN; +} + +static int +yt921x_mirror_del(struct yt921x_priv *priv, int port, bool ingress) +{ + u32 mask; + + if (ingress) + mask = YT921X_MIRROR_IGR_PORTn(port); + else + mask = YT921X_MIRROR_EGR_PORTn(port); + return yt921x_reg_clear_bits(priv, YT921X_MIRROR, mask); +} + +static int +yt921x_mirror_add(struct yt921x_priv *priv, int port, bool ingress, + int to_local_port, struct netlink_ext_ack *extack) +{ + u32 srcs; + u32 ctrl; + u32 val; + u32 dst; + int res; + + if (ingress) + srcs = YT921X_MIRROR_IGR_PORTn(port); + else + srcs = YT921X_MIRROR_EGR_PORTn(port); + dst = YT921X_MIRROR_PORT(to_local_port); + + res = yt921x_reg_read(priv, YT921X_MIRROR, &val); + if (res) + return res; + + /* other mirror tasks & different dst port -> conflict */ + if ((val & ~srcs & (YT921X_MIRROR_EGR_PORTS_M | + YT921X_MIRROR_IGR_PORTS_M)) && + (val & YT921X_MIRROR_PORT_M) != dst) { + NL_SET_ERR_MSG_MOD(extack, + "Sniffer port is already configured, delete existing rules & retry"); + return -EBUSY; + } + + ctrl = val & ~YT921X_MIRROR_PORT_M; + ctrl |= srcs; + ctrl |= dst; + + if (ctrl == val) + return 0; + + return yt921x_reg_write(priv, YT921X_MIRROR, ctrl); +} + +static void +yt921x_dsa_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct device *dev = to_device(priv); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_mirror_del(priv, port, mirror->ingress); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dev, "Failed to %s port %d: %i\n", "unmirror", + port, res); +} + +static int +yt921x_dsa_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry 
*mirror, + bool ingress, struct netlink_ext_ack *extack) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_mirror_add(priv, port, ingress, + mirror->to_local_port, extack); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int yt921x_fdb_wait(struct yt921x_priv *priv, u32 *valp) +{ + struct device *dev = to_device(priv); + u32 val = YT921X_FDB_RESULT_DONE; + int res; + + res = yt921x_reg_wait(priv, YT921X_FDB_RESULT, YT921X_FDB_RESULT_DONE, + &val); + if (res) { + dev_err(dev, "FDB probably stuck\n"); + return res; + } + + *valp = val; + return 0; +} + +static int +yt921x_fdb_in01(struct yt921x_priv *priv, const unsigned char *addr, + u16 vid, u32 ctrl1) +{ + u32 ctrl; + int res; + + ctrl = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; + res = yt921x_reg_write(priv, YT921X_FDB_IN0, ctrl); + if (res) + return res; + + ctrl = ctrl1 | YT921X_FDB_IO1_FID(vid) | (addr[4] << 8) | addr[5]; + return yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl); +} + +static int +yt921x_fdb_has(struct yt921x_priv *priv, const unsigned char *addr, u16 vid, + u16 *indexp) +{ + u32 ctrl; + u32 val; + int res; + + res = yt921x_fdb_in01(priv, addr, vid, 0); + if (res) + return res; + + ctrl = 0; + res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl); + if (res) + return res; + + ctrl = YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + + res = yt921x_fdb_wait(priv, &val); + if (res) + return res; + if (val & YT921X_FDB_RESULT_NOTFOUND) { + *indexp = YT921X_FDB_NUM; + return 0; + } + + *indexp = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val); + return 0; +} + +static int +yt921x_fdb_read(struct yt921x_priv *priv, unsigned char *addr, u16 *vidp, + u16 *ports_maskp, u16 *indexp, u8 *statusp) +{ + struct device *dev = to_device(priv); + u16 index; + u32 data0; + u32 data1; + u32 data2; + u32 val; + int res; + + res = yt921x_fdb_wait(priv, 
&val); + if (res) + return res; + if (val & YT921X_FDB_RESULT_NOTFOUND) { + *ports_maskp = 0; + return 0; + } + index = FIELD_GET(YT921X_FDB_RESULT_INDEX_M, val); + + res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &data1); + if (res) + return res; + if ((data1 & YT921X_FDB_IO1_STATUS_M) == + YT921X_FDB_IO1_STATUS_INVALID) { + *ports_maskp = 0; + return 0; + } + + res = yt921x_reg_read(priv, YT921X_FDB_OUT0, &data0); + if (res) + return res; + res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &data2); + if (res) + return res; + + addr[0] = data0 >> 24; + addr[1] = data0 >> 16; + addr[2] = data0 >> 8; + addr[3] = data0; + addr[4] = data1 >> 8; + addr[5] = data1; + *vidp = FIELD_GET(YT921X_FDB_IO1_FID_M, data1); + *indexp = index; + *ports_maskp = FIELD_GET(YT921X_FDB_IO2_EGR_PORTS_M, data2); + *statusp = FIELD_GET(YT921X_FDB_IO1_STATUS_M, data1); + + dev_dbg(dev, + "%s: index 0x%x, mac %02x:%02x:%02x:%02x:%02x:%02x, vid %d, ports 0x%x, status %d\n", + __func__, *indexp, addr[0], addr[1], addr[2], addr[3], + addr[4], addr[5], *vidp, *ports_maskp, *statusp); + return 0; +} + +static int +yt921x_fdb_dump(struct yt921x_priv *priv, u16 ports_mask, + dsa_fdb_dump_cb_t *cb, void *data) +{ + unsigned char addr[ETH_ALEN]; + u8 status; + u16 pmask; + u16 index; + u32 ctrl; + u16 vid; + int res; + + ctrl = YT921X_FDB_OP_INDEX(0) | YT921X_FDB_OP_MODE_INDEX | + YT921X_FDB_OP_OP_GET_ONE | YT921X_FDB_OP_START; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index, &status); + if (res) + return res; + if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) { + res = cb(addr, vid, + status == YT921X_FDB_ENTRY_STATUS_STATIC, data); + if (res) + return res; + } + + ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask); + res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl); + if (res) + return res; + + index = 0; + do { + ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX | + YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT 
| + YT921X_FDB_OP_OP_GET_NEXT | YT921X_FDB_OP_START; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + + res = yt921x_fdb_read(priv, addr, &vid, &pmask, &index, + &status); + if (res) + return res; + if (!pmask) + break; + + if ((pmask & ports_mask) && !is_multicast_ether_addr(addr)) { + res = cb(addr, vid, + status == YT921X_FDB_ENTRY_STATUS_STATIC, + data); + if (res) + return res; + } + + /* Never call GET_NEXT with 4095, otherwise it will hang + * forever until a reset! + */ + } while (index < YT921X_FDB_NUM - 1); + + return 0; +} + +static int +yt921x_fdb_flush_raw(struct yt921x_priv *priv, u16 ports_mask, u16 vid, + bool flush_static) +{ + u32 ctrl; + u32 val; + int res; + + if (vid < 4096) { + ctrl = YT921X_FDB_IO1_FID(vid); + res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl); + if (res) + return res; + } + + ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask); + res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl); + if (res) + return res; + + ctrl = YT921X_FDB_OP_OP_FLUSH | YT921X_FDB_OP_START; + if (vid >= 4096) + ctrl |= YT921X_FDB_OP_FLUSH_PORT; + else + ctrl |= YT921X_FDB_OP_FLUSH_PORT_VID; + if (flush_static) + ctrl |= YT921X_FDB_OP_FLUSH_STATIC; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + + res = yt921x_fdb_wait(priv, &val); + if (res) + return res; + + return 0; +} + +static int +yt921x_fdb_flush_port(struct yt921x_priv *priv, int port, bool flush_static) +{ + return yt921x_fdb_flush_raw(priv, BIT(port), 4096, flush_static); +} + +static int +yt921x_fdb_add_index_in12(struct yt921x_priv *priv, u16 index, u16 ctrl1, + u16 ctrl2) +{ + u32 ctrl; + u32 val; + int res; + + res = yt921x_reg_write(priv, YT921X_FDB_IN1, ctrl1); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl2); + if (res) + return res; + + ctrl = YT921X_FDB_OP_INDEX(index) | YT921X_FDB_OP_MODE_INDEX | + YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if 
(res) + return res; + + return yt921x_fdb_wait(priv, &val); +} + +static int +yt921x_fdb_add(struct yt921x_priv *priv, const unsigned char *addr, u16 vid, + u16 ports_mask) +{ + u32 ctrl; + u32 val; + int res; + + ctrl = YT921X_FDB_IO1_STATUS_STATIC; + res = yt921x_fdb_in01(priv, addr, vid, ctrl); + if (res) + return res; + + ctrl = YT921X_FDB_IO2_EGR_PORTS(ports_mask); + res = yt921x_reg_write(priv, YT921X_FDB_IN2, ctrl); + if (res) + return res; + + ctrl = YT921X_FDB_OP_OP_ADD | YT921X_FDB_OP_START; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + + return yt921x_fdb_wait(priv, &val); +} + +static int +yt921x_fdb_leave(struct yt921x_priv *priv, const unsigned char *addr, + u16 vid, u16 ports_mask) +{ + u16 index; + u32 ctrl1; + u32 ctrl2; + u32 ctrl; + u32 val2; + u32 val; + int res; + + /* Check for presence */ + res = yt921x_fdb_has(priv, addr, vid, &index); + if (res) + return res; + if (index >= YT921X_FDB_NUM) + return 0; + + /* Check if action required */ + res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2); + if (res) + return res; + + ctrl2 = val2 & ~YT921X_FDB_IO2_EGR_PORTS(ports_mask); + if (ctrl2 == val2) + return 0; + if (!(ctrl2 & YT921X_FDB_IO2_EGR_PORTS_M)) { + ctrl = YT921X_FDB_OP_OP_DEL | YT921X_FDB_OP_START; + res = yt921x_reg_write(priv, YT921X_FDB_OP, ctrl); + if (res) + return res; + + return yt921x_fdb_wait(priv, &val); + } + + res = yt921x_reg_read(priv, YT921X_FDB_OUT1, &ctrl1); + if (res) + return res; + + return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2); +} + +static int +yt921x_fdb_join(struct yt921x_priv *priv, const unsigned char *addr, u16 vid, + u16 ports_mask) +{ + u16 index; + u32 ctrl1; + u32 ctrl2; + u32 val1; + u32 val2; + int res; + + /* Check for presence */ + res = yt921x_fdb_has(priv, addr, vid, &index); + if (res) + return res; + if (index >= YT921X_FDB_NUM) + return yt921x_fdb_add(priv, addr, vid, ports_mask); + + /* Check if action required */ + res = yt921x_reg_read(priv, 
YT921X_FDB_OUT1, &val1); + if (res) + return res; + res = yt921x_reg_read(priv, YT921X_FDB_OUT2, &val2); + if (res) + return res; + + ctrl1 = val1 & ~YT921X_FDB_IO1_STATUS_M; + ctrl1 |= YT921X_FDB_IO1_STATUS_STATIC; + ctrl2 = val2 | YT921X_FDB_IO2_EGR_PORTS(ports_mask); + if (ctrl1 == val1 && ctrl2 == val2) + return 0; + + return yt921x_fdb_add_index_in12(priv, index, ctrl1, ctrl2); +} + +static int +yt921x_dsa_port_fdb_dump(struct dsa_switch *ds, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + mutex_lock(&priv->reg_lock); + /* Hardware FDB is shared for fdb and mdb, "bridge fdb show" + * only wants to see unicast + */ + res = yt921x_fdb_dump(priv, BIT(port), cb, data); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static void yt921x_dsa_port_fast_age(struct dsa_switch *ds, int port) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct device *dev = to_device(priv); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_fdb_flush_port(priv, port, false); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dev, "Failed to %s port %d: %i\n", "clear FDB for", + port, res); +} + +static int +yt921x_dsa_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u32 ctrl; + int res; + + /* AGEING reg is set in 5s step */ + ctrl = clamp(msecs / 5000, 1, U16_MAX); + + mutex_lock(&priv->reg_lock); + res = yt921x_reg_write(priv, YT921X_AGEING, ctrl); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, struct dsa_db db) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_fdb_leave(priv, addr, vid, BIT(port)); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, 
struct dsa_db db) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_fdb_join(priv, addr, vid, BIT(port)); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + const unsigned char *addr = mdb->addr; + u16 vid = mdb->vid; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_fdb_leave(priv, addr, vid, BIT(port)); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + const unsigned char *addr = mdb->addr; + u16 vid = mdb->vid; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_fdb_join(priv, addr, vid, BIT(port)); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_port_set_pvid(struct yt921x_priv *priv, int port, u16 vid) +{ + u32 mask; + u32 ctrl; + + mask = YT921X_PORT_VLAN_CTRL_CVID_M; + ctrl = YT921X_PORT_VLAN_CTRL_CVID(vid); + return yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL(port), + mask, ctrl); +} + +static int +yt921x_vlan_filtering(struct yt921x_priv *priv, int port, bool vlan_filtering) +{ + struct dsa_port *dp = dsa_to_port(&priv->ds, port); + struct net_device *bdev; + u16 pvid; + u32 mask; + u32 ctrl; + int res; + + bdev = dsa_port_bridge_dev_get(dp); + + if (!bdev || !vlan_filtering) + pvid = YT921X_VID_UNWARE; + else + br_vlan_get_pvid(bdev, &pvid); + res = yt921x_port_set_pvid(priv, port, pvid); + if (res) + return res; + + mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED | + YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; + ctrl = 0; + /* Do not drop tagged frames here; let VLAN_IGR_FILTER do it */ + if (vlan_filtering && !pvid) + ctrl |= YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; 
+ res = yt921x_reg_update_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), + mask, ctrl); + if (res) + return res; + + res = yt921x_reg_toggle_bits(priv, YT921X_VLAN_IGR_FILTER, + YT921X_VLAN_IGR_FILTER_PORTn(port), + vlan_filtering); + if (res) + return res; + + /* Turn on / off VLAN awareness */ + mask = YT921X_PORT_IGR_TPIDn_CTAG_M; + if (!vlan_filtering) + ctrl = 0; + else + ctrl = YT921X_PORT_IGR_TPIDn_CTAG(0); + res = yt921x_reg_update_bits(priv, YT921X_PORTn_IGR_TPID(port), + mask, ctrl); + if (res) + return res; + + return 0; +} + +static int +yt921x_vlan_del(struct yt921x_priv *priv, int port, u16 vid) +{ + u64 mask64; + + mask64 = YT921X_VLAN_CTRL_PORTS(port) | + YT921X_VLAN_CTRL_UNTAG_PORTn(port); + + return yt921x_reg64_clear_bits(priv, YT921X_VLANn_CTRL(vid), mask64); +} + +static int +yt921x_vlan_add(struct yt921x_priv *priv, int port, u16 vid, bool untagged) +{ + u64 mask64; + u64 ctrl64; + + mask64 = YT921X_VLAN_CTRL_PORTn(port) | + YT921X_VLAN_CTRL_PORTS(priv->cpu_ports_mask); + ctrl64 = mask64; + + mask64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port); + if (untagged) + ctrl64 |= YT921X_VLAN_CTRL_UNTAG_PORTn(port); + + return yt921x_reg64_update_bits(priv, YT921X_VLANn_CTRL(vid), + mask64, ctrl64); +} + +static int +yt921x_pvid_clear(struct yt921x_priv *priv, int port) +{ + struct dsa_port *dp = dsa_to_port(&priv->ds, port); + bool vlan_filtering; + u32 mask; + int res; + + vlan_filtering = dsa_port_is_vlan_filtering(dp); + + res = yt921x_port_set_pvid(priv, port, + vlan_filtering ? 
0 : YT921X_VID_UNWARE); + if (res) + return res; + + if (vlan_filtering) { + mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; + res = yt921x_reg_set_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), + mask); + if (res) + return res; + } + + return 0; +} + +static int +yt921x_pvid_set(struct yt921x_priv *priv, int port, u16 vid) +{ + struct dsa_port *dp = dsa_to_port(&priv->ds, port); + bool vlan_filtering; + u32 mask; + int res; + + vlan_filtering = dsa_port_is_vlan_filtering(dp); + + if (vlan_filtering) { + res = yt921x_port_set_pvid(priv, port, vid); + if (res) + return res; + } + + mask = YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED; + res = yt921x_reg_clear_bits(priv, YT921X_PORTn_VLAN_CTRL1(port), mask); + if (res) + return res; + + return 0; +} + +static int +yt921x_dsa_port_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering, + struct netlink_ext_ack *extack) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + if (dsa_is_cpu_port(ds, port)) + return 0; + + mutex_lock(&priv->reg_lock); + res = yt921x_vlan_filtering(priv, port, vlan_filtering); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u16 vid = vlan->vid; + u16 pvid; + int res; + + if (dsa_is_cpu_port(ds, port)) + return 0; + + mutex_lock(&priv->reg_lock); + do { + struct dsa_port *dp = dsa_to_port(ds, port); + struct net_device *bdev; + + res = yt921x_vlan_del(priv, port, vid); + if (res) + break; + + bdev = dsa_port_bridge_dev_get(dp); + if (bdev) { + br_vlan_get_pvid(bdev, &pvid); + if (pvid == vid) + res = yt921x_pvid_clear(priv, port); + } + } while (0); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + struct yt921x_priv *priv = 
to_yt921x_priv(ds); + u16 vid = vlan->vid; + u16 pvid; + int res; + + /* CPU port is supposed to be a member of every VLAN; see + * yt921x_vlan_add() and yt921x_port_setup() + */ + if (dsa_is_cpu_port(ds, port)) + return 0; + + mutex_lock(&priv->reg_lock); + do { + struct dsa_port *dp = dsa_to_port(ds, port); + struct net_device *bdev; + + res = yt921x_vlan_add(priv, port, vid, + vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + if (res) + break; + + bdev = dsa_port_bridge_dev_get(dp); + if (bdev) { + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { + res = yt921x_pvid_set(priv, port, vid); + } else { + br_vlan_get_pvid(bdev, &pvid); + if (pvid == vid) + res = yt921x_pvid_clear(priv, port); + } + } + } while (0); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int yt921x_userport_standalone(struct yt921x_priv *priv, int port) +{ + u32 mask; + u32 ctrl; + int res; + + ctrl = ~priv->cpu_ports_mask; + res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), ctrl); + if (res) + return res; + + /* Turn off FDB learning to prevent FDB pollution */ + mask = YT921X_PORT_LEARN_DIS; + res = yt921x_reg_set_bits(priv, YT921X_PORTn_LEARN(port), mask); + if (res) + return res; + + /* Turn off VLAN awareness */ + mask = YT921X_PORT_IGR_TPIDn_CTAG_M; + res = yt921x_reg_clear_bits(priv, YT921X_PORTn_IGR_TPID(port), mask); + if (res) + return res; + + /* Unrelated since learning is off and all packets are trapped; + * set it anyway + */ + res = yt921x_port_set_pvid(priv, port, YT921X_VID_UNWARE); + if (res) + return res; + + return 0; +} + +static int yt921x_userport_bridge(struct yt921x_priv *priv, int port) +{ + u32 mask; + int res; + + mask = YT921X_PORT_LEARN_DIS; + res = yt921x_reg_clear_bits(priv, YT921X_PORTn_LEARN(port), mask); + if (res) + return res; + + return 0; +} + +static int yt921x_isolate(struct yt921x_priv *priv, int port) +{ + u32 mask; + int res; + + mask = BIT(port); + for (int i = 0; i < YT921X_PORT_NUM; i++) { + if ((BIT(i) & priv->cpu_ports_mask) || i == 
port) + continue; + + res = yt921x_reg_set_bits(priv, YT921X_PORTn_ISOLATION(i), + mask); + if (res) + return res; + } + + return 0; +} + +/* Make sure to include the CPU port in ports_mask, or your bridge will + * not have it. + */ +static int yt921x_bridge(struct yt921x_priv *priv, u16 ports_mask) +{ + unsigned long targets_mask = ports_mask & ~priv->cpu_ports_mask; + u32 isolated_mask; + u32 ctrl; + int port; + int res; + + isolated_mask = 0; + for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) { + struct yt921x_port *pp = &priv->ports[port]; + + if (pp->isolated) + isolated_mask |= BIT(port); + } + + /* Block from non-cpu bridge ports ... */ + for_each_set_bit(port, &targets_mask, YT921X_PORT_NUM) { + struct yt921x_port *pp = &priv->ports[port]; + + /* to non-bridge ports */ + ctrl = ~ports_mask; + /* to isolated ports when isolated */ + if (pp->isolated) + ctrl |= isolated_mask; + /* to itself when non-hairpin */ + if (!pp->hairpin) + ctrl |= BIT(port); + else + ctrl &= ~BIT(port); + + res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), + ctrl); + if (res) + return res; + } + + return 0; +} + +static int yt921x_bridge_leave(struct yt921x_priv *priv, int port) +{ + int res; + + res = yt921x_userport_standalone(priv, port); + if (res) + return res; + + res = yt921x_isolate(priv, port); + if (res) + return res; + + return 0; +} + +static int +yt921x_bridge_join(struct yt921x_priv *priv, int port, u16 ports_mask) +{ + int res; + + res = yt921x_userport_bridge(priv, port); + if (res) + return res; + + res = yt921x_bridge(priv, ports_mask); + if (res) + return res; + + return 0; +} + +static u32 +dsa_bridge_ports(struct dsa_switch *ds, const struct net_device *bdev) +{ + struct dsa_port *dp; + u32 mask = 0; + + dsa_switch_for_each_user_port(dp, ds) + if (dsa_port_offloads_bridge_dev(dp, bdev)) + mask |= BIT(dp->index); + + return mask; +} + +static int +yt921x_bridge_flags(struct yt921x_priv *priv, int port, + struct switchdev_brport_flags flags) +{ + 
struct yt921x_port *pp = &priv->ports[port]; + bool do_flush; + u32 mask; + int res; + + if (flags.mask & BR_LEARNING) { + bool learning = flags.val & BR_LEARNING; + + mask = YT921X_PORT_LEARN_DIS; + res = yt921x_reg_toggle_bits(priv, YT921X_PORTn_LEARN(port), + mask, !learning); + if (res) + return res; + } + + /* BR_FLOOD, BR_MCAST_FLOOD: see the comment where ACT_UNK_ACTn_TRAP + * is set + */ + + /* BR_BCAST_FLOOD: we can filter bcast, but cannot trap them */ + + do_flush = false; + if (flags.mask & BR_HAIRPIN_MODE) { + pp->hairpin = flags.val & BR_HAIRPIN_MODE; + do_flush = true; + } + if (flags.mask & BR_ISOLATED) { + pp->isolated = flags.val & BR_ISOLATED; + do_flush = true; + } + if (do_flush) { + struct dsa_switch *ds = &priv->ds; + struct dsa_port *dp = dsa_to_port(ds, port); + struct net_device *bdev; + + bdev = dsa_port_bridge_dev_get(dp); + if (bdev) { + u32 ports_mask; + + ports_mask = dsa_bridge_ports(ds, bdev); + ports_mask |= priv->cpu_ports_mask; + res = yt921x_bridge(priv, ports_mask); + if (res) + return res; + } + } + + return 0; +} + +static int +yt921x_dsa_port_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(BR_HAIRPIN_MODE | BR_LEARNING | BR_FLOOD | + BR_MCAST_FLOOD | BR_ISOLATED)) + return -EINVAL; + return 0; +} + +static int +yt921x_dsa_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + if (dsa_is_cpu_port(ds, port)) + return 0; + + mutex_lock(&priv->reg_lock); + res = yt921x_bridge_flags(priv, port, flags); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static void +yt921x_dsa_port_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_bridge bridge) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct device *dev = to_device(priv); + int res; + + if (dsa_is_cpu_port(ds, port)) + 
return; + + mutex_lock(&priv->reg_lock); + res = yt921x_bridge_leave(priv, port); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dev, "Failed to %s port %d: %i\n", "unbridge", + port, res); +} + +static int +yt921x_dsa_port_bridge_join(struct dsa_switch *ds, int port, + struct dsa_bridge bridge, bool *tx_fwd_offload, + struct netlink_ext_ack *extack) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u16 ports_mask; + int res; + + if (dsa_is_cpu_port(ds, port)) + return 0; + + ports_mask = dsa_bridge_ports(ds, bridge.dev); + ports_mask |= priv->cpu_ports_mask; + + mutex_lock(&priv->reg_lock); + res = yt921x_bridge_join(priv, port, ports_mask); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_port_mst_state_set(struct dsa_switch *ds, int port, + const struct switchdev_mst_state *st) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u32 mask; + u32 ctrl; + int res; + + mask = YT921X_STP_PORTn_M(port); + switch (st->state) { + case BR_STATE_DISABLED: + ctrl = YT921X_STP_PORTn_DISABLED(port); + break; + case BR_STATE_LISTENING: + case BR_STATE_LEARNING: + ctrl = YT921X_STP_PORTn_LEARNING(port); + break; + case BR_STATE_FORWARDING: + default: + ctrl = YT921X_STP_PORTn_FORWARD(port); + break; + case BR_STATE_BLOCKING: + ctrl = YT921X_STP_PORTn_BLOCKING(port); + break; + } + + mutex_lock(&priv->reg_lock); + res = yt921x_reg_update_bits(priv, YT921X_STPn(st->msti), mask, ctrl); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int +yt921x_dsa_vlan_msti_set(struct dsa_switch *ds, struct dsa_bridge bridge, + const struct switchdev_vlan_msti *msti) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + u64 mask64; + u64 ctrl64; + int res; + + if (!msti->vid) + return -EINVAL; + if (!msti->msti || msti->msti >= YT921X_MSTI_NUM) + return -EINVAL; + + mask64 = YT921X_VLAN_CTRL_STP_ID_M; + ctrl64 = YT921X_VLAN_CTRL_STP_ID(msti->msti); + + mutex_lock(&priv->reg_lock); + res = yt921x_reg64_update_bits(priv, 
YT921X_VLANn_CTRL(msti->vid), + mask64, ctrl64); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static void +yt921x_dsa_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct dsa_port *dp = dsa_to_port(ds, port); + struct device *dev = to_device(priv); + bool learning; + u32 mask; + u32 ctrl; + int res; + + mask = YT921X_STP_PORTn_M(port); + learning = false; + switch (state) { + case BR_STATE_DISABLED: + ctrl = YT921X_STP_PORTn_DISABLED(port); + break; + case BR_STATE_LISTENING: + ctrl = YT921X_STP_PORTn_LEARNING(port); + break; + case BR_STATE_LEARNING: + ctrl = YT921X_STP_PORTn_LEARNING(port); + learning = dp->learning; + break; + case BR_STATE_FORWARDING: + default: + ctrl = YT921X_STP_PORTn_FORWARD(port); + learning = dp->learning; + break; + case BR_STATE_BLOCKING: + ctrl = YT921X_STP_PORTn_BLOCKING(port); + break; + } + + mutex_lock(&priv->reg_lock); + do { + res = yt921x_reg_update_bits(priv, YT921X_STPn(0), mask, ctrl); + if (res) + break; + + mask = YT921X_PORT_LEARN_DIS; + ctrl = !learning ? 
YT921X_PORT_LEARN_DIS : 0; + res = yt921x_reg_update_bits(priv, YT921X_PORTn_LEARN(port), + mask, ctrl); + } while (0); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dev, "Failed to %s port %d: %i\n", "set STP state for", + port, res); +} + +static int yt921x_port_down(struct yt921x_priv *priv, int port) +{ + u32 mask; + int res; + + mask = YT921X_PORT_LINK | YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN; + res = yt921x_reg_clear_bits(priv, YT921X_PORTn_CTRL(port), mask); + if (res) + return res; + + if (yt921x_port_is_external(port)) { + mask = YT921X_SERDES_LINK; + res = yt921x_reg_clear_bits(priv, YT921X_SERDESn(port), mask); + if (res) + return res; + + mask = YT921X_XMII_LINK; + res = yt921x_reg_clear_bits(priv, YT921X_XMIIn(port), mask); + if (res) + return res; + } + + return 0; +} + +static int +yt921x_port_up(struct yt921x_priv *priv, int port, unsigned int mode, + phy_interface_t interface, int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + u32 mask; + u32 ctrl; + int res; + + switch (speed) { + case SPEED_10: + ctrl = YT921X_PORT_SPEED_10; + break; + case SPEED_100: + ctrl = YT921X_PORT_SPEED_100; + break; + case SPEED_1000: + ctrl = YT921X_PORT_SPEED_1000; + break; + case SPEED_2500: + ctrl = YT921X_PORT_SPEED_2500; + break; + case SPEED_10000: + ctrl = YT921X_PORT_SPEED_10000; + break; + default: + return -EINVAL; + } + if (duplex == DUPLEX_FULL) + ctrl |= YT921X_PORT_DUPLEX_FULL; + if (tx_pause) + ctrl |= YT921X_PORT_TX_PAUSE; + if (rx_pause) + ctrl |= YT921X_PORT_RX_PAUSE; + ctrl |= YT921X_PORT_RX_MAC_EN | YT921X_PORT_TX_MAC_EN; + res = yt921x_reg_write(priv, YT921X_PORTn_CTRL(port), ctrl); + if (res) + return res; + + if (yt921x_port_is_external(port)) { + mask = YT921X_SERDES_SPEED_M; + switch (speed) { + case SPEED_10: + ctrl = YT921X_SERDES_SPEED_10; + break; + case SPEED_100: + ctrl = YT921X_SERDES_SPEED_100; + break; + case SPEED_1000: + ctrl = YT921X_SERDES_SPEED_1000; + break; + case SPEED_2500: + ctrl = 
YT921X_SERDES_SPEED_2500; + break; + case SPEED_10000: + ctrl = YT921X_SERDES_SPEED_10000; + break; + default: + return -EINVAL; + } + mask |= YT921X_SERDES_DUPLEX_FULL; + if (duplex == DUPLEX_FULL) + ctrl |= YT921X_SERDES_DUPLEX_FULL; + mask |= YT921X_SERDES_TX_PAUSE; + if (tx_pause) + ctrl |= YT921X_SERDES_TX_PAUSE; + mask |= YT921X_SERDES_RX_PAUSE; + if (rx_pause) + ctrl |= YT921X_SERDES_RX_PAUSE; + mask |= YT921X_SERDES_LINK; + ctrl |= YT921X_SERDES_LINK; + res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port), + mask, ctrl); + if (res) + return res; + + mask = YT921X_XMII_LINK; + res = yt921x_reg_set_bits(priv, YT921X_XMIIn(port), mask); + if (res) + return res; + + switch (speed) { + case SPEED_10: + ctrl = YT921X_MDIO_POLLING_SPEED_10; + break; + case SPEED_100: + ctrl = YT921X_MDIO_POLLING_SPEED_100; + break; + case SPEED_1000: + ctrl = YT921X_MDIO_POLLING_SPEED_1000; + break; + case SPEED_2500: + ctrl = YT921X_MDIO_POLLING_SPEED_2500; + break; + case SPEED_10000: + ctrl = YT921X_MDIO_POLLING_SPEED_10000; + break; + default: + return -EINVAL; + } + if (duplex == DUPLEX_FULL) + ctrl |= YT921X_MDIO_POLLING_DUPLEX_FULL; + ctrl |= YT921X_MDIO_POLLING_LINK; + res = yt921x_reg_write(priv, YT921X_MDIO_POLLINGn(port), ctrl); + if (res) + return res; + } + + return 0; +} + +static int +yt921x_port_config(struct yt921x_priv *priv, int port, unsigned int mode, + phy_interface_t interface) +{ + struct device *dev = to_device(priv); + u32 mask; + u32 ctrl; + int res; + + if (!yt921x_port_is_external(port)) { + if (interface != PHY_INTERFACE_MODE_INTERNAL) { + dev_err(dev, "Wrong mode %d on port %d\n", + interface, port); + return -EINVAL; + } + return 0; + } + + switch (interface) { + /* SERDES */ + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_100BASEX: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + mask = YT921X_SERDES_CTRL_PORTn(port); + res = yt921x_reg_set_bits(priv, YT921X_SERDES_CTRL, mask); + if (res) + return res; + + 
mask = YT921X_XMII_CTRL_PORTn(port); + res = yt921x_reg_clear_bits(priv, YT921X_XMII_CTRL, mask); + if (res) + return res; + + mask = YT921X_SERDES_MODE_M; + switch (interface) { + case PHY_INTERFACE_MODE_SGMII: + ctrl = YT921X_SERDES_MODE_SGMII; + break; + case PHY_INTERFACE_MODE_100BASEX: + ctrl = YT921X_SERDES_MODE_100BASEX; + break; + case PHY_INTERFACE_MODE_1000BASEX: + ctrl = YT921X_SERDES_MODE_1000BASEX; + break; + case PHY_INTERFACE_MODE_2500BASEX: + ctrl = YT921X_SERDES_MODE_2500BASEX; + break; + default: + return -EINVAL; + } + res = yt921x_reg_update_bits(priv, YT921X_SERDESn(port), + mask, ctrl); + if (res) + return res; + + break; + /* add XMII support here */ + default: + return -EINVAL; + } + + return 0; +} + +static void +yt921x_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct yt921x_priv *priv = to_yt921x_priv(dp->ds); + int port = dp->index; + int res; + + /* No need to sync; port control block is hold until device remove */ + cancel_delayed_work(&priv->ports[port].mib_read); + + mutex_lock(&priv->reg_lock); + res = yt921x_port_down(priv, port); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring down", + port, res); +} + +static void +yt921x_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, + phy_interface_t interface, int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct yt921x_priv *priv = to_yt921x_priv(dp->ds); + int port = dp->index; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_port_up(priv, port, mode, interface, speed, duplex, + tx_pause, rx_pause); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "bring up", + port, res); + + schedule_delayed_work(&priv->ports[port].mib_read, 0); +} + +static 
void +yt921x_phylink_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct yt921x_priv *priv = to_yt921x_priv(dp->ds); + int port = dp->index; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_port_config(priv, port, mode, state->interface); + mutex_unlock(&priv->reg_lock); + + if (res) + dev_err(dp->ds->dev, "Failed to %s port %d: %i\n", "config", + port, res); +} + +static void +yt921x_dsa_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + const struct yt921x_info *info = priv->info; + + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000; + + if (info->internal_mask & BIT(port)) { + /* Port 10 for MCU should probably go here too. But since that + * is untested yet, turn it down for the moment by letting it + * fall to the default branch. + */ + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); + } else if (info->external_mask & BIT(port)) { + /* TODO: external ports may support SERDES only, XMII only, or + * SERDES + XMII depending on the chip. However, we can't get + * the accurate config table due to lack of document, thus + * we simply declare SERDES + XMII and rely on the correctness + * of devicetree for now. + */ + + /* SERDES */ + __set_bit(PHY_INTERFACE_MODE_SGMII, + config->supported_interfaces); + /* REVSGMII (SGMII in PHY role) should go here, once + * PHY_INTERFACE_MODE_REVSGMII is introduced. + */ + __set_bit(PHY_INTERFACE_MODE_100BASEX, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + config->supported_interfaces); + config->mac_capabilities |= MAC_2500FD; + + /* XMII */ + + /* Not tested. 
To add support for XMII: + * - Add proper interface modes below + * - Handle them in yt921x_port_config() + */ + } + /* no such port: empty supported_interfaces causes phylink to turn it + * down + */ +} + +static int yt921x_port_setup(struct yt921x_priv *priv, int port) +{ + struct dsa_switch *ds = &priv->ds; + u32 ctrl; + int res; + + res = yt921x_userport_standalone(priv, port); + if (res) + return res; + + if (dsa_is_cpu_port(ds, port)) { + /* Egress of CPU port is supposed to be completely controlled + * via tagging, so set to oneway isolated (drop all packets + * without tag). + */ + ctrl = ~(u32)0; + res = yt921x_reg_write(priv, YT921X_PORTn_ISOLATION(port), + ctrl); + if (res) + return res; + + /* To simplify FDB "isolation" simulation, we also disable + * learning on the CPU port, and let software identify packets + * towarding CPU (either trapped or a static FDB entry is + * matched, no matter which bridge that entry is for), which is + * already done by yt921x_userport_standalone(). As a result, + * VLAN-awareness becomes unrelated on the CPU port (set to + * VLAN-unaware by the way). 
+ */ + } + + return 0; +} + +static enum dsa_tag_protocol +yt921x_dsa_get_tag_protocol(struct dsa_switch *ds, int port, + enum dsa_tag_protocol m) +{ + return DSA_TAG_PROTO_YT921X; +} + +static int yt921x_dsa_port_setup(struct dsa_switch *ds, int port) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_port_setup(priv, port); + mutex_unlock(&priv->reg_lock); + + return res; +} + +static int yt921x_edata_wait(struct yt921x_priv *priv, u32 *valp) +{ + u32 val = YT921X_EDATA_DATA_IDLE; + int res; + + res = yt921x_reg_wait(priv, YT921X_EDATA_DATA, + YT921X_EDATA_DATA_STATUS_M, &val); + if (res) + return res; + + *valp = val; + return 0; +} + +static int +yt921x_edata_read_cont(struct yt921x_priv *priv, u8 addr, u8 *valp) +{ + u32 ctrl; + u32 val; + int res; + + ctrl = YT921X_EDATA_CTRL_ADDR(addr) | YT921X_EDATA_CTRL_READ; + res = yt921x_reg_write(priv, YT921X_EDATA_CTRL, ctrl); + if (res) + return res; + res = yt921x_edata_wait(priv, &val); + if (res) + return res; + + *valp = FIELD_GET(YT921X_EDATA_DATA_DATA_M, val); + return 0; +} + +static int yt921x_edata_read(struct yt921x_priv *priv, u8 addr, u8 *valp) +{ + u32 val; + int res; + + res = yt921x_edata_wait(priv, &val); + if (res) + return res; + return yt921x_edata_read_cont(priv, addr, valp); +} + +static int yt921x_chip_detect(struct yt921x_priv *priv) +{ + struct device *dev = to_device(priv); + const struct yt921x_info *info; + u8 extmode; + u32 chipid; + u32 major; + u32 mode; + int res; + + res = yt921x_reg_read(priv, YT921X_CHIP_ID, &chipid); + if (res) + return res; + + major = FIELD_GET(YT921X_CHIP_ID_MAJOR, chipid); + + for (info = yt921x_infos; info->name; info++) + if (info->major == major) + break; + if (!info->name) { + dev_err(dev, "Unexpected chipid 0x%x\n", chipid); + return -ENODEV; + } + + res = yt921x_reg_read(priv, YT921X_CHIP_MODE, &mode); + if (res) + return res; + res = yt921x_edata_read(priv, YT921X_EDATA_EXTMODE, &extmode); + if 
(res) + return res; + + for (; info->name; info++) + if (info->major == major && info->mode == mode && + info->extmode == extmode) + break; + if (!info->name) { + dev_err(dev, + "Unsupported chipid 0x%x with chipmode 0x%x 0x%x\n", + chipid, mode, extmode); + return -ENODEV; + } + + /* Print chipid here since we are interested in lower 16 bits */ + dev_info(dev, + "Motorcomm %s ethernet switch, chipid: 0x%x, chipmode: 0x%x 0x%x\n", + info->name, chipid, mode, extmode); + + priv->info = info; + return 0; +} + +static int yt921x_chip_reset(struct yt921x_priv *priv) +{ + struct device *dev = to_device(priv); + u16 eth_p_tag; + u32 val; + int res; + + res = yt921x_chip_detect(priv); + if (res) + return res; + + /* Reset */ + res = yt921x_reg_write(priv, YT921X_RST, YT921X_RST_HW); + if (res) + return res; + + /* RST_HW is almost same as GPIO hard reset, so we need this delay. */ + fsleep(YT921X_RST_DELAY_US); + + val = 0; + res = yt921x_reg_wait(priv, YT921X_RST, ~0, &val); + if (res) + return res; + + /* Check for tag EtherType; do it after reset in case you messed it up + * before. + */ + res = yt921x_reg_read(priv, YT921X_CPU_TAG_TPID, &val); + if (res) + return res; + eth_p_tag = FIELD_GET(YT921X_CPU_TAG_TPID_TPID_M, val); + if (eth_p_tag != ETH_P_YT921X) { + dev_err(dev, "Tag type 0x%x != 0x%x\n", eth_p_tag, + ETH_P_YT921X); + /* Despite being possible, we choose not to set CPU_TAG_TPID, + * since there is no way it can be different unless you have the + * wrong chip. 
+ */ + return -EINVAL; + } + + return 0; +} + +static int yt921x_chip_setup(struct yt921x_priv *priv) +{ + struct dsa_switch *ds = &priv->ds; + unsigned long cpu_ports_mask; + u64 ctrl64; + u32 ctrl; + int port; + int res; + + /* Enable DSA */ + priv->cpu_ports_mask = dsa_cpu_ports(ds); + + ctrl = YT921X_EXT_CPU_PORT_TAG_EN | YT921X_EXT_CPU_PORT_PORT_EN | + YT921X_EXT_CPU_PORT_PORT(__ffs(priv->cpu_ports_mask)); + res = yt921x_reg_write(priv, YT921X_EXT_CPU_PORT, ctrl); + if (res) + return res; + + /* Enable and clear MIB */ + res = yt921x_reg_set_bits(priv, YT921X_FUNC, YT921X_FUNC_MIB); + if (res) + return res; + + ctrl = YT921X_MIB_CTRL_CLEAN | YT921X_MIB_CTRL_ALL_PORT; + res = yt921x_reg_write(priv, YT921X_MIB_CTRL, ctrl); + if (res) + return res; + + /* Setup software switch */ + ctrl = YT921X_CPU_COPY_TO_EXT_CPU; + res = yt921x_reg_write(priv, YT921X_CPU_COPY, ctrl); + if (res) + return res; + + ctrl = GENMASK(10, 0); + res = yt921x_reg_write(priv, YT921X_FILTER_UNK_UCAST, ctrl); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_FILTER_UNK_MCAST, ctrl); + if (res) + return res; + + /* YT921x does not support native DSA port bridging, so we use port + * isolation to emulate it. However, be especially careful that port + * isolation takes _after_ FDB lookups, i.e. if an FDB entry (from + * another bridge) is matched and the destination port (in another + * bridge) is blocked, the packet will be dropped instead of flooding to + * the "bridged" ports, thus we need to trap and handle those packets by + * software. + * + * If there is no more than one bridge, we might be able to drop them + * directly given some conditions are met, but we trap them in all cases + * for now. + */ + ctrl = 0; + for (int i = 0; i < YT921X_PORT_NUM; i++) + ctrl |= YT921X_ACT_UNK_ACTn_TRAP(i); + /* Except for CPU ports, if any packets are sent via CPU ports without + * tag, they should be dropped. 
+ */ + cpu_ports_mask = priv->cpu_ports_mask; + for_each_set_bit(port, &cpu_ports_mask, YT921X_PORT_NUM) { + ctrl &= ~YT921X_ACT_UNK_ACTn_M(port); + ctrl |= YT921X_ACT_UNK_ACTn_DROP(port); + } + res = yt921x_reg_write(priv, YT921X_ACT_UNK_UCAST, ctrl); + if (res) + return res; + res = yt921x_reg_write(priv, YT921X_ACT_UNK_MCAST, ctrl); + if (res) + return res; + + /* Tagged VID 0 should be treated as untagged, which confuses the + * hardware a lot + */ + ctrl64 = YT921X_VLAN_CTRL_LEARN_DIS | YT921X_VLAN_CTRL_PORTS_M; + res = yt921x_reg64_write(priv, YT921X_VLANn_CTRL(0), ctrl64); + if (res) + return res; + + /* Miscellaneous */ + res = yt921x_reg_set_bits(priv, YT921X_SENSOR, YT921X_SENSOR_TEMP); + if (res) + return res; + + return 0; +} + +static int yt921x_dsa_setup(struct dsa_switch *ds) +{ + struct yt921x_priv *priv = to_yt921x_priv(ds); + struct device *dev = to_device(priv); + struct device_node *np = dev->of_node; + struct device_node *child; + int res; + + mutex_lock(&priv->reg_lock); + res = yt921x_chip_reset(priv); + mutex_unlock(&priv->reg_lock); + + if (res) + return res; + + /* Register the internal mdio bus. Nodes for internal ports should have + * proper phy-handle pointing to their PHYs. Not enabling the internal + * bus is possible, though pretty wired, if internal ports are not used. 
+ */ + child = of_get_child_by_name(np, "mdio"); + if (child) { + res = yt921x_mbus_int_init(priv, child); + of_node_put(child); + if (res) + return res; + } + + /* External mdio bus is optional */ + child = of_get_child_by_name(np, "mdio-external"); + if (child) { + res = yt921x_mbus_ext_init(priv, child); + of_node_put(child); + if (res) + return res; + + dev_err(dev, "Untested external mdio bus\n"); + return -ENODEV; + } + + mutex_lock(&priv->reg_lock); + res = yt921x_chip_setup(priv); + mutex_unlock(&priv->reg_lock); + + if (res) + return res; + + return 0; +} + +static const struct phylink_mac_ops yt921x_phylink_mac_ops = { + .mac_link_down = yt921x_phylink_mac_link_down, + .mac_link_up = yt921x_phylink_mac_link_up, + .mac_config = yt921x_phylink_mac_config, +}; + +static const struct dsa_switch_ops yt921x_dsa_switch_ops = { + /* mib */ + .get_strings = yt921x_dsa_get_strings, + .get_ethtool_stats = yt921x_dsa_get_ethtool_stats, + .get_sset_count = yt921x_dsa_get_sset_count, + .get_eth_mac_stats = yt921x_dsa_get_eth_mac_stats, + .get_eth_ctrl_stats = yt921x_dsa_get_eth_ctrl_stats, + .get_rmon_stats = yt921x_dsa_get_rmon_stats, + .get_stats64 = yt921x_dsa_get_stats64, + .get_pause_stats = yt921x_dsa_get_pause_stats, + /* eee */ + .support_eee = dsa_supports_eee, + .set_mac_eee = yt921x_dsa_set_mac_eee, + /* mtu */ + .port_change_mtu = yt921x_dsa_port_change_mtu, + .port_max_mtu = yt921x_dsa_port_max_mtu, + /* hsr */ + .port_hsr_leave = dsa_port_simple_hsr_leave, + .port_hsr_join = dsa_port_simple_hsr_join, + /* mirror */ + .port_mirror_del = yt921x_dsa_port_mirror_del, + .port_mirror_add = yt921x_dsa_port_mirror_add, + /* fdb */ + .port_fdb_dump = yt921x_dsa_port_fdb_dump, + .port_fast_age = yt921x_dsa_port_fast_age, + .set_ageing_time = yt921x_dsa_set_ageing_time, + .port_fdb_del = yt921x_dsa_port_fdb_del, + .port_fdb_add = yt921x_dsa_port_fdb_add, + .port_mdb_del = yt921x_dsa_port_mdb_del, + .port_mdb_add = yt921x_dsa_port_mdb_add, + /* vlan */ + 
.port_vlan_filtering = yt921x_dsa_port_vlan_filtering, + .port_vlan_del = yt921x_dsa_port_vlan_del, + .port_vlan_add = yt921x_dsa_port_vlan_add, + /* bridge */ + .port_pre_bridge_flags = yt921x_dsa_port_pre_bridge_flags, + .port_bridge_flags = yt921x_dsa_port_bridge_flags, + .port_bridge_leave = yt921x_dsa_port_bridge_leave, + .port_bridge_join = yt921x_dsa_port_bridge_join, + /* mst */ + .port_mst_state_set = yt921x_dsa_port_mst_state_set, + .vlan_msti_set = yt921x_dsa_vlan_msti_set, + .port_stp_state_set = yt921x_dsa_port_stp_state_set, + /* port */ + .get_tag_protocol = yt921x_dsa_get_tag_protocol, + .phylink_get_caps = yt921x_dsa_phylink_get_caps, + .port_setup = yt921x_dsa_port_setup, + /* chip */ + .setup = yt921x_dsa_setup, +}; + +static void yt921x_mdio_shutdown(struct mdio_device *mdiodev) +{ + struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev); + + if (!priv) + return; + + dsa_switch_shutdown(&priv->ds); +} + +static void yt921x_mdio_remove(struct mdio_device *mdiodev) +{ + struct yt921x_priv *priv = mdiodev_get_drvdata(mdiodev); + + if (!priv) + return; + + for (size_t i = ARRAY_SIZE(priv->ports); i-- > 0; ) { + struct yt921x_port *pp = &priv->ports[i]; + + disable_delayed_work_sync(&pp->mib_read); + } + + dsa_unregister_switch(&priv->ds); + + mutex_destroy(&priv->reg_lock); +} + +static int yt921x_mdio_probe(struct mdio_device *mdiodev) +{ + struct device *dev = &mdiodev->dev; + struct yt921x_reg_mdio *mdio; + struct yt921x_priv *priv; + struct dsa_switch *ds; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mdio = devm_kzalloc(dev, sizeof(*mdio), GFP_KERNEL); + if (!mdio) + return -ENOMEM; + + mdio->bus = mdiodev->bus; + mdio->addr = mdiodev->addr; + mdio->switchid = 0; + + mutex_init(&priv->reg_lock); + + priv->reg_ops = &yt921x_reg_ops_mdio; + priv->reg_ctx = mdio; + + for (size_t i = 0; i < ARRAY_SIZE(priv->ports); i++) { + struct yt921x_port *pp = &priv->ports[i]; + + pp->index = i; + 
INIT_DELAYED_WORK(&pp->mib_read, yt921x_poll_mib); + } + + ds = &priv->ds; + ds->dev = dev; + ds->assisted_learning_on_cpu_port = true; + ds->priv = priv; + ds->ops = &yt921x_dsa_switch_ops; + ds->ageing_time_min = 1 * 5000; + ds->ageing_time_max = U16_MAX * 5000; + ds->phylink_mac_ops = &yt921x_phylink_mac_ops; + ds->num_ports = YT921X_PORT_NUM; + + mdiodev_set_drvdata(mdiodev, priv); + + return dsa_register_switch(ds); +} + +static const struct of_device_id yt921x_of_match[] = { + { .compatible = "motorcomm,yt9215" }, + {} +}; +MODULE_DEVICE_TABLE(of, yt921x_of_match); + +static struct mdio_driver yt921x_mdio_driver = { + .probe = yt921x_mdio_probe, + .remove = yt921x_mdio_remove, + .shutdown = yt921x_mdio_shutdown, + .mdiodrv.driver = { + .name = YT921X_NAME, + .of_match_table = yt921x_of_match, + }, +}; + +mdio_module_driver(yt921x_mdio_driver); + +MODULE_AUTHOR("David Yang <mmyangfl@gmail.com>"); +MODULE_DESCRIPTION("Driver for Motorcomm YT921x Switch"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/yt921x.h b/drivers/net/dsa/yt921x.h new file mode 100644 index 000000000000..61bb0ab3b09a --- /dev/null +++ b/drivers/net/dsa/yt921x.h @@ -0,0 +1,567 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2025 David Yang + */ + +#ifndef __YT921X_H +#define __YT921X_H + +#include <net/dsa.h> + +#define YT921X_SMI_SWITCHID_M GENMASK(3, 2) +#define YT921X_SMI_SWITCHID(x) FIELD_PREP(YT921X_SMI_SWITCHID_M, (x)) +#define YT921X_SMI_AD BIT(1) +#define YT921X_SMI_ADDR 0 +#define YT921X_SMI_DATA YT921X_SMI_AD +#define YT921X_SMI_RW BIT(0) +#define YT921X_SMI_WRITE 0 +#define YT921X_SMI_READ YT921X_SMI_RW + +#define YT921X_SWITCHID_NUM 4 + +#define YT921X_RST 0x80000 +#define YT921X_RST_HW BIT(31) +#define YT921X_RST_SW BIT(1) +#define YT921X_FUNC 0x80004 +#define YT921X_FUNC_MIB BIT(1) +#define YT921X_CHIP_ID 0x80008 +#define YT921X_CHIP_ID_MAJOR GENMASK(31, 16) +#define YT921X_EXT_CPU_PORT 0x8000c +#define YT921X_EXT_CPU_PORT_TAG_EN BIT(15) 
+#define YT921X_EXT_CPU_PORT_PORT_EN BIT(14) +#define YT921X_EXT_CPU_PORT_PORT_M GENMASK(3, 0) +#define YT921X_EXT_CPU_PORT_PORT(x) FIELD_PREP(YT921X_EXT_CPU_PORT_PORT_M, (x)) +#define YT921X_CPU_TAG_TPID 0x80010 +#define YT921X_CPU_TAG_TPID_TPID_M GENMASK(15, 0) +/* Same as ETH_P_YT921X, but this represents the true HW default, while the + * former is a local convention chosen by us. + */ +#define YT921X_CPU_TAG_TPID_TPID_DEFAULT 0x9988 +#define YT921X_PVID_SEL 0x80014 +#define YT921X_PVID_SEL_SVID_PORTn(port) BIT(port) +#define YT921X_SERDES_CTRL 0x80028 +#define YT921X_SERDES_CTRL_PORTn_TEST(port) BIT((port) - 3) +#define YT921X_SERDES_CTRL_PORTn(port) BIT((port) - 8) +#define YT921X_IO_LEVEL 0x80030 +#define YT9215_IO_LEVEL_NORMAL_M GENMASK(5, 4) +#define YT9215_IO_LEVEL_NORMAL(x) FIELD_PREP(YT9215_IO_LEVEL_NORMAL_M, (x)) +#define YT9215_IO_LEVEL_NORMAL_3V3 YT9215_IO_LEVEL_NORMAL(0) +#define YT9215_IO_LEVEL_NORMAL_1V8 YT9215_IO_LEVEL_NORMAL(3) +#define YT9215_IO_LEVEL_RGMII1_M GENMASK(3, 2) +#define YT9215_IO_LEVEL_RGMII1(x) FIELD_PREP(YT9215_IO_LEVEL_RGMII1_M, (x)) +#define YT9215_IO_LEVEL_RGMII1_3V3 YT9215_IO_LEVEL_RGMII1(0) +#define YT9215_IO_LEVEL_RGMII1_2V5 YT9215_IO_LEVEL_RGMII1(1) +#define YT9215_IO_LEVEL_RGMII1_1V8 YT9215_IO_LEVEL_RGMII1(2) +#define YT9215_IO_LEVEL_RGMII0_M GENMASK(1, 0) +#define YT9215_IO_LEVEL_RGMII0(x) FIELD_PREP(YT9215_IO_LEVEL_RGMII0_M, (x)) +#define YT9215_IO_LEVEL_RGMII0_3V3 YT9215_IO_LEVEL_RGMII0(0) +#define YT9215_IO_LEVEL_RGMII0_2V5 YT9215_IO_LEVEL_RGMII0(1) +#define YT9215_IO_LEVEL_RGMII0_1V8 YT9215_IO_LEVEL_RGMII0(2) +#define YT9218_IO_LEVEL_RGMII1_M GENMASK(5, 4) +#define YT9218_IO_LEVEL_RGMII1(x) FIELD_PREP(YT9218_IO_LEVEL_RGMII1_M, (x)) +#define YT9218_IO_LEVEL_RGMII1_3V3 YT9218_IO_LEVEL_RGMII1(0) +#define YT9218_IO_LEVEL_RGMII1_2V5 YT9218_IO_LEVEL_RGMII1(1) +#define YT9218_IO_LEVEL_RGMII1_1V8 YT9218_IO_LEVEL_RGMII1(2) +#define YT9218_IO_LEVEL_RGMII0_M GENMASK(3, 2) +#define YT9218_IO_LEVEL_RGMII0(x) 
FIELD_PREP(YT9218_IO_LEVEL_RGMII0_M, (x)) +#define YT9218_IO_LEVEL_RGMII0_3V3 YT9218_IO_LEVEL_RGMII0(0) +#define YT9218_IO_LEVEL_RGMII0_2V5 YT9218_IO_LEVEL_RGMII0(1) +#define YT9218_IO_LEVEL_RGMII0_1V8 YT9218_IO_LEVEL_RGMII0(2) +#define YT9218_IO_LEVEL_NORMAL_M GENMASK(1, 0) +#define YT9218_IO_LEVEL_NORMAL(x) FIELD_PREP(YT9218_IO_LEVEL_NORMAL_M, (x)) +#define YT9218_IO_LEVEL_NORMAL_3V3 YT9218_IO_LEVEL_NORMAL(0) +#define YT9218_IO_LEVEL_NORMAL_1V8 YT9218_IO_LEVEL_NORMAL(3) +#define YT921X_MAC_ADDR_HI2 0x80080 +#define YT921X_MAC_ADDR_LO4 0x80084 +#define YT921X_SERDESn(port) (0x8008c + 4 * ((port) - 8)) +#define YT921X_SERDES_MODE_M GENMASK(9, 7) +#define YT921X_SERDES_MODE(x) FIELD_PREP(YT921X_SERDES_MODE_M, (x)) +#define YT921X_SERDES_MODE_SGMII YT921X_SERDES_MODE(0) +#define YT921X_SERDES_MODE_REVSGMII YT921X_SERDES_MODE(1) +#define YT921X_SERDES_MODE_1000BASEX YT921X_SERDES_MODE(2) +#define YT921X_SERDES_MODE_100BASEX YT921X_SERDES_MODE(3) +#define YT921X_SERDES_MODE_2500BASEX YT921X_SERDES_MODE(4) +#define YT921X_SERDES_RX_PAUSE BIT(6) +#define YT921X_SERDES_TX_PAUSE BIT(5) +#define YT921X_SERDES_LINK BIT(4) /* force link */ +#define YT921X_SERDES_DUPLEX_FULL BIT(3) +#define YT921X_SERDES_SPEED_M GENMASK(2, 0) +#define YT921X_SERDES_SPEED(x) FIELD_PREP(YT921X_SERDES_SPEED_M, (x)) +#define YT921X_SERDES_SPEED_10 YT921X_SERDES_SPEED(0) +#define YT921X_SERDES_SPEED_100 YT921X_SERDES_SPEED(1) +#define YT921X_SERDES_SPEED_1000 YT921X_SERDES_SPEED(2) +#define YT921X_SERDES_SPEED_10000 YT921X_SERDES_SPEED(3) +#define YT921X_SERDES_SPEED_2500 YT921X_SERDES_SPEED(4) +#define YT921X_PORTn_CTRL(port) (0x80100 + 4 * (port)) +#define YT921X_PORT_CTRL_PAUSE_AN BIT(10) +#define YT921X_PORTn_STATUS(port) (0x80200 + 4 * (port)) +#define YT921X_PORT_LINK BIT(9) /* CTRL: auto negotiation */ +#define YT921X_PORT_HALF_PAUSE BIT(8) /* Half-duplex back pressure mode */ +#define YT921X_PORT_DUPLEX_FULL BIT(7) +#define YT921X_PORT_RX_PAUSE BIT(6) +#define YT921X_PORT_TX_PAUSE BIT(5) 
+#define YT921X_PORT_RX_MAC_EN BIT(4) +#define YT921X_PORT_TX_MAC_EN BIT(3) +#define YT921X_PORT_SPEED_M GENMASK(2, 0) +#define YT921X_PORT_SPEED(x) FIELD_PREP(YT921X_PORT_SPEED_M, (x)) +#define YT921X_PORT_SPEED_10 YT921X_PORT_SPEED(0) +#define YT921X_PORT_SPEED_100 YT921X_PORT_SPEED(1) +#define YT921X_PORT_SPEED_1000 YT921X_PORT_SPEED(2) +#define YT921X_PORT_SPEED_10000 YT921X_PORT_SPEED(3) +#define YT921X_PORT_SPEED_2500 YT921X_PORT_SPEED(4) +#define YT921X_PON_STRAP_FUNC 0x80320 +#define YT921X_PON_STRAP_VAL 0x80324 +#define YT921X_PON_STRAP_CAP 0x80328 +#define YT921X_PON_STRAP_EEE BIT(16) +#define YT921X_PON_STRAP_LOOP_DETECT BIT(7) +#define YT921X_MDIO_POLLINGn(port) (0x80364 + 4 * ((port) - 8)) +#define YT921X_MDIO_POLLING_DUPLEX_FULL BIT(4) +#define YT921X_MDIO_POLLING_LINK BIT(3) +#define YT921X_MDIO_POLLING_SPEED_M GENMASK(2, 0) +#define YT921X_MDIO_POLLING_SPEED(x) FIELD_PREP(YT921X_MDIO_POLLING_SPEED_M, (x)) +#define YT921X_MDIO_POLLING_SPEED_10 YT921X_MDIO_POLLING_SPEED(0) +#define YT921X_MDIO_POLLING_SPEED_100 YT921X_MDIO_POLLING_SPEED(1) +#define YT921X_MDIO_POLLING_SPEED_1000 YT921X_MDIO_POLLING_SPEED(2) +#define YT921X_MDIO_POLLING_SPEED_10000 YT921X_MDIO_POLLING_SPEED(3) +#define YT921X_MDIO_POLLING_SPEED_2500 YT921X_MDIO_POLLING_SPEED(4) +#define YT921X_SENSOR 0x8036c +#define YT921X_SENSOR_TEMP BIT(18) +#define YT921X_TEMP 0x80374 +#define YT921X_CHIP_MODE 0x80388 +#define YT921X_CHIP_MODE_MODE GENMASK(1, 0) +#define YT921X_XMII_CTRL 0x80394 +#define YT921X_XMII_CTRL_PORTn(port) BIT(9 - (port)) /* Yes, it's reversed */ +#define YT921X_XMIIn(port) (0x80400 + 8 * ((port) - 8)) +#define YT921X_XMII_MODE_M GENMASK(31, 29) +#define YT921X_XMII_MODE(x) FIELD_PREP(YT921X_XMII_MODE_M, (x)) +#define YT921X_XMII_MODE_MII YT921X_XMII_MODE(0) +#define YT921X_XMII_MODE_REVMII YT921X_XMII_MODE(1) +#define YT921X_XMII_MODE_RMII YT921X_XMII_MODE(2) +#define YT921X_XMII_MODE_REVRMII YT921X_XMII_MODE(3) +#define YT921X_XMII_MODE_RGMII YT921X_XMII_MODE(4) 
#define YT921X_XMII_MODE_DISABLE	YT921X_XMII_MODE(5)
#define YT921X_XMII_LINK		BIT(19) /* force link */
#define YT921X_XMII_EN			BIT(18)
#define YT921X_XMII_SOFT_RST		BIT(17)
#define YT921X_XMII_RGMII_TX_DELAY_150PS_M	GENMASK(16, 13)
#define YT921X_XMII_RGMII_TX_DELAY_150PS(x)	FIELD_PREP(YT921X_XMII_RGMII_TX_DELAY_150PS_M, (x))
#define YT921X_XMII_TX_CLK_IN		BIT(11)
#define YT921X_XMII_RX_CLK_IN		BIT(10)
#define YT921X_XMII_RGMII_TX_DELAY_2NS	BIT(8)
#define YT921X_XMII_RGMII_TX_CLK_OUT	BIT(7)
#define YT921X_XMII_RGMII_RX_DELAY_150PS_M	GENMASK(6, 3)
#define YT921X_XMII_RGMII_RX_DELAY_150PS(x)	FIELD_PREP(YT921X_XMII_RGMII_RX_DELAY_150PS_M, (x))
#define YT921X_XMII_RMII_PHY_TX_CLK_OUT	BIT(2)
#define YT921X_XMII_REVMII_TX_CLK_OUT	BIT(1)
#define YT921X_XMII_REVMII_RX_CLK_OUT	BIT(0)

/* Per-port maximum frame size */
#define YT921X_MACn_FRAME(port)		(0x81008 + 0x1000 * (port))
#define YT921X_MAC_FRAME_SIZE_M		GENMASK(21, 8)
#define YT921X_MAC_FRAME_SIZE(x)	FIELD_PREP(YT921X_MAC_FRAME_SIZE_M, (x))

/* Energy Efficient Ethernet */
#define YT921X_EEEn_VAL(port)		(0xa0000 + 0x40 * (port))
#define YT921X_EEE_VAL_DATA		BIT(1)

#define YT921X_EEE_CTRL			0xb0000
#define YT921X_EEE_CTRL_ENn(port)	BIT(port)

/* MIB (hardware counter) block */
#define YT921X_MIB_CTRL			0xc0004
#define YT921X_MIB_CTRL_CLEAN		BIT(30)
#define YT921X_MIB_CTRL_PORT_M		GENMASK(6, 3)
#define YT921X_MIB_CTRL_PORT(x)		FIELD_PREP(YT921X_MIB_CTRL_PORT_M, (x))
#define YT921X_MIB_CTRL_ONE_PORT	BIT(1)
#define YT921X_MIB_CTRL_ALL_PORT	BIT(0)
#define YT921X_MIBn_DATA0(port)		(0xc0100 + 0x100 * (port))
#define YT921X_MIBn_DATAm(port, x)	(YT921X_MIBn_DATA0(port) + 4 * (x))
/* Offsets of the individual counters within a port's MIB data window */
#define YT921X_MIB_DATA_RX_BROADCAST		0x00
#define YT921X_MIB_DATA_RX_PAUSE		0x04
#define YT921X_MIB_DATA_RX_MULTICAST		0x08
#define YT921X_MIB_DATA_RX_CRC_ERR		0x0c

#define YT921X_MIB_DATA_RX_ALIGN_ERR		0x10
#define YT921X_MIB_DATA_RX_UNDERSIZE_ERR	0x14
#define YT921X_MIB_DATA_RX_FRAG_ERR		0x18
#define YT921X_MIB_DATA_RX_PKT_SZ_64		0x1c

#define YT921X_MIB_DATA_RX_PKT_SZ_65_TO_127	0x20
#define YT921X_MIB_DATA_RX_PKT_SZ_128_TO_255	0x24
#define YT921X_MIB_DATA_RX_PKT_SZ_256_TO_511	0x28
#define YT921X_MIB_DATA_RX_PKT_SZ_512_TO_1023	0x2c

#define YT921X_MIB_DATA_RX_PKT_SZ_1024_TO_1518	0x30
#define YT921X_MIB_DATA_RX_PKT_SZ_1519_TO_MAX	0x34
/* 0x38: unused */
#define YT921X_MIB_DATA_RX_GOOD_BYTES		0x3c

/* 0x40: 64 bytes */
#define YT921X_MIB_DATA_RX_BAD_BYTES		0x44
/* 0x48: 64 bytes */
#define YT921X_MIB_DATA_RX_OVERSIZE_ERR		0x4c

#define YT921X_MIB_DATA_RX_DROPPED		0x50
#define YT921X_MIB_DATA_TX_BROADCAST		0x54
#define YT921X_MIB_DATA_TX_PAUSE		0x58
#define YT921X_MIB_DATA_TX_MULTICAST		0x5c

#define YT921X_MIB_DATA_TX_UNDERSIZE_ERR	0x60
#define YT921X_MIB_DATA_TX_PKT_SZ_64		0x64
#define YT921X_MIB_DATA_TX_PKT_SZ_65_TO_127	0x68
#define YT921X_MIB_DATA_TX_PKT_SZ_128_TO_255	0x6c

#define YT921X_MIB_DATA_TX_PKT_SZ_256_TO_511	0x70
#define YT921X_MIB_DATA_TX_PKT_SZ_512_TO_1023	0x74
#define YT921X_MIB_DATA_TX_PKT_SZ_1024_TO_1518	0x78
#define YT921X_MIB_DATA_TX_PKT_SZ_1519_TO_MAX	0x7c

/* 0x80: unused */
#define YT921X_MIB_DATA_TX_GOOD_BYTES		0x84
/* 0x88: 64 bytes */
#define YT921X_MIB_DATA_TX_COLLISION		0x8c

#define YT921X_MIB_DATA_TX_EXCESSIVE_COLLISION	0x90
#define YT921X_MIB_DATA_TX_MULTIPLE_COLLISION	0x94
#define YT921X_MIB_DATA_TX_SINGLE_COLLISION	0x98
#define YT921X_MIB_DATA_TX_PKT			0x9c

#define YT921X_MIB_DATA_TX_DEFERRED		0xa0
#define YT921X_MIB_DATA_TX_LATE_COLLISION	0xa4
#define YT921X_MIB_DATA_RX_OAM			0xa8
#define YT921X_MIB_DATA_TX_OAM			0xac

/* EDATA: small on-chip data store (e.g. extension mode byte) */
#define YT921X_EDATA_CTRL		0xe0000
#define YT921X_EDATA_CTRL_ADDR_M	GENMASK(15, 8)
#define YT921X_EDATA_CTRL_ADDR(x)	FIELD_PREP(YT921X_EDATA_CTRL_ADDR_M, (x))
#define YT921X_EDATA_CTRL_OP_M		GENMASK(3, 0)
#define YT921X_EDATA_CTRL_OP(x)		FIELD_PREP(YT921X_EDATA_CTRL_OP_M, (x))
#define YT921X_EDATA_CTRL_READ		YT921X_EDATA_CTRL_OP(5)
#define YT921X_EDATA_DATA		0xe0004
#define YT921X_EDATA_DATA_DATA_M	GENMASK(31, 24)
#define YT921X_EDATA_DATA_STATUS_M	GENMASK(3, 0)
#define YT921X_EDATA_DATA_STATUS(x)	FIELD_PREP(YT921X_EDATA_DATA_STATUS_M, (x))
#define YT921X_EDATA_DATA_IDLE		YT921X_EDATA_DATA_STATUS(3)

/* MDIO master ("MBUS") for external and internal PHYs; same layout */
#define YT921X_EXT_MBUS_OP		0x6a000
#define YT921X_INT_MBUS_OP		0xf0000
#define YT921X_MBUS_OP_START		BIT(0)
#define YT921X_EXT_MBUS_CTRL		0x6a004
#define YT921X_INT_MBUS_CTRL		0xf0004
#define YT921X_MBUS_CTRL_PORT_M		GENMASK(25, 21)
#define YT921X_MBUS_CTRL_PORT(x)	FIELD_PREP(YT921X_MBUS_CTRL_PORT_M, (x))
#define YT921X_MBUS_CTRL_REG_M		GENMASK(20, 16)
#define YT921X_MBUS_CTRL_REG(x)		FIELD_PREP(YT921X_MBUS_CTRL_REG_M, (x))
#define YT921X_MBUS_CTRL_TYPE_M		GENMASK(11, 8) /* wild guess */
#define YT921X_MBUS_CTRL_TYPE(x)	FIELD_PREP(YT921X_MBUS_CTRL_TYPE_M, (x))
#define YT921X_MBUS_CTRL_TYPE_C22	YT921X_MBUS_CTRL_TYPE(4)
#define YT921X_MBUS_CTRL_OP_M		GENMASK(3, 2) /* wild guess */
#define YT921X_MBUS_CTRL_OP(x)		FIELD_PREP(YT921X_MBUS_CTRL_OP_M, (x))
#define YT921X_MBUS_CTRL_WRITE		YT921X_MBUS_CTRL_OP(1)
#define YT921X_MBUS_CTRL_READ		YT921X_MBUS_CTRL_OP(2)
#define YT921X_EXT_MBUS_DOUT		0x6a008
#define YT921X_INT_MBUS_DOUT		0xf0008
#define YT921X_EXT_MBUS_DIN		0x6a00c
#define YT921X_INT_MBUS_DIN		0xf000c

/* Egress VLAN tag TPID selection */
#define YT921X_PORTn_EGR(port)		(0x100000 + 4 * (port))
#define YT921X_PORT_EGR_TPID_CTAG_M	GENMASK(5, 4)
#define YT921X_PORT_EGR_TPID_CTAG(x)	FIELD_PREP(YT921X_PORT_EGR_TPID_CTAG_M, (x))
#define YT921X_PORT_EGR_TPID_STAG_M	GENMASK(3, 2)
#define YT921X_PORT_EGR_TPID_STAG(x)	FIELD_PREP(YT921X_PORT_EGR_TPID_STAG_M, (x))
#define YT921X_TPID_EGRn(x)		(0x100300 + 4 * (x)) /* [0, 3] */
#define YT921X_TPID_EGR_TPID_M		GENMASK(15, 0)

/* VLAN ingress filtering, port isolation and STP state */
#define YT921X_VLAN_IGR_FILTER		0x180280
#define YT921X_VLAN_IGR_FILTER_PORTn_BYPASS_IGMP(port)	BIT((port) + 11)
#define YT921X_VLAN_IGR_FILTER_PORTn(port)	BIT(port)
#define YT921X_PORTn_ISOLATION(port)	(0x180294 + 4 * (port))
#define YT921X_PORT_ISOLATION_BLOCKn(port)	BIT(port)
#define YT921X_STPn(n)			(0x18038c + 4 * (n))
#define YT921X_STP_PORTn_M(port)	GENMASK(2 * (port) + 1, 2 * (port))
+#define YT921X_STP_PORTn(port, x) ((x) << (2 * (port))) +#define YT921X_STP_PORTn_DISABLED(port) YT921X_STP_PORTn(port, 0) +#define YT921X_STP_PORTn_LEARNING(port) YT921X_STP_PORTn(port, 1) +#define YT921X_STP_PORTn_BLOCKING(port) YT921X_STP_PORTn(port, 2) +#define YT921X_STP_PORTn_FORWARD(port) YT921X_STP_PORTn(port, 3) +#define YT921X_PORTn_LEARN(port) (0x1803d0 + 4 * (port)) +#define YT921X_PORT_LEARN_VID_LEARN_MULTI_EN BIT(22) +#define YT921X_PORT_LEARN_VID_LEARN_MODE BIT(21) +#define YT921X_PORT_LEARN_VID_LEARN_EN BIT(20) +#define YT921X_PORT_LEARN_SUSPEND_COPY_EN BIT(19) +#define YT921X_PORT_LEARN_SUSPEND_DROP_EN BIT(18) +#define YT921X_PORT_LEARN_DIS BIT(17) +#define YT921X_PORT_LEARN_LIMIT_EN BIT(16) +#define YT921X_PORT_LEARN_LIMIT_M GENMASK(15, 8) +#define YT921X_PORT_LEARN_LIMIT(x) FIELD_PREP(YT921X_PORT_LEARN_LIMIT_M, (x)) +#define YT921X_PORT_LEARN_DROP_ON_EXCEEDED BIT(2) +#define YT921X_PORT_LEARN_MODE_M GENMASK(1, 0) +#define YT921X_PORT_LEARN_MODE(x) FIELD_PREP(YT921X_PORT_LEARN_MODE_M, (x)) +#define YT921X_PORT_LEARN_MODE_AUTO YT921X_PORT_LEARN_MODE(0) +#define YT921X_PORT_LEARN_MODE_AUTO_AND_COPY YT921X_PORT_LEARN_MODE(1) +#define YT921X_PORT_LEARN_MODE_CPU_CONTROL YT921X_PORT_LEARN_MODE(2) +#define YT921X_AGEING 0x180440 +#define YT921X_AGEING_INTERVAL_M GENMASK(15, 0) +#define YT921X_FDB_IN0 0x180454 +#define YT921X_FDB_IN1 0x180458 +#define YT921X_FDB_IN2 0x18045c +#define YT921X_FDB_OP 0x180460 +#define YT921X_FDB_OP_INDEX_M GENMASK(22, 11) +#define YT921X_FDB_OP_INDEX(x) FIELD_PREP(YT921X_FDB_OP_INDEX_M, (x)) +#define YT921X_FDB_OP_MODE_INDEX BIT(10) /* mac+fid / index */ +#define YT921X_FDB_OP_FLUSH_MCAST BIT(9) /* ucast / mcast */ +#define YT921X_FDB_OP_FLUSH_M GENMASK(8, 7) +#define YT921X_FDB_OP_FLUSH(x) FIELD_PREP(YT921X_FDB_OP_FLUSH_M, (x)) +#define YT921X_FDB_OP_FLUSH_ALL YT921X_FDB_OP_FLUSH(0) +#define YT921X_FDB_OP_FLUSH_PORT YT921X_FDB_OP_FLUSH(1) +#define YT921X_FDB_OP_FLUSH_PORT_VID YT921X_FDB_OP_FLUSH(2) +#define 
YT921X_FDB_OP_FLUSH_VID YT921X_FDB_OP_FLUSH(3) +#define YT921X_FDB_OP_FLUSH_STATIC BIT(6) +#define YT921X_FDB_OP_NEXT_TYPE_M GENMASK(5, 4) +#define YT921X_FDB_OP_NEXT_TYPE(x) FIELD_PREP(YT921X_FDB_OP_NEXT_TYPE_M, (x)) +#define YT921X_FDB_OP_NEXT_TYPE_UCAST_PORT YT921X_FDB_OP_NEXT_TYPE(0) +#define YT921X_FDB_OP_NEXT_TYPE_UCAST_VID YT921X_FDB_OP_NEXT_TYPE(1) +#define YT921X_FDB_OP_NEXT_TYPE_UCAST YT921X_FDB_OP_NEXT_TYPE(2) +#define YT921X_FDB_OP_NEXT_TYPE_MCAST YT921X_FDB_OP_NEXT_TYPE(3) +#define YT921X_FDB_OP_OP_M GENMASK(3, 1) +#define YT921X_FDB_OP_OP(x) FIELD_PREP(YT921X_FDB_OP_OP_M, (x)) +#define YT921X_FDB_OP_OP_ADD YT921X_FDB_OP_OP(0) +#define YT921X_FDB_OP_OP_DEL YT921X_FDB_OP_OP(1) +#define YT921X_FDB_OP_OP_GET_ONE YT921X_FDB_OP_OP(2) +#define YT921X_FDB_OP_OP_GET_NEXT YT921X_FDB_OP_OP(3) +#define YT921X_FDB_OP_OP_FLUSH YT921X_FDB_OP_OP(4) +#define YT921X_FDB_OP_START BIT(0) +#define YT921X_FDB_RESULT 0x180464 +#define YT921X_FDB_RESULT_DONE BIT(15) +#define YT921X_FDB_RESULT_NOTFOUND BIT(14) +#define YT921X_FDB_RESULT_OVERWRITED BIT(13) +#define YT921X_FDB_RESULT_INDEX_M GENMASK(11, 0) +#define YT921X_FDB_RESULT_INDEX(x) FIELD_PREP(YT921X_FDB_RESULT_INDEX_M, (x)) +#define YT921X_FDB_OUT0 0x1804b0 +#define YT921X_FDB_IO0_ADDR_HI4_M GENMASK(31, 0) +#define YT921X_FDB_OUT1 0x1804b4 +#define YT921X_FDB_IO1_EGR_INT_PRI_EN BIT(31) +#define YT921X_FDB_IO1_STATUS_M GENMASK(30, 28) +#define YT921X_FDB_IO1_STATUS(x) FIELD_PREP(YT921X_FDB_IO1_STATUS_M, (x)) +#define YT921X_FDB_IO1_STATUS_INVALID YT921X_FDB_IO1_STATUS(0) +#define YT921X_FDB_IO1_STATUS_MIN_TIME YT921X_FDB_IO1_STATUS(1) +#define YT921X_FDB_IO1_STATUS_MOVE_AGING_MAX_TIME YT921X_FDB_IO1_STATUS(3) +#define YT921X_FDB_IO1_STATUS_MAX_TIME YT921X_FDB_IO1_STATUS(5) +#define YT921X_FDB_IO1_STATUS_PENDING YT921X_FDB_IO1_STATUS(6) +#define YT921X_FDB_IO1_STATUS_STATIC YT921X_FDB_IO1_STATUS(7) +#define YT921X_FDB_IO1_FID_M GENMASK(27, 16) /* filtering ID (VID) */ +#define YT921X_FDB_IO1_FID(x) 
FIELD_PREP(YT921X_FDB_IO1_FID_M, (x)) +#define YT921X_FDB_IO1_ADDR_LO2_M GENMASK(15, 0) +#define YT921X_FDB_OUT2 0x1804b8 +#define YT921X_FDB_IO2_MOVE_AGING_STATUS_M GENMASK(31, 30) +#define YT921X_FDB_IO2_IGR_DROP BIT(29) +#define YT921X_FDB_IO2_EGR_PORTS_M GENMASK(28, 18) +#define YT921X_FDB_IO2_EGR_PORTS(x) FIELD_PREP(YT921X_FDB_IO2_EGR_PORTS_M, (x)) +#define YT921X_FDB_IO2_EGR_DROP BIT(17) +#define YT921X_FDB_IO2_COPY_TO_CPU BIT(16) +#define YT921X_FDB_IO2_IGR_INT_PRI_EN BIT(15) +#define YT921X_FDB_IO2_INT_PRI_M GENMASK(14, 12) +#define YT921X_FDB_IO2_INT_PRI(x) FIELD_PREP(YT921X_FDB_IO2_INT_PRI_M, (x)) +#define YT921X_FDB_IO2_NEW_VID_M GENMASK(11, 0) +#define YT921X_FDB_IO2_NEW_VID(x) FIELD_PREP(YT921X_FDB_IO2_NEW_VID_M, (x)) +#define YT921X_FILTER_UNK_UCAST 0x180508 +#define YT921X_FILTER_UNK_MCAST 0x18050c +#define YT921X_FILTER_MCAST 0x180510 +#define YT921X_FILTER_BCAST 0x180514 +#define YT921X_FILTER_PORTS_M GENMASK(10, 0) +#define YT921X_FILTER_PORTS(x) FIELD_PREP(YT921X_FILTER_PORTS_M, (x)) +#define YT921X_FILTER_PORTn(port) BIT(port) +#define YT921X_VLAN_EGR_FILTER 0x180598 +#define YT921X_VLAN_EGR_FILTER_PORTn(port) BIT(port) +#define YT921X_CPU_COPY 0x180690 +#define YT921X_CPU_COPY_FORCE_INT_PORT BIT(2) +#define YT921X_CPU_COPY_TO_INT_CPU BIT(1) +#define YT921X_CPU_COPY_TO_EXT_CPU BIT(0) +#define YT921X_ACT_UNK_UCAST 0x180734 +#define YT921X_ACT_UNK_MCAST 0x180738 +#define YT921X_ACT_UNK_MCAST_BYPASS_DROP_RMA BIT(23) +#define YT921X_ACT_UNK_MCAST_BYPASS_DROP_IGMP BIT(22) +#define YT921X_ACT_UNK_ACTn_M(port) GENMASK(2 * (port) + 1, 2 * (port)) +#define YT921X_ACT_UNK_ACTn(port, x) ((x) << (2 * (port))) +#define YT921X_ACT_UNK_ACTn_FORWARD(port) YT921X_ACT_UNK_ACTn(port, 0) /* flood */ +#define YT921X_ACT_UNK_ACTn_TRAP(port) YT921X_ACT_UNK_ACTn(port, 1) /* steer to CPU */ +#define YT921X_ACT_UNK_ACTn_DROP(port) YT921X_ACT_UNK_ACTn(port, 2) /* discard */ +/* NEVER use this action; see comments in the tag driver */ +#define 
YT921X_ACT_UNK_ACTn_COPY(port) YT921X_ACT_UNK_ACTn(port, 3) /* flood and copy */ +#define YT921X_FDB_HW_FLUSH 0x180958 +#define YT921X_FDB_HW_FLUSH_ON_LINKDOWN BIT(0) + +#define YT921X_VLANn_CTRL(vlan) (0x188000 + 8 * (vlan)) +#define YT921X_VLAN_CTRL_UNTAG_PORTS_M GENMASK_ULL(50, 40) +#define YT921X_VLAN_CTRL_UNTAG_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_UNTAG_PORTS_M, (x)) +#define YT921X_VLAN_CTRL_UNTAG_PORTn(port) BIT_ULL((port) + 40) +#define YT921X_VLAN_CTRL_STP_ID_M GENMASK_ULL(39, 36) +#define YT921X_VLAN_CTRL_STP_ID(x) FIELD_PREP(YT921X_VLAN_CTRL_STP_ID_M, (x)) +#define YT921X_VLAN_CTRL_SVLAN_EN BIT_ULL(35) +#define YT921X_VLAN_CTRL_FID_M GENMASK_ULL(34, 23) +#define YT921X_VLAN_CTRL_FID(x) FIELD_PREP(YT921X_VLAN_CTRL_FID_M, (x)) +#define YT921X_VLAN_CTRL_LEARN_DIS BIT_ULL(22) +#define YT921X_VLAN_CTRL_INT_PRI_EN BIT_ULL(21) +#define YT921X_VLAN_CTRL_INT_PRI_M GENMASK_ULL(20, 18) +#define YT921X_VLAN_CTRL_PORTS_M GENMASK_ULL(17, 7) +#define YT921X_VLAN_CTRL_PORTS(x) FIELD_PREP(YT921X_VLAN_CTRL_PORTS_M, (x)) +#define YT921X_VLAN_CTRL_PORTn(port) BIT_ULL((port) + 7) +#define YT921X_VLAN_CTRL_BYPASS_1X_AC BIT_ULL(6) +#define YT921X_VLAN_CTRL_METER_EN BIT_ULL(5) +#define YT921X_VLAN_CTRL_METER_ID_M GENMASK_ULL(4, 0) + +#define YT921X_TPID_IGRn(x) (0x210000 + 4 * (x)) /* [0, 3] */ +#define YT921X_TPID_IGR_TPID_M GENMASK(15, 0) +#define YT921X_PORTn_IGR_TPID(port) (0x210010 + 4 * (port)) +#define YT921X_PORT_IGR_TPIDn_STAG_M GENMASK(7, 4) +#define YT921X_PORT_IGR_TPIDn_STAG(x) BIT((x) + 4) +#define YT921X_PORT_IGR_TPIDn_CTAG_M GENMASK(3, 0) +#define YT921X_PORT_IGR_TPIDn_CTAG(x) BIT(x) + +#define YT921X_PORTn_VLAN_CTRL(port) (0x230010 + 4 * (port)) +#define YT921X_PORT_VLAN_CTRL_SVLAN_PRI_EN BIT(31) +#define YT921X_PORT_VLAN_CTRL_CVLAN_PRI_EN BIT(30) +#define YT921X_PORT_VLAN_CTRL_SVID_M GENMASK(29, 18) +#define YT921X_PORT_VLAN_CTRL_SVID(x) FIELD_PREP(YT921X_PORT_VLAN_CTRL_SVID_M, (x)) +#define YT921X_PORT_VLAN_CTRL_CVID_M GENMASK(17, 6) +#define 
YT921X_PORT_VLAN_CTRL_CVID(x) FIELD_PREP(YT921X_PORT_VLAN_CTRL_CVID_M, (x)) +#define YT921X_PORT_VLAN_CTRL_SVLAN_PRI_M GENMASK(5, 3) +#define YT921X_PORT_VLAN_CTRL_CVLAN_PRI_M GENMASK(2, 0) +#define YT921X_PORTn_VLAN_CTRL1(port) (0x230080 + 4 * (port)) +#define YT921X_PORT_VLAN_CTRL1_VLAN_RANGE_EN BIT(8) +#define YT921X_PORT_VLAN_CTRL1_VLAN_RANGE_PROFILE_ID_M GENMASK(7, 4) +#define YT921X_PORT_VLAN_CTRL1_SVLAN_DROP_TAGGED BIT(3) +#define YT921X_PORT_VLAN_CTRL1_SVLAN_DROP_UNTAGGED BIT(2) +#define YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_TAGGED BIT(1) +#define YT921X_PORT_VLAN_CTRL1_CVLAN_DROP_UNTAGGED BIT(0) + +#define YT921X_MIRROR 0x300300 +#define YT921X_MIRROR_IGR_PORTS_M GENMASK(26, 16) +#define YT921X_MIRROR_IGR_PORTS(x) FIELD_PREP(YT921X_MIRROR_IGR_PORTS_M, (x)) +#define YT921X_MIRROR_IGR_PORTn(port) BIT((port) + 16) +#define YT921X_MIRROR_EGR_PORTS_M GENMASK(14, 4) +#define YT921X_MIRROR_EGR_PORTS(x) FIELD_PREP(YT921X_MIRROR_EGR_PORTS_M, (x)) +#define YT921X_MIRROR_EGR_PORTn(port) BIT((port) + 4) +#define YT921X_MIRROR_PORT_M GENMASK(3, 0) +#define YT921X_MIRROR_PORT(x) FIELD_PREP(YT921X_MIRROR_PORT_M, (x)) + +#define YT921X_EDATA_EXTMODE 0xfb +#define YT921X_EDATA_LEN 0x100 + +#define YT921X_FDB_NUM 4096 + +enum yt921x_fdb_entry_status { + YT921X_FDB_ENTRY_STATUS_INVALID = 0, + YT921X_FDB_ENTRY_STATUS_MIN_TIME = 1, + YT921X_FDB_ENTRY_STATUS_MOVE_AGING_MAX_TIME = 3, + YT921X_FDB_ENTRY_STATUS_MAX_TIME = 5, + YT921X_FDB_ENTRY_STATUS_PENDING = 6, + YT921X_FDB_ENTRY_STATUS_STATIC = 7, +}; + +#define YT921X_MSTI_NUM 16 + +#define YT9215_MAJOR 0x9002 +#define YT9218_MAJOR 0x9001 + +/* required for a hard reset */ +#define YT921X_RST_DELAY_US 10000 + +#define YT921X_FRAME_SIZE_MAX 0x2400 /* 9216 */ + +#define YT921X_TAG_LEN 8 + +/* 8 internal + 2 external + 1 mcu */ +#define YT921X_PORT_NUM 11 + +#define yt921x_port_is_internal(port) ((port) < 8) +#define yt921x_port_is_external(port) (8 <= (port) && (port) < 9) + +struct yt921x_mib { + u64 rx_broadcast; + u64 rx_pause; + 
u64 rx_multicast; + u64 rx_crc_errors; + + u64 rx_alignment_errors; + u64 rx_undersize_errors; + u64 rx_fragment_errors; + u64 rx_64byte; + + u64 rx_65_127byte; + u64 rx_128_255byte; + u64 rx_256_511byte; + u64 rx_512_1023byte; + + u64 rx_1024_1518byte; + u64 rx_jumbo; + u64 rx_good_bytes; + + u64 rx_bad_bytes; + u64 rx_oversize_errors; + + u64 rx_dropped; + u64 tx_broadcast; + u64 tx_pause; + u64 tx_multicast; + + u64 tx_undersize_errors; + u64 tx_64byte; + u64 tx_65_127byte; + u64 tx_128_255byte; + + u64 tx_256_511byte; + u64 tx_512_1023byte; + u64 tx_1024_1518byte; + u64 tx_jumbo; + + u64 tx_good_bytes; + u64 tx_collisions; + + u64 tx_aborted_errors; + u64 tx_multiple_collisions; + u64 tx_single_collisions; + u64 tx_good; + + u64 tx_deferred; + u64 tx_late_collisions; + u64 rx_oam; + u64 tx_oam; +}; + +struct yt921x_port { + unsigned char index; + + bool hairpin; + bool isolated; + + struct delayed_work mib_read; + struct yt921x_mib mib; + u64 rx_frames; + u64 tx_frames; +}; + +struct yt921x_reg_ops { + int (*read)(void *context, u32 reg, u32 *valp); + int (*write)(void *context, u32 reg, u32 val); +}; + +struct yt921x_priv { + struct dsa_switch ds; + + const struct yt921x_info *info; + /* cache of dsa_cpu_ports(ds) */ + u16 cpu_ports_mask; + + /* protect the access to the switch registers */ + struct mutex reg_lock; + const struct yt921x_reg_ops *reg_ops; + void *reg_ctx; + + /* mdio master bus */ + struct mii_bus *mbus_int; + struct mii_bus *mbus_ext; + + struct yt921x_port ports[YT921X_PORT_NUM]; + + u16 eee_ports_mask; +}; + +#endif |
