Diffstat (limited to 'drivers')
39 files changed, 629 insertions, 152 deletions
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c488725aa2a5..29f16f220384 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -294,6 +294,16 @@ config MTK_CMDQ_MBOX
 	  critical time limitation, such as updating display configuration
 	  during the vblank.
 
+config MTK_GPUEB_MBOX
+	tristate "MediaTek GPUEB Mailbox Support"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	help
+	  The MediaTek GPUEB mailbox is used to communicate with the embedded
+	  controller in charge of GPU frequency and power management on some
+	  MediaTek SoCs, such as the MT8196.
+	  Say Y or m here if you want to support the MT8196 SoC in your kernel
+	  build.
+
 config ZYNQMP_IPI_MBOX
 	tristate "Xilinx ZynqMP IPI Mailbox"
 	depends on ARCH_ZYNQMP && OF
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index f9a11f5639aa..81820a4f5528 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -63,6 +63,8 @@ obj-$(CONFIG_MTK_ADSP_MBOX)	+= mtk-adsp-mailbox.o
 
 obj-$(CONFIG_MTK_CMDQ_MBOX)	+= mtk-cmdq-mailbox.o
 
+obj-$(CONFIG_MTK_GPUEB_MBOX)	+= mtk-gpueb-mailbox.o
+
 obj-$(CONFIG_ZYNQMP_IPI_MBOX)	+= zynqmp-ipi-mailbox.o
 
 obj-$(CONFIG_SUN6I_MSGBOX)	+= sun6i-msgbox.o
diff --git a/drivers/mailbox/arm_mhuv3.c b/drivers/mailbox/arm_mhuv3.c
index b97e79a5870f..0910da67f8a1 100644
--- a/drivers/mailbox/arm_mhuv3.c
+++ b/drivers/mailbox/arm_mhuv3.c
@@ -945,7 +945,7 @@ static irqreturn_t mhuv3_mbx_comb_interrupt(int irq, void *arg)
 			if (IS_ERR(data)) {
 				dev_err(dev,
 					"Failed to read in-band data. err:%ld\n",
-					PTR_ERR(no_free_ptr(data)));
+					PTR_ERR(data));
 				goto rx_ack;
 			}
 		}
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 532929916e99..654a60f63756 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -379,20 +379,13 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
 	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
 	struct cmdq_task *task;
 	unsigned long curr_pa, end_pa;
-	int ret;
 
 	/* Client should not flush new tasks if suspended. */
 	WARN_ON(cmdq->suspended);
 
-	ret = pm_runtime_get_sync(cmdq->mbox.dev);
-	if (ret < 0)
-		return ret;
-
 	task = kzalloc(sizeof(*task), GFP_ATOMIC);
-	if (!task) {
-		pm_runtime_put_autosuspend(cmdq->mbox.dev);
+	if (!task)
 		return -ENOMEM;
-	}
 
 	task->cmdq = cmdq;
 	INIT_LIST_HEAD(&task->list_entry);
@@ -439,9 +432,6 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
 	}
 	list_move_tail(&task->list_entry, &thread->task_busy_list);
 
-	pm_runtime_mark_last_busy(cmdq->mbox.dev);
-	pm_runtime_put_autosuspend(cmdq->mbox.dev);
-
 	return 0;
 }
 
diff --git a/drivers/mailbox/mtk-gpueb-mailbox.c b/drivers/mailbox/mtk-gpueb-mailbox.c
new file mode 100644
index 000000000000..925bcf21f650
--- /dev/null
+++ b/drivers/mailbox/mtk-gpueb-mailbox.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MediaTek GPUEB mailbox driver for SoCs such as the MT8196
+ *
+ * Copyright (C) 2025, Collabora Ltd.
+ *
+ * Developers harmed in the making of this driver:
+ * - Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define GPUEB_MBOX_CTL_TX_STS	0x00
+#define GPUEB_MBOX_CTL_IRQ_SET	0x04
+#define GPUEB_MBOX_CTL_IRQ_CLR	0x74
+#define GPUEB_MBOX_CTL_RX_STS	0x78
+
+#define GPUEB_MBOX_FULL		BIT(0) /* i.e. we've received data */
+#define GPUEB_MBOX_BLOCKED	BIT(1) /* i.e. the channel is shutdown */
+
+#define GPUEB_MBOX_MAX_RX_SIZE	32 /* in bytes */
+
+struct mtk_gpueb_mbox {
+	struct device *dev;
+	struct clk *clk;
+	void __iomem *mbox_mmio;
+	void __iomem *mbox_ctl;
+	struct mbox_controller mbox;
+	struct mtk_gpueb_mbox_chan *ch;
+	int irq;
+	const struct mtk_gpueb_mbox_variant *v;
+};
+
+/**
+ * struct mtk_gpueb_mbox_chan - per-channel runtime data
+ * @ebm: pointer to the parent &struct mtk_gpueb_mbox mailbox
+ * @full_name: descriptive name of channel for IRQ subsystem
+ * @num: channel number, starting at 0
+ * @rx_status: signifies whether channel reception is turned off, or full
+ * @c: pointer to the constant &struct mtk_gpueb_mbox_chan_desc channel data
+ */
+struct mtk_gpueb_mbox_chan {
+	struct mtk_gpueb_mbox *ebm;
+	char *full_name;
+	u8 num;
+	atomic_t rx_status;
+	const struct mtk_gpueb_mbox_chan_desc *c;
+};
+
+/**
+ * struct mtk_gpueb_mbox_chan_desc - per-channel constant data
+ * @name: name of this channel
+ * @num: index of this channel, starting at 0
+ * @tx_offset: byte offset measured from mmio base for outgoing data
+ * @tx_len: size, in bytes, of the outgoing data on this channel
+ * @rx_offset: byte offset measured from mmio base for incoming data
+ * @rx_len: size, in bytes, of the incoming data on this channel
+ */
+struct mtk_gpueb_mbox_chan_desc {
+	const char *name;
+	const u8 num;
+	const u16 tx_offset;
+	const u8 tx_len;
+	const u16 rx_offset;
+	const u8 rx_len;
+};
+
+struct mtk_gpueb_mbox_variant {
+	const u8 num_channels;
+	const struct mtk_gpueb_mbox_chan_desc channels[] __counted_by(num_channels);
+};
+
+/**
+ * mtk_gpueb_mbox_read_rx - read RX buffer from MMIO into channel's RX buffer
+ * @buf: buffer to read into
+ * @chan: pointer to the channel to read
+ */
+static void mtk_gpueb_mbox_read_rx(void *buf, struct mtk_gpueb_mbox_chan *chan)
+{
+	memcpy_fromio(buf, chan->ebm->mbox_mmio + chan->c->rx_offset, chan->c->rx_len);
+}
+
+static irqreturn_t mtk_gpueb_mbox_isr(int irq, void *data)
+{
+	struct mtk_gpueb_mbox_chan *ch = data;
+	u32 rx_sts;
+
+	rx_sts = readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_RX_STS);
+
+	if (rx_sts & BIT(ch->num)) {
+		if (!atomic_cmpxchg(&ch->rx_status, 0, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED))
+			return IRQ_WAKE_THREAD;
+	}
+
+	return IRQ_NONE;
+}
+
+static irqreturn_t mtk_gpueb_mbox_thread(int irq, void *data)
+{
+	struct mtk_gpueb_mbox_chan *ch = data;
+	int status;
+
+	status = atomic_cmpxchg(&ch->rx_status, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED,
+				GPUEB_MBOX_FULL);
+	if (status == (GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED)) {
+		u8 buf[GPUEB_MBOX_MAX_RX_SIZE] = {};
+
+		mtk_gpueb_mbox_read_rx(buf, ch);
+		writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
+		mbox_chan_received_data(&ch->ebm->mbox.chans[ch->num], buf);
+		atomic_set(&ch->rx_status, 0);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int mtk_gpueb_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+	struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+	u32 *values = data;
+	int i;
+
+	if (atomic_read(&ch->rx_status))
+		return -EBUSY;
+
+	/*
+	 * We don't want any fancy nonsense, just write the 32-bit values in
+	 * order. memcpy_toio/__iowrite32_copy don't work here, as they may use
+	 * writes of different sizes or memory ordering characteristics depending
+	 * on the architecture, alignment and the current phase of the moon.
+	 */
+	for (i = 0; i < ch->c->tx_len; i += 4)
+		writel(values[i / 4], ch->ebm->mbox_mmio + ch->c->tx_offset + i);
+
+	writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_SET);
+
+	return 0;
+}
+
+static int mtk_gpueb_mbox_startup(struct mbox_chan *chan)
+{
+	struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+	int ret;
+
+	atomic_set(&ch->rx_status, 0);
+
+	ret = clk_enable(ch->ebm->clk);
+	if (ret) {
+		dev_err(ch->ebm->dev, "Failed to enable EB clock: %pe\n",
+			ERR_PTR(ret));
+		goto err_block;
+	}
+
+	writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
+
+	ret = devm_request_threaded_irq(ch->ebm->dev, ch->ebm->irq, mtk_gpueb_mbox_isr,
+					mtk_gpueb_mbox_thread, IRQF_SHARED | IRQF_ONESHOT,
+					ch->full_name, ch);
+	if (ret) {
+		dev_err(ch->ebm->dev, "Failed to request IRQ: %pe\n",
+			ERR_PTR(ret));
+		goto err_unclk;
+	}
+
+	return 0;
+
+err_unclk:
+	clk_disable(ch->ebm->clk);
+err_block:
+	atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+
+	return ret;
+}
+
+static void mtk_gpueb_mbox_shutdown(struct mbox_chan *chan)
+{
+	struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+
+	atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+
+	devm_free_irq(ch->ebm->dev, ch->ebm->irq, ch);
+
+	clk_disable(ch->ebm->clk);
+}
+
+static bool mtk_gpueb_mbox_last_tx_done(struct mbox_chan *chan)
+{
+	struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+
+	return !(readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_TX_STS) & BIT(ch->num));
+}
+
+const struct mbox_chan_ops mtk_gpueb_mbox_ops = {
+	.send_data = mtk_gpueb_mbox_send_data,
+	.startup = mtk_gpueb_mbox_startup,
+	.shutdown = mtk_gpueb_mbox_shutdown,
+	.last_tx_done = mtk_gpueb_mbox_last_tx_done,
+};
+
+static int mtk_gpueb_mbox_probe(struct platform_device *pdev)
+{
+	struct mtk_gpueb_mbox_chan *ch;
+	struct mtk_gpueb_mbox *ebm;
+	unsigned int i;
+
+	ebm = devm_kzalloc(&pdev->dev, sizeof(*ebm), GFP_KERNEL);
+	if (!ebm)
+		return -ENOMEM;
+
+	ebm->dev = &pdev->dev;
+	ebm->v = of_device_get_match_data(ebm->dev);
+
+	ebm->irq = platform_get_irq(pdev, 0);
+	if (ebm->irq < 0)
+		return ebm->irq;
+
+	ebm->clk = devm_clk_get_prepared(ebm->dev, NULL);
+	if (IS_ERR(ebm->clk))
+		return dev_err_probe(ebm->dev, PTR_ERR(ebm->clk),
+				     "Failed to get 'eb' clock\n");
+
+	ebm->mbox_mmio = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(ebm->mbox_mmio))
+		return dev_err_probe(ebm->dev, PTR_ERR(ebm->mbox_mmio),
+				     "Couldn't map mailbox data registers\n");
+
+	ebm->mbox_ctl = devm_platform_ioremap_resource(pdev, 1);
+	if (IS_ERR(ebm->mbox_ctl))
+		return dev_err_probe(
+			ebm->dev, PTR_ERR(ebm->mbox_ctl),
+			"Couldn't map mailbox control registers\n");
+
+	ebm->ch = devm_kmalloc_array(ebm->dev, ebm->v->num_channels,
+				     sizeof(*ebm->ch), GFP_KERNEL);
+	if (!ebm->ch)
+		return -ENOMEM;
+
+	ebm->mbox.chans = devm_kcalloc(ebm->dev, ebm->v->num_channels,
+				       sizeof(struct mbox_chan), GFP_KERNEL);
+	if (!ebm->mbox.chans)
+		return -ENOMEM;
+
+	for (i = 0; i < ebm->v->num_channels; i++) {
+		ch = &ebm->ch[i];
+		ch->c = &ebm->v->channels[i];
+		if (ch->c->rx_len > GPUEB_MBOX_MAX_RX_SIZE) {
+			dev_err(ebm->dev, "Channel %s RX size (%d) too large\n",
+				ch->c->name, ch->c->rx_len);
+			return -EINVAL;
+		}
+		ch->full_name = devm_kasprintf(ebm->dev, GFP_KERNEL, "%s:%s",
+					       dev_name(ebm->dev), ch->c->name);
+		if (!ch->full_name)
+			return -ENOMEM;
+
+		ch->ebm = ebm;
+		ch->num = i;
+		spin_lock_init(&ebm->mbox.chans[i].lock);
+		ebm->mbox.chans[i].con_priv = ch;
+		atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+	}
+
+	ebm->mbox.dev = ebm->dev;
+	ebm->mbox.num_chans = ebm->v->num_channels;
+	ebm->mbox.txdone_poll = true;
+	ebm->mbox.txpoll_period = 0; /* minimum hrtimer interval */
+	ebm->mbox.ops = &mtk_gpueb_mbox_ops;
+
+	dev_set_drvdata(ebm->dev, ebm);
+
+	return devm_mbox_controller_register(ebm->dev, &ebm->mbox);
+}
+
+static const struct mtk_gpueb_mbox_variant mtk_gpueb_mbox_mt8196 = {
+	.num_channels = 12,
+	.channels = {
+		{ "fast-dvfs-event", 0, 0x0000, 16, 0x00e0, 16 },
+		{ "gpufreq", 1, 0x0010, 32, 0x00f0, 32 },
+		{ "sleep", 2, 0x0030, 12, 0x0110, 4 },
+		{ "timer", 3, 0x003c, 24, 0x0114, 4 },
+		{ "fhctl", 4, 0x0054, 36, 0x0118, 4 },
+		{ "ccf", 5, 0x0078, 16, 0x011c, 16 },
+		{ "gpumpu", 6, 0x0088, 24, 0x012c, 4 },
+		{ "fast-dvfs", 7, 0x00a0, 24, 0x0130, 24 },
+		{ "ipir-c-met", 8, 0x00b8, 4, 0x0148, 16 },
+		{ "ipis-c-met", 9, 0x00bc, 16, 0x0158, 4 },
+		{ "brisket", 10, 0x00cc, 16, 0x015c, 16 },
+		{ "ppb", 11, 0x00dc, 4, 0x016c, 4 },
+	},
+};
+
+static const struct of_device_id mtk_gpueb_mbox_of_ids[] = {
+	{ .compatible = "mediatek,mt8196-gpueb-mbox", .data = &mtk_gpueb_mbox_mt8196 },
+	{ /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_gpueb_mbox_of_ids);
+
+static struct platform_driver mtk_gpueb_mbox_drv = {
+	.probe = mtk_gpueb_mbox_probe,
+	.driver = {
+		.name = "mtk-gpueb-mbox",
+		.of_match_table = mtk_gpueb_mbox_of_ids,
+	}
+};
+module_platform_driver(mtk_gpueb_mbox_drv);
+
+MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
+MODULE_DESCRIPTION("MediaTek GPUEB mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 8b24ec0fa191..d3a8f6b4a03b 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -58,7 +58,6 @@ static const struct regmap_config apcs_regmap_config = {
 	.reg_stride = 4,
 	.val_bits = 32,
 	.max_register = 0x1008,
-	.fast_io = true,
 };
 
 static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 0c143beaafda..967967b2b8a9 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -62,7 +62,8 @@
 
 #define DST_BIT_POS	9U
 #define SRC_BITMASK	GENMASK(11, 8)
-#define MAX_SGI 16
+/* Macro to represent SGI type for IPI IRQs */
+#define IPI_IRQ_TYPE_SGI 2
 
 /*
  * Module parameters
@@ -121,6 +122,7 @@ struct zynqmp_ipi_mbox {
  * @dev: device pointer corresponding to the Xilinx ZynqMP
  *	 IPI agent
  * @irq: IPI agent interrupt ID
+ * @irq_type: IPI SGI or SPI IRQ type
  * @method: IPI SMC or HVC is going to be used
  * @local_id: local IPI agent ID
  * @virq_sgi: IRQ number mapped to SGI
@@ -130,6 +132,7 @@ struct zynqmp_ipi_mbox {
 struct zynqmp_ipi_pdata {
 	struct device *dev;
 	int irq;
+	unsigned int irq_type;
 	unsigned int method;
 	u32 local_id;
 	int virq_sgi;
@@ -887,17 +890,14 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
 	struct zynqmp_ipi_mbox *ipi_mbox;
 	int i;
 
-	if (pdata->irq < MAX_SGI)
+	if (pdata->irq_type == IPI_IRQ_TYPE_SGI)
 		xlnx_mbox_cleanup_sgi(pdata);
 
-	i = pdata->num_mboxes;
+	i = pdata->num_mboxes - 1;
 	for (; i >= 0; i--) {
 		ipi_mbox = &pdata->ipi_mboxes[i];
-		if (ipi_mbox->dev.parent) {
-			mbox_controller_unregister(&ipi_mbox->mbox);
-			if (device_is_registered(&ipi_mbox->dev))
-				device_unregister(&ipi_mbox->dev);
-		}
+		if (device_is_registered(&ipi_mbox->dev))
+			device_unregister(&ipi_mbox->dev);
 	}
 }
 
@@ -959,14 +959,16 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
 		dev_err(dev, "failed to parse interrupts\n");
 		goto free_mbox_dev;
 	}
-	ret = out_irq.args[1];
+
+	/* Use interrupt type to distinguish SGI and SPI interrupts */
+	pdata->irq_type = out_irq.args[0];
 
 	/*
 	 * If Interrupt number is in SGI range, then request SGI else request
 	 * IPI system IRQ.
 	 */
-	if (ret < MAX_SGI) {
-		pdata->irq = ret;
+	if (pdata->irq_type == IPI_IRQ_TYPE_SGI) {
+		pdata->irq = out_irq.args[1];
 		ret = xlnx_mbox_init_sgi(pdev, pdata->irq, pdata);
 		if (ret)
 			goto free_mbox_dev;
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 81ea01a652b9..833dd911980b 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -1710,7 +1710,9 @@ static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
 	airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
 	airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
 		      LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
-		      FIELD_PREP(LPBK_CHAN_MASK, chan) | LPBK_EN_MASK);
+		      FIELD_PREP(LPBK_CHAN_MASK, chan) |
+		      LBK_GAP_MODE_MASK | LBK_LEN_MODE_MASK |
+		      LBK_CHAN_MODE_MASK | LPBK_EN_MASK);
 	airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
 		      GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
 		      FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
index e1c15c20be8e..69c5a143db8c 100644
--- a/drivers/net/ethernet/airoha/airoha_regs.h
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -151,6 +151,9 @@
 #define LPBK_LEN_MASK		GENMASK(23, 10)
 #define LPBK_CHAN_MASK		GENMASK(8, 4)
 #define LPBK_MODE_MASK		GENMASK(3, 1)
+#define LBK_GAP_MODE_MASK	BIT(3)
+#define LBK_LEN_MODE_MASK	BIT(2)
+#define LBK_CHAN_MODE_MASK	BIT(1)
 #define LPBK_EN_MASK		BIT(0)
 
 #define REG_GDM_TXCHN_EN(_n)	(GDM_BASE(_n) + 0x24)
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 577f9b1780ad..de88776dd2a2 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -479,10 +479,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 					"missing 'reg' property in node %pOF\n",
 					tbi);
 				err = -EBUSY;
+				of_node_put(tbi);
 				goto error;
 			}
 			set_tbipa(*prop, pdev,
 				  data->get_tbipa, priv->map, &res);
+			of_node_put(tbi);
 		}
 	}
 
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index b53561c34708..0a8a48cd4bce 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -99,19 +99,21 @@ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
 
 	index = ice_adapter_xa_index(pdev);
 	scoped_guard(mutex, &ice_adapters_mutex) {
-		err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
-		if (err == -EBUSY) {
-			adapter = xa_load(&ice_adapters, index);
+		adapter = xa_load(&ice_adapters, index);
+		if (adapter) {
 			refcount_inc(&adapter->refcount);
 			WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
 			return adapter;
 		}
 
+		err = xa_reserve(&ice_adapters, index, GFP_KERNEL);
 		if (err)
 			return ERR_PTR(err);
 
 		adapter = ice_adapter_new(pdev);
-		if (!adapter)
+		if (!adapter) {
+			xa_release(&ice_adapters, index);
 			return ERR_PTR(-ENOMEM);
+		}
 		xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
 	}
 	return adapter;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index d2071aff7b8f..308b4458e0d4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1180,9 +1180,9 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
 			mlx4_unregister_mac(mdev->dev, priv->port, mac);
 
 			hlist_del_rcu(&entry->hlist);
-			kfree_rcu(entry, rcu);
 			en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
 			       entry->mac, priv->port);
+			kfree_rcu(entry, rcu);
 			++removed;
 		}
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 00e77c71e201..0a4fb8c92268 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -772,6 +772,7 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
 				struct netlink_ext_ack *extack)
 {
 	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
+	bool allow_tunnel_mode = false;
 	struct mlx5e_ipsec *ipsec;
 	struct mlx5e_priv *priv;
 	gfp_t gfp;
@@ -803,6 +804,20 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
 		goto err_xfrm;
 	}
 
+	if (mlx5_eswitch_block_mode(priv->mdev))
+		goto unblock_ipsec;
+
+	if (x->props.mode == XFRM_MODE_TUNNEL &&
+	    x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+		allow_tunnel_mode = mlx5e_ipsec_fs_tunnel_allowed(sa_entry);
+		if (!allow_tunnel_mode) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Packet offload tunnel mode is disabled due to encap settings");
+			err = -EINVAL;
+			goto unblock_mode;
+		}
+	}
+
 	/* check esn */
 	if (x->props.flags & XFRM_STATE_ESN)
 		mlx5e_ipsec_update_esn_state(sa_entry);
@@ -817,7 +832,7 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
 
 	err = mlx5_ipsec_create_work(sa_entry);
 	if (err)
-		goto unblock_ipsec;
+		goto unblock_encap;
 
 	err = mlx5e_ipsec_create_dwork(sa_entry);
 	if (err)
@@ -832,14 +847,6 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
 	if (err)
 		goto err_hw_ctx;
 
-	if (x->props.mode == XFRM_MODE_TUNNEL &&
-	    x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
-	    !mlx5e_ipsec_fs_tunnel_enabled(sa_entry)) {
-		NL_SET_ERR_MSG_MOD(extack, "Packet offload tunnel mode is disabled due to encap settings");
-		err = -EINVAL;
-		goto err_add_rule;
-	}
-
 	/* We use *_bh() variant because xfrm_timer_handler(), which runs
 	 * in softirq context, can reach our state delete logic and we need
 	 * xa_erase_bh() there.
@@ -855,8 +862,7 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
 		queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
 				   MLX5_IPSEC_RESCHED);
 
-	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
-	    x->props.mode == XFRM_MODE_TUNNEL) {
+	if (allow_tunnel_mode) {
 		xa_lock_bh(&ipsec->sadb);
 		__xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
 			      MLX5E_IPSEC_TUNNEL_SA);
@@ -865,6 +871,11 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
 
 out:
 	x->xso.offload_handle = (unsigned long)sa_entry;
+	if (allow_tunnel_mode)
+		mlx5_eswitch_unblock_encap(priv->mdev);
+
+	mlx5_eswitch_unblock_mode(priv->mdev);
+
 	return 0;
 
 err_add_rule:
@@ -877,6 +888,11 @@ release_work:
 	if (sa_entry->work)
 		kfree(sa_entry->work->data);
 	kfree(sa_entry->work);
+unblock_encap:
+	if (allow_tunnel_mode)
+		mlx5_eswitch_unblock_encap(priv->mdev);
+unblock_mode:
+	mlx5_eswitch_unblock_mode(priv->mdev);
 unblock_ipsec:
 	mlx5_eswitch_unblock_ipsec(priv->mdev);
 err_xfrm:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 23703f28386a..5d7c15abfcaf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -319,7 +319,7 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
 void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
 void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
-bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry);
+bool mlx5e_ipsec_fs_tunnel_allowed(struct mlx5e_ipsec_sa_entry *sa_entry);
 
 int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
 void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 6ccfc2af07b7..bf1d2769d4f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -1069,7 +1069,9 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 
 	/* Create FT */
 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
-		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+		rx->allow_tunnel_mode =
+			mlx5_eswitch_block_encap(mdev, rx == ipsec->rx_esw);
+
 	if (rx->allow_tunnel_mode)
 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
 	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
@@ -1310,7 +1312,9 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
 		goto err_status_rule;
 
 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
-		tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+		tx->allow_tunnel_mode =
+			mlx5_eswitch_block_encap(mdev, tx == ipsec->tx_esw);
+
 	if (tx->allow_tunnel_mode)
 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
 	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
@@ -2846,18 +2850,24 @@ void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
 	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
 }
 
-bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
+bool mlx5e_ipsec_fs_tunnel_allowed(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
-	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
-	struct mlx5e_ipsec_rx *rx;
-	struct mlx5e_ipsec_tx *tx;
+	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+	struct xfrm_state *x = sa_entry->x;
+	bool from_fdb;
 
-	rx = ipsec_rx(sa_entry->ipsec, attrs->addrs.family, attrs->type);
-	tx = ipsec_tx(sa_entry->ipsec, attrs->type);
-	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
-		return tx->allow_tunnel_mode;
+	if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) {
+		struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, x->xso.type);
+
+		from_fdb = (tx == ipsec->tx_esw);
+	} else {
+		struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, x->props.family,
+						     x->xso.type);
+
+		from_fdb = (rx == ipsec->rx_esw);
+	}
 
-	return rx->allow_tunnel_mode;
+	return mlx5_eswitch_block_encap(ipsec->mdev, from_fdb);
 }
 
 void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
index b4cb131c5f81..8565cfe8d7dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
@@ -893,27 +893,27 @@ int mlx5e_psp_init(struct mlx5e_priv *priv)
 
 	if (!mlx5_is_psp_device(mdev)) {
 		mlx5_core_dbg(mdev, "PSP offload not supported\n");
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	if (!MLX5_CAP_ETH(mdev, swp)) {
 		mlx5_core_dbg(mdev, "SWP not supported\n");
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
 		mlx5_core_dbg(mdev, "SWP checksum not supported\n");
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial)) {
 		mlx5_core_dbg(mdev, "SWP L4 partial checksum not supported\n");
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
 		mlx5_core_dbg(mdev, "PSP LSO not supported\n");
-		return -EOPNOTSUPP;
+		return 0;
 	}
 
 	psp = kzalloc(sizeof(*psp), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index df3756d7e52e..16eb99aba2a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -879,7 +879,7 @@ void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
 					      struct mlx5_eswitch *slave_esw);
 int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
 
-bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb);
 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
 
 int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
@@ -974,7 +974,8 @@ mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
 	return 0;
 }
 
-static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+static inline bool
+mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
 {
 	return true;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 52c3de24bea3..4cf995be127d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -4006,23 +4006,25 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }
 
-bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
 {
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	enum devlink_eswitch_encap_mode encap;
+	bool allow_tunnel = false;
 
 	if (!mlx5_esw_allowed(esw))
 		return true;
 
 	down_write(&esw->mode_lock);
-	if (esw->mode != MLX5_ESWITCH_LEGACY &&
-	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
-		up_write(&esw->mode_lock);
-		return false;
+	encap = esw->offloads.encap;
+	if (esw->mode == MLX5_ESWITCH_LEGACY ||
+	    (encap == DEVLINK_ESWITCH_ENCAP_MODE_NONE && !from_fdb)) {
+		allow_tunnel = true;
+		esw->offloads.num_block_encap++;
 	}
-
-	esw->offloads.num_block_encap++;
 	up_write(&esw->mode_lock);
-	return true;
+
+	return allow_tunnel;
 }
 
 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
index c281153bd411..05e5fd777d4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -266,7 +266,7 @@ static void mlx5_iowrite64_copy(struct mlx5_wc_sq *sq, __be32 mmio_wqe[16],
 	if (cpu_has_neon()) {
 		kernel_neon_begin();
 		asm volatile
-			(".arch_extension simd;\n\t"
+			(".arch_extension simd\n\t"
 			 "ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%0]\n\t"
 			 "st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%1]"
 			 :
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index a1c2db69b198..95fac020eb93 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -185,13 +185,13 @@ static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
 
 	for (i = 0; i < nv->txt_count; i++) {
 		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
-		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub1);
+		fbnic_aggregate_ring_xdp_counters(fbn, &nv->qt[i].sub1);
 		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
 	}
 
 	for (j = 0; j < nv->rxt_count; j++, i++) {
-		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0);
-		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1);
+		fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub0);
+		fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub1);
 		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
 	}
 }
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 8f998d26b9a3..2a84bd1d7e26 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -83,8 +83,16 @@ static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
 
 static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
 {
+	u64 default_meta = FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, ETH_HLEN) |
+			   FBNIC_TWD_FLAG_REQ_COMPLETION;
 	u32 clock_freq;
 
+	/* Configure default TWQ Metadata descriptor */
+	wr32(fbd, FBNIC_QM_TWQ_DEFAULT_META_L,
+	     lower_32_bits(default_meta));
+	wr32(fbd, FBNIC_QM_TWQ_DEFAULT_META_H,
+	     upper_32_bits(default_meta));
+
 	/* Configure TSO behavior */
 	wr32(fbd, FBNIC_QM_TQS_CTL0,
 	     FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index d12b4cad84a5..e95be0e7bd9e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -543,17 +543,21 @@ static const struct net_device_ops fbnic_netdev_ops = {
 static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
 				     struct netdev_queue_stats_rx *rx)
 {
+	u64 bytes, packets, alloc_fail, alloc_fail_bdq;
 	struct fbnic_net *fbn = netdev_priv(dev);
 	struct fbnic_ring *rxr = fbn->rx[idx];
 	struct fbnic_dev *fbd = fbn->fbd;
 	struct fbnic_queue_stats *stats;
-	u64 bytes, packets, alloc_fail;
 	u64 csum_complete, csum_none;
+	struct fbnic_q_triad *qt;
 	unsigned int start;
 
 	if (!rxr)
 		return;
 
+	/* fbn->rx points to completion queues */
+	qt = container_of(rxr, struct fbnic_q_triad, cmpl);
+
 	stats = &rxr->stats;
 	do {
 		start = u64_stats_fetch_begin(&stats->syncp);
@@ -564,6 +568,20 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
 		csum_none = stats->rx.csum_none;
 	} while (u64_stats_fetch_retry(&stats->syncp, start));
 
+	stats = &qt->sub0.stats;
+	do {
+		start = u64_stats_fetch_begin(&stats->syncp);
+		alloc_fail_bdq = stats->bdq.alloc_failed;
+	} while (u64_stats_fetch_retry(&stats->syncp, start));
+	alloc_fail += alloc_fail_bdq;
+
+	stats = &qt->sub1.stats;
+	do {
+		start = u64_stats_fetch_begin(&stats->syncp);
+		alloc_fail_bdq = stats->bdq.alloc_failed;
+	} while (u64_stats_fetch_retry(&stats->syncp, start));
+	alloc_fail += alloc_fail_bdq;
+
 	rx->bytes = bytes;
 	rx->packets = packets;
 	rx->alloc_fail = alloc_fail;
@@ -641,7 +659,8 @@ static void fbnic_get_base_stats(struct net_device *dev,
 
 	rx->bytes = fbn->rx_stats.bytes;
 	rx->packets = fbn->rx_stats.packets;
-	rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
+	rx->alloc_fail = fbn->rx_stats.rx.alloc_failed +
+			 fbn->bdq_stats.bdq.alloc_failed;
 	rx->csum_complete = fbn->rx_stats.rx.csum_complete;
 	rx->csum_none = fbn->rx_stats.rx.csum_none;
 }
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index e84e0527c3a9..b0a87c57910f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -68,6 +68,7 @@ struct fbnic_net {
 	/* Storage for stats after ring destruction */
 	struct fbnic_queue_stats tx_stats;
 	struct fbnic_queue_stats rx_stats;
+	struct fbnic_queue_stats bdq_stats;
 	u64 link_down_events;
 
 	/* Time stamping filter config */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index cf773cc78e40..b1e8ce89870f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -904,7 +904,7 @@ static void fbnic_fill_bdq(struct fbnic_ring *bdq)
 		netmem = page_pool_dev_alloc_netmems(bdq->page_pool);
 		if (!netmem) {
 			u64_stats_update_begin(&bdq->stats.syncp);
-			bdq->stats.rx.alloc_failed++;
+			bdq->stats.bdq.alloc_failed++;
 			u64_stats_update_end(&bdq->stats.syncp);
 
 			break;
@@ -1242,6 +1242,7 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
 	/* Walk the completion queue collecting the heads reported by NIC */
 	while (likely(packets < budget)) {
 		struct sk_buff *skb = ERR_PTR(-EINVAL);
+		u32 pkt_bytes;
 		u64 rcd;
 
 		if ((*raw_rcd & cpu_to_le64(FBNIC_RCD_DONE)) == done)
@@ -1272,37 +1273,38 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
 			/* We currently ignore the action table index */
 			break;
 		case FBNIC_RCD_TYPE_META:
-			if (unlikely(pkt->add_frag_failed))
-				skb = NULL;
-			else if (likely(!fbnic_rcd_metadata_err(rcd)))
+			if (likely(!fbnic_rcd_metadata_err(rcd) &&
+				   !pkt->add_frag_failed)) {
+				pkt_bytes = xdp_get_buff_len(&pkt->buff);
 				skb = fbnic_run_xdp(nv, pkt);
+			}
 
 			/* Populate skb and invalidate XDP */
 			if (!IS_ERR_OR_NULL(skb)) {
 				fbnic_populate_skb_fields(nv, rcd, skb, qt,
 							  &csum_complete,
 							  &csum_none);
-
-				packets++;
-				bytes += skb->len;
-
 				napi_gro_receive(&nv->napi, skb);
 			} else if (skb == ERR_PTR(-FBNIC_XDP_TX)) {
 				pkt_tail = nv->qt[0].sub1.tail;
-				bytes += xdp_get_buff_len(&pkt->buff);
+			} else if (PTR_ERR(skb) == -FBNIC_XDP_CONSUME) {
+				fbnic_put_pkt_buff(qt, pkt, 1);
 			} else {
-				if (!skb) {
+				if (!skb)
 					alloc_failed++;
-					dropped++;
-				} else if (skb == ERR_PTR(-FBNIC_XDP_LEN_ERR)) {
+
+				if (skb == ERR_PTR(-FBNIC_XDP_LEN_ERR))
 					length_errors++;
-				} else {
+				else
 					dropped++;
-				}
 
 				fbnic_put_pkt_buff(qt, pkt, 1);
+				goto next_dont_count;
 			}
 
+			packets++;
+			bytes += pkt_bytes;
+next_dont_count:
 			pkt->buff.data_hard_start = NULL;
 
 			break;
@@ -1319,8 +1321,6 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
 	u64_stats_update_begin(&rcq->stats.syncp);
 	rcq->stats.packets += packets;
 	rcq->stats.bytes += bytes;
-	/* Re-add ethernet header length (removed in fbnic_build_skb) */
-	rcq->stats.bytes += ETH_HLEN * packets;
 	rcq->stats.dropped += dropped;
 	rcq->stats.rx.alloc_failed += alloc_failed;
 	rcq->stats.rx.csum_complete += csum_complete;
@@ -1414,6 +1414,17 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
 	BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 4);
 }
 
+void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn,
+				       struct fbnic_ring *bdq)
+{
+	struct fbnic_queue_stats *stats = &bdq->stats;
+
+	/* Capture stats from queues before dissasociating them */
+	fbn->bdq_stats.bdq.alloc_failed += stats->bdq.alloc_failed;
+	/* Remember to add new stats here */
+	BUILD_BUG_ON(sizeof(fbn->rx_stats.bdq) / 8 != 1);
+}
+
 void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
 				      struct fbnic_ring *txr)
 {
@@ -1433,8 +1444,8 @@ void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
 	BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6);
 }
 
-static void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
-					      struct fbnic_ring *xdpr)
+void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
+				       struct fbnic_ring *xdpr)
 {
 	struct fbnic_queue_stats *stats = &xdpr->stats;
 
@@ -1442,9 +1453,7 @@ static void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
 		return;
 
 	/* Capture stats from queues before dissasociating them */
-	fbn->rx_stats.bytes += stats->bytes;
-	fbn->rx_stats.packets += stats->packets;
-	fbn->rx_stats.dropped += stats->dropped;
+	fbn->tx_stats.dropped += stats->dropped;
 	fbn->tx_stats.bytes += stats->bytes;
 	fbn->tx_stats.packets += stats->packets;
 }
@@ -1488,6 +1497,15 @@ static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
 	fbn->rx[rxr->q_idx] = NULL;
 }
 
+static void fbnic_remove_bdq_ring(struct fbnic_net *fbn,
+				  struct fbnic_ring *bdq)
+{
+	if (!(bdq->flags & FBNIC_RING_F_STATS))
+		return;
+
+	fbnic_aggregate_ring_bdq_counters(fbn, bdq);
+}
+
 static void fbnic_free_qt_page_pools(struct fbnic_q_triad *qt)
 {
 	page_pool_destroy(qt->sub0.page_pool);
@@ -1507,8 +1525,8 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
 	}
 
 	for (j = 0; j < nv->rxt_count; j++, i++) {
-		fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0);
-		fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1);
+		fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub0);
+		fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub1);
 		fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
 	}
 
@@ -1707,11 +1725,13 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
 	while (rxt_count) {
 		/* Configure header queue */
 		db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL];
-		fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX);
+		fbnic_ring_init(&qt->sub0, db, 0,
+				FBNIC_RING_F_CTX | FBNIC_RING_F_STATS);
 
 		/* Configure payload queue */
 		db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL];
-		fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX);
+		fbnic_ring_init(&qt->sub1, db, 0,
+				FBNIC_RING_F_CTX | FBNIC_RING_F_STATS);
 
 		/* Configure Rx completion queue */
 		db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD];
@@ -2830,8 +2850,8 @@ static int fbnic_queue_start(struct net_device *dev, void *qmem, int idx)
 	real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
 	nv = fbn->napi[idx % fbn->num_napi];
 
-	fbnic_aggregate_ring_rx_counters(fbn, &real->sub0);
-	fbnic_aggregate_ring_rx_counters(fbn, &real->sub1);
+	fbnic_aggregate_ring_bdq_counters(fbn, &real->sub0);
+	fbnic_aggregate_ring_bdq_counters(fbn, &real->sub1);
 	fbnic_aggregate_ring_rx_counters(fbn, &real->cmpl);
 
 	memcpy(real, qmem, sizeof(*real));
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 31fac0ba0902..ca37da5a0b17 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -92,6 +92,9 @@ struct fbnic_queue_stats {
 			u64 csum_none;
 			u64 length_errors;
 		} rx;
+		struct {
+			u64 alloc_failed;
+		} bdq;
 	};
 	u64 dropped;
 	struct u64_stats_sync syncp;
@@ -165,8 +168,12 @@ fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
 
 void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
 				      struct fbnic_ring *rxr);
+void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn,
+				       struct fbnic_ring *rxr);
 void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
 				      struct fbnic_ring *txr);
+void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
+				       struct fbnic_ring *xdpr);
 
 int fbnic_alloc_napi_vectors(struct fbnic_net *fbn);
 void fbnic_free_napi_vectors(struct fbnic_net *fbn);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 74ad1d73b465..40b1bfc600a7 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -708,6 +708,11 @@ static int sparx5_start(struct sparx5 *sparx5)
 	/* Init masks */
 	sparx5_update_fwd(sparx5);
 
+	/* Init flood masks */
+	for (int pgid = sparx5_get_pgid(sparx5, PGID_UC_FLOOD);
+	     pgid <= sparx5_get_pgid(sparx5, PGID_BCAST); pgid++)
+		sparx5_pgid_clear(sparx5, pgid);
+
 	/* CPU copy CPU pgids */
 	spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
 		sparx5, ANA_AC_PGID_MISC_CFG(sparx5_get_pgid(sparx5, PGID_CPU)));
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index bc9ecb9392cd..0a71abbd3da5 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -176,6 +176,7 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
 				   struct net_device *bridge,
 				   struct netlink_ext_ack *extack)
 {
+	struct switchdev_brport_flags flags = {0};
 	struct sparx5 *sparx5 = port->sparx5;
 	struct net_device *ndev = port->ndev;
 	int err;
@@ -205,6 +206,11 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
 	 */
 	__dev_mc_unsync(ndev, sparx5_mc_unsync);
 
+	/* Enable uc/mc/bc flooding */
+	flags.mask = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+	flags.val = flags.mask;
+	sparx5_port_attr_bridge_flags(port, flags);
+
 	return 0;
 
 err_switchdev_offload:
@@ -215,6 +221,7 @@ err_switchdev_offload:
 static void sparx5_port_bridge_leave(struct sparx5_port *port,
 				     struct net_device *bridge)
 {
+	struct switchdev_brport_flags flags = {0};
 	struct sparx5 *sparx5 = port->sparx5;
 
 	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);
@@ -234,6 +241,11 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port,
 
 	/* Port enters in host more therefore restore mc list */
 	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
+
+	/* Disable uc/mc/bc flooding */
+	flags.mask = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+	flags.val = 0;
+	sparx5_port_attr_bridge_flags(port, flags);
 }
 
 static int sparx5_port_changeupper(struct net_device *dev,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index d42097aa60a0..494782871903 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -167,16 +167,6 @@ void sparx5_update_fwd(struct sparx5 *sparx5)
 	/* Divide up fwd mask in 32 bit words */
 	bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);
 
-	/* Update flood masks */
-	for (port = sparx5_get_pgid(sparx5, PGID_UC_FLOOD);
-	     port <= sparx5_get_pgid(sparx5, PGID_BCAST); port++) {
-		spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
-		if (is_sparx5(sparx5)) {
-			spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
-			spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
-		}
-	}
-
 	/* Update SRC masks */
 	for (port = 0; port < sparx5->data->consts->n_ports; port++) {
 		if (test_bit(port, sparx5->bridge_fwd_mask)) {
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index 545710dadcf5..d2be1be37716 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -1021,6 +1021,6 @@ int ocelot_stats_init(struct ocelot *ocelot)
 
 void ocelot_stats_deinit(struct ocelot *ocelot)
 {
-	cancel_delayed_work(&ocelot->stats_work);
+	disable_delayed_work_sync(&ocelot->stats_work);
 	destroy_workqueue(ocelot->stats_queue);
 }
diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index 53e96bfab542..ed20352a589a 100644
--- a/drivers/net/mdio/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -116,17 +116,23 @@ static int smbus_byte_mii_read_default_c22(struct mii_bus *bus, int phy_id,
 	if (!i2c_mii_valid_phy_id(phy_id))
 		return 0;
 
-	ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
-			     I2C_SMBUS_READ, reg,
-			     I2C_SMBUS_BYTE_DATA, &smbus_data);
+	i2c_lock_bus(i2c, I2C_LOCK_SEGMENT);
+
+	ret = __i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+			       I2C_SMBUS_READ, reg,
+			       I2C_SMBUS_BYTE_DATA, &smbus_data);
 	if (ret < 0)
-		return ret;
+		goto unlock;
 
 	val = (smbus_data.byte & 0xff) << 8;
 
-	ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
-			     I2C_SMBUS_READ, reg,
-			     I2C_SMBUS_BYTE_DATA, &smbus_data);
+	ret = __i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+			       I2C_SMBUS_READ, reg,
+			       I2C_SMBUS_BYTE_DATA, &smbus_data);
+
+unlock:
+	i2c_unlock_bus(i2c, I2C_LOCK_SEGMENT);
+
 	if (ret < 0)
 		return ret;
 
@@ -147,17 +153,22 @@ static int smbus_byte_mii_write_default_c22(struct mii_bus *bus, int phy_id,
 
 	smbus_data.byte = (val & 0xff00) >> 8;
 
-	ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
-			     I2C_SMBUS_WRITE, reg,
-			     I2C_SMBUS_BYTE_DATA, &smbus_data);
+	i2c_lock_bus(i2c, I2C_LOCK_SEGMENT);
+
+	ret = __i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+			       I2C_SMBUS_WRITE, reg,
+			       I2C_SMBUS_BYTE_DATA, &smbus_data);
 	if (ret < 0)
-		return ret;
+		goto unlock;
 
 	smbus_data.byte = val & 0xff;
 
-	ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
-			     I2C_SMBUS_WRITE, reg,
-			     I2C_SMBUS_BYTE_DATA, &smbus_data);
+	ret = __i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+			       I2C_SMBUS_WRITE, reg,
+			       I2C_SMBUS_BYTE_DATA, &smbus_data);
+
+unlock:
+	i2c_unlock_bus(i2c, I2C_LOCK_SEGMENT);
 
 	return ret < 0 ? ret : 0;
 }
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 63f8f43062bc..b724b222ab44 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -62,7 +62,7 @@
 #define TPS23881_REG_SRAM_DATA	0x61
 
 #define TPS23881_UV_STEP	3662
-#define TPS23881_NA_STEP	70190
+#define TPS23881_NA_STEP	89500
 #define TPS23881_MW_STEP	500
 #define TPS23881_MIN_PI_PW_LIMIT_MW	2000
 
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 792ddda1ad49..85bd5d845409 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -625,6 +625,21 @@ static void ax88772_suspend(struct usbnet *dev)
 		   asix_read_medium_status(dev, 1));
 }
 
+/* Notes on PM callbacks and locking context:
+ *
+ * - asix_suspend()/asix_resume() are invoked for both runtime PM and
+ *   system-wide suspend/resume. For struct usb_driver the ->resume()
+ *   callback does not receive pm_message_t, so the resume type cannot
+ *   be distinguished here.
+ *
+ * - The MAC driver must hold RTNL when calling phylink interfaces such as
+ *   phylink_suspend()/resume(). Those calls will also perform MDIO I/O.
+ *
+ * - Taking RTNL and doing MDIO from a runtime-PM resume callback (while
+ *   the USB PM lock is held) is fragile. Since autosuspend brings no
+ *   measurable power saving here, we block it by holding a PM usage
+ *   reference in ax88772_bind().
+ */
 static int asix_suspend(struct usb_interface *intf, pm_message_t message)
 {
 	struct usbnet *dev = usb_get_intfdata(intf);
@@ -919,6 +934,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 	if (ret)
 		goto initphy_err;
 
+	/* Keep this interface runtime-PM active by taking a usage ref.
+	 * Prevents runtime suspend while bound and avoids resume paths
+	 * that could deadlock (autoresume under RTNL while USB PM lock
+	 * is held, phylink/MDIO wants RTNL).
+	 */
+	pm_runtime_get_noresume(&intf->dev);
+
 	return 0;
 
 initphy_err:
@@ -948,6 +970,8 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
 	phylink_destroy(priv->phylink);
 	ax88772_mdio_unregister(priv);
 	asix_rx_fixup_common_free(dev->driver_priv);
+	/* Drop the PM usage ref taken in bind() */
+	pm_runtime_put(&intf->dev);
 }
 
 static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -1600,6 +1624,11 @@ static struct usb_driver asix_driver = {
 	.resume = asix_resume,
 	.reset_resume = asix_resume,
 	.disconnect = usbnet_disconnect,
+	/* usbnet enables autosuspend by default (supports_autosuspend=1).
+	 * We keep runtime-PM active for AX88772* by taking a PM usage
+	 * reference in ax88772_bind() (pm_runtime_get_noresume()) and
+	 * dropping it in unbind(), which effectively blocks autosuspend.
+	 */
 	.supports_autosuspend = 1,
 	.disable_hub_initiated_lpm = 1,
 };
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index b56e2459ee3c..42d35cc6b421 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1080,10 +1080,13 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
 	}
 
 read_raw_eeprom_done:
-	if (dev->chipid == ID_REV_CHIP_ID_7800_)
-		return lan78xx_write_reg(dev, HW_CFG, saved);
-
-	return 0;
+	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+		int rc = lan78xx_write_reg(dev, HW_CFG, saved);
+		/* If USB fails, there is nothing to do */
+		if (rc < 0)
+			return rc;
+	}
+	return ret;
 }
 
 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 8bf63f2dcbbf..eb137e078423 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -939,6 +939,7 @@ static void t7xx_pci_remove(struct pci_dev *pdev)
 
 static const struct pci_device_id t7xx_pci_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
+	{ PCI_DEVICE(0x03f0, 0x09c8) },	// HP DRMR-H01
 	{ PCI_DEVICE(0x14c0, 0x4d75) },	// Dell DW5933e
 	{ }
 };
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 362ad108794d..4a8735b275e4 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -2085,7 +2085,8 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 	int i;
 
 	for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
-		struct resource *res = pci_bus_resource_n(bus, i);
+		struct resource *res =
+			pci_resource_n(bridge, PCI_BRIDGE_RESOURCES + i);
 
 		available[i] = available_in[i];
 
@@ -2158,7 +2159,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
 			continue;
 
 		for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
-			res = pci_bus_resource_n(bus, i);
+			res = pci_resource_n(dev, PCI_BRIDGE_RESOURCES + i);
 
 			/*
 			 * Make sure the split resource space is properly
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index b5bd40f13c75..55e50d428aab 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -51,7 +51,7 @@ void sclp_early_wait_irq(void)
 		"	stg	%[addr],%[psw_wait_addr]\n"
 		"	stg	%[addr],%[psw_ext_addr]\n"
 		"	lpswe	%[psw_wait]\n"
-		"0:\n"
+		"0:"
 		: [addr] "=&d" (addr),
 		  [psw_wait_addr] "=Q" (psw_wait.addr),
 		  [psw_ext_addr] "=Q" (get_lowcore()->external_new_psw.addr)
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index fdab760f1f28..b7048f2b036e 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -167,7 +167,7 @@ static inline void cmf_activate(void *area, unsigned int onoff)
 	asm volatile(
 		"	lgr	1,%[r1]\n"
 		"	lgr	2,%[mbo]\n"
-		"	schm\n"
+		"	schm"
 		:
 		: [r1] "d" ((unsigned long)onoff),
 		  [mbo] "d" (virt_to_phys(area))
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index fb2c07cb4d3d..4b2dae6eb376 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1316,23 +1316,34 @@ void ccw_device_schedule_recovery(void)
 	spin_unlock_irqrestore(&recovery_lock, flags);
 }
 
-static int purge_fn(struct device *dev, void *data)
+static int purge_fn(struct subchannel *sch, void *data)
 {
-	struct ccw_device *cdev = to_ccwdev(dev);
-	struct ccw_dev_id *id = &cdev->private->dev_id;
-	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_device *cdev;
 
-	spin_lock_irq(cdev->ccwlock);
-	if (is_blacklisted(id->ssid, id->devno) &&
-	    (cdev->private->state == DEV_STATE_OFFLINE) &&
-	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
-		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
-			      id->devno);
+	spin_lock_irq(&sch->lock);
+	if (sch->st != SUBCHANNEL_TYPE_IO || !sch->schib.pmcw.dnv)
+		goto unlock;
+
+	if (!is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev))
+		goto unlock;
+
+	cdev = sch_get_cdev(sch);
+	if (cdev) {
+		if (cdev->private->state != DEV_STATE_OFFLINE)
+			goto unlock;
+
+		if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
+			goto unlock;
 		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
-		css_sched_sch_todo(sch, SCH_TODO_UNREG);
 		atomic_set(&cdev->private->onoff, 0);
 	}
-	spin_unlock_irq(cdev->ccwlock);
+
+	css_sched_sch_todo(sch, SCH_TODO_UNREG);
+	CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x%s\n", sch->schid.ssid,
+		      sch->schib.pmcw.dev, cdev ? "" : " (no cdev)");
+
+unlock:
+	spin_unlock_irq(&sch->lock);
 	/* Abort loop in case of pending signal. */
 	if (signal_pending(current))
 		return -EINTR;
@@ -1348,7 +1359,7 @@ static int purge_fn(struct device *dev, void *data)
 int ccw_purge_blacklisted(void)
 {
 	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
-	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
+	for_each_subchannel_staged(purge_fn, NULL, NULL);
 	return 0;
 }
 
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index a540045b64a6..8b06b234e110 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -253,11 +253,10 @@ static inline int __xsch(struct subchannel_id schid)
 	asm volatile(
 		"	lgr	1,%[r1]\n"
 		"	xsch\n"
-		"	ipm	%[cc]\n"
-		"	srl	%[cc],28\n"
-		: [cc] "=&d" (ccode)
+		CC_IPM(cc)
+		: CC_OUT(cc, ccode)
 		: [r1] "d" (r1)
-		: "cc", "1");
+		: CC_CLOBBER_LIST("1"));
 	return CC_TRANSFORM(ccode);
 }
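For readers unfamiliar with the consumer side of the new GPUEB controller above: its channels are used through the kernel's generic mailbox client API. Below is a minimal, hypothetical client sketch, not part of this series. The channel name "gpufreq" and its 32-byte payload size come from the mt8196 variant table in mtk-gpueb-mailbox.c; the "collabora,gpueb-demo" compatible and every gpueb_demo_* identifier are invented for illustration, and the sketch assumes a DT consumer node wired to the controller via mboxes/mbox-names properties.

// SPDX-License-Identifier: GPL-2.0-only
/* Hypothetical GPUEB mailbox client, for illustration only. */
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct gpueb_demo {
	struct mbox_client cl;
	struct mbox_chan *chan;
};

/* Runs from the controller's threaded IRQ handler; 'data' points at a
 * stack copy of the RX payload, valid only for the callback's duration. */
static void gpueb_demo_rx(struct mbox_client *cl, void *data)
{
	u32 *words = data;

	dev_info(cl->dev, "GPUEB replied, first word: 0x%08x\n", words[0]);
}

static int gpueb_demo_probe(struct platform_device *pdev)
{
	u32 msg[8] = { 0x1 };	/* 32 bytes, matching the channel's tx_len */
	struct gpueb_demo *d;
	int ret;

	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->cl.dev = &pdev->dev;
	d->cl.rx_callback = gpueb_demo_rx;
	d->cl.tx_block = true;	/* block until last_tx_done() reports idle */
	d->cl.tx_tout = 500;	/* ms */

	/* Resolved through this device's mboxes/mbox-names DT properties */
	d->chan = mbox_request_channel_byname(&d->cl, "gpufreq");
	if (IS_ERR(d->chan))
		return dev_err_probe(&pdev->dev, PTR_ERR(d->chan),
				     "no gpufreq mailbox channel\n");

	/* send_data() copies msg into the TX slot and sets the IRQ_SET bit */
	ret = mbox_send_message(d->chan, msg);
	if (ret < 0) {
		mbox_free_channel(d->chan);
		return ret;
	}

	platform_set_drvdata(pdev, d);

	return 0;
}

static void gpueb_demo_remove(struct platform_device *pdev)
{
	struct gpueb_demo *d = platform_get_drvdata(pdev);

	mbox_free_channel(d->chan);
}

static const struct of_device_id gpueb_demo_of_ids[] = {
	{ .compatible = "collabora,gpueb-demo" },	/* hypothetical */
	{ }
};
MODULE_DEVICE_TABLE(of, gpueb_demo_of_ids);

static struct platform_driver gpueb_demo_drv = {
	.probe = gpueb_demo_probe,
	.remove = gpueb_demo_remove,
	.driver = {
		.name = "gpueb-demo",
		.of_match_table = gpueb_demo_of_ids,
	},
};
module_platform_driver(gpueb_demo_drv);

MODULE_DESCRIPTION("Illustrative GPUEB mailbox client");
MODULE_LICENSE("GPL");

Because the controller declares txdone_poll, a blocking client like this one waits on the framework's hrtimer polling of last_tx_done() rather than on a TX-done interrupt, which is why tx_block/tx_tout are the only completion knobs the sketch needs.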