author     Jakub Kicinski <kuba@kernel.org>  2026-01-22 20:13:25 -0800
committer  Jakub Kicinski <kuba@kernel.org>  2026-02-05 09:54:08 -0800
commit     a182a62ff77f705f7dd3d98cf05cb3d03751a8f0 (patch)
tree       1bb1f12442bdb77d77acf067fed8ad84c16ec011
parent     a90f6dcefca6d5ad765435b3188a3a440ed193a1 (diff)
parent     8fdb05de0e2db89d8f56144c60ab784812e8c3b7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR (net-6.19-rc9).

No adjacent changes, conflicts:

drivers/net/ethernet/spacemit/k1_emac.c
  3125fc1701694 ("net: spacemit: k1-emac: fix jumbo frame support")
  f66086798f91f ("net: spacemit: Remove broken flow control support")
https://lore.kernel.org/aYIysFIE9ooavWia@sirena.org.uk

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--  .mailmap | 5
-rw-r--r--  Documentation/ABI/testing/sysfs-class-tsm | 10
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/sound/fsl,sai.yaml | 1
-rw-r--r--  MAINTAINERS | 53
-rw-r--r--  Makefile | 5
-rw-r--r--  arch/powerpc/kvm/book3s_hv_uvmem.c | 2
-rw-r--r--  arch/riscv/errata/sifive/errata.c | 18
-rw-r--r--  arch/riscv/include/asm/compat.h | 2
-rw-r--r--  arch/riscv/include/asm/syscall.h | 2
-rw-r--r--  arch/riscv/kernel/signal.c | 6
-rw-r--r--  arch/x86/include/asm/kfence.h | 7
-rw-r--r--  arch/x86/kvm/irq.c | 3
-rw-r--r--  arch/x86/kvm/svm/avic.c | 4
-rw-r--r--  arch/x86/kvm/svm/svm.c | 2
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 30
-rw-r--r--  arch/x86/kvm/x86.h | 2
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c | 1
-rw-r--r--  drivers/bus/simple-pm-bus.c | 6
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c | 1
-rw-r--r--  drivers/crypto/ccp/sev-dev-tsm.c | 15
-rw-r--r--  drivers/firewire/core-transaction.c | 19
-rw-r--r--  drivers/gpio/gpio-brcmstb.c | 8
-rw-r--r--  drivers/gpio/gpio-omap.c | 22
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 2
-rw-r--r--  drivers/gpio/gpio-rockchip.c | 8
-rw-r--r--  drivers/gpio/gpio-sprd.c | 8
-rw-r--r--  drivers/gpio/gpio-virtuser.c | 8
-rw-r--r--  drivers/gpio/gpiolib-acpi-core.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 1
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 18
-rw-r--r--  drivers/gpu/drm/drm_pagemap.c | 2
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/imx-tve.c | 13
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_catalog.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c | 2
-rw-r--r--  drivers/gpu/drm/tyr/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_configfs.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_exec.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_lrc.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_nvm.c | 55
-rw-r--r--  drivers/gpu/drm/xe/xe_nvm.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_pci.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_pci_types.h | 1
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c | 3
-rw-r--r--  drivers/iommu/generic_pt/iommu_pt.h | 11
-rw-r--r--  drivers/iommu/iommufd/pages.c | 1
-rw-r--r--  drivers/irqchip/irq-ls-extirq.c | 75
-rw-r--r--  drivers/md/bcache/request.c | 6
-rw-r--r--  drivers/mtd/nand/spi/esmt.c | 2
-rw-r--r--  drivers/net/ethernet/adi/adin1110.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 39
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 11
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc4_pf.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_cbdr.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 17
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c | 77
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 179
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 18
-rw-r--r--  drivers/net/ethernet/spacemit/k1_emac.c | 20
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 41
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 34
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.h | 1
-rw-r--r--  drivers/net/macvlan.c | 5
-rw-r--r--  drivers/net/phy/sfp.c | 2
-rw-r--r--  drivers/net/usb/r8152.c | 29
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/iface.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/mac80211.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 6
-rw-r--r--  drivers/nvme/host/pci.c | 4
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c | 3
-rw-r--r--  drivers/of/of_reserved_mem.c | 19
-rw-r--r--  drivers/pci/ide.c | 10
-rw-r--r--  drivers/pinctrl/pinctrl-rockchip.c | 9
-rw-r--r--  drivers/platform/x86/amd/pmc/pmc-quirks.c | 7
-rw-r--r--  drivers/platform/x86/classmate-laptop.c | 32
-rw-r--r--  drivers/platform/x86/hp/hp-bioscfg/bioscfg.c | 5
-rw-r--r--  drivers/platform/x86/intel/plr_tpmi.c | 2
-rw-r--r--  drivers/platform/x86/intel/telemetry/debugfs.c | 4
-rw-r--r--  drivers/platform/x86/intel/telemetry/pltdrv.c | 2
-rw-r--r--  drivers/platform/x86/intel/vsec.c | 2
-rw-r--r--  drivers/platform/x86/lg-laptop.c | 11
-rw-r--r--  drivers/platform/x86/panasonic-laptop.c | 4
-rw-r--r--  drivers/platform/x86/toshiba_haps.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 2
-rw-r--r--  drivers/soc/qcom/smem.c | 5
-rw-r--r--  drivers/target/sbp/sbp_target.c | 4
-rw-r--r--  drivers/ufs/host/ufs-amd-versal2.c | 2
-rw-r--r--  drivers/virt/coco/tsm-core.c | 30
-rw-r--r--  fs/btrfs/raid56.c | 1
-rw-r--r--  fs/efivarfs/vars.c | 2
-rw-r--r--  fs/smb/client/cifstransport.c | 4
-rw-r--r--  fs/smb/client/smb2file.c | 1
-rw-r--r--  include/linux/cma.h | 9
-rw-r--r--  include/linux/kasan.h | 14
-rw-r--r--  include/linux/memfd.h | 6
-rw-r--r--  include/linux/memremap.h | 9
-rw-r--r--  include/linux/pci-ide.h | 4
-rw-r--r--  include/linux/sched.h | 5
-rw-r--r--  include/linux/skbuff.h | 12
-rw-r--r--  include/linux/tsm.h | 3
-rw-r--r--  kernel/cgroup/dmem.c | 70
-rw-r--r--  kernel/dma/contiguous.c | 16
-rw-r--r--  kernel/dma/pool.c | 7
-rw-r--r--  kernel/events/callchain.c | 2
-rw-r--r--  kernel/events/core.c | 6
-rw-r--r--  kernel/liveupdate/kexec_handover.c | 12
-rw-r--r--  kernel/liveupdate/luo_file.c | 2
-rw-r--r--  kernel/sched/deadline.c | 12
-rw-r--r--  kernel/sched/ext.c | 48
-rw-r--r--  kernel/vmcore_info.c | 6
-rw-r--r--  lib/flex_proportions.c | 5
-rw-r--r--  lib/test_hmm.c | 4
-rw-r--r--  mm/kasan/common.c | 21
-rw-r--r--  mm/kfence/core.c | 23
-rw-r--r--  mm/memfd.c | 4
-rw-r--r--  mm/memfd_luo.c | 10
-rw-r--r--  mm/memory-failure.c | 99
-rw-r--r--  mm/memremap.c | 35
-rw-r--r--  mm/mm_init.c | 12
-rw-r--r--  mm/shmem.c | 54
-rw-r--r--  mm/swap.h | 2
-rw-r--r--  mm/swap_state.c | 3
-rw-r--r--  mm/vmalloc.c | 7
-rw-r--r--  net/core/filter.c | 8
-rw-r--r--  net/core/gro.c | 2
-rw-r--r--  net/core/link_watch.c | 20
-rw-r--r--  net/core/net-procfs.c | 50
-rw-r--r--  net/ethtool/common.c | 3
-rw-r--r--  net/ethtool/rss.c | 9
-rw-r--r--  net/ipv6/ip6_fib.c | 3
-rw-r--r--  net/netfilter/nf_tables_api.c | 2
-rw-r--r--  net/sched/cls_u32.c | 13
-rw-r--r--  net/tipc/crypto.c | 4
-rw-r--r--  rust/Makefile | 1
-rw-r--r--  rust/kernel/bits.rs | 6
-rw-r--r--  rust/kernel/fmt.rs | 2
-rw-r--r--  rust/kernel/num/bounded.rs | 49
-rw-r--r--  rust/kernel/rbtree.rs | 4
-rw-r--r--  rust/kernel/sync/atomic/predefine.rs | 11
-rw-r--r--  rust/kernel/sync/refcount.rs | 3
-rw-r--r--  rust/macros/fmt.rs | 2
-rw-r--r--  rust/macros/lib.rs | 2
-rw-r--r--  rust/proc-macro2/lib.rs | 4
-rw-r--r--  scripts/Makefile.build | 4
-rw-r--r--  scripts/Makefile.vmlinux | 3
-rwxr-xr-x  scripts/generate_rust_analyzer.py | 45
-rwxr-xr-x  scripts/livepatch/klp-build | 8
-rw-r--r--  scripts/package/kernel.spec | 65
-rw-r--r--  scripts/rustdoc_test_gen.rs | 2
-rw-r--r--  security/lsm.h | 9
-rw-r--r--  security/lsm_init.c | 7
-rw-r--r--  security/min_addr.c | 5
-rw-r--r--  sound/hda/codecs/realtek/alc269.c | 18
-rw-r--r--  sound/soc/amd/yc/acp6x-mach.c | 15
-rw-r--r--  sound/soc/codecs/cs35l45.c | 2
-rw-r--r--  sound/soc/fsl/imx-card.c | 1
-rw-r--r--  sound/soc/intel/boards/sof_es8336.c | 2
-rw-r--r--  sound/soc/intel/boards/sof_sdw.c | 1
-rw-r--r--  sound/soc/intel/common/soc-acpi-intel-ptl-match.c | 2
-rw-r--r--  tools/objtool/check.c | 3
-rw-r--r--  tools/objtool/disas.c | 14
-rw-r--r--  tools/objtool/elf.c | 13
-rw-r--r--  tools/objtool/klp-diff.c | 14
-rw-r--r--  tools/testing/selftests/kvm/Makefile.kvm | 1
-rwxr-xr-x  tools/testing/selftests/net/udpgro_fwd.sh | 64
-rw-r--r--  virt/kvm/eventfd.c | 44
187 files changed, 1545 insertions(+), 815 deletions(-)
diff --git a/.mailmap b/.mailmap
index 428d721ffbb1..da4afd2b2415 100644
--- a/.mailmap
+++ b/.mailmap
@@ -34,6 +34,7 @@ Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
Alexander Mikhalitsyn <alexander@mihalicyn.com> <alexander.mikhalitsyn@virtuozzo.com>
Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com>
+Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@futurfusion.io>
Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin.ext@nsn.com>
Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin@gmx.de>
Alexander Sverdlin <alexander.sverdlin@gmail.com> <alexander.sverdlin@nokia.com>
@@ -786,7 +787,8 @@ Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com> <subashab@codeaurora.
Subbaraman Narayanamurthy <quic_subbaram@quicinc.com> <subbaram@codeaurora.org>
Subhash Jadavani <subhashj@codeaurora.org>
Sudarshan Rajagopalan <quic_sudaraja@quicinc.com> <sudaraja@codeaurora.org>
-Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+Sudeep Holla <sudeep.holla@kernel.org> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+Sudeep Holla <sudeep.holla@kernel.org> <sudeep.holla@arm.com>
Sumit Garg <sumit.garg@kernel.org> <sumit.garg@linaro.org>
Sumit Semwal <sumit.semwal@ti.com>
Surabhi Vishnoi <quic_svishnoi@quicinc.com> <svishnoi@codeaurora.org>
@@ -851,6 +853,7 @@ Valentin Schneider <vschneid@redhat.com> <valentin.schneider@arm.com>
Veera Sundaram Sankaran <quic_veeras@quicinc.com> <veeras@codeaurora.org>
Veerabhadrarao Badiganti <quic_vbadigan@quicinc.com> <vbadigan@codeaurora.org>
Venkateswara Naralasetty <quic_vnaralas@quicinc.com> <vnaralas@codeaurora.org>
+Viacheslav Bocharov <v@baodeep.com> <adeep@lexina.in>
Vikash Garodia <vikash.garodia@oss.qualcomm.com> <vgarodia@codeaurora.org>
Vikash Garodia <vikash.garodia@oss.qualcomm.com> <quic_vgarodia@quicinc.com>
Vincent Mailhol <mailhol@kernel.org> <mailhol.vincent@wanadoo.fr>
diff --git a/Documentation/ABI/testing/sysfs-class-tsm b/Documentation/ABI/testing/sysfs-class-tsm
index 6fc1a5ac6da1..2949468deaf7 100644
--- a/Documentation/ABI/testing/sysfs-class-tsm
+++ b/Documentation/ABI/testing/sysfs-class-tsm
@@ -7,13 +7,3 @@ Description:
signals when the PCI layer is able to support establishment of
link encryption and other device-security features coordinated
through a platform tsm.
-
-What: /sys/class/tsm/tsmN/streamH.R.E
-Contact: linux-pci@vger.kernel.org
-Description:
- (RO) When a host bridge has established a secure connection via
- the platform TSM, symlink appears. The primary function of this
- is have a system global review of TSM resource consumption
- across host bridges. The link points to the endpoint PCI device
- and matches the same link published by the host bridge. See
- Documentation/ABI/testing/sysfs-devices-pci-host-bridge.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1058f2a6d6a8..aa0031108bc1 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3472,6 +3472,11 @@ Kernel parameters
If there are multiple matching configurations changing
the same attribute, the last one is used.
+ liveupdate= [KNL,EARLY]
+ Format: <bool>
+ Enable Live Update Orchestrator (LUO).
+ Default: off.
+
load_ramdisk= [RAM] [Deprecated]
lockd.nlm_grace_period=P [NFS] Assign grace period.
diff --git a/Documentation/devicetree/bindings/sound/fsl,sai.yaml b/Documentation/devicetree/bindings/sound/fsl,sai.yaml
index 0d733e5b08a4..d838ee0b61cb 100644
--- a/Documentation/devicetree/bindings/sound/fsl,sai.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,sai.yaml
@@ -44,6 +44,7 @@ properties:
- items:
- enum:
- fsl,imx94-sai
+ - fsl,imx952-sai
- const: fsl,imx95-sai
reg:
diff --git a/MAINTAINERS b/MAINTAINERS
index ce6f49c054c7..34c2ed4da1f9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -335,7 +335,7 @@ F: tools/power/acpi/
ACPI FOR ARM64 (ACPI/arm64)
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
M: Hanjun Guo <guohanjun@huawei.com>
-M: Sudeep Holla <sudeep.holla@arm.com>
+M: Sudeep Holla <sudeep.holla@kernel.org>
L: linux-acpi@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -351,7 +351,7 @@ F: drivers/acpi/riscv/
F: include/linux/acpi_rimt.h
ACPI PCC(Platform Communication Channel) MAILBOX DRIVER
-M: Sudeep Holla <sudeep.holla@arm.com>
+M: Sudeep Holla <sudeep.holla@kernel.org>
L: linux-acpi@vger.kernel.org
S: Supported
F: drivers/mailbox/pcc.c
@@ -2747,14 +2747,14 @@ F: arch/arm/include/asm/hardware/dec21285.h
F: arch/arm/mach-footbridge/
ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
-M: Shawn Guo <shawnguo@kernel.org>
+M: Frank Li <Frank.Li@nxp.com>
M: Sascha Hauer <s.hauer@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
R: Fabio Estevam <festevam@gmail.com>
L: imx@lists.linux.dev
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/frank.li/linux.git
F: Documentation/devicetree/bindings/firmware/fsl*
F: Documentation/devicetree/bindings/firmware/nxp*
F: arch/arm/boot/dts/nxp/imx/
@@ -2769,22 +2769,22 @@ N: mxs
N: \bmxc[^\d]
ARM/FREESCALE LAYERSCAPE ARM ARCHITECTURE
-M: Shawn Guo <shawnguo@kernel.org>
+M: Frank Li <Frank.Li@nxp.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/frank.li/linux.git
F: arch/arm/boot/dts/nxp/ls/
F: arch/arm64/boot/dts/freescale/fsl-*
F: arch/arm64/boot/dts/freescale/qoriq-*
ARM/FREESCALE VYBRID ARM ARCHITECTURE
-M: Shawn Guo <shawnguo@kernel.org>
+M: Frank Li <Frank.Li@nxp.com>
M: Sascha Hauer <s.hauer@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
R: Stefan Agner <stefan@agner.ch>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/frank.li/linux.git
F: arch/arm/boot/dts/nxp/vf/
F: arch/arm/mach-imx/*vf610*
@@ -3681,7 +3681,7 @@ N: uniphier
ARM/VERSATILE EXPRESS PLATFORM
M: Liviu Dudau <liviu.dudau@arm.com>
-M: Sudeep Holla <sudeep.holla@arm.com>
+M: Sudeep Holla <sudeep.holla@kernel.org>
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -6514,7 +6514,7 @@ F: drivers/i2c/busses/i2c-cp2615.c
CPU FREQUENCY DRIVERS - VEXPRESS SPC ARM BIG LITTLE
M: Viresh Kumar <viresh.kumar@linaro.org>
-M: Sudeep Holla <sudeep.holla@arm.com>
+M: Sudeep Holla <sudeep.holla@kernel.org>
L: linux-pm@vger.kernel.org
S: Maintained
W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php
@@ -6610,7 +6610,7 @@ F: include/linux/platform_data/cpuidle-exynos.h
CPUIDLE DRIVER - ARM PSCI
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
-M: Sudeep Holla <sudeep.holla@arm.com>
+M: Sudeep Holla <sudeep.holla@kernel.org>
M: Ulf Hansson <ulf.hansson@linaro.org>
L: linux-pm@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -9819,7 +9819,7 @@ F: include/uapi/linux/firewire*.h
F: tools/firewire/
FIRMWARE FRAMEWORK FOR ARMV8-A
-M: Sudeep Holla <sudeep.holla@arm.com>
+M: Sudeep Holla <sudeep.holla@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/firmware/arm_ffa/
@@ -10517,7 +10517,7 @@ S: Maintained
F: scripts/gendwarfksyms/
GENERIC ARCHITECTURE TOPOLOGY
-M: Sudeep Holla <sudeep.holla@arm.com>
+M: Sudeep Holla <sudeep.holla@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/base/arch_topology.c
@@ -11371,6 +11371,11 @@ F: Documentation/ABI/testing/sysfs-devices-platform-kunpeng_hccs
F: drivers/soc/hisilicon/kunpeng_hccs.c
F: drivers/soc/hisilicon/kunpeng_hccs.h
+HISILICON SOC HHA DRIVER
+M: Yushan Wang <wangyushan12@huawei.com>
+S: Maintained
+F: drivers/cache/hisi_soc_hha.c
+
HISILICON LPC BUS DRIVER
M: Jay Fang <f.fangjian@huawei.com>
S: Maintained
@@ -15096,7 +15101,7 @@ F: drivers/mailbox/arm_mhuv2.c
F: include/linux/mailbox/arm_mhuv2_message.h
MAILBOX ARM MHUv3
-M: Sudeep Holla <sudeep.holla@arm.com>
+M: Sudeep Holla <sudeep.holla@kernel.org>
M: Cristian Marussi <cristian.marussi@arm.com>
L: linux-kernel@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -20589,7 +20594,7 @@ F: drivers/pinctrl/pinctrl-amd.c
PIN CONTROLLER - FREESCALE
M: Dong Aisheng <aisheng.dong@nxp.com>
M: Fabio Estevam <festevam@gmail.com>
-M: Shawn Guo <shawnguo@kernel.org>
+M: Frank Li <Frank.Li@nxp.com>
M: Jacky Bai <ping.bai@nxp.com>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
R: NXP S32 Linux Team <s32@nxp.com>
@@ -20985,6 +20990,18 @@ F: Documentation/devicetree/bindings/net/pse-pd/
F: drivers/net/pse-pd/
F: net/ethtool/pse-pd.c
+PSP SECURITY PROTOCOL
+M: Daniel Zahka <daniel.zahka@gmail.com>
+M: Jakub Kicinski <kuba@kernel.org>
+M: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+F: Documentation/netlink/specs/psp.yaml
+F: Documentation/networking/psp.rst
+F: include/net/psp/
+F: include/net/psp.h
+F: include/uapi/linux/psp.h
+F: net/psp/
+K: struct\ psp(_assoc|_dev|hdr)\b
+
PSTORE FILESYSTEM
M: Kees Cook <kees@kernel.org>
R: Tony Luck <tony.luck@intel.com>
@@ -23644,7 +23661,7 @@ F: include/uapi/linux/sed*
SECURE MONITOR CALL(SMC) CALLING CONVENTION (SMCCC)
M: Mark Rutland <mark.rutland@arm.com>
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
-M: Sudeep Holla <sudeep.holla@arm.com>
+M: Sudeep Holla <sudeep.holla@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/firmware/smccc/
@@ -25408,7 +25425,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git
F: drivers/mfd/syscon.c
SYSTEM CONTROL & POWER/MANAGEMENT INTERFACE (SCPI/SCMI) Message Protocol drivers
-M: Sudeep Holla <sudeep.holla@arm.com>
+M: Sudeep Holla <sudeep.holla@kernel.org>
R: Cristian Marussi <cristian.marussi@arm.com>
L: arm-scmi@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -26560,7 +26577,7 @@ F: samples/tsm-mr/
TRUSTED SERVICES TEE DRIVER
M: Balint Dobszay <balint.dobszay@arm.com>
-M: Sudeep Holla <sudeep.holla@arm.com>
+M: Sudeep Holla <sudeep.holla@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: trusted-services@lists.trustedfirmware.org
S: Maintained
diff --git a/Makefile b/Makefile
index 3373308d2217..bde507d5c03d 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -1624,7 +1624,8 @@ MRPROPER_FILES += include/config include/generated \
certs/x509.genkey \
vmlinux-gdb.py \
rpmbuild \
- rust/libmacros.so rust/libmacros.dylib
+ rust/libmacros.so rust/libmacros.dylib \
+ rust/libpin_init_internal.so rust/libpin_init_internal.dylib
# clean - Delete most, but leave enough to build external modules
#
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index e5000bef90f2..7cf9310de0ec 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -723,7 +723,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
dpage = pfn_to_page(uvmem_pfn);
dpage->zone_device_data = pvt;
- zone_device_page_init(dpage, 0);
+ zone_device_page_init(dpage, &kvmppc_uvmem_pgmap, 0);
return dpage;
out_clear:
spin_lock(&kvmppc_uvmem_bitmap_lock);
diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c
index 38aac2c47845..d0c61f86cba3 100644
--- a/arch/riscv/errata/sifive/errata.c
+++ b/arch/riscv/errata/sifive/errata.c
@@ -75,26 +75,12 @@ static u32 __init_or_module sifive_errata_probe(unsigned long archid,
return cpu_req_errata;
}
-static void __init_or_module warn_miss_errata(u32 miss_errata)
-{
- int i;
-
- pr_warn("----------------------------------------------------------------\n");
- pr_warn("WARNING: Missing the following errata may cause potential issues\n");
- for (i = 0; i < ERRATA_SIFIVE_NUMBER; i++)
- if (miss_errata & 0x1 << i)
- pr_warn("\tSiFive Errata[%d]:%s\n", i, errata_list[i].name);
- pr_warn("Please enable the corresponding Kconfig to apply them\n");
- pr_warn("----------------------------------------------------------------\n");
-}
-
void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage)
{
struct alt_entry *alt;
u32 cpu_req_errata;
- u32 cpu_apply_errata = 0;
u32 tmp;
BUILD_BUG_ON(ERRATA_SIFIVE_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE);
@@ -118,10 +104,6 @@ void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
patch_text_nosync(ALT_OLD_PTR(alt), ALT_ALT_PTR(alt),
alt->alt_len);
mutex_unlock(&text_mutex);
- cpu_apply_errata |= tmp;
}
}
- if (stage != RISCV_ALTERNATIVES_MODULE &&
- cpu_apply_errata != cpu_req_errata)
- warn_miss_errata(cpu_req_errata - cpu_apply_errata);
}
diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
index 6081327e55f5..28e115eed218 100644
--- a/arch/riscv/include/asm/compat.h
+++ b/arch/riscv/include/asm/compat.h
@@ -2,7 +2,7 @@
#ifndef __ASM_COMPAT_H
#define __ASM_COMPAT_H
-#define COMPAT_UTS_MACHINE "riscv\0\0"
+#define COMPAT_UTS_MACHINE "riscv32\0\0"
/*
* Architecture specific compatibility types
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 34313387f977..8067e666a4ca 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -20,7 +20,7 @@ extern void * const sys_call_table[];
extern void * const compat_sys_call_table[];
/*
- * Only the low 32 bits of orig_r0 are meaningful, so we return int.
+ * Only the low 32 bits of orig_a0 are meaningful, so we return int.
* This importantly ignores the high bits on 64-bit, so comparisons
* sign-extend the low 32 bits.
*/
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 5a956108b1ea..dbb067e345f0 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -145,14 +145,14 @@ struct arch_ext_priv {
long (*save)(struct pt_regs *regs, void __user *sc_vec);
};
-struct arch_ext_priv arch_ext_list[] = {
+static struct arch_ext_priv arch_ext_list[] = {
{
.magic = RISCV_V_MAGIC,
.save = &save_v_state,
},
};
-const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list);
+static const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list);
static long restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
@@ -297,7 +297,7 @@ static long setup_sigcontext(struct rt_sigframe __user *frame,
} else {
err |= __put_user(arch_ext->magic, &sc_ext_ptr->magic);
err |= __put_user(ext_size, &sc_ext_ptr->size);
- sc_ext_ptr = (void *)sc_ext_ptr + ext_size;
+ sc_ext_ptr = (void __user *)sc_ext_ptr + ext_size;
}
}
/* Write zero to fp-reserved space and check it on restore_sigcontext */
diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
index acf9ffa1a171..dfd5c74ba41a 100644
--- a/arch/x86/include/asm/kfence.h
+++ b/arch/x86/include/asm/kfence.h
@@ -42,7 +42,7 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
unsigned int level;
pte_t *pte = lookup_address(addr, &level);
- pteval_t val;
+ pteval_t val, new;
if (WARN_ON(!pte || level != PG_LEVEL_4K))
return false;
@@ -57,11 +57,12 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
return true;
/*
- * Otherwise, invert the entire PTE. This avoids writing out an
+ * Otherwise, flip the Present bit, taking care to avoid writing an
* L1TF-vulnerable PTE (not present, without the high address bits
* set).
*/
- set_pte(pte, __pte(~val));
+ new = val ^ _PAGE_PRESENT;
+ set_pte(pte, __pte(flip_protnone_guard(val, new, PTE_PFN_MASK)));
/*
* If the page was protected (non-present) and we're making it
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 7cc8950005b6..4c7688670c2d 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -514,7 +514,8 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
*/
spin_lock_irq(&kvm->irqfds.lock);
- if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) {
+ if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI ||
+ WARN_ON_ONCE(irqfd->irq_bypass_vcpu)) {
ret = kvm_pi_update_irte(irqfd, NULL);
if (ret)
pr_info("irq bypass consumer (eventfd %p) unregistration fails: %d\n",
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 6b77b2033208..0f6c8596719b 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -376,6 +376,7 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
static int avic_init_backing_page(struct kvm_vcpu *vcpu)
{
+ u32 max_id = x2avic_enabled ? x2avic_max_physical_id : AVIC_MAX_PHYSICAL_ID;
struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
struct vcpu_svm *svm = to_svm(vcpu);
u32 id = vcpu->vcpu_id;
@@ -388,8 +389,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
* avic_vcpu_load() expects to be called if and only if the vCPU has
* fully initialized AVIC.
*/
- if ((!x2avic_enabled && id > AVIC_MAX_PHYSICAL_ID) ||
- (id > x2avic_max_physical_id)) {
+ if (id > max_id) {
kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG);
vcpu->arch.apic->apicv_active = false;
return 0;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 24d59ccfa40d..4394be40fe78 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5284,6 +5284,8 @@ static __init void svm_set_cpu_caps(void)
*/
kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM);
+
+ kvm_setup_xss_caps();
}
static __init int svm_hardware_setup(void)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6b96f7aea20b..8c94241fbcca 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8051,6 +8051,8 @@ static __init void vmx_set_cpu_caps(void)
kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
kvm_cpu_cap_clear(X86_FEATURE_IBT);
}
+
+ kvm_setup_xss_caps();
}
static bool vmx_is_io_intercepted(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 63afdb6bb078..72d37c8930ad 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9953,6 +9953,23 @@ static struct notifier_block pvclock_gtod_notifier = {
};
#endif
+void kvm_setup_xss_caps(void)
+{
+ if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
+ kvm_caps.supported_xss = 0;
+
+ if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) &&
+ !kvm_cpu_cap_has(X86_FEATURE_IBT))
+ kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
+
+ if ((kvm_caps.supported_xss & XFEATURE_MASK_CET_ALL) != XFEATURE_MASK_CET_ALL) {
+ kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
+ kvm_cpu_cap_clear(X86_FEATURE_IBT);
+ kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
+ }
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_setup_xss_caps);
+
static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
{
memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
@@ -10125,19 +10142,6 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
if (!tdp_enabled)
kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
- if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
- kvm_caps.supported_xss = 0;
-
- if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) &&
- !kvm_cpu_cap_has(X86_FEATURE_IBT))
- kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
-
- if ((kvm_caps.supported_xss & XFEATURE_MASK_CET_ALL) != XFEATURE_MASK_CET_ALL) {
- kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
- kvm_cpu_cap_clear(X86_FEATURE_IBT);
- kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
- }
-
if (kvm_caps.has_tsc_control) {
/*
* Make sure the user can only configure tsc_khz values that
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index fdab0ad49098..00de24f55b1f 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -471,6 +471,8 @@ extern struct kvm_host_values kvm_host;
extern bool enable_pmu;
+void kvm_setup_xss_caps(void);
+
/*
* Get a filtered version of KVM's supported XCR0 that strips out dynamic
* features for which the current process doesn't (yet) have permission to use.
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 8194a970f002..d1c354636315 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1662,6 +1662,7 @@ static void destroy_sysfs(struct rnbd_clt_dev *dev,
/* To avoid deadlock firstly remove itself */
sysfs_remove_file_self(&dev->kobj, sysfs_self);
kobject_del(&dev->kobj);
+ kobject_put(&dev->kobj);
}
}
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index d8e029e7e53f..3f00d953fb9a 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -142,6 +142,12 @@ static const struct of_device_id simple_pm_bus_of_match[] = {
{ .compatible = "simple-mfd", .data = ONLY_BUS },
{ .compatible = "isa", .data = ONLY_BUS },
{ .compatible = "arm,amba-bus", .data = ONLY_BUS },
+ { .compatible = "fsl,ls1021a-scfg", },
+ { .compatible = "fsl,ls1043a-scfg", },
+ { .compatible = "fsl,ls1046a-scfg", },
+ { .compatible = "fsl,ls1088a-isc", },
+ { .compatible = "fsl,ls2080a-isc", },
+ { .compatible = "fsl,lx2160a-isc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 81e16b5a0245..b8081acba928 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -263,6 +263,7 @@ static const struct of_device_id qcom_cpufreq_ipq806x_match_list[] __maybe_unuse
{ .compatible = "qcom,ipq8066", .data = (const void *)QCOM_ID_IPQ8066 },
{ .compatible = "qcom,ipq8068", .data = (const void *)QCOM_ID_IPQ8068 },
{ .compatible = "qcom,ipq8069", .data = (const void *)QCOM_ID_IPQ8069 },
+ { /* sentinel */ }
};
static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,
diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c
index ea29cd5d0ff9..40d02adaf3f6 100644
--- a/drivers/crypto/ccp/sev-dev-tsm.c
+++ b/drivers/crypto/ccp/sev-dev-tsm.c
@@ -19,12 +19,6 @@
MODULE_IMPORT_NS("PCI_IDE");
-#define TIO_DEFAULT_NR_IDE_STREAMS 1
-
-static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS;
-module_param_named(ide_nr, nr_ide_streams, uint, 0644);
-MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB");
-
#define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev))
#define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data))
#define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data))
@@ -193,7 +187,6 @@ static void streams_teardown(struct pci_ide **ide)
static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide,
unsigned int tc)
{
- struct pci_dev *rp = pcie_find_root_port(pdev);
struct pci_ide *ide1;
if (ide[tc]) {
@@ -201,17 +194,11 @@ static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide,
return -EBUSY;
}
- /* FIXME: find a better way */
- if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS)
- pci_notice(pdev, "Enable non-default %d streams", nr_ide_streams);
- pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams);
-
ide1 = pci_ide_stream_alloc(pdev);
if (!ide1)
return -EFAULT;
- /* Blindly assign streamid=0 to TC=0, and so on */
- ide1->stream_id = tc;
+ ide1->stream_id = ide1->host_bridge_stream;
ide[tc] = ide1;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 7fea11a5e359..22ae387ae03c 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -173,20 +173,14 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
}
}
-static void start_split_transaction_timeout(struct fw_transaction *t,
- struct fw_card *card)
+// card->transactions.lock should be acquired in advance for the linked list.
+static void start_split_transaction_timeout(struct fw_transaction *t, unsigned int delta)
{
- unsigned long delta;
-
if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
return;
t->is_split_transaction = true;
- // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
- // local destination never runs in any type of IRQ context.
- scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
- delta = card->split_timeout.jiffies;
mod_timer(&t->split_timeout_timer, jiffies + delta);
}
@@ -207,13 +201,20 @@ static void transmit_complete_callback(struct fw_packet *packet,
break;
case ACK_PENDING:
{
+ unsigned int delta;
+
// NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
// local destination never runs in any type of IRQ context.
scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
t->split_timeout_cycle =
compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
+ delta = card->split_timeout.jiffies;
}
- start_split_transaction_timeout(t, card);
+
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock)
+ start_split_transaction_timeout(t, delta);
break;
}
case ACK_BUSY_X:
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index af9287ff5dc4..2352d099709c 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -301,12 +301,10 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
struct brcmstb_gpio_priv *priv, irq_hw_number_t hwirq)
{
struct brcmstb_gpio_bank *bank;
- int i = 0;
- /* banks are in descending order */
- list_for_each_entry_reverse(bank, &priv->bank_list, node) {
- i += bank->chip.gc.ngpio;
- if (hwirq < i)
+ list_for_each_entry(bank, &priv->bank_list, node) {
+ if (hwirq >= bank->chip.gc.offset &&
+ hwirq < (bank->chip.gc.offset + bank->chip.gc.ngpio))
return bank;
}
return NULL;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index e136e81794df..e39723b5901b 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -799,10 +799,13 @@ static struct platform_device omap_mpuio_device = {
static inline void omap_mpuio_init(struct gpio_bank *bank)
{
- platform_set_drvdata(&omap_mpuio_device, bank);
+ static bool registered;
- if (platform_driver_register(&omap_mpuio_driver) == 0)
- (void) platform_device_register(&omap_mpuio_device);
+ platform_set_drvdata(&omap_mpuio_device, bank);
+ if (!registered) {
+ (void)platform_device_register(&omap_mpuio_device);
+ registered = true;
+ }
}
/*---------------------------------------------------------------------*/
@@ -1575,13 +1578,24 @@ static struct platform_driver omap_gpio_driver = {
*/
static int __init omap_gpio_drv_reg(void)
{
- return platform_driver_register(&omap_gpio_driver);
+ int ret;
+
+ ret = platform_driver_register(&omap_mpuio_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&omap_gpio_driver);
+ if (ret)
+ platform_driver_unregister(&omap_mpuio_driver);
+
+ return ret;
}
postcore_initcall(omap_gpio_drv_reg);
static void __exit omap_gpio_exit(void)
{
platform_driver_unregister(&omap_gpio_driver);
+ platform_driver_unregister(&omap_mpuio_driver);
}
module_exit(omap_gpio_exit);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 8727ae54bc57..f93a3dbb2daa 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -914,6 +914,8 @@ static void pca953x_irq_shutdown(struct irq_data *d)
clear_bit(hwirq, chip->irq_trig_fall);
clear_bit(hwirq, chip->irq_trig_level_low);
clear_bit(hwirq, chip->irq_trig_level_high);
+
+ pca953x_irq_mask(d);
}
static void pca953x_irq_print_chip(struct irq_data *data, struct seq_file *p)
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index bae2061f15fc..0fff4a699f12 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -18,7 +18,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -164,12 +163,6 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip,
unsigned long flags;
u32 data = input ? 0 : 1;
-
- if (input)
- pinctrl_gpio_direction_input(chip, offset);
- else
- pinctrl_gpio_direction_output(chip, offset);
-
raw_spin_lock_irqsave(&bank->slock, flags);
rockchip_gpio_writel_bit(bank, offset, data, bank->gpio_regs->port_ddr);
raw_spin_unlock_irqrestore(&bank->slock, flags);
@@ -593,7 +586,6 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
gc->ngpio = bank->nr_pins;
gc->label = bank->name;
gc->parent = bank->dev;
- gc->can_sleep = true;
ret = gpiochip_add_data(gc, bank);
if (ret) {
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index 413bcd0a4240..2cc8abe705cd 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -35,7 +35,7 @@
struct sprd_gpio {
struct gpio_chip chip;
void __iomem *base;
- spinlock_t lock;
+ raw_spinlock_t lock;
int irq;
};
@@ -54,7 +54,7 @@ static void sprd_gpio_update(struct gpio_chip *chip, unsigned int offset,
unsigned long flags;
u32 tmp;
- spin_lock_irqsave(&sprd_gpio->lock, flags);
+ raw_spin_lock_irqsave(&sprd_gpio->lock, flags);
tmp = readl_relaxed(base + reg);
if (val)
@@ -63,7 +63,7 @@ static void sprd_gpio_update(struct gpio_chip *chip, unsigned int offset,
tmp &= ~BIT(SPRD_GPIO_BIT(offset));
writel_relaxed(tmp, base + reg);
- spin_unlock_irqrestore(&sprd_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&sprd_gpio->lock, flags);
}
static int sprd_gpio_read(struct gpio_chip *chip, unsigned int offset, u16 reg)
@@ -236,7 +236,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
if (IS_ERR(sprd_gpio->base))
return PTR_ERR(sprd_gpio->base);
- spin_lock_init(&sprd_gpio->lock);
+ raw_spin_lock_init(&sprd_gpio->lock);
sprd_gpio->chip.label = dev_name(&pdev->dev);
sprd_gpio->chip.ngpio = SPRD_GPIO_NR;
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index 37f2ce20f1ae..098e67d70ffa 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -1682,10 +1682,10 @@ static void gpio_virtuser_device_config_group_release(struct config_item *item)
{
struct gpio_virtuser_device *dev = to_gpio_virtuser_device(item);
- guard(mutex)(&dev->lock);
-
- if (gpio_virtuser_device_is_live(dev))
- gpio_virtuser_device_deactivate(dev);
+ scoped_guard(mutex, &dev->lock) {
+ if (gpio_virtuser_device_is_live(dev))
+ gpio_virtuser_device_deactivate(dev);
+ }
mutex_destroy(&dev->lock);
ida_free(&gpio_virtuser_ida, dev->id);
diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c
index 83dd227dbbec..9627b3a9c7f3 100644
--- a/drivers/gpio/gpiolib-acpi-core.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -1104,6 +1104,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
unsigned int pin = agpio->pin_table[i];
struct acpi_gpio_connection *conn;
struct gpio_desc *desc;
+ u16 word, shift;
bool found;
mutex_lock(&achip->conn_lock);
@@ -1158,10 +1159,22 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
mutex_unlock(&achip->conn_lock);
- if (function == ACPI_WRITE)
- gpiod_set_raw_value_cansleep(desc, !!(*value & BIT(i)));
- else
- *value |= (u64)gpiod_get_raw_value_cansleep(desc) << i;
+ /*
+ * For the cases when OperationRegion() consists of more than
+ * 64 bits calculate the word and bit shift to use that one to
+ * access the value.
+ */
+ word = i / 64;
+ shift = i % 64;
+
+ if (function == ACPI_WRITE) {
+ gpiod_set_raw_value_cansleep(desc, value[word] & BIT_ULL(shift));
+ } else {
+ if (gpiod_get_raw_value_cansleep(desc))
+ value[word] |= BIT_ULL(shift);
+ else
+ value[word] &= ~BIT_ULL(shift);
+ }
}
out:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 7e623f91f2d7..d9c7ad297293 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -498,8 +498,13 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
if (adev->irq.retry_cam_enabled)
return;
+ else if (adev->irq.ih1.ring_size)
+ ih = &adev->irq.ih1;
+ else if (adev->irq.ih_soft.enabled)
+ ih = &adev->irq.ih_soft;
+ else
+ return;
- ih = &adev->irq.ih1;
/* Get the WPTR of the last entry in IH ring */
last_wptr = amdgpu_ih_get_wptr(adev, ih);
/* Order wptr with ring data. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 72ec455fa932..44f230d67da2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -235,7 +235,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_ib_begin(ring);
- if (ring->funcs->emit_gfx_shadow)
+ if (ring->funcs->emit_gfx_shadow && adev->gfx.cp_gfx_shadow)
amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va,
init_shadow, vmid);
@@ -291,7 +291,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
fence_flags | AMDGPU_FENCE_FLAG_64BIT);
}
- if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec) {
+ if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec &&
+ adev->gfx.cp_gfx_shadow) {
amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);
amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index d75b9940f248..fc65fb36e115 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -6879,7 +6879,7 @@ static int gfx_v10_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
- *ring->wptr_cpu_addr = 0;
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 8a2ee2de390f..e642236ea2c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -4201,7 +4201,7 @@ static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
- *ring->wptr_cpu_addr = 0;
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}
@@ -6823,11 +6823,12 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
+ bool use_mmio = false;
int r;
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
- r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, use_mmio);
if (r) {
dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
@@ -6836,16 +6837,18 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
return r;
}
- r = gfx_v11_0_kgq_init_queue(ring, true);
- if (r) {
- dev_err(adev->dev, "failed to init kgq\n");
- return r;
- }
+ if (use_mmio) {
+ r = gfx_v11_0_kgq_init_queue(ring, true);
+ if (r) {
+ dev_err(adev->dev, "failed to init kgq\n");
+ return r;
+ }
- r = amdgpu_mes_map_legacy_queue(adev, ring);
- if (r) {
- dev_err(adev->dev, "failed to remap kgq\n");
- return r;
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kgq\n");
+ return r;
+ }
}
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index b786967022d2..4aab89a9ab40 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -3079,7 +3079,7 @@ static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
- *ring->wptr_cpu_addr = 0;
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}
@@ -5297,11 +5297,12 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
+ bool use_mmio = false;
int r;
amdgpu_ring_reset_helper_begin(ring, timedout_fence);
- r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, use_mmio);
if (r) {
dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r);
r = gfx_v12_reset_gfx_pipe(ring);
@@ -5309,16 +5310,18 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
return r;
}
- r = gfx_v12_0_kgq_init_queue(ring, true);
- if (r) {
- dev_err(adev->dev, "failed to init kgq\n");
- return r;
- }
+ if (use_mmio) {
+ r = gfx_v12_0_kgq_init_queue(ring, true);
+ if (r) {
+ dev_err(adev->dev, "failed to init kgq\n");
+ return r;
+ }
- r = amdgpu_mes_map_legacy_queue(adev, ring);
- if (r) {
- dev_err(adev->dev, "failed to remap kgq\n");
- return r;
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kgq\n");
+ return r;
+ }
}
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index ad36c96478a8..25536d89635d 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -225,7 +225,13 @@ static u32 soc21_get_config_memsize(struct amdgpu_device *adev)
static u32 soc21_get_xclk(struct amdgpu_device *adev)
{
- return adev->clock.spll.reference_freq;
+ u32 reference_clock = adev->clock.spll.reference_freq;
+
+ /* reference clock is actually 99.81 Mhz rather than 100 Mhz */
+ if ((adev->flags & AMD_IS_APU) && reference_clock == 10000)
+ return 9981;
+
+ return reference_clock;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index af53e796ea1b..6ada7b4af7c6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -217,7 +217,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- zone_device_page_init(page, 0);
+ zone_device_page_init(page, page_pgmap(page), 0);
}
static void
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1ea5a250440f..a8a59126b2d2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7754,10 +7754,12 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
/* Cancel and flush any pending HDMI HPD debounce work */
- cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
- if (aconnector->hdmi_prev_sink) {
- dc_sink_release(aconnector->hdmi_prev_sink);
- aconnector->hdmi_prev_sink = NULL;
+ if (aconnector->hdmi_hpd_debounce_delay_ms) {
+ cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
}
if (aconnector->bl_idx != -1) {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 79b174e5326d..302af1fb6901 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -80,15 +80,15 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;
+ mutex_lock(&adev->pm.mutex);
+
if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
(!is_vcn || adev->vcn.num_vcn_inst == 1)) {
dev_dbg(adev->dev, "IP block%d already in the target %s state!",
block_type, gate ? "gate" : "ungate");
- return 0;
+ goto out_unlock;
}
- mutex_lock(&adev->pm.mutex);
-
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCE:
@@ -115,6 +115,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
if (!ret)
atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
+out_unlock:
mutex_unlock(&adev->pm.mutex);
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 4263798d716b..8e592a477c33 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -56,6 +56,7 @@
#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x) ((x) & 0x3ff)
#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
+#define SMU_V13_SOFT_FREQ_ROUND(x) ((x) + 1)
extern const int pmfw_decoded_link_speed[5];
extern const int pmfw_decoded_link_width[7];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index 29a4583db873..0b1e6f25e611 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -57,6 +57,7 @@ extern const int decoded_link_width[8];
#define DECODE_GEN_SPEED(gen_speed_idx) (decoded_link_speed[gen_speed_idx])
#define DECODE_LANE_WIDTH(lane_width_idx) (decoded_link_width[lane_width_idx])
+#define SMU_V14_SOFT_FREQ_ROUND(x) ((x) + 1)
struct smu_14_0_max_sustainable_clocks {
uint32_t display_clock;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a89075e25717..2efd914d81e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1555,6 +1555,7 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
return clk_id;
if (max > 0) {
+ max = SMU_V13_SOFT_FREQ_ROUND(max);
if (automatic)
param = (uint32_t)((clk_id << 16) | 0xffff);
else
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index f2a16dfee599..06a81533759c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -1178,6 +1178,7 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
return clk_id;
if (max > 0) {
+ max = SMU_V14_SOFT_FREQ_ROUND(max);
if (automatic)
param = (uint32_t)((clk_id << 16) | 0xffff);
else
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e4df43427394..25f68fed9b48 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -960,16 +960,21 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
{
struct drm_gem_change_handle *args = data;
struct drm_gem_object *obj;
- int ret;
+ int handle, ret;
if (!drm_core_check_feature(dev, DRIVER_GEM))
return -EOPNOTSUPP;
+ /* idr_alloc() limitation. */
+ if (args->new_handle > INT_MAX)
+ return -EINVAL;
+ handle = args->new_handle;
+
obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj)
return -ENOENT;
- if (args->handle == args->new_handle) {
+ if (args->handle == handle) {
ret = 0;
goto out;
}
@@ -977,18 +982,19 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
mutex_lock(&file_priv->prime.lock);
spin_lock(&file_priv->table_lock);
- ret = idr_alloc(&file_priv->object_idr, obj,
- args->new_handle, args->new_handle + 1, GFP_NOWAIT);
+ ret = idr_alloc(&file_priv->object_idr, obj, handle, handle + 1,
+ GFP_NOWAIT);
spin_unlock(&file_priv->table_lock);
if (ret < 0)
goto out_unlock;
if (obj->dma_buf) {
- ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf, args->new_handle);
+ ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf,
+ handle);
if (ret < 0) {
spin_lock(&file_priv->table_lock);
- idr_remove(&file_priv->object_idr, args->new_handle);
+ idr_remove(&file_priv->object_idr, handle);
spin_unlock(&file_priv->table_lock);
goto out_unlock;
}
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 06c1bd8fc4d1..704f2f945019 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -197,7 +197,7 @@ static void drm_pagemap_get_devmem_page(struct page *page,
struct drm_pagemap_zdd *zdd)
{
page->zone_device_data = drm_pagemap_zdd_get(zdd);
- zone_device_page_init(page, 0);
+ zone_device_page_init(page, page_pgmap(page), 0);
}
/**
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
index c5c6e070cc06..e861b8b9d8fa 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
@@ -528,6 +528,13 @@ static const struct component_ops imx_tve_ops = {
.bind = imx_tve_bind,
};
+static void imx_tve_put_device(void *_dev)
+{
+ struct device *dev = _dev;
+
+ put_device(dev);
+}
+
static int imx_tve_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -549,6 +556,12 @@ static int imx_tve_probe(struct platform_device *pdev)
if (ddc_node) {
tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
+ if (tve->ddc) {
+ ret = devm_add_action_or_reset(dev, imx_tve_put_device,
+ &tve->ddc->dev);
+ if (ret)
+ return ret;
+ }
}
tve->mode = of_get_tve_mode(np);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index ac9a95aab2fb..4c042133261c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -501,8 +501,6 @@ static const struct adreno_reglist a690_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
- {REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111},
- {REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555},
{}
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 58071652679d..3d8031296eed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -425,7 +425,7 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm, bool is_large)
order = ilog2(DMEM_CHUNK_NPAGES);
}
- zone_device_folio_init(folio, order);
+ zone_device_folio_init(folio, page_pgmap(folio_page(folio, 0)), order);
return page;
}
diff --git a/drivers/gpu/drm/tyr/Kconfig b/drivers/gpu/drm/tyr/Kconfig
index 4b55308fd2eb..e933e6478027 100644
--- a/drivers/gpu/drm/tyr/Kconfig
+++ b/drivers/gpu/drm/tyr/Kconfig
@@ -6,6 +6,7 @@ config DRM_TYR
depends on RUST
depends on ARM || ARM64 || COMPILE_TEST
depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ depends on COMMON_CLK
default n
help
Rust DRM driver for ARM Mali CSF-based GPUs.
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index 9f6251b1008b..82edd0466005 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -347,11 +347,10 @@ static bool is_bound(struct xe_config_group_device *dev)
return false;
ret = pci_get_drvdata(pdev);
- pci_dev_put(pdev);
-
if (ret)
pci_dbg(pdev, "Already bound to driver\n");
+ pci_dev_put(pdev);
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index cf29e259861f..9a6d49fcd8e4 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -984,8 +984,6 @@ void xe_device_remove(struct xe_device *xe)
{
xe_display_unregister(xe);
- xe_nvm_fini(xe);
-
drm_dev_unplug(&xe->drm);
xe_bo_pci_dev_remove_all(xe);
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index fd9480031750..8e3614b24010 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -190,9 +190,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto err_syncs;
}
- if (xe_exec_queue_is_parallel(q)) {
- err = copy_from_user(addresses, addresses_user, sizeof(u64) *
- q->width);
+ if (args->num_batch_buffer && xe_exec_queue_is_parallel(q)) {
+ err = copy_from_user(addresses, addresses_user,
+ sizeof(u64) * q->width);
if (err) {
err = -EFAULT;
goto err_syncs;
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 281286f2b5f9..b8c1dd953665 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -1185,7 +1185,7 @@ static ssize_t setup_invalidate_state_cache_wa(struct xe_lrc *lrc,
return -ENOSPC;
*cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
- *cmd++ = CS_DEBUG_MODE1(0).addr;
+ *cmd++ = CS_DEBUG_MODE2(0).addr;
*cmd++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
return cmd - batch;
diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c
index 33f4ac82fc80..6da42b2b5e46 100644
--- a/drivers/gpu/drm/xe/xe_nvm.c
+++ b/drivers/gpu/drm/xe/xe_nvm.c
@@ -83,6 +83,27 @@ static bool xe_nvm_writable_override(struct xe_device *xe)
return writable_override;
}
+static void xe_nvm_fini(void *arg)
+{
+ struct xe_device *xe = arg;
+ struct intel_dg_nvm_dev *nvm = xe->nvm;
+
+ if (!xe->info.has_gsc_nvm)
+ return;
+
+ /* No access to internal NVM from VFs */
+ if (IS_SRIOV_VF(xe))
+ return;
+
+ /* Nvm pointer should not be NULL here */
+ if (WARN_ON(!nvm))
+ return;
+
+ auxiliary_device_delete(&nvm->aux_dev);
+ auxiliary_device_uninit(&nvm->aux_dev);
+ xe->nvm = NULL;
+}
+
int xe_nvm_init(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -132,39 +153,17 @@ int xe_nvm_init(struct xe_device *xe)
ret = auxiliary_device_init(aux_dev);
if (ret) {
drm_err(&xe->drm, "xe-nvm aux init failed %d\n", ret);
- goto err;
+ kfree(nvm);
+ xe->nvm = NULL;
+ return ret;
}
ret = auxiliary_device_add(aux_dev);
if (ret) {
drm_err(&xe->drm, "xe-nvm aux add failed %d\n", ret);
auxiliary_device_uninit(aux_dev);
- goto err;
+ xe->nvm = NULL;
+ return ret;
}
- return 0;
-
-err:
- kfree(nvm);
- xe->nvm = NULL;
- return ret;
-}
-
-void xe_nvm_fini(struct xe_device *xe)
-{
- struct intel_dg_nvm_dev *nvm = xe->nvm;
-
- if (!xe->info.has_gsc_nvm)
- return;
-
- /* No access to internal NVM from VFs */
- if (IS_SRIOV_VF(xe))
- return;
-
- /* Nvm pointer should not be NULL here */
- if (WARN_ON(!nvm))
- return;
-
- auxiliary_device_delete(&nvm->aux_dev);
- auxiliary_device_uninit(&nvm->aux_dev);
- xe->nvm = NULL;
+ return devm_add_action_or_reset(xe->drm.dev, xe_nvm_fini, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_nvm.h b/drivers/gpu/drm/xe/xe_nvm.h
index 7f3d5f57bed0..fd3467ad35a4 100644
--- a/drivers/gpu/drm/xe/xe_nvm.h
+++ b/drivers/gpu/drm/xe/xe_nvm.h
@@ -10,6 +10,4 @@ struct xe_device;
int xe_nvm_init(struct xe_device *xe);
-void xe_nvm_fini(struct xe_device *xe);
-
#endif
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 9c9ea10d994c..2aa883f5ef79 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -342,7 +342,6 @@ static const struct xe_device_desc lnl_desc = {
.has_display = true,
.has_flat_ccs = 1,
.has_pxp = true,
- .has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
.va_bits = 48,
@@ -363,7 +362,6 @@ static const struct xe_device_desc bmg_desc = {
.has_heci_cscfi = 1,
.has_late_bind = true,
.has_sriov = true,
- .has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
@@ -380,7 +378,6 @@ static const struct xe_device_desc ptl_desc = {
.has_display = true,
.has_flat_ccs = 1,
.has_sriov = true,
- .has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
.needs_shared_vf_gt_wq = true,
@@ -393,7 +390,6 @@ static const struct xe_device_desc nvls_desc = {
.dma_mask_size = 46,
.has_display = true,
.has_flat_ccs = 1,
- .has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.require_force_probe = true,
.va_bits = 48,
@@ -675,7 +671,6 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.has_pxp = desc->has_pxp;
xe->info.has_sriov = xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev)) &&
desc->has_sriov;
- xe->info.has_mem_copy_instr = desc->has_mem_copy_instr;
xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
@@ -864,6 +859,7 @@ static int xe_info_init(struct xe_device *xe,
xe->info.has_range_tlb_inval = graphics_desc->has_range_tlb_inval;
xe->info.has_usm = graphics_desc->has_usm;
xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
+ xe->info.has_mem_copy_instr = GRAPHICS_VER(xe) >= 20;
xe_info_probe_tile_count(xe);
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index 9892c063a9c5..a4451bdc79fb 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -46,7 +46,6 @@ struct xe_device_desc {
u8 has_late_bind:1;
u8 has_llc:1;
u8 has_mbx_power_limits:1;
- u8 has_mem_copy_instr:1;
u8 has_pxp:1;
u8 has_sriov:1;
u8 needs_scratch:1;
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index 378104cd395e..04cc7a9036e4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -1078,6 +1078,9 @@ static int tegra241_vcmdq_hw_init_user(struct tegra241_vcmdq *vcmdq)
{
char header[64];
+ /* Reset VCMDQ */
+ tegra241_vcmdq_hw_deinit(vcmdq);
+
/* Configure the vcmdq only; User space does the enabling */
writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));
diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h
index 52ef028ed2db..d575f3ba9d34 100644
--- a/drivers/iommu/generic_pt/iommu_pt.h
+++ b/drivers/iommu/generic_pt/iommu_pt.h
@@ -931,6 +931,8 @@ static __maybe_unused int __unmap_range(struct pt_range *range, void *arg,
struct pt_table_p *table)
{
struct pt_state pts = pt_init(range, level, table);
+ unsigned int flush_start_index = UINT_MAX;
+ unsigned int flush_end_index = UINT_MAX;
struct pt_unmap_args *unmap = arg;
unsigned int num_oas = 0;
unsigned int start_index;
@@ -986,6 +988,9 @@ static __maybe_unused int __unmap_range(struct pt_range *range, void *arg,
iommu_pages_list_add(&unmap->free_list,
pts.table_lower);
pt_clear_entries(&pts, ilog2(1));
+ if (pts.index < flush_start_index)
+ flush_start_index = pts.index;
+ flush_end_index = pts.index + 1;
}
pts.index++;
} else {
@@ -999,7 +1004,10 @@ start_oa:
num_contig_lg2 = pt_entry_num_contig_lg2(&pts);
pt_clear_entries(&pts, num_contig_lg2);
num_oas += log2_to_int(num_contig_lg2);
+ if (pts.index < flush_start_index)
+ flush_start_index = pts.index;
pts.index += log2_to_int(num_contig_lg2);
+ flush_end_index = pts.index;
}
if (pts.index >= pts.end_index)
break;
@@ -1007,7 +1015,8 @@ start_oa:
} while (true);
unmap->unmapped += log2_mul(num_oas, pt_table_item_lg2sz(&pts));
- flush_writes_range(&pts, start_index, pts.index);
+ if (flush_start_index != flush_end_index)
+ flush_writes_range(&pts, flush_start_index, flush_end_index);
return ret;
}
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index dbe51ecb9a20..f606148920fa 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -289,6 +289,7 @@ static void batch_clear(struct pfn_batch *batch)
batch->end = 0;
batch->pfns[0] = 0;
batch->npfns[0] = 0;
+ batch->kind = 0;
}
/*
diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
index 50a7b38381b9..96f9c20621cf 100644
--- a/drivers/irqchip/irq-ls-extirq.c
+++ b/drivers/irqchip/irq-ls-extirq.c
@@ -168,40 +168,34 @@ ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node)
return 0;
}
-static int __init
-ls_extirq_of_init(struct device_node *node, struct device_node *parent)
+static int ls_extirq_probe(struct platform_device *pdev)
{
struct irq_domain *domain, *parent_domain;
+ struct device_node *node, *parent;
+ struct device *dev = &pdev->dev;
struct ls_extirq_data *priv;
int ret;
+ node = dev->of_node;
+ parent = of_irq_find_parent(node);
+ if (!parent)
+ return dev_err_probe(dev, -ENODEV, "Failed to get IRQ parent node\n");
+
parent_domain = irq_find_host(parent);
- if (!parent_domain) {
- pr_err("Cannot find parent domain\n");
- ret = -ENODEV;
- goto err_irq_find_host;
- }
+ if (!parent_domain)
+ return dev_err_probe(dev, -EPROBE_DEFER, "Cannot find parent domain\n");
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto err_alloc_priv;
- }
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return dev_err_probe(dev, -ENOMEM, "Failed to allocate memory\n");
- /*
- * All extirq OF nodes are under a scfg/syscon node with
- * the 'ranges' property
- */
- priv->intpcr = of_iomap(node, 0);
- if (!priv->intpcr) {
- pr_err("Cannot ioremap OF node %pOF\n", node);
- ret = -ENOMEM;
- goto err_iomap;
- }
+ priv->intpcr = devm_of_iomap(dev, node, 0, NULL);
+ if (!priv->intpcr)
+ return dev_err_probe(dev, -ENOMEM, "Cannot ioremap OF node %pOF\n", node);
ret = ls_extirq_parse_map(priv, node);
if (ret)
- goto err_parse_map;
+ return dev_err_probe(dev, ret, "Failed to parse IRQ map\n");
priv->big_endian = of_device_is_big_endian(node->parent);
priv->is_ls1021a_or_ls1043a = of_device_is_compatible(node, "fsl,ls1021a-extirq") ||
@@ -210,23 +204,26 @@ ls_extirq_of_init(struct device_node *node, struct device_node *parent)
domain = irq_domain_create_hierarchy(parent_domain, 0, priv->nirq, of_fwnode_handle(node),
&extirq_domain_ops, priv);
- if (!domain) {
- ret = -ENOMEM;
- goto err_add_hierarchy;
- }
+ if (!domain)
+ return dev_err_probe(dev, -ENOMEM, "Failed to add IRQ domain\n");
return 0;
-
-err_add_hierarchy:
-err_parse_map:
- iounmap(priv->intpcr);
-err_iomap:
- kfree(priv);
-err_alloc_priv:
-err_irq_find_host:
- return ret;
}
-IRQCHIP_DECLARE(ls1021a_extirq, "fsl,ls1021a-extirq", ls_extirq_of_init);
-IRQCHIP_DECLARE(ls1043a_extirq, "fsl,ls1043a-extirq", ls_extirq_of_init);
-IRQCHIP_DECLARE(ls1088a_extirq, "fsl,ls1088a-extirq", ls_extirq_of_init);
+static const struct of_device_id ls_extirq_dt_ids[] = {
+ { .compatible = "fsl,ls1021a-extirq" },
+ { .compatible = "fsl,ls1043a-extirq" },
+ { .compatible = "fsl,ls1088a-extirq" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ls_extirq_dt_ids);
+
+static struct platform_driver ls_extirq_driver = {
+ .probe = ls_extirq_probe,
+ .driver = {
+ .name = "ls-extirq",
+ .of_match_table = ls_extirq_dt_ids,
+ }
+};
+
+builtin_platform_driver(ls_extirq_driver);
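
The ls-extirq rework above moves the driver from IRQCHIP_DECLARE() to a regular platform driver whose probe path uses devm-managed resources and dev_err_probe(), which also gives it -EPROBE_DEFER handling for free. A bare-bones skeleton of that probe style, assuming a fictitious "vendor,foo" device (the compatible string, the foo_* names, and the single MMIO region are invented for illustration):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct foo_priv {
	void __iomem *base;
};

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_priv *priv;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* devm_platform_ioremap_resource() unmaps automatically on unbind */
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return dev_err_probe(dev, PTR_ERR(priv->base),
				     "failed to map registers\n");

	platform_set_drvdata(pdev, priv);
	return 0;
}

static const struct of_device_id foo_dt_ids[] = {
	{ .compatible = "vendor,foo" },	/* placeholder compatible string */
	{}
};
MODULE_DEVICE_TABLE(of, foo_dt_ids);

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
		.of_match_table = foo_dt_ids,
	},
};
builtin_platform_driver(foo_driver);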
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a02aecac05cd..3fa3b13a410f 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1107,17 +1107,13 @@ static void detached_dev_do_request(struct bcache_device *d,
if (bio_op(orig_bio) == REQ_OP_DISCARD &&
!bdev_max_discard_sectors(dc->bdev)) {
+ bio_end_io_acct(orig_bio, start_time);
bio_endio(orig_bio);
return;
}
clone_bio = bio_alloc_clone(dc->bdev, orig_bio, GFP_NOIO,
&d->bio_detached);
- if (!clone_bio) {
- orig_bio->bi_status = BLK_STS_RESOURCE;
- bio_endio(orig_bio);
- return;
- }
ddip = container_of(clone_bio, struct detached_dev_io_private, bio);
/* Count on the bcache device */
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index e60e4ac1fd6f..3e86f346f751 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -215,7 +215,7 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
- 0x7f),
+ 0x7f, 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 01b1bbcce481..1b4e37d000b9 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -1089,6 +1089,9 @@ static int adin1110_check_spi(struct adin1110_priv *priv)
reset_gpio = devm_gpiod_get_optional(&priv->spidev->dev, "reset",
GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio))
+ return dev_err_probe(&priv->spidev->dev, PTR_ERR(reset_gpio),
+ "failed to get reset gpio\n");
if (reset_gpio) {
/* MISO pin is used for internal configuration, can't have
* anyone else disturbing the SDO line.
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 0732440eeacd..c1a3df225254 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -3505,6 +3505,23 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
*/
netdev->netdev_ops = &lionetdevops;
+ lio = GET_LIO(netdev);
+
+ memset(lio, 0, sizeof(struct lio));
+
+ lio->ifidx = ifidx_or_pfnum;
+
+ props = &octeon_dev->props[i];
+ props->gmxport = resp->cfg_info.linfo.gmxport;
+ props->netdev = netdev;
+
+ /* Point to the properties for octeon device to which this
+ * interface belongs.
+ */
+ lio->oct_dev = octeon_dev;
+ lio->octprops = props;
+ lio->netdev = netdev;
+
retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
if (retval) {
dev_err(&octeon_dev->pci_dev->dev,
@@ -3521,16 +3538,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
goto setup_nic_dev_free;
}
- lio = GET_LIO(netdev);
-
- memset(lio, 0, sizeof(struct lio));
-
- lio->ifidx = ifidx_or_pfnum;
-
- props = &octeon_dev->props[i];
- props->gmxport = resp->cfg_info.linfo.gmxport;
- props->netdev = netdev;
-
lio->linfo.num_rxpciq = num_oqueues;
lio->linfo.num_txpciq = num_iqueues;
for (j = 0; j < num_oqueues; j++) {
@@ -3596,13 +3603,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
netdev->min_mtu = LIO_MIN_MTU_SIZE;
netdev->max_mtu = LIO_MAX_MTU_SIZE;
- /* Point to the properties for octeon device to which this
- * interface belongs.
- */
- lio->oct_dev = octeon_dev;
- lio->octprops = props;
- lio->netdev = netdev;
-
dev_dbg(&octeon_dev->pci_dev->dev,
"if%d gmx: %d hw_addr: 0x%llx\n", i,
lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
@@ -3750,6 +3750,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if (!devlink) {
device_unlock(&octeon_dev->pci_dev->dev);
dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
+ i--;
goto setup_nic_dev_free;
}
@@ -3765,11 +3766,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
setup_nic_dev_free:
- while (i--) {
+ do {
dev_err(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup failed\n", i);
liquidio_destroy_nic_device(octeon_dev, i);
- }
+ } while (i--);
setup_nic_dev_done:
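
The liquidio unwind change above swaps "while (i--)" for "do { ... } while (i--)" so the index reached when setup fails is also passed to liquidio_destroy_nic_device(), and adjusts i on the devlink failure path to match the new loop bounds. The off-by-one difference between the two loop forms is easiest to see in a tiny stand-alone program:

#include <stdio.h>

static void destroy(int idx)
{
	printf("destroy %d\n", idx);
}

int main(void)
{
	int i = 2;	/* say interfaces 0 and 1 are up and setup of 2 failed */

	/*
	 * "while (i--) destroy(i);" would only print 1 and 0, skipping the
	 * index that failed. The do/while form below also covers index 2.
	 */
	do {
		destroy(i);	/* prints 2, 1, 0 */
	} while (i--);

	return 0;
}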
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index e02942dbbcce..43c595f3b84e 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -2212,11 +2212,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
setup_nic_dev_free:
- while (i--) {
+ do {
dev_err(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup failed\n", i);
liquidio_destroy_nic_device(octeon_dev, i);
- }
+ } while (i--);
setup_nic_dev_done:
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index b1e1ad9e4b48..66240c340492 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -1531,6 +1531,10 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
}
if_id = (status & 0xFFFF0000) >> 16;
+ if (if_id >= ethsw->sw_attr.num_ifs) {
+ dev_err(dev, "Invalid if_id %d in IRQ status\n", if_id);
+ goto out;
+ }
port_priv = ethsw->ports[if_id];
if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
@@ -3024,6 +3028,12 @@ static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
goto err_close;
}
+ if (!ethsw->sw_attr.num_ifs) {
+ dev_err(dev, "DPSW device has no interfaces\n");
+ err = -ENODEV;
+ goto err_close;
+ }
+
err = dpsw_get_api_version(ethsw->mc_io, 0,
&ethsw->major,
&ethsw->minor);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 53b26cece16a..e380a4f39855 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2512,10 +2512,13 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
struct enetc_hw *hw = &si->hw;
int err;
- /* set SI cache attributes */
- enetc_wr(hw, ENETC_SICAR0,
- ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
- enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
+ if (is_enetc_rev1(si)) {
+ /* set SI cache attributes */
+ enetc_wr(hw, ENETC_SICAR0,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+ enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
+ }
+
/* enable SI */
enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
index 498346dd996a..5850540634b0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -59,10 +59,10 @@ static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si,
if (si != 0) {
__raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si));
- __raw_writew(lower, hw->port + ENETC4_PSIPMAR1(si));
+ __raw_writel(lower, hw->port + ENETC4_PSIPMAR1(si));
} else {
__raw_writel(upper, hw->port + ENETC4_PMAR0);
- __raw_writew(lower, hw->port + ENETC4_PMAR1);
+ __raw_writel(lower, hw->port + ENETC4_PMAR1);
}
}
@@ -73,7 +73,7 @@ static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si,
u16 lower;
upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si));
- lower = __raw_readw(hw->port + ENETC4_PSIPMAR1(si));
+ lower = __raw_readl(hw->port + ENETC4_PSIPMAR1(si));
put_unaligned_le32(upper, addr);
put_unaligned_le16(lower, addr + 4);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 3d5f31879d5c..a635bfdc30af 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -74,10 +74,6 @@ int enetc4_setup_cbdr(struct enetc_si *si)
if (!user->ring)
return -ENOMEM;
- /* set CBDR cache attributes */
- enetc_wr(hw, ENETC_SICAR2,
- ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
-
regs.pir = hw->reg + ENETC_SICBDRPIR;
regs.cir = hw->reg + ENETC_SICBDRCIR;
regs.mr = hw->reg + ENETC_SICBDRMR;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 7b882b8921fe..662e4fbafb74 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -708,13 +708,24 @@ struct enetc_cmd_rfse {
#define ENETC_RFSE_EN BIT(15)
#define ENETC_RFSE_MODE_BD 2
+static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr)
+{
+ u32 upper;
+ u16 lower;
+
+ upper = __raw_readl(hw->reg + ENETC_SIPMAR0);
+ lower = __raw_readl(hw->reg + ENETC_SIPMAR1);
+
+ put_unaligned_le32(upper, addr);
+ put_unaligned_le16(lower, addr + 4);
+}
+
static inline void enetc_load_primary_mac_addr(struct enetc_hw *hw,
struct net_device *ndev)
{
- u8 addr[ETH_ALEN] __aligned(4);
+ u8 addr[ETH_ALEN];
- *(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0);
- *(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1);
+ enetc_get_primary_mac_addr(hw, addr);
eth_hw_addr_set(ndev, addr);
}
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 42f0fde07f01..42a0a6f7b296 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -152,11 +152,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
- tmp_tx_pkts, tmp_tx_bytes;
+ tmp_tx_pkts, tmp_tx_bytes,
+ tmp_xdp_tx_errors, tmp_xdp_redirect_errors;
u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
- tx_dropped;
- int stats_idx, base_stats_idx, max_stats_idx;
+ tx_dropped, xdp_tx_errors, xdp_redirect_errors;
+ int rx_base_stats_idx, max_rx_stats_idx, max_tx_stats_idx;
+ int stats_idx, stats_region_len, nic_stats_len;
struct stats *report_stats;
int *rx_qid_to_stats_idx;
int *tx_qid_to_stats_idx;
@@ -198,6 +200,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
+ xdp_tx_errors = 0, xdp_redirect_errors = 0,
ring = 0;
ring < priv->rx_cfg.num_queues; ring++) {
if (priv->rx) {
@@ -215,6 +218,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
rx->rx_desc_err_dropped_pkt;
tmp_rx_hsplit_unsplit_pkt =
rx->rx_hsplit_unsplit_pkt;
+ tmp_xdp_tx_errors = rx->xdp_tx_errors;
+ tmp_xdp_redirect_errors =
+ rx->xdp_redirect_errors;
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
rx_pkts += tmp_rx_pkts;
@@ -224,6 +230,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
+ xdp_tx_errors += tmp_xdp_tx_errors;
+ xdp_redirect_errors += tmp_xdp_redirect_errors;
}
}
for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
@@ -249,8 +257,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx_bytes;
data[i++] = tx_bytes;
/* total rx dropped packets */
- data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
- rx_desc_err_dropped_pkt;
+ data[i++] = rx_skb_alloc_fail + rx_desc_err_dropped_pkt +
+ xdp_tx_errors + xdp_redirect_errors;
data[i++] = tx_dropped;
data[i++] = priv->tx_timeo_cnt;
data[i++] = rx_skb_alloc_fail;
@@ -265,20 +273,38 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->stats_report_trigger_cnt;
i = GVE_MAIN_STATS_LEN;
- /* For rx cross-reporting stats, start from nic rx stats in report */
- base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
- GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
- /* The boundary between driver stats and NIC stats shifts if there are
- * stopped queues.
- */
- base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
- NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
- max_stats_idx = NIC_RX_STATS_REPORT_NUM *
- (priv->rx_cfg.num_queues - num_stopped_rxqs) +
- base_stats_idx;
+ rx_base_stats_idx = 0;
+ max_rx_stats_idx = 0;
+ max_tx_stats_idx = 0;
+ stats_region_len = priv->stats_report_len -
+ sizeof(struct gve_stats_report);
+ nic_stats_len = (NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
+ NIC_TX_STATS_REPORT_NUM * num_tx_queues) * sizeof(struct stats);
+ if (unlikely((stats_region_len -
+ nic_stats_len) % sizeof(struct stats))) {
+ net_err_ratelimited("Starting index of NIC stats should be multiple of stats size");
+ } else {
+ /* For rx cross-reporting stats,
+ * start from nic rx stats in report
+ */
+ rx_base_stats_idx = (stats_region_len - nic_stats_len) /
+ sizeof(struct stats);
+ /* The boundary between driver stats and NIC stats
+ * shifts if there are stopped queues
+ */
+ rx_base_stats_idx += NIC_RX_STATS_REPORT_NUM *
+ num_stopped_rxqs + NIC_TX_STATS_REPORT_NUM *
+ num_stopped_txqs;
+ max_rx_stats_idx = NIC_RX_STATS_REPORT_NUM *
+ (priv->rx_cfg.num_queues - num_stopped_rxqs) +
+ rx_base_stats_idx;
+ max_tx_stats_idx = NIC_TX_STATS_REPORT_NUM *
+ (num_tx_queues - num_stopped_txqs) +
+ max_rx_stats_idx;
+ }
/* Preprocess the stats report for rx, map queue id to start index */
skip_nic_stats = false;
- for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
+ for (stats_idx = rx_base_stats_idx; stats_idx < max_rx_stats_idx;
stats_idx += NIC_RX_STATS_REPORT_NUM) {
u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
@@ -311,6 +337,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
+ tmp_xdp_tx_errors = rx->xdp_tx_errors;
+ tmp_xdp_redirect_errors =
+ rx->xdp_redirect_errors;
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
@@ -321,8 +350,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->rx_frag_alloc_cnt;
/* rx dropped packets */
data[i++] = tmp_rx_skb_alloc_fail +
- tmp_rx_buf_alloc_fail +
- tmp_rx_desc_err_dropped_pkt;
+ tmp_rx_desc_err_dropped_pkt +
+ tmp_xdp_tx_errors +
+ tmp_xdp_redirect_errors;
data[i++] = rx->rx_copybreak_pkt;
data[i++] = rx->rx_copied_pkt;
/* stats from NIC */
@@ -354,14 +384,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
}
- /* For tx cross-reporting stats, start from nic tx stats in report */
- base_stats_idx = max_stats_idx;
- max_stats_idx = NIC_TX_STATS_REPORT_NUM *
- (num_tx_queues - num_stopped_txqs) +
- max_stats_idx;
- /* Preprocess the stats report for tx, map queue id to start index */
skip_nic_stats = false;
- for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
+ /* NIC TX stats start right after NIC RX stats */
+ for (stats_idx = max_rx_stats_idx; stats_idx < max_tx_stats_idx;
stats_idx += NIC_TX_STATS_REPORT_NUM) {
u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 97433829b9ed..0ee864b0afe0 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -283,9 +283,9 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
int tx_stats_num, rx_stats_num;
tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
- gve_num_tx_queues(priv);
+ priv->tx_cfg.max_queues;
rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
- priv->rx_cfg.num_queues;
+ priv->rx_cfg.max_queues;
priv->stats_report_len = struct_size(priv->stats_report, stats,
size_add(tx_stats_num, rx_stats_num));
priv->stats_report =
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 0b1cc0481027..d3bc3207054f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -9030,7 +9030,6 @@ int i40e_open(struct net_device *netdev)
TCP_FLAG_FIN |
TCP_FLAG_CWR) >> 16);
wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
- udp_tunnel_get_rx_info(netdev);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 3ce8bc7836cd..4da37caa3ec9 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3314,18 +3314,20 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
if (ice_is_reset_in_progress(pf->state))
goto skip_irq;
- if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
- /* Process outstanding Tx timestamps. If there is more work,
- * re-arm the interrupt to trigger again.
- */
- if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
- wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
- ice_flush(hw);
- }
- }
+ if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread))
+ ice_ptp_process_ts(pf);
skip_irq:
ice_irq_dynamic_ena(hw, NULL, NULL);
+ ice_flush(hw);
+
+ if (ice_ptp_tx_tstamps_pending(pf)) {
+ /* If any new Tx timestamps happened while in interrupt,
+ * re-arm the interrupt to trigger it again.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
return IRQ_HANDLED;
}
@@ -7863,6 +7865,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* Restore timestamp mode settings after VSI rebuild */
ice_ptp_restore_timestamp_mode(pf);
+
+ /* Start PTP periodic work after VSI is fully rebuilt */
+ ice_ptp_queue_work(pf);
return;
err_vsi_rebuild:
@@ -9713,9 +9718,6 @@ int ice_open_internal(struct net_device *netdev)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
- /* Update existing tunnels information */
- udp_tunnel_get_rx_info(netdev);
-
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 1d26be58e29a..22c3986b910a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -573,6 +573,9 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
pf = ptp_port_to_pf(ptp_port);
hw = &pf->hw;
+ if (!tx->init)
+ return;
+
/* Read the Tx ready status first */
if (tx->has_ready_bitmap) {
err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
@@ -674,14 +677,9 @@ skip_ts_read:
pf->ptp.tx_hwtstamp_good += tstamp_good;
}
-/**
- * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
- * @pf: Board private structure
- */
-static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
+static void ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
{
struct ice_ptp_port *port;
- unsigned int i;
mutex_lock(&pf->adapter->ports.lock);
list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
@@ -693,49 +691,6 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
ice_ptp_process_tx_tstamp(tx);
}
mutex_unlock(&pf->adapter->ports.lock);
-
- for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
- u64 tstamp_ready;
- int err;
-
- /* Read the Tx ready status first */
- err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
- if (err)
- break;
- else if (tstamp_ready)
- return ICE_TX_TSTAMP_WORK_PENDING;
- }
-
- return ICE_TX_TSTAMP_WORK_DONE;
-}
-
-/**
- * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
- * @tx: Tx tracking structure to initialize
- *
- * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
- * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
- */
-static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
-{
- bool more_timestamps;
- unsigned long flags;
-
- if (!tx->init)
- return ICE_TX_TSTAMP_WORK_DONE;
-
- /* Process the Tx timestamp tracker */
- ice_ptp_process_tx_tstamp(tx);
-
- /* Check if there are outstanding Tx timestamps */
- spin_lock_irqsave(&tx->lock, flags);
- more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
- spin_unlock_irqrestore(&tx->lock, flags);
-
- if (more_timestamps)
- return ICE_TX_TSTAMP_WORK_PENDING;
-
- return ICE_TX_TSTAMP_WORK_DONE;
}
/**
@@ -1379,9 +1334,12 @@ void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
/* Do not reconfigure E810 or E830 PHY */
return;
case ICE_MAC_GENERIC:
- case ICE_MAC_GENERIC_3K_E825:
ice_ptp_port_phy_restart(ptp_port);
return;
+ case ICE_MAC_GENERIC_3K_E825:
+ if (linkup)
+ ice_ptp_port_phy_restart(ptp_port);
+ return;
default:
dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
}
@@ -2695,30 +2653,92 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
return idx + tx->offset;
}
-/**
- * ice_ptp_process_ts - Process the PTP Tx timestamps
- * @pf: Board private structure
- *
- * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
- * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
- */
-enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
+void ice_ptp_process_ts(struct ice_pf *pf)
{
switch (pf->ptp.tx_interrupt_mode) {
case ICE_PTP_TX_INTERRUPT_NONE:
/* This device has the clock owner handle timestamps for it */
- return ICE_TX_TSTAMP_WORK_DONE;
+ return;
case ICE_PTP_TX_INTERRUPT_SELF:
/* This device handles its own timestamps */
- return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
+ ice_ptp_process_tx_tstamp(&pf->ptp.port.tx);
+ return;
case ICE_PTP_TX_INTERRUPT_ALL:
/* This device handles timestamps for all ports */
- return ice_ptp_tx_tstamp_owner(pf);
+ ice_ptp_tx_tstamp_owner(pf);
+ return;
+ default:
+ WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
+ pf->ptp.tx_interrupt_mode);
+ return;
+ }
+}
+
+static bool ice_port_has_timestamps(struct ice_ptp_tx *tx)
+{
+ bool more_timestamps;
+
+ scoped_guard(spinlock_irqsave, &tx->lock) {
+ if (!tx->init)
+ return false;
+
+ more_timestamps = !bitmap_empty(tx->in_use, tx->len);
+ }
+
+ return more_timestamps;
+}
+
+static bool ice_any_port_has_timestamps(struct ice_pf *pf)
+{
+ struct ice_ptp_port *port;
+
+ scoped_guard(mutex, &pf->adapter->ports.lock) {
+ list_for_each_entry(port, &pf->adapter->ports.ports,
+ list_node) {
+ struct ice_ptp_tx *tx = &port->tx;
+
+ if (ice_port_has_timestamps(tx))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ unsigned int i;
+
+ /* Check software indicator */
+ switch (pf->ptp.tx_interrupt_mode) {
+ case ICE_PTP_TX_INTERRUPT_NONE:
+ return false;
+ case ICE_PTP_TX_INTERRUPT_SELF:
+ if (ice_port_has_timestamps(&pf->ptp.port.tx))
+ return true;
+ break;
+ case ICE_PTP_TX_INTERRUPT_ALL:
+ if (ice_any_port_has_timestamps(pf))
+ return true;
+ break;
default:
WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
pf->ptp.tx_interrupt_mode);
- return ICE_TX_TSTAMP_WORK_DONE;
+ break;
+ }
+
+ /* Check hardware indicator */
+ for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
+ u64 tstamp_ready = 0;
+ int err;
+
+ err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
+ if (err || tstamp_ready)
+ return true;
}
+
+ return false;
}
/**
@@ -2770,7 +2790,9 @@ irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
return IRQ_WAKE_THREAD;
case ICE_MAC_E830:
/* E830 can read timestamps in the top half using rd32() */
- if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
+ ice_ptp_process_ts(pf);
+
+ if (ice_ptp_tx_tstamps_pending(pf)) {
/* Process outstanding Tx timestamps. If there
* is more work, re-arm the interrupt to trigger again.
*/
@@ -2850,6 +2872,20 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
+ * ice_ptp_queue_work - Queue PTP periodic work for a PF
+ * @pf: Board private structure
+ *
+ * Helper function to queue PTP periodic work after VSI rebuild completes.
+ * This ensures that PTP work only runs when VSI structures are ready.
+ */
+void ice_ptp_queue_work(struct ice_pf *pf)
+{
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags) &&
+ pf->ptp.state == ICE_PTP_READY)
+ kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
+}
+
+/**
* ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
* @pf: Board private structure
* @rebuild: rebuild if true, prepare if false
@@ -2867,10 +2903,15 @@ static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
struct ice_pf *peer_pf = ptp_port_to_pf(port);
if (!ice_is_primary(&peer_pf->hw)) {
- if (rebuild)
+ if (rebuild) {
+ /* TODO: When implementing rebuild=true:
+ * 1. Ensure secondary PFs' VSIs are rebuilt
+ * 2. Call ice_ptp_queue_work(peer_pf) after VSI rebuild
+ */
ice_ptp_rebuild(peer_pf, reset_type);
- else
+ } else {
ice_ptp_prepare_for_reset(peer_pf, reset_type);
+ }
}
}
}
@@ -3016,9 +3057,6 @@ void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ptp->state = ICE_PTP_READY;
- /* Start periodic work going */
- kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
-
dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
return;
@@ -3223,8 +3261,9 @@ static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
switch (pf->hw.mac_type) {
case ICE_MAC_GENERIC:
- /* E822 based PHY has the clock owner process the interrupt
- * for all ports.
+ case ICE_MAC_GENERIC_3K_E825:
+ /* E82x hardware has the clock owner process timestamps for
+ * all ports.
*/
if (ice_pf_src_tmr_owned(pf))
pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
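
The reworked ice helpers above use scoped_guard() from <linux/cleanup.h> so the timestamp spinlock and the ports mutex are released automatically, including on early returns. A condensed restatement of that locking shape with a generic tracker struct (the foo_* names are placeholders, not ice types):

#include <linux/bitmap.h>
#include <linux/cleanup.h>
#include <linux/spinlock.h>

struct foo_tracker {
	spinlock_t lock;
	unsigned long *in_use;
	unsigned int len;
	bool init;
};

static bool foo_has_pending(struct foo_tracker *t)
{
	bool pending = false;

	scoped_guard(spinlock_irqsave, &t->lock) {
		if (!t->init)
			return false;	/* the guard drops the lock on early return */
		pending = !bitmap_empty(t->in_use, t->len);
	}

	return pending;
}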
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 27016aac4f1e..8c44bd758a4f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -304,8 +304,9 @@ void ice_ptp_extts_event(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
-enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
+void ice_ptp_process_ts(struct ice_pf *pf);
irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf);
+bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf);
u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
struct ptp_system_timestamp *sts);
@@ -317,6 +318,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf,
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
+void ice_ptp_queue_work(struct ice_pf *pf);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_hwtstamp_get(struct net_device *netdev,
@@ -345,16 +347,18 @@ static inline void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
static inline void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { }
-static inline bool ice_ptp_process_ts(struct ice_pf *pf)
-{
- return true;
-}
+static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
{
return IRQ_HANDLED;
}
+static inline bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf)
+{
+ return false;
+}
+
static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
struct ptp_system_timestamp *sts)
{
@@ -383,6 +387,10 @@ static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
}
+static inline void ice_ptp_queue_work(struct ice_pf *pf)
+{
+}
+
static inline int ice_ptp_clock_index(struct ice_pf *pf)
{
return -1;
diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c
index 40c9504b7444..dab0772c5b9d 100644
--- a/drivers/net/ethernet/spacemit/k1_emac.c
+++ b/drivers/net/ethernet/spacemit/k1_emac.c
@@ -12,6 +12,7 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -38,7 +39,7 @@
#define EMAC_DEFAULT_BUFSIZE 1536
#define EMAC_RX_BUF_2K 2048
-#define EMAC_RX_BUF_4K 4096
+#define EMAC_RX_BUF_MAX FIELD_MAX(RX_DESC_1_BUFFER_SIZE_1_MASK)
/* Tuning parameters from SpacemiT */
#define EMAC_TX_FRAMES 64
@@ -193,7 +194,7 @@ static void emac_reset_hw(struct emac_priv *priv)
static void emac_init_hw(struct emac_priv *priv)
{
- u32 rxirq = 0, dma = 0;
+ u32 rxirq = 0, dma = 0, frame_sz;
regmap_set_bits(priv->regmap_apmu,
priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
@@ -218,6 +219,15 @@ static void emac_init_hw(struct emac_priv *priv)
DEFAULT_TX_THRESHOLD);
emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);
+ /* Set maximum frame size and jabber size based on configured MTU,
+ * accounting for Ethernet header, double VLAN tags, and FCS.
+ */
+ frame_sz = priv->ndev->mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
+
+ emac_wr(priv, MAC_MAXIMUM_FRAME_SIZE, frame_sz);
+ emac_wr(priv, MAC_TRANSMIT_JABBER_SIZE, frame_sz);
+ emac_wr(priv, MAC_RECEIVE_JABBER_SIZE, frame_sz);
+
/* RX IRQ mitigation */
rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
EMAC_RX_FRAMES);
@@ -908,14 +918,14 @@ static int emac_change_mtu(struct net_device *ndev, int mtu)
return -EBUSY;
}
- frame_len = mtu + ETH_HLEN + ETH_FCS_LEN;
+ frame_len = mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
if (frame_len <= EMAC_DEFAULT_BUFSIZE)
priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
else if (frame_len <= EMAC_RX_BUF_2K)
priv->dma_buf_sz = EMAC_RX_BUF_2K;
else
- priv->dma_buf_sz = EMAC_RX_BUF_4K;
+ priv->dma_buf_sz = EMAC_RX_BUF_MAX;
ndev->mtu = mtu;
@@ -1917,7 +1927,7 @@ static int emac_probe(struct platform_device *pdev)
ndev->hw_features = NETIF_F_SG;
ndev->features |= ndev->hw_features;
- ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN);
+ ndev->max_mtu = EMAC_RX_BUF_MAX - (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN);
ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
priv = netdev_priv(ndev);
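
For reference, with the standard header sizes (ETH_HLEN = 14, VLAN_HLEN = 4, ETH_FCS_LEN = 4) the new k1-emac formula gives frame_sz = 1500 + 14 + 2*4 + 4 = 1526 bytes for the default 1500-byte MTU, and 9026 bytes for a 9000-byte jumbo MTU; the same 26-byte overhead, subtracted from EMAC_RX_BUF_MAX, is what now bounds ndev->max_mtu.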
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b756a0f09e1a..c63099a77cc0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -8093,7 +8093,7 @@ int stmmac_suspend(struct device *dev)
u32 chan;
if (!ndev || !netif_running(ndev))
- return 0;
+ goto suspend_bsp;
mutex_lock(&priv->lock);
@@ -8132,6 +8132,7 @@ int stmmac_suspend(struct device *dev)
if (stmmac_fpe_supported(priv))
ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
+suspend_bsp:
if (priv->plat->suspend)
return priv->plat->suspend(dev, priv->plat->bsp_priv);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 54c24cd3d3be..b0e18bdc2c85 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -305,12 +305,19 @@ static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
return 0;
}
-static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+static void cpsw_ndo_set_rx_mode_work(struct work_struct *work)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_priv *priv = container_of(work, struct cpsw_priv, rx_mode_work);
struct cpsw_common *cpsw = priv->cpsw;
+ struct net_device *ndev = priv->ndev;
int slave_port = -1;
+ rtnl_lock();
+ if (!netif_running(ndev))
+ goto unlock_rtnl;
+
+ netif_addr_lock_bh(ndev);
+
if (cpsw->data.dual_emac)
slave_port = priv->emac_port + 1;
@@ -318,7 +325,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* Enable promiscuous mode */
cpsw_set_promiscious(ndev, true);
cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
- return;
+ goto unlock_addr;
} else {
/* Disable promiscuous mode */
cpsw_set_promiscious(ndev, false);
@@ -331,6 +338,18 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* add/remove mcast address either for real netdev or for vlan */
__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
cpsw_del_mc_addr);
+
+unlock_addr:
+ netif_addr_unlock_bh(ndev);
+unlock_rtnl:
+ rtnl_unlock();
+}
+
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ schedule_work(&priv->rx_mode_work);
}
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
@@ -1472,6 +1491,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
priv_sl2->ndev = ndev;
priv_sl2->dev = &ndev->dev;
priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ INIT_WORK(&priv_sl2->rx_mode_work, cpsw_ndo_set_rx_mode_work);
if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
@@ -1653,6 +1673,7 @@ static int cpsw_probe(struct platform_device *pdev)
priv->dev = dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->emac_port = 0;
+ INIT_WORK(&priv->rx_mode_work, cpsw_ndo_set_rx_mode_work);
if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
@@ -1758,6 +1779,8 @@ clean_runtime_disable_ret:
static void cpsw_remove(struct platform_device *pdev)
{
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
+ struct net_device *ndev;
+ struct cpsw_priv *priv;
int i, ret;
ret = pm_runtime_resume_and_get(&pdev->dev);
@@ -1770,9 +1793,15 @@ static void cpsw_remove(struct platform_device *pdev)
return;
}
- for (i = 0; i < cpsw->data.slaves; i++)
- if (cpsw->slaves[i].ndev)
- unregister_netdev(cpsw->slaves[i].ndev);
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!ndev)
+ continue;
+
+ priv = netdev_priv(ndev);
+ unregister_netdev(ndev);
+ disable_work_sync(&priv->rx_mode_work);
+ }
cpts_release(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index ab88d4c02cbd..21af0a10626a 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -248,16 +248,22 @@ static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
return 0;
}
-static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+static void cpsw_ndo_set_rx_mode_work(struct work_struct *work)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_priv *priv = container_of(work, struct cpsw_priv, rx_mode_work);
struct cpsw_common *cpsw = priv->cpsw;
+ struct net_device *ndev = priv->ndev;
+ rtnl_lock();
+ if (!netif_running(ndev))
+ goto unlock_rtnl;
+
+ netif_addr_lock_bh(ndev);
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
cpsw_set_promiscious(ndev, true);
cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port);
- return;
+ goto unlock_addr;
}
/* Disable promiscuous mode */
@@ -270,6 +276,18 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* add/remove mcast address either for real netdev or for vlan */
__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
cpsw_del_mc_addr);
+
+unlock_addr:
+ netif_addr_unlock_bh(ndev);
+unlock_rtnl:
+ rtnl_unlock();
+}
+
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ schedule_work(&priv->rx_mode_work);
}
static unsigned int cpsw_rxbuf_total_len(unsigned int len)
@@ -1398,6 +1416,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->emac_port = i + 1;
priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
+ INIT_WORK(&priv->rx_mode_work, cpsw_ndo_set_rx_mode_work);
if (is_valid_ether_addr(slave_data->mac_addr)) {
ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
@@ -1447,13 +1466,18 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
static void cpsw_unregister_ports(struct cpsw_common *cpsw)
{
+ struct net_device *ndev;
+ struct cpsw_priv *priv;
int i = 0;
for (i = 0; i < cpsw->data.slaves; i++) {
- if (!cpsw->slaves[i].ndev)
+ ndev = cpsw->slaves[i].ndev;
+ if (!ndev)
continue;
- unregister_netdev(cpsw->slaves[i].ndev);
+ priv = netdev_priv(ndev);
+ unregister_netdev(ndev);
+ disable_work_sync(&priv->rx_mode_work);
}
}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 91add8925e23..acb6181c5c9e 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -391,6 +391,7 @@ struct cpsw_priv {
u32 tx_packet_min;
struct cpsw_ale_ratelimit ale_bc_ratelimit;
struct cpsw_ale_ratelimit ale_mc_ratelimit;
+ struct work_struct rx_mode_work;
};
#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
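
The cpsw/cpsw_new changes above move the rx-mode update out of ndo_set_rx_mode(), which is called with the address lock held in atomic context, into a work item that can take rtnl_lock() and sleep. A minimal sketch of that deferral pattern with hypothetical foo_* names (the actual filter programming is elided):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct net_device *ndev;
	struct work_struct rx_mode_work;
};

/* INIT_WORK(&priv->rx_mode_work, foo_set_rx_mode_work) is done at probe time. */
static void foo_set_rx_mode_work(struct work_struct *work)
{
	struct foo_priv *priv = container_of(work, struct foo_priv, rx_mode_work);
	struct net_device *ndev = priv->ndev;

	rtnl_lock();
	if (netif_running(ndev)) {
		netif_addr_lock_bh(ndev);
		/* ... reprogram promisc/allmulti and the multicast filter ... */
		netif_addr_unlock_bh(ndev);
	}
	rtnl_unlock();
}

/* ndo_set_rx_mode runs under netif_addr_lock_bh(); just kick the worker. */
static void foo_ndo_set_rx_mode(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->rx_mode_work);
}

As in the patches, the work item must be flushed or disabled (the hunks use disable_work_sync()) after unregister_netdev() so it cannot run against a torn-down device.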
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b4df7e184791..c509228be84d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1567,9 +1567,10 @@ destroy_macvlan_port:
/* the macvlan port may be freed by macvlan_uninit when fail to register.
* so we destroy the macvlan port only when it's valid.
*/
- if (create && macvlan_port_get_rtnl(lowerdev)) {
+ if (macvlan_port_get_rtnl(lowerdev)) {
macvlan_flush_sources(port, vlan);
- macvlan_port_destroy(port->dev);
+ if (create)
+ macvlan_port_destroy(port->dev);
}
return err;
}
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 4746a8af3a8d..43aefdd8b70f 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -479,6 +479,8 @@ static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
linkmode_zero(caps->link_modes);
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
caps->link_modes);
+ phy_interface_zero(caps->interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, caps->interfaces);
}
#define SFP_QUIRK(_v, _p, _s, _f) \
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e8e6689d2c70..adfc83b7ca6a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -8530,19 +8530,6 @@ static int rtl8152_system_resume(struct r8152 *tp)
usb_submit_urb(tp->intr_urb, GFP_NOIO);
}
- /* If the device is RTL8152_INACCESSIBLE here then we should do a
- * reset. This is important because the usb_lock_device_for_reset()
- * that happens as a result of usb_queue_reset_device() will silently
- * fail if the device was suspended or if too much time passed.
- *
- * NOTE: The device is locked here so we can directly do the reset.
- * We don't need usb_lock_device_for_reset() because that's just a
- * wrapper over device_lock() and device_resume() (which calls us)
- * does that for us.
- */
- if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
- usb_reset_device(tp->udev);
-
return 0;
}
@@ -8653,19 +8640,33 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
static int rtl8152_resume(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
+ bool runtime_resume = test_bit(SELECTIVE_SUSPEND, &tp->flags);
int ret;
mutex_lock(&tp->control);
rtl_reset_ocp_base(tp);
- if (test_bit(SELECTIVE_SUSPEND, &tp->flags))
+ if (runtime_resume)
ret = rtl8152_runtime_resume(tp);
else
ret = rtl8152_system_resume(tp);
mutex_unlock(&tp->control);
+ /* If the device is RTL8152_INACCESSIBLE here then we should do a
+ * reset. This is important because the usb_lock_device_for_reset()
+ * that happens as a result of usb_queue_reset_device() will silently
+ * fail if the device was suspended or if too much time passed.
+ *
+ * NOTE: The device is locked here so we can directly do the reset.
+ * We don't need usb_lock_device_for_reset() because that's just a
+ * wrapper over device_lock() and device_resume() (which calls us)
+ * does that for us.
+ */
+ if (!runtime_resume && test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ usb_reset_device(tp->udev);
+
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
index 3ca3e169738e..743e44ff19cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
@@ -55,8 +55,6 @@ void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
ieee80211_iter_keys(mld->hw, vif, iwl_mld_cleanup_keys_iter, NULL);
- wiphy_delayed_work_cancel(mld->wiphy, &mld_vif->mlo_scan_start_wk);
-
CLEANUP_STRUCT(mld_vif);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
index df8221277d51..3414b04a6953 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -1840,6 +1840,8 @@ static int iwl_mld_move_sta_state_down(struct iwl_mld *mld,
wiphy_work_cancel(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk);
wiphy_delayed_work_cancel(mld->wiphy,
&mld_vif->emlsr.check_tpt_wk);
+ wiphy_delayed_work_cancel(mld->wiphy,
+ &mld_vif->mlo_scan_start_wk);
iwl_mld_reset_cca_40mhz_workaround(mld, vif);
iwl_mld_smps_workaround(mld, vif, true);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 2375fc76039f..6c225861db61 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2026 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -3214,6 +3214,8 @@ void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
IWL_DEBUG_WOWLAN(mvm, "Starting fast suspend flow\n");
+ iwl_mvm_pause_tcm(mvm, true);
+
mvm->fast_resume = true;
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
@@ -3270,6 +3272,8 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
mvm->trans->state = IWL_TRANS_NO_FW;
}
+ iwl_mvm_resume_tcm(mvm);
+
out:
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
mvm->fast_resume = false;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 58f3097888a7..c2bee32332fe 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -806,8 +806,8 @@ static void nvme_unmap_data(struct request *req)
if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len,
map)) {
if (nvme_pci_cmd_use_sgl(&iod->cmd))
- nvme_free_sgls(req, iod->descriptors[0],
- &iod->cmd.common.dptr.sgl, attrs);
+ nvme_free_sgls(req, &iod->cmd.common.dptr.sgl,
+ iod->descriptors[0], attrs);
else
nvme_free_prps(req, attrs);
}
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 8d246b8ca604..0103815542d4 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -180,9 +180,10 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
static void nvmet_bio_done(struct bio *bio)
{
struct nvmet_req *req = bio->bi_private;
+ blk_status_t blk_status = bio->bi_status;
- nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
nvmet_req_bio_put(req, bio);
+ nvmet_req_complete(req, blk_to_nvme_status(req, blk_status));
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 5619ec917858..a2a13617c6f4 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -157,13 +157,19 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
phys_addr_t base, size;
int i, len;
const __be32 *prop;
- bool nomap;
+ bool nomap, default_cma;
prop = of_flat_dt_get_addr_size_prop(node, "reg", &len);
if (!prop)
return -ENOENT;
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+ default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
+
+ if (default_cma && cma_skip_dt_default_reserved_mem()) {
+ pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
+ return -EINVAL;
+ }
for (i = 0; i < len; i++) {
u64 b, s;
@@ -248,10 +254,13 @@ void __init fdt_scan_reserved_mem_reg_nodes(void)
fdt_for_each_subnode(child, fdt, node) {
const char *uname;
+ bool default_cma = of_get_flat_dt_prop(child, "linux,cma-default", NULL);
u64 b, s;
if (!of_fdt_device_is_available(fdt, child))
continue;
+ if (default_cma && cma_skip_dt_default_reserved_mem())
+ continue;
if (!of_flat_dt_get_addr_size(child, "reg", &b, &s))
continue;
@@ -389,7 +398,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
phys_addr_t base = 0, align = 0, size;
int i, len;
const __be32 *prop;
- bool nomap;
+ bool nomap, default_cma;
int ret;
prop = of_get_flat_dt_prop(node, "size", &len);
@@ -413,6 +422,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+ default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
+
+ if (default_cma && cma_skip_dt_default_reserved_mem()) {
+ pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
+ return -EINVAL;
+ }
/* Need adjust the alignment to satisfy the CMA requirement */
if (IS_ENABLED(CONFIG_CMA)
diff --git a/drivers/pci/ide.c b/drivers/pci/ide.c
index f0ef474e1a0d..23f554490539 100644
--- a/drivers/pci/ide.c
+++ b/drivers/pci/ide.c
@@ -11,7 +11,6 @@
#include <linux/pci_regs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
-#include <linux/tsm.h>
#include "pci.h"
@@ -168,7 +167,7 @@ void pci_ide_init(struct pci_dev *pdev)
for (u16 i = 0; i < nr_streams; i++) {
int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);
- pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CAP, &val);
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
if (val & PCI_IDE_SEL_CTL_EN)
continue;
val &= ~PCI_IDE_SEL_CTL_ID;
@@ -283,8 +282,8 @@ struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev)
/* for SR-IOV case, cover all VFs */
num_vf = pci_num_vf(pdev);
if (num_vf)
- rid_end = PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf),
- pci_iov_virtfn_devfn(pdev, num_vf));
+ rid_end = PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf - 1),
+ pci_iov_virtfn_devfn(pdev, num_vf - 1));
else
rid_end = pci_dev_id(pdev);
@@ -373,9 +372,6 @@ void pci_ide_stream_release(struct pci_ide *ide)
if (ide->partner[PCI_IDE_EP].enable)
pci_ide_stream_disable(pdev, ide);
- if (ide->tsm_dev)
- tsm_ide_stream_unregister(ide);
-
if (ide->partner[PCI_IDE_RP].setup)
pci_ide_stream_teardown(rp, ide);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index e44ef262beec..2fc67aeafdb3 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -3545,10 +3545,9 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
-static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range,
- unsigned offset,
- bool input)
+static int rockchip_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
{
struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct rockchip_pin_bank *bank;
@@ -3562,7 +3561,7 @@ static const struct pinmux_ops rockchip_pmx_ops = {
.get_function_name = rockchip_pmx_get_func_name,
.get_function_groups = rockchip_pmx_get_groups,
.set_mux = rockchip_pmx_set,
- .gpio_set_direction = rockchip_pmx_gpio_set_direction,
+ .gpio_request_enable = rockchip_pmx_gpio_request_enable,
};
/*
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index 404e62ad293a..ed285afaf9b0 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -302,6 +302,13 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"),
}
},
+ {
+ .ident = "MECHREVO Wujie 15X Pro",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "WUJIE Series-X5SP4NAG"),
+ }
+ },
{}
};
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 6b1b8e444e24..74d3eb83f56a 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -207,7 +207,12 @@ static ssize_t cmpc_accel_sensitivity_show_v4(struct device *dev,
acpi = to_acpi_device(dev);
inputdev = dev_get_drvdata(&acpi->dev);
+ if (!inputdev)
+ return -ENXIO;
+
accel = dev_get_drvdata(&inputdev->dev);
+ if (!accel)
+ return -ENXIO;
return sysfs_emit(buf, "%d\n", accel->sensitivity);
}
@@ -224,7 +229,12 @@ static ssize_t cmpc_accel_sensitivity_store_v4(struct device *dev,
acpi = to_acpi_device(dev);
inputdev = dev_get_drvdata(&acpi->dev);
+ if (!inputdev)
+ return -ENXIO;
+
accel = dev_get_drvdata(&inputdev->dev);
+ if (!accel)
+ return -ENXIO;
r = kstrtoul(buf, 0, &sensitivity);
if (r)
@@ -256,7 +266,12 @@ static ssize_t cmpc_accel_g_select_show_v4(struct device *dev,
acpi = to_acpi_device(dev);
inputdev = dev_get_drvdata(&acpi->dev);
+ if (!inputdev)
+ return -ENXIO;
+
accel = dev_get_drvdata(&inputdev->dev);
+ if (!accel)
+ return -ENXIO;
return sysfs_emit(buf, "%d\n", accel->g_select);
}
@@ -273,7 +288,12 @@ static ssize_t cmpc_accel_g_select_store_v4(struct device *dev,
acpi = to_acpi_device(dev);
inputdev = dev_get_drvdata(&acpi->dev);
+ if (!inputdev)
+ return -ENXIO;
+
accel = dev_get_drvdata(&inputdev->dev);
+ if (!accel)
+ return -ENXIO;
r = kstrtoul(buf, 0, &g_select);
if (r)
@@ -302,6 +322,8 @@ static int cmpc_accel_open_v4(struct input_dev *input)
acpi = to_acpi_device(input->dev.parent);
accel = dev_get_drvdata(&input->dev);
+ if (!accel)
+ return -ENXIO;
cmpc_accel_set_sensitivity_v4(acpi->handle, accel->sensitivity);
cmpc_accel_set_g_select_v4(acpi->handle, accel->g_select);
@@ -549,7 +571,12 @@ static ssize_t cmpc_accel_sensitivity_show(struct device *dev,
acpi = to_acpi_device(dev);
inputdev = dev_get_drvdata(&acpi->dev);
+ if (!inputdev)
+ return -ENXIO;
+
accel = dev_get_drvdata(&inputdev->dev);
+ if (!accel)
+ return -ENXIO;
return sysfs_emit(buf, "%d\n", accel->sensitivity);
}
@@ -566,7 +593,12 @@ static ssize_t cmpc_accel_sensitivity_store(struct device *dev,
acpi = to_acpi_device(dev);
inputdev = dev_get_drvdata(&acpi->dev);
+ if (!inputdev)
+ return -ENXIO;
+
accel = dev_get_drvdata(&inputdev->dev);
+ if (!accel)
+ return -ENXIO;
r = kstrtoul(buf, 0, &sensitivity);
if (r)
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
index dbe096eefa75..51e8977d3eb4 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
@@ -696,6 +696,11 @@ static int hp_init_bios_package_attribute(enum hp_wmi_data_type attr_type,
return ret;
}
+ if (!str_value || !str_value[0]) {
+ pr_debug("Ignoring attribute with empty name\n");
+ goto pack_attr_exit;
+ }
+
/* All duplicate attributes found are ignored */
duplicate = kset_find_obj(temp_kset, str_value);
if (duplicate) {
diff --git a/drivers/platform/x86/intel/plr_tpmi.c b/drivers/platform/x86/intel/plr_tpmi.c
index 58132da47745..05727169f49c 100644
--- a/drivers/platform/x86/intel/plr_tpmi.c
+++ b/drivers/platform/x86/intel/plr_tpmi.c
@@ -316,7 +316,7 @@ static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxilia
snprintf(name, sizeof(name), "domain%d", i);
dentry = debugfs_create_dir(name, plr->dbgfs_dir);
- debugfs_create_file("status", 0444, dentry, &plr->die_info[i],
+ debugfs_create_file("status", 0644, dentry, &plr->die_info[i],
&plr_status_fops);
}
diff --git a/drivers/platform/x86/intel/telemetry/debugfs.c b/drivers/platform/x86/intel/telemetry/debugfs.c
index 70e5736c44c7..189c61ff7ff0 100644
--- a/drivers/platform/x86/intel/telemetry/debugfs.c
+++ b/drivers/platform/x86/intel/telemetry/debugfs.c
@@ -449,7 +449,7 @@ static int telem_pss_states_show(struct seq_file *s, void *unused)
for (index = 0; index < debugfs_conf->pss_ltr_evts; index++) {
seq_printf(s, "%-32s\t%u\n",
debugfs_conf->pss_ltr_data[index].name,
- pss_s0ix_wakeup[index]);
+ pss_ltr_blkd[index]);
}
seq_puts(s, "\n--------------------------------------\n");
@@ -459,7 +459,7 @@ static int telem_pss_states_show(struct seq_file *s, void *unused)
for (index = 0; index < debugfs_conf->pss_wakeup_evts; index++) {
seq_printf(s, "%-32s\t%u\n",
debugfs_conf->pss_wakeup[index].name,
- pss_ltr_blkd[index]);
+ pss_s0ix_wakeup[index]);
}
return 0;
diff --git a/drivers/platform/x86/intel/telemetry/pltdrv.c b/drivers/platform/x86/intel/telemetry/pltdrv.c
index f23c170a55dc..d9aa349f81e4 100644
--- a/drivers/platform/x86/intel/telemetry/pltdrv.c
+++ b/drivers/platform/x86/intel/telemetry/pltdrv.c
@@ -610,7 +610,7 @@ static int telemetry_setup(struct platform_device *pdev)
/* Get telemetry Info */
events = (read_buf & TELEM_INFO_SRAMEVTS_MASK) >>
TELEM_INFO_SRAMEVTS_SHIFT;
- event_regs = read_buf & TELEM_INFO_SRAMEVTS_MASK;
+ event_regs = read_buf & TELEM_INFO_NENABLES_MASK;
if ((events < TELEM_MAX_EVENTS_SRAM) ||
(event_regs < TELEM_MAX_EVENTS_SRAM)) {
dev_err(&pdev->dev, "PSS:Insufficient Space for SRAM Trace\n");
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index ecfc7703f201..012d87878afd 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -766,6 +766,7 @@ static const struct intel_vsec_platform_info lnl_info = {
#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d
#define PCI_DEVICE_ID_INTEL_VSEC_PTL 0xb07d
#define PCI_DEVICE_ID_INTEL_VSEC_WCL 0xfd7d
+#define PCI_DEVICE_ID_INTEL_VSEC_NVL 0xd70d
static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
@@ -778,6 +779,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_PTL, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_WCL, &mtl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_NVL, &mtl_info) },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index f92e89c75db9..61ef7a218a80 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -838,8 +838,17 @@ static int acpi_add(struct acpi_device *device)
case 'P':
year = 2021;
break;
- default:
+ case 'Q':
year = 2022;
+ break;
+ case 'R':
+ year = 2023;
+ break;
+ case 'S':
+ year = 2024;
+ break;
+ default:
+ year = 2025;
}
break;
default:
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 255317e6fec8..937f1a5b78ed 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -1089,7 +1089,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pcc->platform)) {
result = PTR_ERR(pcc->platform);
- goto out_backlight;
+ goto out_sysfs;
}
result = device_create_file(&pcc->platform->dev,
&dev_attr_cdpower);
@@ -1105,6 +1105,8 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
out_platform:
platform_device_unregister(pcc->platform);
+out_sysfs:
+ sysfs_remove_group(&device->dev.kobj, &pcc_attr_group);
out_backlight:
backlight_device_unregister(pcc->backlight);
out_input:
diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c
index 03dfddeee0c0..e9324bf16aea 100644
--- a/drivers/platform/x86/toshiba_haps.c
+++ b/drivers/platform/x86/toshiba_haps.c
@@ -183,7 +183,7 @@ static int toshiba_haps_add(struct acpi_device *acpi_dev)
pr_info("Toshiba HDD Active Protection Sensor device\n");
- haps = kzalloc(sizeof(struct toshiba_haps_dev), GFP_KERNEL);
+ haps = devm_kzalloc(&acpi_dev->dev, sizeof(*haps), GFP_KERNEL);
if (!haps)
return -ENOMEM;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 4e899ec1477d..b1cba986f0fb 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1025,6 +1025,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
&nonemb_cmd->dma,
GFP_KERNEL);
if (!nonemb_cmd->va) {
+ free_mcc_wrb(ctrl, tag);
mutex_unlock(&ctrl->mbox_lock);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 16a44c0917e1..e939bc88e151 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4489,7 +4489,7 @@ fail_lsrjt:
fail_elsrej:
dma_pool_destroy(ha->purex_dma_pool);
fail_flt:
- dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
ha->flt, ha->flt_dma);
fail_flt_buffer:
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index fef840b54574..c18a0c946f76 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -396,7 +396,7 @@ EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host);
*/
bool qcom_smem_is_available(void)
{
- return !!__smem;
+ return !IS_ERR(__smem);
}
EXPORT_SYMBOL_GPL(qcom_smem_is_available);
@@ -1247,7 +1247,8 @@ static void qcom_smem_remove(struct platform_device *pdev)
{
platform_device_unregister(__smem->socinfo);
- __smem = NULL;
+ /* Set to -EPROBE_DEFER to signal unprobed state */
+ __smem = ERR_PTR(-EPROBE_DEFER);
}
static const struct of_device_id qcom_smem_of_match[] = {
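
The two smem hunks above replace the NULL sentinel for __smem with an ERR_PTR() one, on the assumption (not visible in this hunk) that __smem is also initialized to the same error value at its declaration. A userspace sketch of the idiom, with simplified err_ptr()/is_err() reimplementations and EPROBE_DEFER_DEMO as a made-up stand-in for the kernel's EPROBE_DEFER:

/*
 * Sketch only: small negative errno values are encoded in the pointer
 * itself, so a single global can distinguish "not probed / removed"
 * from a valid object without a separate flag.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095
#define EPROBE_DEFER_DEMO 517	/* stand-in; the real constant is kernel-only */

static void *err_ptr(long err)     { return (void *)err; }
static int   is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

static void *smem_demo;	/* plays the role of __smem */

static int demo_is_available(void)
{
	return !is_err(smem_demo);	/* mirrors the !IS_ERR(__smem) check */
}

int main(void)
{
	int obj = 42;

	smem_demo = err_ptr(-EPROBE_DEFER_DEMO);	/* unprobed / removed */
	printf("available after remove: %d\n", demo_is_available());

	smem_demo = &obj;				/* probed */
	printf("available after probe:  %d\n", demo_is_available());
	return 0;
}
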
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 9f167ff8da7b..09120a538a40 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1960,12 +1960,12 @@ static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
container_of(wwn, struct sbp_tport, tport_wwn);
struct sbp_tpg *tpg;
- unsigned long tpgt;
+ u16 tpgt;
int ret;
if (strstr(name, "tpgt_") != name)
return ERR_PTR(-EINVAL);
- if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+ if (kstrtou16(name + 5, 10, &tpgt))
return ERR_PTR(-EINVAL);
if (tport->tpg) {
diff --git a/drivers/ufs/host/ufs-amd-versal2.c b/drivers/ufs/host/ufs-amd-versal2.c
index 40543db621a1..6c454ae8a9c8 100644
--- a/drivers/ufs/host/ufs-amd-versal2.c
+++ b/drivers/ufs/host/ufs-amd-versal2.c
@@ -367,7 +367,7 @@ static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba,
{
int ret = 0;
- if (status == PRE_CHANGE) {
+ if (status == POST_CHANGE) {
ret = ufs_versal2_phy_init(hba);
if (ret)
dev_err(hba->dev, "Phy init failed (%d)\n", ret);
diff --git a/drivers/virt/coco/tsm-core.c b/drivers/virt/coco/tsm-core.c
index f027876a2f19..8712df8596a1 100644
--- a/drivers/virt/coco/tsm-core.c
+++ b/drivers/virt/coco/tsm-core.c
@@ -4,16 +4,12 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/tsm.h>
-#include <linux/pci.h>
-#include <linux/rwsem.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/cleanup.h>
#include <linux/pci-tsm.h>
-#include <linux/pci-ide.h>
static struct class *tsm_class;
-static DECLARE_RWSEM(tsm_rwsem);
static DEFINE_IDA(tsm_ida);
static int match_id(struct device *dev, const void *data)
@@ -108,32 +104,6 @@ void tsm_unregister(struct tsm_dev *tsm_dev)
}
EXPORT_SYMBOL_GPL(tsm_unregister);
-/* must be invoked between tsm_register / tsm_unregister */
-int tsm_ide_stream_register(struct pci_ide *ide)
-{
- struct pci_dev *pdev = ide->pdev;
- struct pci_tsm *tsm = pdev->tsm;
- struct tsm_dev *tsm_dev = tsm->tsm_dev;
- int rc;
-
- rc = sysfs_create_link(&tsm_dev->dev.kobj, &pdev->dev.kobj, ide->name);
- if (rc)
- return rc;
-
- ide->tsm_dev = tsm_dev;
- return 0;
-}
-EXPORT_SYMBOL_GPL(tsm_ide_stream_register);
-
-void tsm_ide_stream_unregister(struct pci_ide *ide)
-{
- struct tsm_dev *tsm_dev = ide->tsm_dev;
-
- ide->tsm_dev = NULL;
- sysfs_remove_link(&tsm_dev->dev.kobj, ide->name);
-}
-EXPORT_SYMBOL_GPL(tsm_ide_stream_unregister);
-
static void tsm_release(struct device *dev)
{
struct tsm_dev *tsm_dev = container_of(dev, typeof(*tsm_dev), dev);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index f38d8305e46d..baadaaa189c0 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -150,6 +150,7 @@ static void scrub_rbio_work_locked(struct work_struct *work);
static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
{
bitmap_free(rbio->error_bitmap);
+ bitmap_free(rbio->stripe_uptodate_bitmap);
kfree(rbio->stripe_pages);
kfree(rbio->bio_paddrs);
kfree(rbio->stripe_paddrs);
diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c
index 6edc10958ecf..70e13db260db 100644
--- a/fs/efivarfs/vars.c
+++ b/fs/efivarfs/vars.c
@@ -552,7 +552,7 @@ int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
err = __efivar_entry_get(entry, attributes, size, data);
efivar_unlock();
- return 0;
+ return err;
}
/**
diff --git a/fs/smb/client/cifstransport.c b/fs/smb/client/cifstransport.c
index 28d1cee90625..98287132626e 100644
--- a/fs/smb/client/cifstransport.c
+++ b/fs/smb/client/cifstransport.c
@@ -251,13 +251,15 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_send_recv(xid, ses, ses->server,
&rqst, &resp_buf_type, flags, &resp_iov);
if (rc < 0)
- return rc;
+ goto out;
if (out_buf) {
*pbytes_returned = resp_iov.iov_len;
if (resp_iov.iov_len)
memcpy(out_buf, resp_iov.iov_base, resp_iov.iov_len);
}
+
+out:
free_rsp_buf(resp_buf_type, resp_iov.iov_base);
return rc;
}
diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c
index 7f11ae6bb785..2dd08388ea87 100644
--- a/fs/smb/client/smb2file.c
+++ b/fs/smb/client/smb2file.c
@@ -178,6 +178,7 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
&err_buftype);
if (rc == -EACCES && retry_without_read_attributes) {
+ free_rsp_buf(err_buftype, err_iov.iov_base);
oparms->desired_access &= ~FILE_READ_ATTRIBUTES;
rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
&err_buftype);
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 62d9c1cf6326..2e6931735880 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -57,6 +57,15 @@ extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long e
extern void cma_reserve_pages_on_error(struct cma *cma);
+#ifdef CONFIG_DMA_CMA
+extern bool cma_skip_dt_default_reserved_mem(void);
+#else
+static inline bool cma_skip_dt_default_reserved_mem(void)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_CMA
struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
bool cma_free_folio(struct cma *cma, const struct folio *folio);
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 9c6ac4b62eb9..338a1921a50a 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -641,6 +641,17 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
__kasan_unpoison_vmap_areas(vms, nr_vms, flags);
}
+void __kasan_vrealloc(const void *start, unsigned long old_size,
+ unsigned long new_size);
+
+static __always_inline void kasan_vrealloc(const void *start,
+ unsigned long old_size,
+ unsigned long new_size)
+{
+ if (kasan_enabled())
+ __kasan_vrealloc(start, old_size, new_size);
+}
+
#else /* CONFIG_KASAN_VMALLOC */
static inline void kasan_populate_early_vm_area_shadow(void *start,
@@ -670,6 +681,9 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
kasan_vmalloc_flags_t flags)
{ }
+static inline void kasan_vrealloc(const void *start, unsigned long old_size,
+ unsigned long new_size) { }
+
#endif /* CONFIG_KASAN_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
index cc74de3dbcfe..c328a7b356d0 100644
--- a/include/linux/memfd.h
+++ b/include/linux/memfd.h
@@ -17,6 +17,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
* to by vm_flags_ptr.
*/
int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr);
+struct file *memfd_alloc_file(const char *name, unsigned int flags);
#else
static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
@@ -31,6 +32,11 @@ static inline int memfd_check_seals_mmap(struct file *file,
{
return 0;
}
+
+static inline struct file *memfd_alloc_file(const char *name, unsigned int flags)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif
#endif /* __LINUX_MEMFD_H */
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 713ec0435b48..e3c2ccf872a8 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -224,7 +224,8 @@ static inline bool is_fsdax_page(const struct page *page)
}
#ifdef CONFIG_ZONE_DEVICE
-void zone_device_page_init(struct page *page, unsigned int order);
+void zone_device_page_init(struct page *page, struct dev_pagemap *pgmap,
+ unsigned int order);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
@@ -234,9 +235,11 @@ bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long memremap_compat_align(void);
-static inline void zone_device_folio_init(struct folio *folio, unsigned int order)
+static inline void zone_device_folio_init(struct folio *folio,
+ struct dev_pagemap *pgmap,
+ unsigned int order)
{
- zone_device_page_init(&folio->page, order);
+ zone_device_page_init(&folio->page, pgmap, order);
if (order)
folio_set_large_rmappable(folio);
}
diff --git a/include/linux/pci-ide.h b/include/linux/pci-ide.h
index 37a1ad9501b0..ae07d9f699c0 100644
--- a/include/linux/pci-ide.h
+++ b/include/linux/pci-ide.h
@@ -26,7 +26,7 @@ enum pci_ide_partner_select {
/**
* struct pci_ide_partner - Per port pair Selective IDE Stream settings
* @rid_start: Partner Port Requester ID range start
- * @rid_end: Partner Port Requester ID range end
+ * @rid_end: Partner Port Requester ID range end (inclusive)
* @stream_index: Selective IDE Stream Register Block selection
* @mem_assoc: PCI bus memory address association for targeting peer partner
* @pref_assoc: PCI bus prefetchable memory address association for
@@ -82,7 +82,6 @@ struct pci_ide_regs {
* @host_bridge_stream: allocated from host bridge @ide_stream_ida pool
* @stream_id: unique Stream ID (within Partner Port pairing)
* @name: name of the established Selective IDE Stream in sysfs
- * @tsm_dev: For TSM established IDE, the TSM device context
*
* Negative @stream_id values indicate "uninitialized" on the
* expectation that with TSM established IDE the TSM owns the stream_id
@@ -94,7 +93,6 @@ struct pci_ide {
u8 host_bridge_stream;
int stream_id;
const char *name;
- struct tsm_dev *tsm_dev;
};
/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index da0133524d08..5f00b5ed0f3b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1776,6 +1776,11 @@ static __always_inline bool is_percpu_thread(void)
(current->nr_cpus_allowed == 1);
}
+static __always_inline bool is_user_task(struct task_struct *task)
+{
+ return task->mm && !(task->flags & (PF_KTHREAD | PF_USER_WORKER));
+}
+
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ef56dc6318d3..daa4e4944ce3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4302,6 +4302,18 @@ skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
skb_headlen(skb), buffer);
}
+/* Variant of skb_header_pointer() where @offset is user-controlled
+ * and potentially negative.
+ */
+static inline void * __must_check
+skb_header_pointer_careful(const struct sk_buff *skb, int offset,
+ int len, void *buffer)
+{
+ if (unlikely(offset < 0 && -offset > skb_headroom(skb)))
+ return NULL;
+ return skb_header_pointer(skb, offset, len, buffer);
+}
+
static inline void * __must_check
skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len)
{
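
skb_header_pointer_careful() above only adds the negative-offset guard; the positive-side bounds stay with skb_header_pointer(). A userspace sketch of the same idea, where demo_buf, its sizes, and the explicit positive-side check are illustrative assumptions rather than the kernel's layout:

/*
 * Sketch only: a caller-supplied offset may be negative (reaching into
 * headroom before the current data pointer), so reject it when it goes
 * further back than the headroom that actually exists.
 */
#include <stdio.h>

struct demo_buf {
	unsigned char storage[256];
	size_t headroom;	/* bytes reserved before "data" */
	size_t len;		/* valid bytes at and after "data" */
};

static void *demo_pointer_careful(struct demo_buf *b, int offset, size_t len)
{
	unsigned char *data = b->storage + b->headroom;

	/* Negative offsets may only reach back into existing headroom. */
	if (offset < 0 && (size_t)-offset > b->headroom)
		return NULL;
	/* Positive side must stay within the valid payload. */
	if (offset >= 0 && (size_t)offset + len > b->len)
		return NULL;
	return data + offset;
}

int main(void)
{
	struct demo_buf b = { .headroom = 16, .len = 64 };

	printf("offset -8:  %s\n", demo_pointer_careful(&b, -8, 4)  ? "ok" : "rejected");
	printf("offset -32: %s\n", demo_pointer_careful(&b, -32, 4) ? "ok" : "rejected");
	return 0;
}
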
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
index a3b7ab668eff..22e05b2aac69 100644
--- a/include/linux/tsm.h
+++ b/include/linux/tsm.h
@@ -123,7 +123,4 @@ int tsm_report_unregister(const struct tsm_report_ops *ops);
struct tsm_dev *tsm_register(struct device *parent, struct pci_tsm_ops *ops);
void tsm_unregister(struct tsm_dev *tsm_dev);
struct tsm_dev *find_tsm_dev(int id);
-struct pci_ide;
-int tsm_ide_stream_register(struct pci_ide *ide);
-void tsm_ide_stream_unregister(struct pci_ide *ide);
#endif /* __TSM_H */
diff --git a/kernel/cgroup/dmem.c b/kernel/cgroup/dmem.c
index e12b946278b6..1ea6afffa985 100644
--- a/kernel/cgroup/dmem.c
+++ b/kernel/cgroup/dmem.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/page_counter.h>
#include <linux/parser.h>
+#include <linux/refcount.h>
#include <linux/rculist.h>
#include <linux/slab.h>
@@ -71,7 +72,9 @@ struct dmem_cgroup_pool_state {
struct rcu_head rcu;
struct page_counter cnt;
+ struct dmem_cgroup_pool_state *parent;
+ refcount_t ref;
bool inited;
};
@@ -88,6 +91,9 @@ struct dmem_cgroup_pool_state {
static DEFINE_SPINLOCK(dmemcg_lock);
static LIST_HEAD(dmem_cgroup_regions);
+static void dmemcg_free_region(struct kref *ref);
+static void dmemcg_pool_free_rcu(struct rcu_head *rcu);
+
static inline struct dmemcg_state *
css_to_dmemcs(struct cgroup_subsys_state *css)
{
@@ -104,10 +110,38 @@ static struct dmemcg_state *parent_dmemcs(struct dmemcg_state *cg)
return cg->css.parent ? css_to_dmemcs(cg->css.parent) : NULL;
}
+static void dmemcg_pool_get(struct dmem_cgroup_pool_state *pool)
+{
+ refcount_inc(&pool->ref);
+}
+
+static bool dmemcg_pool_tryget(struct dmem_cgroup_pool_state *pool)
+{
+ return refcount_inc_not_zero(&pool->ref);
+}
+
+static void dmemcg_pool_put(struct dmem_cgroup_pool_state *pool)
+{
+ if (!refcount_dec_and_test(&pool->ref))
+ return;
+
+ call_rcu(&pool->rcu, dmemcg_pool_free_rcu);
+}
+
+static void dmemcg_pool_free_rcu(struct rcu_head *rcu)
+{
+ struct dmem_cgroup_pool_state *pool = container_of(rcu, typeof(*pool), rcu);
+
+ if (pool->parent)
+ dmemcg_pool_put(pool->parent);
+ kref_put(&pool->region->ref, dmemcg_free_region);
+ kfree(pool);
+}
+
static void free_cg_pool(struct dmem_cgroup_pool_state *pool)
{
list_del(&pool->region_node);
- kfree(pool);
+ dmemcg_pool_put(pool);
}
static void
@@ -342,6 +376,12 @@ alloc_pool_single(struct dmemcg_state *dmemcs, struct dmem_cgroup_region *region
page_counter_init(&pool->cnt,
ppool ? &ppool->cnt : NULL, true);
reset_all_resource_limits(pool);
+ refcount_set(&pool->ref, 1);
+ kref_get(&region->ref);
+ if (ppool && !pool->parent) {
+ pool->parent = ppool;
+ dmemcg_pool_get(ppool);
+ }
list_add_tail_rcu(&pool->css_node, &dmemcs->pools);
list_add_tail(&pool->region_node, &region->pools);
@@ -389,6 +429,10 @@ get_cg_pool_locked(struct dmemcg_state *dmemcs, struct dmem_cgroup_region *regio
/* Fix up parent links, mark as inited. */
pool->cnt.parent = &ppool->cnt;
+ if (ppool && !pool->parent) {
+ pool->parent = ppool;
+ dmemcg_pool_get(ppool);
+ }
pool->inited = true;
pool = ppool;
@@ -423,7 +467,7 @@ static void dmemcg_free_region(struct kref *ref)
*/
void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region)
{
- struct list_head *entry;
+ struct dmem_cgroup_pool_state *pool, *next;
if (!region)
return;
@@ -433,11 +477,10 @@ void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region)
/* Remove from global region list */
list_del_rcu(&region->region_node);
- list_for_each_rcu(entry, &region->pools) {
- struct dmem_cgroup_pool_state *pool =
- container_of(entry, typeof(*pool), region_node);
-
+ list_for_each_entry_safe(pool, next, &region->pools, region_node) {
list_del_rcu(&pool->css_node);
+ list_del(&pool->region_node);
+ dmemcg_pool_put(pool);
}
/*
@@ -518,8 +561,10 @@ static struct dmem_cgroup_region *dmemcg_get_region_by_name(const char *name)
*/
void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool)
{
- if (pool)
+ if (pool) {
css_put(&pool->cs->css);
+ dmemcg_pool_put(pool);
+ }
}
EXPORT_SYMBOL_GPL(dmem_cgroup_pool_state_put);
@@ -533,6 +578,8 @@ get_cg_pool_unlocked(struct dmemcg_state *cg, struct dmem_cgroup_region *region)
pool = find_cg_pool_locked(cg, region);
if (pool && !READ_ONCE(pool->inited))
pool = NULL;
+ if (pool && !dmemcg_pool_tryget(pool))
+ pool = NULL;
rcu_read_unlock();
while (!pool) {
@@ -541,6 +588,8 @@ get_cg_pool_unlocked(struct dmemcg_state *cg, struct dmem_cgroup_region *region)
pool = get_cg_pool_locked(cg, region, &allocpool);
else
pool = ERR_PTR(-ENODEV);
+ if (!IS_ERR(pool))
+ dmemcg_pool_get(pool);
spin_unlock(&dmemcg_lock);
if (pool == ERR_PTR(-ENOMEM)) {
@@ -576,6 +625,7 @@ void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size)
page_counter_uncharge(&pool->cnt, size);
css_put(&pool->cs->css);
+ dmemcg_pool_put(pool);
}
EXPORT_SYMBOL_GPL(dmem_cgroup_uncharge);
@@ -627,7 +677,9 @@ int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size,
if (ret_limit_pool) {
*ret_limit_pool = container_of(fail, struct dmem_cgroup_pool_state, cnt);
css_get(&(*ret_limit_pool)->cs->css);
+ dmemcg_pool_get(*ret_limit_pool);
}
+ dmemcg_pool_put(pool);
ret = -EAGAIN;
goto err;
}
@@ -700,6 +752,9 @@ static ssize_t dmemcg_limit_write(struct kernfs_open_file *of,
if (!region_name[0])
continue;
+ if (!options || !*options)
+ return -EINVAL;
+
rcu_read_lock();
region = dmemcg_get_region_by_name(region_name);
rcu_read_unlock();
@@ -719,6 +774,7 @@ static ssize_t dmemcg_limit_write(struct kernfs_open_file *of,
/* And commit */
apply(pool, new_limit);
+ dmemcg_pool_put(pool);
out_put:
kref_put(&region->ref, dmemcg_free_region);
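
The dmem changes above pin each pool with a refcount, make children pin their parents, and defer the actual free through call_rcu(). A minimal userspace sketch of that get/tryget/put pattern using C11 atomics, with an immediate free() standing in for the RCU-deferred free and all names (demo_pool, pool_get, ...) invented for illustration:

/* Sketch only: not the kernel's refcount_t or RCU machinery. */
#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct demo_pool {
	atomic_uint ref;
	struct demo_pool *parent;
};

static void pool_get(struct demo_pool *p)
{
	atomic_fetch_add(&p->ref, 1);
}

static int pool_tryget(struct demo_pool *p)
{
	unsigned int old = atomic_load(&p->ref);

	/* Mirrors refcount_inc_not_zero(): never resurrect a zero ref. */
	while (old) {
		if (atomic_compare_exchange_weak(&p->ref, &old, old + 1))
			return 1;
	}
	return 0;
}

static void pool_put(struct demo_pool *p)
{
	if (atomic_fetch_sub(&p->ref, 1) != 1)
		return;
	/* The kernel code defers this via call_rcu(); freed directly here. */
	if (p->parent)
		pool_put(p->parent);
	free(p);
}

static struct demo_pool *pool_alloc(struct demo_pool *parent)
{
	struct demo_pool *p = calloc(1, sizeof(*p));

	if (!p)
		abort();
	atomic_init(&p->ref, 1);
	p->parent = parent;
	if (parent)
		pool_get(parent);	/* child pins its parent */
	return p;
}

int main(void)
{
	struct demo_pool *root = pool_alloc(NULL);
	struct demo_pool *child = pool_alloc(root);

	pool_put(root);		/* root stays alive, pinned by child */
	printf("tryget on live child: %d\n", pool_tryget(child));
	pool_put(child);	/* drops the extra ref from tryget */
	pool_put(child);	/* final put frees child, then root */
	return 0;
}
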
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index d8fd6f779f79..0e266979728b 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -91,6 +91,16 @@ static int __init early_cma(char *p)
}
early_param("cma", early_cma);
+/*
+ * cma_skip_dt_default_reserved_mem - This is called from the
+ * reserved_mem framework to detect if the default cma region is being
+ * set by the "cma=" kernel parameter.
+ */
+bool __init cma_skip_dt_default_reserved_mem(void)
+{
+ return size_cmdline != -1;
+}
+
#ifdef CONFIG_DMA_NUMA_CMA
static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
@@ -470,12 +480,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
struct cma *cma;
int err;
- if (size_cmdline != -1 && default_cma) {
- pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
- rmem->name);
- return -EBUSY;
- }
-
if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
of_get_flat_dt_prop(node, "no-map", NULL))
return -EINVAL;
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index c5da29ad010c..2b2fbb709242 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -277,15 +277,20 @@ struct page *dma_alloc_from_pool(struct device *dev, size_t size,
{
struct gen_pool *pool = NULL;
struct page *page;
+ bool pool_found = false;
while ((pool = dma_guess_pool(pool, gfp))) {
+ pool_found = true;
page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
phys_addr_ok);
if (page)
return page;
}
- WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
+ if (pool_found)
+ WARN(!(gfp & __GFP_NOWARN), "DMA pool exhausted for %s\n", dev_name(dev));
+ else
+ WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
return NULL;
}
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 1f6589578703..9d24b6e0c91f 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -246,7 +246,7 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
if (user && !crosstask) {
if (!user_mode(regs)) {
- if (current->flags & (PF_KTHREAD | PF_USER_WORKER))
+ if (!is_user_task(current))
goto exit_put;
regs = task_pt_regs(current);
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a0fa488bce84..8cca80094624 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7460,7 +7460,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user,
if (user_mode(regs)) {
regs_user->abi = perf_reg_abi(current);
regs_user->regs = regs;
- } else if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) {
+ } else if (is_user_task(current)) {
perf_get_regs_user(regs_user, regs);
} else {
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
@@ -8100,7 +8100,7 @@ static u64 perf_virt_to_phys(u64 virt)
* Try IRQ-safe get_user_page_fast_only first.
* If failed, leave phys_addr as 0.
*/
- if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) {
+ if (is_user_task(current)) {
struct page *p;
pagefault_disable();
@@ -8215,7 +8215,7 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
bool kernel = !event->attr.exclude_callchain_kernel;
bool user = !event->attr.exclude_callchain_user &&
- !(current->flags & (PF_KTHREAD | PF_USER_WORKER));
+ is_user_task(current);
/* Disallow cross-task user callchains. */
bool crosstask = event->ctx->task && event->ctx->task != current;
bool defer_user = IS_ENABLED(CONFIG_UNWIND_USER) && user &&
diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c
index d4482b6e3cae..90d411a59f76 100644
--- a/kernel/liveupdate/kexec_handover.c
+++ b/kernel/liveupdate/kexec_handover.c
@@ -255,6 +255,14 @@ static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
if (is_folio && info.order)
prep_compound_page(page, info.order);
+ /* Always mark headpage's codetag as empty to avoid accounting mismatch */
+ clear_page_tag_ref(page);
+ if (!is_folio) {
+ /* Also do that for the non-compound tail pages */
+ for (unsigned int i = 1; i < nr_pages; i++)
+ clear_page_tag_ref(page + i);
+ }
+
adjust_managed_page_count(page, nr_pages);
return page;
}
@@ -1006,8 +1014,10 @@ int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation)
chunk->phys[idx++] = phys;
if (idx == ARRAY_SIZE(chunk->phys)) {
chunk = new_vmalloc_chunk(chunk);
- if (!chunk)
+ if (!chunk) {
+ err = -ENOMEM;
goto err_free;
+ }
idx = 0;
}
}
diff --git a/kernel/liveupdate/luo_file.c b/kernel/liveupdate/luo_file.c
index a32a777f6df8..9f7283379ebc 100644
--- a/kernel/liveupdate/luo_file.c
+++ b/kernel/liveupdate/luo_file.c
@@ -402,8 +402,6 @@ static void luo_file_unfreeze_one(struct luo_file_set *file_set,
luo_file->fh->ops->unfreeze(&args);
}
-
- luo_file->serialized_data = 0;
}
static void __luo_file_unfreeze(struct luo_file_set *file_set,
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index c509f2e7d69d..7bcde7114f1b 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1034,6 +1034,12 @@ static void update_dl_entity(struct sched_dl_entity *dl_se)
return;
}
+ /*
+ * When [4] D->A is followed by [1] A->B, dl_defer_running
+ * needs to be cleared, otherwise it will fail to properly
+ * start the zero-laxity timer.
+ */
+ dl_se->dl_defer_running = 0;
replenish_dl_new_period(dl_se, rq);
} else if (dl_server(dl_se) && dl_se->dl_defer) {
/*
@@ -1655,6 +1661,12 @@ void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
* dl_server_active = 1;
* enqueue_dl_entity()
* update_dl_entity(WAKEUP)
+ * if (dl_time_before() || dl_entity_overflow())
+ * dl_defer_running = 0;
+ * replenish_dl_new_period();
+ * // fwd period
+ * dl_throttled = 1;
+ * dl_defer_armed = 1;
* if (!dl_defer_running)
* dl_defer_armed = 1;
* dl_throttled = 1;
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index afe28c04d5aa..0bb8fa927e9e 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -194,6 +194,7 @@ MODULE_PARM_DESC(bypass_lb_intv_us, "bypass load balance interval in microsecond
#include <trace/events/sched_ext.h>
static void process_ddsp_deferred_locals(struct rq *rq);
+static bool task_dead_and_done(struct task_struct *p);
static u32 reenq_local(struct rq *rq);
static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags);
static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,
@@ -2619,6 +2620,9 @@ static void set_cpus_allowed_scx(struct task_struct *p,
set_cpus_allowed_common(p, ac);
+ if (task_dead_and_done(p))
+ return;
+
/*
* The effective cpumask is stored in @p->cpus_ptr which may temporarily
* differ from the configured one in @p->cpus_mask. Always tell the bpf
@@ -3034,10 +3038,45 @@ void scx_cancel_fork(struct task_struct *p)
percpu_up_read(&scx_fork_rwsem);
}
+/**
+ * task_dead_and_done - Is a task dead and done running?
+ * @p: target task
+ *
+ * Once sched_ext_dead() removes the dead task from scx_tasks and exits it, the
+ * task no longer exists from SCX's POV. However, certain sched_class ops may be
+ * invoked on these dead tasks, leading to failures - e.g. sched_setscheduler()
+ * may try to switch a task which finished sched_ext_dead() back into SCX,
+ * triggering invalid SCX task state transitions and worse.
+ *
+ * Once a task has finished the final switch, sched_ext_dead() is the only thing
+ * that needs to happen on the task. Use this test to short-circuit sched_class
+ * operations which may be called on dead tasks.
+ */
+static bool task_dead_and_done(struct task_struct *p)
+{
+ struct rq *rq = task_rq(p);
+
+ lockdep_assert_rq_held(rq);
+
+ /*
+ * In do_task_dead(), a dying task sets %TASK_DEAD with preemption
+ * disabled and __schedule(). If @p has %TASK_DEAD set and off CPU, @p
+ * won't ever run again.
+ */
+ return unlikely(READ_ONCE(p->__state) == TASK_DEAD) &&
+ !task_on_cpu(rq, p);
+}
+
void sched_ext_dead(struct task_struct *p)
{
unsigned long flags;
+ /*
+ * By the time control reaches here, @p has %TASK_DEAD set, switched out
+ * for the last time and then dropped the rq lock - task_dead_and_done()
+ * should be returning %true, nullifying the straggling sched_class ops.
+ * Remove from scx_tasks and exit @p.
+ */
raw_spin_lock_irqsave(&scx_tasks_lock, flags);
list_del_init(&p->scx.tasks_node);
raw_spin_unlock_irqrestore(&scx_tasks_lock, flags);
@@ -3063,6 +3102,9 @@ static void reweight_task_scx(struct rq *rq, struct task_struct *p,
lockdep_assert_rq_held(task_rq(p));
+ if (task_dead_and_done(p))
+ return;
+
p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));
if (SCX_HAS_OP(sch, set_weight))
SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
@@ -3077,6 +3119,9 @@ static void switching_to_scx(struct rq *rq, struct task_struct *p)
{
struct scx_sched *sch = scx_root;
+ if (task_dead_and_done(p))
+ return;
+
scx_enable_task(p);
/*
@@ -3090,6 +3135,9 @@ static void switching_to_scx(struct rq *rq, struct task_struct *p)
static void switched_from_scx(struct rq *rq, struct task_struct *p)
{
+ if (task_dead_and_done(p))
+ return;
+
scx_disable_task(p);
}
diff --git a/kernel/vmcore_info.c b/kernel/vmcore_info.c
index fe9bf8db1922..e2784038bbed 100644
--- a/kernel/vmcore_info.c
+++ b/kernel/vmcore_info.c
@@ -36,7 +36,11 @@ struct hwerr_info {
time64_t timestamp;
};
-static struct hwerr_info hwerr_data[HWERR_RECOV_MAX];
+/*
+ * The hwerr_data[] array is declared with global scope so that it remains
+ * accessible to vmcoreinfo even when Link Time Optimization (LTO) is enabled.
+ */
+struct hwerr_info hwerr_data[HWERR_RECOV_MAX];
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len)
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 84ecccddc771..012d5614efb9 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -64,13 +64,14 @@ void fprop_global_destroy(struct fprop_global *p)
bool fprop_new_period(struct fprop_global *p, int periods)
{
s64 events = percpu_counter_sum(&p->events);
+ unsigned long flags;
/*
* Don't do anything if there are no events.
*/
if (events <= 1)
return false;
- preempt_disable_nested();
+ local_irq_save(flags);
write_seqcount_begin(&p->sequence);
if (periods < 64)
events -= events >> periods;
@@ -78,7 +79,7 @@ bool fprop_new_period(struct fprop_global *p, int periods)
percpu_counter_add(&p->events, -events);
p->period += periods;
write_seqcount_end(&p->sequence);
- preempt_enable_nested();
+ local_irq_restore(flags);
return true;
}
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 8af169d3873a..455a6862ae50 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -662,7 +662,9 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror *dmirror,
goto error;
}
- zone_device_folio_init(page_folio(dpage), order);
+ zone_device_folio_init(page_folio(dpage),
+ page_pgmap(folio_page(page_folio(dpage), 0)),
+ order);
dpage->zone_device_data = rpage;
return dpage;
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index ed489a14dddf..b7d05c2a6d93 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -606,4 +606,25 @@ void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
__kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG);
}
}
+
+void __kasan_vrealloc(const void *addr, unsigned long old_size,
+ unsigned long new_size)
+{
+ if (new_size < old_size) {
+ kasan_poison_last_granule(addr, new_size);
+
+ new_size = round_up(new_size, KASAN_GRANULE_SIZE);
+ old_size = round_up(old_size, KASAN_GRANULE_SIZE);
+ if (new_size < old_size)
+ __kasan_poison_vmalloc(addr + new_size,
+ old_size - new_size);
+ } else if (new_size > old_size) {
+ old_size = round_down(old_size, KASAN_GRANULE_SIZE);
+ __kasan_unpoison_vmalloc(addr + old_size,
+ new_size - old_size,
+ KASAN_VMALLOC_PROT_NORMAL |
+ KASAN_VMALLOC_VM_ALLOC |
+ KASAN_VMALLOC_KEEP_TAG);
+ }
+}
#endif
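
__kasan_vrealloc() above works on KASAN_GRANULE_SIZE units: shrink rounds both sizes up and poisons the tail, grow rounds the old size down and unpoisons the gap (the partial-granule handling via kasan_poison_last_granule() is omitted below). A small userspace sketch of just that rounding arithmetic, assuming an 8-byte granule and using printf() in place of the real shadow updates:

/* Sketch only: the "poison"/"unpoison" here are just prints. */
#include <stdio.h>

#define GRANULE 8UL

static unsigned long round_up_g(unsigned long x)   { return (x + GRANULE - 1) & ~(GRANULE - 1); }
static unsigned long round_down_g(unsigned long x) { return x & ~(GRANULE - 1); }

static void demo_vrealloc(unsigned long old_size, unsigned long new_size)
{
	if (new_size < old_size) {
		unsigned long from = round_up_g(new_size);
		unsigned long to   = round_up_g(old_size);

		if (from < to)
			printf("shrink: poison   [%lu, %lu)\n", from, to);
	} else if (new_size > old_size) {
		unsigned long from = round_down_g(old_size);

		printf("grow:   unpoison [%lu, %lu)\n", from, new_size);
	}
}

int main(void)
{
	demo_vrealloc(100, 40);	/* poison   [40, 104) */
	demo_vrealloc(40, 100);	/* unpoison [40, 100) */
	return 0;
}
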
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index da0f5b6f5744..4f79ec720752 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -596,7 +596,7 @@ static void rcu_guarded_free(struct rcu_head *h)
static unsigned long kfence_init_pool(void)
{
unsigned long addr, start_pfn;
- int i;
+ int i, rand;
if (!arch_kfence_init_pool())
return (unsigned long)__kfence_pool;
@@ -647,13 +647,27 @@ static unsigned long kfence_init_pool(void)
INIT_LIST_HEAD(&meta->list);
raw_spin_lock_init(&meta->lock);
meta->state = KFENCE_OBJECT_UNUSED;
- meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
- list_add_tail(&meta->list, &kfence_freelist);
+ /* Use addr to randomize the freelist. */
+ meta->addr = i;
/* Protect the right redzone. */
- if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
+ if (unlikely(!kfence_protect(addr + 2 * i * PAGE_SIZE + PAGE_SIZE)))
goto reset_slab;
+ }
+
+ for (i = CONFIG_KFENCE_NUM_OBJECTS; i > 0; i--) {
+ rand = get_random_u32_below(i);
+ swap(kfence_metadata_init[i - 1].addr, kfence_metadata_init[rand].addr);
+ }
+ for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+ struct kfence_metadata *meta_1 = &kfence_metadata_init[i];
+ struct kfence_metadata *meta_2 = &kfence_metadata_init[meta_1->addr];
+
+ list_add_tail(&meta_2->list, &kfence_freelist);
+ }
+ for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+ kfence_metadata_init[i].addr = addr;
addr += 2 * PAGE_SIZE;
}
@@ -666,6 +680,7 @@ static unsigned long kfence_init_pool(void)
return 0;
reset_slab:
+ addr += 2 * i * PAGE_SIZE;
for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
struct page *page;
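
The KFENCE hunk above temporarily reuses the metadata addr field to shuffle object indices before building the freelist, which amounts to a Fisher-Yates pass driven by get_random_u32_below(). A standalone userspace sketch of the same shuffle, with rand() and NUM_OBJECTS as stand-ins chosen only for illustration:

/* Sketch only: shuffles object indices the way the freelist is randomized. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NUM_OBJECTS 8

int main(void)
{
	unsigned int order[NUM_OBJECTS];
	int i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < NUM_OBJECTS; i++)
		order[i] = i;

	/* Fisher-Yates: swap each slot with a randomly chosen earlier-or-equal slot. */
	for (i = NUM_OBJECTS; i > 0; i--) {
		int j = rand() % i;
		unsigned int tmp = order[i - 1];

		order[i - 1] = order[j];
		order[j] = tmp;
	}

	printf("randomized freelist order:");
	for (i = 0; i < NUM_OBJECTS; i++)
		printf(" %u", order[i]);
	printf("\n");
	return 0;
}
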
diff --git a/mm/memfd.c b/mm/memfd.c
index ab5312aff14b..f032c6052926 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -456,7 +456,7 @@ err_name:
return ERR_PTR(error);
}
-static struct file *alloc_file(const char *name, unsigned int flags)
+struct file *memfd_alloc_file(const char *name, unsigned int flags)
{
unsigned int *file_seals;
struct file *file;
@@ -520,5 +520,5 @@ SYSCALL_DEFINE2(memfd_create,
return PTR_ERR(name);
fd_flags = (flags & MFD_CLOEXEC) ? O_CLOEXEC : 0;
- return FD_ADD(fd_flags, alloc_file(name, flags));
+ return FD_ADD(fd_flags, memfd_alloc_file(name, flags));
}
diff --git a/mm/memfd_luo.c b/mm/memfd_luo.c
index 4f6ba63b4310..a34fccc23b6a 100644
--- a/mm/memfd_luo.c
+++ b/mm/memfd_luo.c
@@ -78,6 +78,7 @@
#include <linux/liveupdate.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
+#include <linux/memfd.h>
#include "internal.h"
static int memfd_luo_preserve_folios(struct file *file,
@@ -443,11 +444,11 @@ static int memfd_luo_retrieve(struct liveupdate_file_op_args *args)
if (!ser)
return -EINVAL;
- file = shmem_file_setup("", 0, VM_NORESERVE);
-
+ file = memfd_alloc_file("", 0);
if (IS_ERR(file)) {
pr_err("failed to setup file: %pe\n", file);
- return PTR_ERR(file);
+ err = PTR_ERR(file);
+ goto free_ser;
}
vfs_setpos(file, ser->pos, MAX_LFS_FILESIZE);
@@ -473,7 +474,8 @@ static int memfd_luo_retrieve(struct liveupdate_file_op_args *args)
put_file:
fput(file);
-
+free_ser:
+ kho_restore_free(ser);
return err;
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c80c2907da33..cf0d526e6d41 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -692,6 +692,8 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
unsigned long poisoned_pfn, struct to_kill *tk)
{
unsigned long pfn = 0;
+ unsigned long hwpoison_vaddr;
+ unsigned long mask;
if (pte_present(pte)) {
pfn = pte_pfn(pte);
@@ -702,10 +704,12 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
pfn = softleaf_to_pfn(entry);
}
- if (!pfn || pfn != poisoned_pfn)
+ mask = ~((1UL << (shift - PAGE_SHIFT)) - 1);
+ if (!pfn || pfn != (poisoned_pfn & mask))
return 0;
- set_to_kill(tk, addr, shift);
+ hwpoison_vaddr = addr + ((poisoned_pfn - pfn) << PAGE_SHIFT);
+ set_to_kill(tk, hwpoison_vaddr, shift);
return 1;
}
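
The check_hwpoisoned_entry() change above compares the poisoned PFN against the mapping's base PFN under a mask derived from the mapping shift, then reconstructs the exact faulting address from the PFN offset. A userspace sketch of that arithmetic with made-up PFNs, address, and shift:

/* Sketch only: all numbers are invented for illustration. */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long map_pfn = 0x200;		/* base PFN of a 2 MiB mapping */
	unsigned long map_addr = 0x7f0000000000UL;
	unsigned int shift = 21;		/* 2 MiB => 512 base pages */
	unsigned long poisoned_pfn = 0x2a7;	/* somewhere inside the mapping */

	unsigned long mask = ~((1UL << (shift - PAGE_SHIFT)) - 1);

	if (map_pfn == (poisoned_pfn & mask)) {
		unsigned long vaddr = map_addr +
			((poisoned_pfn - map_pfn) << PAGE_SHIFT);

		printf("poisoned page maps at 0x%lx\n", vaddr);
	}
	return 0;
}
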
@@ -1883,12 +1887,22 @@ static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag)
return count;
}
-static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page)
+#define MF_HUGETLB_FREED 0 /* freed hugepage */
+#define MF_HUGETLB_IN_USED 1 /* in-use hugepage */
+#define MF_HUGETLB_NON_HUGEPAGE 2 /* not a hugepage */
+#define MF_HUGETLB_FOLIO_PRE_POISONED 3 /* folio already poisoned */
+#define MF_HUGETLB_PAGE_PRE_POISONED 4 /* exact page already poisoned */
+#define MF_HUGETLB_RETRY 5 /* hugepage is busy, retry */
+/*
+ * Set hugetlb folio as hwpoisoned, update folio private raw hwpoison list
+ * to keep track of the poisoned pages.
+ */
+static int hugetlb_update_hwpoison(struct folio *folio, struct page *page)
{
struct llist_head *head;
struct raw_hwp_page *raw_hwp;
struct raw_hwp_page *p;
- int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0;
+ int ret = folio_test_set_hwpoison(folio) ? MF_HUGETLB_FOLIO_PRE_POISONED : 0;
/*
* Once the hwpoison hugepage has lost reliable raw error info,
@@ -1896,20 +1910,17 @@ static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page)
* so skip to add additional raw error info.
*/
if (folio_test_hugetlb_raw_hwp_unreliable(folio))
- return -EHWPOISON;
+ return MF_HUGETLB_FOLIO_PRE_POISONED;
head = raw_hwp_list_head(folio);
llist_for_each_entry(p, head->first, node) {
if (p->page == page)
- return -EHWPOISON;
+ return MF_HUGETLB_PAGE_PRE_POISONED;
}
raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
if (raw_hwp) {
raw_hwp->page = page;
llist_add(&raw_hwp->node, head);
- /* the first error event will be counted in action_result(). */
- if (ret)
- num_poisoned_pages_inc(page_to_pfn(page));
} else {
/*
* Failed to save raw error info. We no longer trace all
@@ -1957,42 +1968,39 @@ void folio_clear_hugetlb_hwpoison(struct folio *folio)
/*
* Called from hugetlb code with hugetlb_lock held.
- *
- * Return values:
- * 0 - free hugepage
- * 1 - in-use hugepage
- * 2 - not a hugepage
- * -EBUSY - the hugepage is busy (try to retry)
- * -EHWPOISON - the hugepage is already hwpoisoned
*/
int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared)
{
struct page *page = pfn_to_page(pfn);
struct folio *folio = page_folio(page);
- int ret = 2; /* fallback to normal page handling */
bool count_increased = false;
+ int ret, rc;
- if (!folio_test_hugetlb(folio))
+ if (!folio_test_hugetlb(folio)) {
+ ret = MF_HUGETLB_NON_HUGEPAGE;
goto out;
-
- if (flags & MF_COUNT_INCREASED) {
- ret = 1;
+ } else if (flags & MF_COUNT_INCREASED) {
+ ret = MF_HUGETLB_IN_USED;
count_increased = true;
} else if (folio_test_hugetlb_freed(folio)) {
- ret = 0;
+ ret = MF_HUGETLB_FREED;
} else if (folio_test_hugetlb_migratable(folio)) {
- ret = folio_try_get(folio);
- if (ret)
+ if (folio_try_get(folio)) {
+ ret = MF_HUGETLB_IN_USED;
count_increased = true;
+ } else {
+ ret = MF_HUGETLB_FREED;
+ }
} else {
- ret = -EBUSY;
+ ret = MF_HUGETLB_RETRY;
if (!(flags & MF_NO_RETRY))
goto out;
}
- if (folio_set_hugetlb_hwpoison(folio, page)) {
- ret = -EHWPOISON;
+ rc = hugetlb_update_hwpoison(folio, page);
+ if (rc >= MF_HUGETLB_FOLIO_PRE_POISONED) {
+ ret = rc;
goto out;
}
@@ -2017,10 +2025,16 @@ out:
* with basic operations like hugepage allocation/free/demotion.
* So some of prechecks for hwpoison (pinning, and testing/setting
* PageHWPoison) should be done in single hugetlb_lock range.
+ * Returns:
+ * 0 - not hugetlb, or recovered
+ * -EBUSY - not recovered
+ * -EOPNOTSUPP - hwpoison_filter'ed
+ * -EHWPOISON - folio or exact page already poisoned
+ * -EFAULT - kill_accessing_process finds current->mm null
*/
static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
- int res;
+ int res, rv;
struct page *p = pfn_to_page(pfn);
struct folio *folio;
unsigned long page_flags;
@@ -2029,22 +2043,29 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
*hugetlb = 1;
retry:
res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared);
- if (res == 2) { /* fallback to normal page handling */
+ switch (res) {
+ case MF_HUGETLB_NON_HUGEPAGE: /* fallback to normal page handling */
*hugetlb = 0;
return 0;
- } else if (res == -EHWPOISON) {
- if (flags & MF_ACTION_REQUIRED) {
- folio = page_folio(p);
- res = kill_accessing_process(current, folio_pfn(folio), flags);
- }
- action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
- return res;
- } else if (res == -EBUSY) {
+ case MF_HUGETLB_RETRY:
if (!(flags & MF_NO_RETRY)) {
flags |= MF_NO_RETRY;
goto retry;
}
return action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);
+ case MF_HUGETLB_FOLIO_PRE_POISONED:
+ case MF_HUGETLB_PAGE_PRE_POISONED:
+ rv = -EHWPOISON;
+ if (flags & MF_ACTION_REQUIRED)
+ rv = kill_accessing_process(current, pfn, flags);
+ if (res == MF_HUGETLB_PAGE_PRE_POISONED)
+ action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
+ else
+ action_result(pfn, MF_MSG_HUGE, MF_FAILED);
+ return rv;
+ default:
+ WARN_ON((res != MF_HUGETLB_FREED) && (res != MF_HUGETLB_IN_USED));
+ break;
}
folio = page_folio(p);
@@ -2055,7 +2076,7 @@ retry:
if (migratable_cleared)
folio_set_hugetlb_migratable(folio);
folio_unlock(folio);
- if (res == 1)
+ if (res == MF_HUGETLB_IN_USED)
folio_put(folio);
return -EOPNOTSUPP;
}
@@ -2064,7 +2085,7 @@ retry:
* Handling free hugepage. The possible race with hugepage allocation
* or demotion can be prevented by PageHWPoison flag.
*/
- if (res == 0) {
+ if (res == MF_HUGETLB_FREED) {
folio_unlock(folio);
if (__page_handle_poison(p) > 0) {
page_ref_inc(p);
diff --git a/mm/memremap.c b/mm/memremap.c
index 63c6ab4fdf08..ac7be07e3361 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -477,10 +477,43 @@ void free_zone_device_folio(struct folio *folio)
}
}
-void zone_device_page_init(struct page *page, unsigned int order)
+void zone_device_page_init(struct page *page, struct dev_pagemap *pgmap,
+ unsigned int order)
{
+ struct page *new_page = page;
+ unsigned int i;
+
VM_WARN_ON_ONCE(order > MAX_ORDER_NR_PAGES);
+ for (i = 0; i < (1UL << order); ++i, ++new_page) {
+ struct folio *new_folio = (struct folio *)new_page;
+
+ /*
+ * new_page could have been part of a previous higher order folio
+ * which encodes the order, in page + 1, in the flags bits. We
+ * blindly clear bits which could have set the order field here,
+ * including the page head bit.
+ */
+ new_page->flags.f &= ~0xffUL; /* Clear possible order, page head */
+
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+ /*
+ * This pointer math looks odd, but new_page could have been
+ * part of a previous higher order folio, which sets _nr_pages
+ * in page + 1 (new_page). Therefore, we use pointer casting to
+ * correctly locate the _nr_pages bits within new_page which
+ * could have been modified by a previous higher order folio.
+ */
+ ((struct folio *)(new_page - 1))->_nr_pages = 0;
+#endif
+
+ new_folio->mapping = NULL;
+ new_folio->pgmap = pgmap; /* Also clear compound head */
+ new_folio->share = 0; /* fsdax only, unused for device private */
+ VM_WARN_ON_FOLIO(folio_ref_count(new_folio), new_folio);
+ VM_WARN_ON_FOLIO(!folio_is_zone_device(new_folio), new_folio);
+ }
+
/*
* Drivers shouldn't be allocating pages after calling
* memunmap_pages().
diff --git a/mm/mm_init.c b/mm/mm_init.c
index fc2a6f1e518f..2a809cd8e7fa 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2059,7 +2059,7 @@ static unsigned long __init deferred_init_pages(struct zone *zone,
*/
static unsigned long __init
deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
- struct zone *zone)
+ struct zone *zone, bool can_resched)
{
int nid = zone_to_nid(zone);
unsigned long nr_pages = 0;
@@ -2085,10 +2085,10 @@ deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
spfn = chunk_end;
- if (irqs_disabled())
- touch_nmi_watchdog();
- else
+ if (can_resched)
cond_resched();
+ else
+ touch_nmi_watchdog();
}
}
@@ -2101,7 +2101,7 @@ deferred_init_memmap_job(unsigned long start_pfn, unsigned long end_pfn,
{
struct zone *zone = arg;
- deferred_init_memmap_chunk(start_pfn, end_pfn, zone);
+ deferred_init_memmap_chunk(start_pfn, end_pfn, zone, true);
}
static unsigned int __init
@@ -2216,7 +2216,7 @@ bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
for (spfn = first_deferred_pfn, epfn = SECTION_ALIGN_UP(spfn + 1);
nr_pages < nr_pages_needed && spfn < zone_end_pfn(zone);
spfn = epfn, epfn += PAGES_PER_SECTION) {
- nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone);
+ nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone, false);
}
/*
diff --git a/mm/shmem.c b/mm/shmem.c
index ec6c01378e9d..79af5f9f8b90 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -962,17 +962,29 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
* being freed).
*/
static long shmem_free_swap(struct address_space *mapping,
- pgoff_t index, void *radswap)
+ pgoff_t index, pgoff_t end, void *radswap)
{
- int order = xa_get_order(&mapping->i_pages, index);
- void *old;
+ XA_STATE(xas, &mapping->i_pages, index);
+ unsigned int nr_pages = 0;
+ pgoff_t base;
+ void *entry;
- old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
- if (old != radswap)
- return 0;
- free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order);
+ xas_lock_irq(&xas);
+ entry = xas_load(&xas);
+ if (entry == radswap) {
+ nr_pages = 1 << xas_get_order(&xas);
+ base = round_down(xas.xa_index, nr_pages);
+ if (base < index || base + nr_pages - 1 > end)
+ nr_pages = 0;
+ else
+ xas_store(&xas, NULL);
+ }
+ xas_unlock_irq(&xas);
- return 1 << order;
+ if (nr_pages)
+ free_swap_and_cache_nr(radix_to_swp_entry(radswap), nr_pages);
+
+ return nr_pages;
}
/*
@@ -1124,8 +1136,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, uoff_t lend,
if (xa_is_value(folio)) {
if (unfalloc)
continue;
- nr_swaps_freed += shmem_free_swap(mapping,
- indices[i], folio);
+ nr_swaps_freed += shmem_free_swap(mapping, indices[i],
+ end - 1, folio);
continue;
}
@@ -1191,14 +1203,30 @@ whole_folios:
folio = fbatch.folios[i];
if (xa_is_value(folio)) {
+ int order;
long swaps_freed;
if (unfalloc)
continue;
- swaps_freed = shmem_free_swap(mapping, indices[i], folio);
+ swaps_freed = shmem_free_swap(mapping, indices[i],
+ end - 1, folio);
if (!swaps_freed) {
- /* Swap was replaced by page: retry */
- index = indices[i];
+ pgoff_t base = indices[i];
+
+ order = shmem_confirm_swap(mapping, indices[i],
+ radix_to_swp_entry(folio));
+ /*
+ * If a large swap entry is found crossing the end or start
+ * border, skip it, as the truncate_inode_partial_folio()
+ * above should have zeroed its content at least once.
+ */
+ if (order > 0) {
+ base = round_down(base, 1 << order);
+ if (base < start || base + (1 << order) > end)
+ continue;
+ }
+ /* Swap was replaced by page or extended, retry */
+ index = base;
break;
}
nr_swaps_freed += swaps_freed;
diff --git a/mm/swap.h b/mm/swap.h
index d034c13d8dd2..1bd466da3039 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -198,7 +198,7 @@ int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug);
void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug);
/* linux/mm/swap_state.c */
-extern struct address_space swap_space __ro_after_init;
+extern struct address_space swap_space __read_mostly;
static inline struct address_space *swap_address_space(swp_entry_t entry)
{
return &swap_space;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 5f97c6ae70a2..44d228982521 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -37,8 +37,7 @@ static const struct address_space_operations swap_aops = {
#endif
};
-/* Set swap_space as read only as swap cache is handled by swap table */
-struct address_space swap_space __ro_after_init = {
+struct address_space swap_space __read_mostly = {
.a_ops = &swap_aops,
};
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 628f96e83b11..e286c2d2068c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4322,7 +4322,7 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align
if (want_init_on_free() || want_init_on_alloc(flags))
memset((void *)p + size, 0, old_size - size);
vm->requested_size = size;
- kasan_poison_vmalloc(p + size, old_size - size);
+ kasan_vrealloc(p, old_size, size);
return (void *)p;
}
@@ -4330,16 +4330,13 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align
* We already have the bytes available in the allocation; use them.
*/
if (size <= alloced_size) {
- kasan_unpoison_vmalloc(p + old_size, size - old_size,
- KASAN_VMALLOC_PROT_NORMAL |
- KASAN_VMALLOC_VM_ALLOC |
- KASAN_VMALLOC_KEEP_TAG);
/*
* No need to zero memory here, as unused memory will have
* already been zeroed at initial allocation time or during
* realloc shrink time.
*/
vm->requested_size = size;
+ kasan_vrealloc(p, old_size, size);
return (void *)p;
}
diff --git a/net/core/filter.c b/net/core/filter.c
index bcd73d9bd764..029e560e32ce 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2289,12 +2289,12 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
err = bpf_out_neigh_v6(net, skb, dev, nh);
if (unlikely(net_xmit_eval(err)))
- DEV_STATS_INC(dev, tx_errors);
+ dev_core_stats_tx_dropped_inc(dev);
else
ret = NET_XMIT_SUCCESS;
goto out_xmit;
out_drop:
- DEV_STATS_INC(dev, tx_errors);
+ dev_core_stats_tx_dropped_inc(dev);
kfree_skb(skb);
out_xmit:
return ret;
@@ -2396,12 +2396,12 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
err = bpf_out_neigh_v4(net, skb, dev, nh);
if (unlikely(net_xmit_eval(err)))
- DEV_STATS_INC(dev, tx_errors);
+ dev_core_stats_tx_dropped_inc(dev);
else
ret = NET_XMIT_SUCCESS;
goto out_xmit;
out_drop:
- DEV_STATS_INC(dev, tx_errors);
+ dev_core_stats_tx_dropped_inc(dev);
kfree_skb(skb);
out_xmit:
return ret;
diff --git a/net/core/gro.c b/net/core/gro.c
index ad326c7cdc0a..ef61695fbdbb 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -265,6 +265,8 @@ static void gro_complete(struct gro_node *gro, struct sk_buff *skb)
goto out;
}
+ /* NICs can feed encapsulated packets into GRO */
+ skb->encapsulation = 0;
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
if (ptype->type != type || !ptype->callbacks.gro_complete)
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 212cde35affa..25c455c10a01 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -185,10 +185,6 @@ static void linkwatch_do_dev(struct net_device *dev)
netif_state_change(dev);
}
- /* Note: our callers are responsible for calling netdev_tracker_free().
- * This is the reason we use __dev_put() instead of dev_put().
- */
- __dev_put(dev);
}
static void __linkwatch_run_queue(int urgent_only)
@@ -243,6 +239,11 @@ static void __linkwatch_run_queue(int urgent_only)
netdev_lock_ops(dev);
linkwatch_do_dev(dev);
netdev_unlock_ops(dev);
+ /* Use __dev_put() because netdev_tracker_free() was already
+ * called above. Must be after netdev_unlock_ops() to prevent
+ * netdev_run_todo() from freeing the device while still in use.
+ */
+ __dev_put(dev);
do_dev--;
spin_lock_irq(&lweventlist_lock);
}
@@ -278,8 +279,13 @@ void __linkwatch_sync_dev(struct net_device *dev)
{
netdev_ops_assert_locked(dev);
- if (linkwatch_clean_dev(dev))
+ if (linkwatch_clean_dev(dev)) {
linkwatch_do_dev(dev);
+ /* Use __dev_put() because netdev_tracker_free() was already
+ * called inside linkwatch_clean_dev().
+ */
+ __dev_put(dev);
+ }
}
void linkwatch_sync_dev(struct net_device *dev)
@@ -288,6 +294,10 @@ void linkwatch_sync_dev(struct net_device *dev)
netdev_lock_ops(dev);
linkwatch_do_dev(dev);
netdev_unlock_ops(dev);
+ /* Use __dev_put() because netdev_tracker_free() was already
+ * called inside linkwatch_clean_dev().
+ */
+ __dev_put(dev);
}
}
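
[editor's note] The comments added in the link_watch.c hunks all make the same point: the netdev tracker was released earlier, so only the raw reference drop remains, and it must happen after the device lock is released. Below is a minimal userspace sketch of that ordering rule, assuming a toy object with its own lock and a non-atomic refcount for brevity; the names obj_new/obj_put are invented for the example and are not kernel APIs.

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
	int refcount;           /* toy refcount; a real one would be atomic */
};

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	assert(o);
	pthread_mutex_init(&o->lock, NULL);
	o->refcount = 1;
	return o;
}

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		pthread_mutex_destroy(&o->lock);
		free(o);        /* object is gone after this point */
	}
}

static void work_then_release(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	/* ... operate on the object ... */
	pthread_mutex_unlock(&o->lock);

	/*
	 * Drop the reference only after unlocking: calling obj_put() with
	 * the lock still held would let free() race with the unlock path,
	 * which is the hazard the comments in the patch above describe.
	 */
	obj_put(o);
}

int main(void)
{
	work_then_release(obj_new());
	return 0;
}
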
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 70e0e9a3b650..7dbfa6109f0b 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -170,8 +170,14 @@ static const struct seq_operations softnet_seq_ops = {
.show = softnet_seq_show,
};
+struct ptype_iter_state {
+ struct seq_net_private p;
+ struct net_device *dev;
+};
+
static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
+ struct ptype_iter_state *iter = seq->private;
struct list_head *ptype_list = NULL;
struct packet_type *pt = NULL;
struct net_device *dev;
@@ -181,12 +187,16 @@ static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
for_each_netdev_rcu(seq_file_net(seq), dev) {
ptype_list = &dev->ptype_all;
list_for_each_entry_rcu(pt, ptype_list, list) {
- if (i == pos)
+ if (i == pos) {
+ iter->dev = dev;
return pt;
+ }
++i;
}
}
+ iter->dev = NULL;
+
list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) {
if (i == pos)
return pt;
@@ -218,6 +228,7 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
+ struct ptype_iter_state *iter = seq->private;
struct net *net = seq_file_net(seq);
struct net_device *dev;
struct packet_type *pt;
@@ -229,19 +240,21 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return ptype_get_idx(seq, 0);
pt = v;
- nxt = pt->list.next;
- if (pt->dev) {
- if (nxt != &pt->dev->ptype_all)
+ nxt = READ_ONCE(pt->list.next);
+ dev = iter->dev;
+ if (dev) {
+ if (nxt != &dev->ptype_all)
goto found;
- dev = pt->dev;
for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
- if (!list_empty(&dev->ptype_all)) {
- nxt = dev->ptype_all.next;
+ nxt = READ_ONCE(dev->ptype_all.next);
+ if (nxt != &dev->ptype_all) {
+ iter->dev = dev;
goto found;
}
}
- nxt = net->ptype_all.next;
+ iter->dev = NULL;
+ nxt = READ_ONCE(net->ptype_all.next);
goto net_ptype_all;
}
@@ -252,20 +265,20 @@ net_ptype_all:
if (nxt == &net->ptype_all) {
/* continue with ->ptype_specific if it's not empty */
- nxt = net->ptype_specific.next;
+ nxt = READ_ONCE(net->ptype_specific.next);
if (nxt != &net->ptype_specific)
goto found;
}
hash = 0;
- nxt = ptype_base[0].next;
+ nxt = READ_ONCE(ptype_base[0].next);
} else
hash = ntohs(pt->type) & PTYPE_HASH_MASK;
while (nxt == &ptype_base[hash]) {
if (++hash >= PTYPE_HASH_SIZE)
return NULL;
- nxt = ptype_base[hash].next;
+ nxt = READ_ONCE(ptype_base[hash].next);
}
found:
return list_entry(nxt, struct packet_type, list);
@@ -279,19 +292,24 @@ static void ptype_seq_stop(struct seq_file *seq, void *v)
static int ptype_seq_show(struct seq_file *seq, void *v)
{
+ struct ptype_iter_state *iter = seq->private;
struct packet_type *pt = v;
+ struct net_device *dev;
- if (v == SEQ_START_TOKEN)
+ if (v == SEQ_START_TOKEN) {
seq_puts(seq, "Type Device Function\n");
- else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
- (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
+ return 0;
+ }
+ dev = iter->dev;
+ if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
+ (!dev || net_eq(dev_net(dev), seq_file_net(seq)))) {
if (pt->type == htons(ETH_P_ALL))
seq_puts(seq, "ALL ");
else
seq_printf(seq, "%04x", ntohs(pt->type));
seq_printf(seq, " %-8s %ps\n",
- pt->dev ? pt->dev->name : "", pt->func);
+ dev ? dev->name : "", pt->func);
}
return 0;
@@ -315,7 +333,7 @@ static int __net_init dev_proc_net_init(struct net *net)
&softnet_seq_ops))
goto out_dev;
if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
- sizeof(struct seq_net_private)))
+ sizeof(struct ptype_iter_state)))
goto out_softnet;
if (wext_proc_init(net))
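
[editor's note] The /proc/net/ptype rework above stops deriving the current device from the packet_type element and instead remembers it in per-open iterator state (ptype_iter_state), which is why the seq_file private size changes. As a hedged userspace-level sketch of that shape, here is a cursor that records which container it is walking rather than asking the element; all names (cursor, bucket) are invented for the example.

#include <stddef.h>
#include <stdio.h>

struct bucket {
	const char *name;
	const int *vals;
	size_t n;
};

/* The cursor tracks which bucket it is in; elements do not point back. */
struct cursor {
	const struct bucket *bkt;
	size_t idx;
};

static const int *cursor_next(struct cursor *c, const struct bucket *bkts,
			      size_t nbkts)
{
	while (c->bkt) {
		if (c->idx < c->bkt->n)
			return &c->bkt->vals[c->idx++];
		/* exhausted this bucket: advance, resetting the index */
		c->bkt = (size_t)(c->bkt - bkts) + 1 < nbkts ? c->bkt + 1 : NULL;
		c->idx = 0;
	}
	return NULL;
}

int main(void)
{
	static const int a[] = { 1, 2 }, b[] = { 3 };
	const struct bucket bkts[] = {
		{ "dev0", a, 2 }, { "dev1", b, 1 },
	};
	struct cursor c = { .bkt = bkts, .idx = 0 };
	const int *v;

	while ((v = cursor_next(&c, bkts, 2)))
		printf("%s: %d\n", c.bkt->name, *v);
	return 0;
}
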
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index c5ed88bccbb4..5fae329795c8 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -901,9 +901,6 @@ ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops,
ctx->key_off = key_off;
ctx->priv_size = ops->rxfh_priv_size;
- ctx->hfunc = ETH_RSS_HASH_NO_CHANGE;
- ctx->input_xfrm = RXH_XFRM_NO_CHANGE;
-
return ctx;
}
diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
index 4dced53be4b3..da5934cceb07 100644
--- a/net/ethtool/rss.c
+++ b/net/ethtool/rss.c
@@ -824,8 +824,8 @@ rss_set_ctx_update(struct ethtool_rxfh_context *ctx, struct nlattr **tb,
static int
ethnl_rss_set(struct ethnl_req_info *req_info, struct genl_info *info)
{
- bool indir_reset = false, indir_mod, xfrm_sym = false;
struct rss_req_info *request = RSS_REQINFO(req_info);
+ bool indir_reset = false, indir_mod, xfrm_sym;
struct ethtool_rxfh_context *ctx = NULL;
struct net_device *dev = req_info->dev;
bool mod = false, fields_mod = false;
@@ -860,12 +860,7 @@ ethnl_rss_set(struct ethnl_req_info *req_info, struct genl_info *info)
rxfh.input_xfrm = data.input_xfrm;
ethnl_update_u8(&rxfh.input_xfrm, tb[ETHTOOL_A_RSS_INPUT_XFRM], &mod);
- /* For drivers which don't support input_xfrm it will be set to 0xff
- * in the RSS context info. In all other case input_xfrm != 0 means
- * symmetric hashing is requested.
- */
- if (!request->rss_context || ops->rxfh_per_ctx_key)
- xfrm_sym = rxfh.input_xfrm || data.input_xfrm;
+ xfrm_sym = rxfh.input_xfrm || data.input_xfrm;
if (rxfh.input_xfrm == data.input_xfrm)
rxfh.input_xfrm = RXH_XFRM_NO_CHANGE;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 174d38c70ac4..9880d608392b 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1138,7 +1138,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
fib6_set_expires(iter, rt->expires);
fib6_add_gc_list(iter);
}
- if (!(rt->fib6_flags & (RTF_ADDRCONF | RTF_PREFIX_RT))) {
+ if (!(rt->fib6_flags & (RTF_ADDRCONF | RTF_PREFIX_RT)) &&
+ !iter->fib6_nh->fib_nh_gw_family) {
iter->fib6_flags &= ~RTF_ADDRCONF;
iter->fib6_flags &= ~RTF_PREFIX_RT;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index be4924aeaf0e..58d9940a596a 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5915,7 +5915,7 @@ static void nft_map_catchall_activate(const struct nft_ctx *ctx,
list_for_each_entry(catchall, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
- if (!nft_set_elem_active(ext, genmask))
+ if (nft_set_elem_active(ext, genmask))
continue;
nft_clear(ctx->net, ext);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 2a1c00048fd6..58e849c0acf4 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -161,10 +161,8 @@ next_knode:
int toff = off + key->off + (off2 & key->offmask);
__be32 *data, hdata;
- if (skb_headroom(skb) + toff > INT_MAX)
- goto out;
-
- data = skb_header_pointer(skb, toff, 4, &hdata);
+ data = skb_header_pointer_careful(skb, toff, 4,
+ &hdata);
if (!data)
goto out;
if ((*data ^ key->val) & key->mask) {
@@ -214,8 +212,9 @@ check_terminal:
if (ht->divisor) {
__be32 *data, hdata;
- data = skb_header_pointer(skb, off + n->sel.hoff, 4,
- &hdata);
+ data = skb_header_pointer_careful(skb,
+ off + n->sel.hoff,
+ 4, &hdata);
if (!data)
goto out;
sel = ht->divisor & u32_hash_fold(*data, &n->sel,
@@ -229,7 +228,7 @@ check_terminal:
if (n->sel.flags & TC_U32_VAROFFSET) {
__be16 *data, hdata;
- data = skb_header_pointer(skb,
+ data = skb_header_pointer_careful(skb,
off + n->sel.offoff,
2, &hdata);
if (!data)
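
[editor's note] The cls_u32 change replaces an open-coded overflow check plus skb_header_pointer() with a bounds-careful accessor. As a standalone illustration of the general pattern only (not the kernel helper itself), here is a hedged userspace sketch of an accessor that rejects negative or overflowing offsets before copying out of a linear buffer; buf_header_pointer and struct linbuf are made up for the example.

#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct linbuf {
	const unsigned char *data;
	size_t len;
};

/*
 * Copy @len bytes at @offset into @scratch and return a pointer to them,
 * or NULL if the request is negative, overflows, or runs past the buffer.
 */
static const void *buf_header_pointer(const struct linbuf *b, long offset,
				      size_t len, void *scratch)
{
	if (offset < 0 || len > b->len)
		return NULL;
	if ((size_t)offset > b->len - len)   /* no offset + len overflow */
		return NULL;
	memcpy(scratch, b->data + offset, len);
	return scratch;
}

int main(void)
{
	unsigned char raw[64] = { 0xde, 0xad, 0xbe, 0xef };
	struct linbuf b = { .data = raw, .len = sizeof(raw) };
	unsigned char tmp[4];
	const unsigned char *p = buf_header_pointer(&b, 0, sizeof(tmp), tmp);

	if (p)
		printf("%02x%02x%02x%02x\n", p[0], p[1], p[2], p[3]);
	if (!buf_header_pointer(&b, LONG_MAX, 4, tmp))
		printf("huge offset rejected\n");
	return 0;
}
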
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index 751904f10aab..970db62bd029 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -1219,7 +1219,7 @@ void tipc_crypto_key_flush(struct tipc_crypto *c)
rx = c;
tx = tipc_net(rx->net)->crypto_tx;
if (cancel_delayed_work(&rx->work)) {
- kfree(rx->skey);
+ kfree_sensitive(rx->skey);
rx->skey = NULL;
atomic_xchg(&rx->key_distr, 0);
tipc_node_put(rx->node);
@@ -2394,7 +2394,7 @@ static void tipc_crypto_work_rx(struct work_struct *work)
break;
default:
synchronize_rcu();
- kfree(rx->skey);
+ kfree_sensitive(rx->skey);
rx->skey = NULL;
break;
}
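
[editor's note] The TIPC change switches the session-key frees to kfree_sensitive() so the key material is scrubbed before the memory is returned to the allocator. Below is a hedged userspace analogue of the same idea: zero the buffer through a volatile pointer so the compiler cannot drop the wipe as a dead store, then free it. free_sensitive is a name invented for the sketch, and unlike the kernel helper it takes an explicit length.

#include <stdlib.h>
#include <string.h>

/*
 * Zero @len bytes of @p through a volatile pointer so the stores cannot be
 * optimised away, then release the memory.
 */
static void free_sensitive(void *p, size_t len)
{
	volatile unsigned char *v = p;

	if (!p)
		return;
	while (len--)
		*v++ = 0;
	free(p);
}

int main(void)
{
	char *key = malloc(32);

	if (!key)
		return 1;
	strcpy(key, "super-secret-session-key");
	/* ... use the key ... */
	free_sensitive(key, 32);   /* wiped, then freed */
	return 0;
}
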
diff --git a/rust/Makefile b/rust/Makefile
index 5d357dce1704..4dcc2eff51cb 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -383,6 +383,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
-fno-inline-functions-called-once -fsanitize=bounds-strict \
-fstrict-flex-arrays=% -fmin-function-alignment=% \
-fzero-init-padding-bits=% -mno-fdpic \
+ -fdiagnostics-show-context -fdiagnostics-show-context=% \
--param=% --param asan-% -fno-isolate-erroneous-paths-dereference
# Derived from `scripts/Makefile.clang`.
diff --git a/rust/kernel/bits.rs b/rust/kernel/bits.rs
index 553d50265883..2daead125626 100644
--- a/rust/kernel/bits.rs
+++ b/rust/kernel/bits.rs
@@ -27,7 +27,8 @@ macro_rules! impl_bit_fn {
///
/// This version is the default and should be used if `n` is known at
/// compile time.
- #[inline]
+ // Always inline to optimize out error path of `build_assert`.
+ #[inline(always)]
pub const fn [<bit_ $ty>](n: u32) -> $ty {
build_assert!(n < <$ty>::BITS);
(1 as $ty) << n
@@ -75,7 +76,8 @@ macro_rules! impl_genmask_fn {
/// This version is the default and should be used if the range is known
/// at compile time.
$(#[$genmask_ex])*
- #[inline]
+ // Always inline to optimize out error path of `build_assert`.
+ #[inline(always)]
pub const fn [<genmask_ $ty>](range: RangeInclusive<u32>) -> $ty {
let start = *range.start();
let end = *range.end();
diff --git a/rust/kernel/fmt.rs b/rust/kernel/fmt.rs
index 84d634201d90..1e8725eb44ed 100644
--- a/rust/kernel/fmt.rs
+++ b/rust/kernel/fmt.rs
@@ -6,7 +6,7 @@
pub use core::fmt::{Arguments, Debug, Error, Formatter, Result, Write};
-/// Internal adapter used to route allow implementations of formatting traits for foreign types.
+/// Internal adapter used to route and allow implementations of formatting traits for foreign types.
///
/// It is inserted automatically by the [`fmt!`] macro and is not meant to be used directly.
///
diff --git a/rust/kernel/num/bounded.rs b/rust/kernel/num/bounded.rs
index f870080af8ac..fa81acbdc8c2 100644
--- a/rust/kernel/num/bounded.rs
+++ b/rust/kernel/num/bounded.rs
@@ -40,11 +40,11 @@ fn fits_within<T: Integer>(value: T, num_bits: u32) -> bool {
fits_within!(value, T, num_bits)
}
-/// An integer value that requires only the `N` less significant bits of the wrapped type to be
+/// An integer value that requires only the `N` least significant bits of the wrapped type to be
/// encoded.
///
/// This limits the number of usable bits in the wrapped integer type, and thus the stored value to
-/// a narrower range, which provides guarantees that can be useful when working with in e.g.
+/// a narrower range, which provides guarantees that can be useful when working within e.g.
/// bitfields.
///
/// # Invariants
@@ -56,7 +56,7 @@ fn fits_within<T: Integer>(value: T, num_bits: u32) -> bool {
/// # Examples
///
/// The preferred way to create values is through constants and the [`Bounded::new`] family of
-/// constructors, as they trigger a build error if the type invariants cannot be withheld.
+/// constructors, as they trigger a build error if the type invariants cannot be upheld.
///
/// ```
/// use kernel::num::Bounded;
@@ -82,7 +82,7 @@ fn fits_within<T: Integer>(value: T, num_bits: u32) -> bool {
/// ```
/// use kernel::num::Bounded;
///
-/// // This succeeds because `15` can be represented with 4 unsigned bits.
+/// // This succeeds because `15` can be represented with 4 unsigned bits.
/// assert!(Bounded::<u8, 4>::try_new(15).is_some());
///
/// // This fails because `16` cannot be represented with 4 unsigned bits.
@@ -221,7 +221,7 @@ fn fits_within<T: Integer>(value: T, num_bits: u32) -> bool {
/// let v: Option<Bounded<u16, 8>> = 128u32.try_into_bounded();
/// assert_eq!(v.as_deref().copied(), Some(128));
///
-/// // Fails because `128` doesn't fits into 6 bits.
+/// // Fails because `128` doesn't fit into 6 bits.
/// let v: Option<Bounded<u16, 6>> = 128u32.try_into_bounded();
/// assert_eq!(v, None);
/// ```
@@ -259,9 +259,9 @@ macro_rules! impl_const_new {
assert!(fits_within!(VALUE, $type, N));
}
- // INVARIANT: `fits_within` confirmed that `VALUE` can be represented within
+ // SAFETY: `fits_within` confirmed that `VALUE` can be represented within
// `N` bits.
- Self::__new(VALUE)
+ unsafe { Self::__new(VALUE) }
}
}
)*
@@ -282,9 +282,10 @@ where
/// All instances of [`Bounded`] must be created through this method as it enforces most of the
/// type invariants.
///
- /// The caller remains responsible for checking, either statically or dynamically, that `value`
- /// can be represented as a `T` using at most `N` bits.
- const fn __new(value: T) -> Self {
+ /// # Safety
+ ///
+ /// The caller must ensure that `value` can be represented within `N` bits.
+ const unsafe fn __new(value: T) -> Self {
// Enforce the type invariants.
const {
// `N` cannot be zero.
@@ -293,6 +294,7 @@ where
assert!(N <= T::BITS);
}
+ // INVARIANT: The caller ensures `value` fits within `N` bits.
Self(value)
}
@@ -328,8 +330,8 @@ where
/// ```
pub fn try_new(value: T) -> Option<Self> {
fits_within(value, N).then(|| {
- // INVARIANT: `fits_within` confirmed that `value` can be represented within `N` bits.
- Self::__new(value)
+ // SAFETY: `fits_within` confirmed that `value` can be represented within `N` bits.
+ unsafe { Self::__new(value) }
})
}
@@ -363,6 +365,7 @@ where
/// assert_eq!(Bounded::<u8, 1>::from_expr(1).get(), 1);
/// assert_eq!(Bounded::<u16, 8>::from_expr(0xff).get(), 0xff);
/// ```
+ // Always inline to optimize out error path of `build_assert`.
#[inline(always)]
pub fn from_expr(expr: T) -> Self {
crate::build_assert!(
@@ -370,8 +373,8 @@ where
"Requested value larger than maximal representable value."
);
- // INVARIANT: `fits_within` confirmed that `expr` can be represented within `N` bits.
- Self::__new(expr)
+ // SAFETY: `fits_within` confirmed that `expr` can be represented within `N` bits.
+ unsafe { Self::__new(expr) }
}
/// Returns the wrapped value as the backing type.
@@ -410,9 +413,9 @@ where
);
}
- // INVARIANT: The value did fit within `N` bits, so it will all the more fit within
+ // SAFETY: The value did fit within `N` bits, so it will all the more fit within
// the larger `M` bits.
- Bounded::__new(self.0)
+ unsafe { Bounded::__new(self.0) }
}
/// Attempts to shrink the number of bits usable for `self`.
@@ -466,9 +469,9 @@ where
// `U` and `T` have the same sign, hence this conversion cannot fail.
let value = unsafe { U::try_from(self.get()).unwrap_unchecked() };
- // INVARIANT: Although the backing type has changed, the value is still represented within
+ // SAFETY: Although the backing type has changed, the value is still represented within
// `N` bits, and with the same signedness.
- Bounded::__new(value)
+ unsafe { Bounded::__new(value) }
}
}
@@ -501,7 +504,7 @@ where
/// let v: Option<Bounded<u16, 8>> = 128u32.try_into_bounded();
/// assert_eq!(v.as_deref().copied(), Some(128));
///
-/// // Fails because `128` doesn't fits into 6 bits.
+/// // Fails because `128` doesn't fit into 6 bits.
/// let v: Option<Bounded<u16, 6>> = 128u32.try_into_bounded();
/// assert_eq!(v, None);
/// ```
@@ -944,9 +947,9 @@ macro_rules! impl_from_primitive {
Self: AtLeastXBits<{ <$type as Integer>::BITS as usize }>,
{
fn from(value: $type) -> Self {
- // INVARIANT: The trait bound on `Self` guarantees that `N` bits is
+ // SAFETY: The trait bound on `Self` guarantees that `N` bits is
// enough to hold any value of the source type.
- Self::__new(T::from(value))
+ unsafe { Self::__new(T::from(value)) }
}
}
)*
@@ -1051,8 +1054,8 @@ where
T: Integer + From<bool>,
{
fn from(value: bool) -> Self {
- // INVARIANT: A boolean can be represented using a single bit, and thus fits within any
+ // SAFETY: A boolean can be represented using a single bit, and thus fits within any
// integer type for any `N` > 0.
- Self::__new(T::from(value))
+ unsafe { Self::__new(T::from(value)) }
}
}
diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
index 4729eb56827a..312cecab72e7 100644
--- a/rust/kernel/rbtree.rs
+++ b/rust/kernel/rbtree.rs
@@ -985,7 +985,7 @@ impl<'a, K, V> CursorMut<'a, K, V> {
self.peek(Direction::Prev)
}
- /// Access the previous node without moving the cursor.
+ /// Access the next node without moving the cursor.
pub fn peek_next(&self) -> Option<(&K, &V)> {
self.peek(Direction::Next)
}
@@ -1130,7 +1130,7 @@ pub struct IterMut<'a, K, V> {
}
// SAFETY: The [`IterMut`] has exclusive access to both `K` and `V`, so it is sufficient to require them to be `Send`.
-// The iterator only gives out immutable references to the keys, but since the iterator has excusive access to those same
+// The iterator only gives out immutable references to the keys, but since the iterator has exclusive access to those same
// keys, `Send` is sufficient. `Sync` would be okay, but it is more restrictive to the user.
unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {}
diff --git a/rust/kernel/sync/atomic/predefine.rs b/rust/kernel/sync/atomic/predefine.rs
index 45a17985cda4..0fca1ba3c2db 100644
--- a/rust/kernel/sync/atomic/predefine.rs
+++ b/rust/kernel/sync/atomic/predefine.rs
@@ -35,12 +35,23 @@ unsafe impl super::AtomicAdd<i64> for i64 {
// as `isize` and `usize`, and `isize` and `usize` are always bi-directional transmutable to
// `isize_atomic_repr`, which also always implements `AtomicImpl`.
#[allow(non_camel_case_types)]
+#[cfg(not(testlib))]
#[cfg(not(CONFIG_64BIT))]
type isize_atomic_repr = i32;
#[allow(non_camel_case_types)]
+#[cfg(not(testlib))]
#[cfg(CONFIG_64BIT)]
type isize_atomic_repr = i64;
+#[allow(non_camel_case_types)]
+#[cfg(testlib)]
+#[cfg(target_pointer_width = "32")]
+type isize_atomic_repr = i32;
+#[allow(non_camel_case_types)]
+#[cfg(testlib)]
+#[cfg(target_pointer_width = "64")]
+type isize_atomic_repr = i64;
+
// Ensure size and alignment requirements are checked.
static_assert!(size_of::<isize>() == size_of::<isize_atomic_repr>());
static_assert!(align_of::<isize>() == align_of::<isize_atomic_repr>());
diff --git a/rust/kernel/sync/refcount.rs b/rust/kernel/sync/refcount.rs
index 19236a5bccde..6c7ae8b05a0b 100644
--- a/rust/kernel/sync/refcount.rs
+++ b/rust/kernel/sync/refcount.rs
@@ -23,7 +23,8 @@ impl Refcount {
/// Construct a new [`Refcount`] from an initial value.
///
/// The initial value should be non-saturated.
- #[inline]
+ // Always inline to optimize out error path of `build_assert`.
+ #[inline(always)]
pub fn new(value: i32) -> Self {
build_assert!(value >= 0, "initial value saturated");
// SAFETY: There are no safety requirements for this FFI call.
diff --git a/rust/macros/fmt.rs b/rust/macros/fmt.rs
index 2f4b9f6e2211..8354abd54502 100644
--- a/rust/macros/fmt.rs
+++ b/rust/macros/fmt.rs
@@ -67,7 +67,7 @@ pub(crate) fn fmt(input: TokenStream) -> TokenStream {
}
(None, acc)
})();
- args.extend(quote_spanned!(first_span => #lhs #adapter(&#rhs)));
+ args.extend(quote_spanned!(first_span => #lhs #adapter(&(#rhs))));
}
};
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index b38002151871..33f66e86418a 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -59,7 +59,7 @@ use proc_macro::TokenStream;
///
/// # Examples
///
-/// ```
+/// ```ignore
/// use kernel::prelude::*;
///
/// module!{
diff --git a/rust/proc-macro2/lib.rs b/rust/proc-macro2/lib.rs
index 7b78d065d51c..5d408943fa0d 100644
--- a/rust/proc-macro2/lib.rs
+++ b/rust/proc-macro2/lib.rs
@@ -1,5 +1,9 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
+// When fixdep scans this, it will find this string `CONFIG_RUSTC_VERSION_TEXT`
+// and thus add a dependency on `include/config/RUSTC_VERSION_TEXT`, which is
+// touched by Kconfig when the version string from the compiler changes.
+
//! [![github]](https://github.com/dtolnay/proc-macro2)&ensp;[![crates-io]](https://crates.io/crates/proc-macro2)&ensp;[![docs-rs]](crate)
//!
//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 5037f4715d74..32e209bc7985 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -166,11 +166,13 @@ else ifeq ($(KBUILD_CHECKSRC),2)
cmd_force_checksrc = $(CHECK) $(CHECKFLAGS) $(c_flags) $<
endif
+ifeq ($(KBUILD_EXTMOD),)
ifneq ($(KBUILD_EXTRA_WARN),)
cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(KDOCFLAGS) \
$(if $(findstring 2, $(KBUILD_EXTRA_WARN)), -Wall) \
$<
endif
+endif
# Compile C sources (.c)
# ---------------------------------------------------------------------------
@@ -356,7 +358,7 @@ $(obj)/%.o: $(obj)/%.rs FORCE
quiet_cmd_rustc_rsi_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
cmd_rustc_rsi_rs = \
$(rust_common_cmd) -Zunpretty=expanded $< >$@; \
- command -v $(RUSTFMT) >/dev/null && $(RUSTFMT) $@
+ command -v $(RUSTFMT) >/dev/null && $(RUSTFMT) --config-path $(srctree)/.rustfmt.toml $@
$(obj)/%.rsi: $(obj)/%.rs FORCE
+$(call if_changed_dep,rustc_rsi_rs)
diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux
index cd788cac9d91..276c3134a563 100644
--- a/scripts/Makefile.vmlinux
+++ b/scripts/Makefile.vmlinux
@@ -113,7 +113,8 @@ vmlinux: vmlinux.unstripped FORCE
# what kmod expects to parse.
quiet_cmd_modules_builtin_modinfo = GEN $@
cmd_modules_builtin_modinfo = $(cmd_objcopy); \
- sed -i 's/\x00\+$$/\x00/g' $@
+ sed -i 's/\x00\+$$/\x00/g' $@; \
+ chmod -x $@
OBJCOPYFLAGS_modules.builtin.modinfo := -j .modinfo -O binary
diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py
index 147d0cc94068..766c2d91cd81 100755
--- a/scripts/generate_rust_analyzer.py
+++ b/scripts/generate_rust_analyzer.py
@@ -61,7 +61,6 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs, core_edit
display_name,
deps,
cfg=[],
- edition="2021",
):
append_crate(
display_name,
@@ -69,13 +68,37 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs, core_edit
deps,
cfg,
is_workspace_member=False,
- edition=edition,
+ # Miguel Ojeda writes:
+ #
+ # > ... in principle even the sysroot crates may have different
+ # > editions.
+ # >
+ # > For instance, in the move to 2024, it seems all happened at once
+ # > in 1.87.0 in these upstream commits:
+ # >
+ # > 0e071c2c6a58 ("Migrate core to Rust 2024")
+ # > f505d4e8e380 ("Migrate alloc to Rust 2024")
+ # > 0b2489c226c3 ("Migrate proc_macro to Rust 2024")
+ # > 993359e70112 ("Migrate std to Rust 2024")
+ # >
+ # > But in the previous move to 2021, `std` moved in 1.59.0, while
+ # > the others in 1.60.0:
+ # >
+ # > b656384d8398 ("Update stdlib to the 2021 edition")
+ # > 06a1c14d52a8 ("Switch all libraries to the 2021 edition")
+ #
+ # Link: https://lore.kernel.org/all/CANiq72kd9bHdKaAm=8xCUhSHMy2csyVed69bOc4dXyFAW4sfuw@mail.gmail.com/
+ #
+ # At the time of writing all rust versions we support build the
+ # sysroot crates with the same edition. We may need to relax this
+ # assumption if future edition moves span multiple rust versions.
+ edition=core_edition,
)
# NB: sysroot crates reexport items from one another so setting up our transitive dependencies
# here is important for ensuring that rust-analyzer can resolve symbols. The sources of truth
# for this dependency graph are `(sysroot_src / crate / "Cargo.toml" for crate in crates)`.
- append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", []), edition=core_edition)
+ append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", []))
append_sysroot_crate("alloc", ["core"])
append_sysroot_crate("std", ["alloc", "core"])
append_sysroot_crate("proc_macro", ["core", "std"])
@@ -83,7 +106,7 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs, core_edit
append_crate(
"compiler_builtins",
srctree / "rust" / "compiler_builtins.rs",
- [],
+ ["core"],
)
append_crate(
@@ -96,14 +119,15 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs, core_edit
append_crate(
"quote",
srctree / "rust" / "quote" / "lib.rs",
- ["alloc", "proc_macro", "proc_macro2"],
+ ["core", "alloc", "std", "proc_macro", "proc_macro2"],
cfg=crates_cfgs["quote"],
+ edition="2018",
)
append_crate(
"syn",
srctree / "rust" / "syn" / "lib.rs",
- ["proc_macro", "proc_macro2", "quote"],
+ ["std", "proc_macro", "proc_macro2", "quote"],
cfg=crates_cfgs["syn"],
)
@@ -123,7 +147,7 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs, core_edit
append_crate(
"pin_init_internal",
srctree / "rust" / "pin-init" / "internal" / "src" / "lib.rs",
- [],
+ ["std", "proc_macro"],
cfg=["kernel"],
is_proc_macro=True,
)
@@ -131,7 +155,7 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs, core_edit
append_crate(
"pin_init",
srctree / "rust" / "pin-init" / "src" / "lib.rs",
- ["core", "pin_init_internal", "macros"],
+ ["core", "compiler_builtins", "pin_init_internal", "macros"],
cfg=["kernel"],
)
@@ -190,7 +214,7 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs, core_edit
append_crate(
name,
path,
- ["core", "kernel"],
+ ["core", "kernel", "pin_init"],
cfg=cfg,
)
@@ -213,9 +237,6 @@ def main():
level=logging.INFO if args.verbose else logging.WARNING
)
- # Making sure that the `sysroot` and `sysroot_src` belong to the same toolchain.
- assert args.sysroot in args.sysroot_src.parents
-
rust_project = {
"crates": generate_crates(args.srctree, args.objtree, args.sysroot_src, args.exttree, args.cfgs, args.core_edition),
"sysroot": str(args.sysroot),
diff --git a/scripts/livepatch/klp-build b/scripts/livepatch/klp-build
index 882272120c9e..a73515a82272 100755
--- a/scripts/livepatch/klp-build
+++ b/scripts/livepatch/klp-build
@@ -555,13 +555,11 @@ copy_orig_objects() {
local file_dir="$(dirname "$file")"
local orig_file="$ORIG_DIR/$rel_file"
local orig_dir="$(dirname "$orig_file")"
- local cmd_file="$file_dir/.$(basename "$file").cmd"
[[ ! -f "$file" ]] && die "missing $(basename "$file") for $_file"
mkdir -p "$orig_dir"
cp -f "$file" "$orig_dir"
- [[ -e "$cmd_file" ]] && cp -f "$cmd_file" "$orig_dir"
done
xtrace_restore
@@ -740,15 +738,17 @@ build_patch_module() {
local orig_dir="$(dirname "$orig_file")"
local kmod_file="$KMOD_DIR/$rel_file"
local kmod_dir="$(dirname "$kmod_file")"
- local cmd_file="$orig_dir/.$(basename "$file").cmd"
+ local cmd_file="$kmod_dir/.$(basename "$file").cmd"
mkdir -p "$kmod_dir"
cp -f "$file" "$kmod_dir"
- [[ -e "$cmd_file" ]] && cp -f "$cmd_file" "$kmod_dir"
# Tell kbuild this is a prebuilt object
cp -f "$file" "${kmod_file}_shipped"
+ # Make modpost happy
+ touch "$cmd_file"
+
echo -n " $rel_file" >> "$makefile"
done
diff --git a/scripts/package/kernel.spec b/scripts/package/kernel.spec
index 98f206cb7c60..0f1c8de1bd95 100644
--- a/scripts/package/kernel.spec
+++ b/scripts/package/kernel.spec
@@ -2,6 +2,8 @@
%{!?_arch: %define _arch dummy}
%{!?make: %define make make}
%define makeflags %{?_smp_mflags} ARCH=%{ARCH}
+%define __spec_install_post /usr/lib/rpm/brp-compress || :
+%define debug_package %{nil}
Name: kernel
Summary: The Linux Kernel
@@ -46,34 +48,12 @@ against the %{version} kernel package.
%endif
%if %{with_debuginfo}
-# list of debuginfo-related options taken from distribution kernel.spec
-# files
-%undefine _include_minidebuginfo
-%undefine _find_debuginfo_dwz_opts
-%undefine _unique_build_ids
-%undefine _unique_debug_names
-%undefine _unique_debug_srcs
-%undefine _debugsource_packages
-%undefine _debuginfo_subpackages
-%global _find_debuginfo_opts -r
-%global _missing_build_ids_terminate_build 1
-%global _no_recompute_build_ids 1
-%{debug_package}
+%package debuginfo
+Summary: Debug information package for the Linux kernel
+%description debuginfo
+This package provides debug information for the kernel image and modules from the
+%{version} package.
%endif
-# some (but not all) versions of rpmbuild emit %%debug_package with
-# %%install. since we've already emitted it manually, that would cause
-# a package redefinition error. ensure that doesn't happen
-%define debug_package %{nil}
-
-# later, we make all modules executable so that find-debuginfo.sh strips
-# them up. but they don't actually need to be executable, so remove the
-# executable bit, taking care to do it _after_ find-debuginfo.sh has run
-%define __spec_install_post \
- %{?__debug_package:%{__debug_install_post}} \
- %{__arch_install_post} \
- %{__os_install_post} \
- find %{buildroot}/lib/modules/%{KERNELRELEASE} -name "*.ko" -type f \\\
- | xargs --no-run-if-empty chmod u-x
%prep
%setup -q -n linux
@@ -87,7 +67,7 @@ patch -p1 < %{SOURCE2}
mkdir -p %{buildroot}/lib/modules/%{KERNELRELEASE}
cp $(%{make} %{makeflags} -s image_name) %{buildroot}/lib/modules/%{KERNELRELEASE}/vmlinuz
# DEPMOD=true makes depmod no-op. We do not package depmod-generated files.
-%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} DEPMOD=true modules_install
+%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} INSTALL_MOD_STRIP=1 DEPMOD=true modules_install
%{make} %{makeflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
cp System.map %{buildroot}/lib/modules/%{KERNELRELEASE}
cp .config %{buildroot}/lib/modules/%{KERNELRELEASE}/config
@@ -118,22 +98,31 @@ ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEA
echo "%exclude /lib/modules/%{KERNELRELEASE}/build"
} > %{buildroot}/kernel.list
-# make modules executable so that find-debuginfo.sh strips them. this
-# will be undone later in %%__spec_install_post
-find %{buildroot}/lib/modules/%{KERNELRELEASE} -name "*.ko" -type f \
- | xargs --no-run-if-empty chmod u+x
-
%if %{with_debuginfo}
# copying vmlinux directly to the debug directory means it will not get
# stripped (but its source paths will still be collected + fixed up)
mkdir -p %{buildroot}/usr/lib/debug/lib/modules/%{KERNELRELEASE}
cp vmlinux %{buildroot}/usr/lib/debug/lib/modules/%{KERNELRELEASE}
+
+echo /usr/lib/debug/lib/modules/%{KERNELRELEASE}/vmlinux > %{buildroot}/debuginfo.list
+
+while read -r mod; do
+ mod="${mod%.o}.ko"
+ dbg="%{buildroot}/usr/lib/debug/lib/modules/%{KERNELRELEASE}/kernel/${mod}"
+ buildid=$("${READELF}" -n "${mod}" | sed -n 's@^.*Build ID: \(..\)\(.*\)@\1/\2@p')
+ link="%{buildroot}/usr/lib/debug/.build-id/${buildid}.debug"
+
+ mkdir -p "${dbg%/*}" "${link%/*}"
+ "${OBJCOPY}" --only-keep-debug "${mod}" "${dbg}"
+ ln -sf --relative "${dbg}" "${link}"
+
+ echo "${dbg#%{buildroot}}" >> %{buildroot}/debuginfo.list
+ echo "${link#%{buildroot}}" >> %{buildroot}/debuginfo.list
+done < modules.order
%endif
%clean
rm -rf %{buildroot}
-rm -f debugfiles.list debuglinks.list debugsourcefiles.list debugsources.list \
- elfbins.list
%post
if [ -x /usr/bin/kernel-install ]; then
@@ -172,3 +161,9 @@ fi
/usr/src/kernels/%{KERNELRELEASE}
/lib/modules/%{KERNELRELEASE}/build
%endif
+
+%if %{with_debuginfo}
+%files -f %{buildroot}/debuginfo.list debuginfo
+%defattr (-, root, root)
+%exclude /debuginfo.list
+%endif
diff --git a/scripts/rustdoc_test_gen.rs b/scripts/rustdoc_test_gen.rs
index be0561049660..6fd9f5c84e2e 100644
--- a/scripts/rustdoc_test_gen.rs
+++ b/scripts/rustdoc_test_gen.rs
@@ -206,7 +206,7 @@ pub extern "C" fn {kunit_name}(__kunit_test: *mut ::kernel::bindings::kunit) {{
/// The anchor where the test code body starts.
#[allow(unused)]
- static __DOCTEST_ANCHOR: i32 = ::core::line!() as i32 + {body_offset} + 1;
+ static __DOCTEST_ANCHOR: i32 = ::core::line!() as i32 + {body_offset} + 2;
{{
#![allow(unreachable_pub, clippy::disallowed_names)]
{body}
diff --git a/security/lsm.h b/security/lsm.h
index 81aadbc61685..db77cc83e158 100644
--- a/security/lsm.h
+++ b/security/lsm.h
@@ -37,15 +37,6 @@ int lsm_task_alloc(struct task_struct *task);
/* LSM framework initializers */
-#ifdef CONFIG_MMU
-int min_addr_init(void);
-#else
-static inline int min_addr_init(void)
-{
- return 0;
-}
-#endif /* CONFIG_MMU */
-
#ifdef CONFIG_SECURITYFS
int securityfs_init(void);
#else
diff --git a/security/lsm_init.c b/security/lsm_init.c
index 05bd52e6b1f2..573e2a7250c4 100644
--- a/security/lsm_init.c
+++ b/security/lsm_init.c
@@ -489,12 +489,7 @@ int __init security_init(void)
*/
static int __init security_initcall_pure(void)
{
- int rc_adr, rc_lsm;
-
- rc_adr = min_addr_init();
- rc_lsm = lsm_initcall(pure);
-
- return (rc_adr ? rc_adr : rc_lsm);
+ return lsm_initcall(pure);
}
pure_initcall(security_initcall_pure);
diff --git a/security/min_addr.c b/security/min_addr.c
index 0fde5ec9abc8..56e4f9d25929 100644
--- a/security/min_addr.c
+++ b/security/min_addr.c
@@ -5,8 +5,6 @@
#include <linux/sysctl.h>
#include <linux/minmax.h>
-#include "lsm.h"
-
/* amount of vm to protect from userspace access by both DAC and the LSM*/
unsigned long mmap_min_addr;
/* amount of vm to protect from userspace using CAP_SYS_RAWIO (DAC) */
@@ -54,10 +52,11 @@ static const struct ctl_table min_addr_sysctl_table[] = {
},
};
-int __init min_addr_init(void)
+static int __init mmap_min_addr_init(void)
{
register_sysctl_init("vm", min_addr_sysctl_table);
update_mmap_min_addr();
return 0;
}
+pure_initcall(mmap_min_addr_init);
diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c
index cafa48b5aceb..0a0496deb9c8 100644
--- a/sound/hda/codecs/realtek/alc269.c
+++ b/sound/hda/codecs/realtek/alc269.c
@@ -3383,11 +3383,22 @@ static void alc287_alc1318_playback_pcm_hook(struct hda_pcm_stream *hinfo,
struct snd_pcm_substream *substream,
int action)
{
+ static const struct coef_fw dis_coefs[] = {
+ WRITE_COEF(0x24, 0x0013), WRITE_COEF(0x25, 0x0000), WRITE_COEF(0x26, 0xC203),
+ WRITE_COEF(0x28, 0x0004), WRITE_COEF(0x29, 0xb023),
+ }; /* Disable AMP silence detection */
+ static const struct coef_fw en_coefs[] = {
+ WRITE_COEF(0x24, 0x0013), WRITE_COEF(0x25, 0x0000), WRITE_COEF(0x26, 0xC203),
+ WRITE_COEF(0x28, 0x0084), WRITE_COEF(0x29, 0xb023),
+ }; /* Enable AMP silence detection */
+
switch (action) {
case HDA_GEN_PCM_ACT_OPEN:
+ alc_process_coef_fw(codec, dis_coefs);
alc_write_coefex_idx(codec, 0x5a, 0x00, 0x954f); /* write gpio3 to high */
break;
case HDA_GEN_PCM_ACT_CLOSE:
+ alc_process_coef_fw(codec, en_coefs);
alc_write_coefex_idx(codec, 0x5a, 0x00, 0x554f); /* write gpio3 as default value */
break;
}
@@ -6739,6 +6750,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c8d, "HP ProBook 440 G11", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c8e, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c8f, "HP EliteBook 630 G11", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c90, "HP EliteBook 640", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
@@ -7336,6 +7348,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1d05, 0x1409, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1d05, 0x300f, "TongFang X6AR5xxY", ALC2XX_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1d05, 0x3019, "TongFang X6FR5xxY", ALC2XX_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d05, 0x3031, "TongFang X6AR55xU", ALC2XX_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1d17, 0x3288, "Haier Boyue G42", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -7346,6 +7359,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1ee7, 0x2078, "HONOR BRB-X M1010", ALC2XX_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x2014, 0x800a, "Positivo ARN50", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x2039, 0x0001, "Inspur S14-G1", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13),
SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
@@ -7805,10 +7819,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60140},
{0x19, 0x04a11030},
{0x21, 0x04211020}),
- SND_HDA_PIN_QUIRK(0x10ec0274, 0x1d05, "TongFang", ALC274_FIXUP_HP_HEADSET_MIC,
- {0x17, 0x90170110},
- {0x19, 0x03a11030},
- {0x21, 0x03211020}),
SND_HDA_PIN_QUIRK(0x10ec0282, 0x1025, "Acer", ALC282_FIXUP_ACER_DISABLE_LINEOUT,
ALC282_STANDARD_PINS,
{0x12, 0x90a609c0},
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 0294177acc66..c18da0915baa 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -545,6 +545,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK PM1503CDA"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16z-n000"),
}
@@ -675,6 +682,14 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "GOH-X"),
}
},
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "RB"),
+ DMI_MATCH(DMI_BOARD_NAME, "XyloD5_RBU"),
+ }
+ },
+
{}
};
diff --git a/sound/soc/codecs/cs35l45.c b/sound/soc/codecs/cs35l45.c
index e33f11435980..7aa558d6362f 100644
--- a/sound/soc/codecs/cs35l45.c
+++ b/sound/soc/codecs/cs35l45.c
@@ -453,7 +453,7 @@ static const struct snd_soc_dapm_widget cs35l45_dapm_widgets[] = {
SND_SOC_DAPM_AIF_OUT("ASP_TX2", NULL, 1, CS35L45_ASP_ENABLES1, CS35L45_ASP_TX2_EN_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("ASP_TX3", NULL, 2, CS35L45_ASP_ENABLES1, CS35L45_ASP_TX3_EN_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("ASP_TX4", NULL, 3, CS35L45_ASP_ENABLES1, CS35L45_ASP_TX4_EN_SHIFT, 0),
- SND_SOC_DAPM_AIF_OUT("ASP_TX5", NULL, 3, CS35L45_ASP_ENABLES1, CS35L45_ASP_TX5_EN_SHIFT, 0),
+ SND_SOC_DAPM_AIF_OUT("ASP_TX5", NULL, 4, CS35L45_ASP_ENABLES1, CS35L45_ASP_TX5_EN_SHIFT, 0),
SND_SOC_DAPM_MUX("ASP_TX1 Source", SND_SOC_NOPM, 0, 0, &cs35l45_asp_muxes[0]),
SND_SOC_DAPM_MUX("ASP_TX2 Source", SND_SOC_NOPM, 0, 0, &cs35l45_asp_muxes[1]),
diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
index 28699d7b75ca..05b4e971a366 100644
--- a/sound/soc/fsl/imx-card.c
+++ b/sound/soc/fsl/imx-card.c
@@ -346,7 +346,6 @@ static int imx_aif_hw_params(struct snd_pcm_substream *substream,
SND_SOC_DAIFMT_PDM;
} else {
slots = 2;
- slot_width = params_physical_width(params);
fmt = (rtd->dai_link->dai_fmt & ~SND_SOC_DAIFMT_FORMAT_MASK) |
SND_SOC_DAIFMT_I2S;
}
diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c
index 774fff58d51b..fce50fd9f093 100644
--- a/sound/soc/intel/boards/sof_es8336.c
+++ b/sound/soc/intel/boards/sof_es8336.c
@@ -120,7 +120,7 @@ static void pcm_pop_work_events(struct work_struct *work)
gpiod_set_value_cansleep(priv->gpio_speakers, priv->speaker_en);
if (quirk & SOF_ES8336_HEADPHONE_GPIO)
- gpiod_set_value_cansleep(priv->gpio_headphone, priv->speaker_en);
+ gpiod_set_value_cansleep(priv->gpio_headphone, !priv->speaker_en);
}
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 8721a098d53f..50b838be24e9 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -838,6 +838,7 @@ static const struct snd_pci_quirk sof_sdw_ssid_quirk_table[] = {
SND_PCI_QUIRK(0x17aa, 0x2347, "Lenovo P16", SOC_SDW_CODEC_MIC),
SND_PCI_QUIRK(0x17aa, 0x2348, "Lenovo P16", SOC_SDW_CODEC_MIC),
SND_PCI_QUIRK(0x17aa, 0x2349, "Lenovo P1", SOC_SDW_CODEC_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3821, "Lenovo 0x3821", SOC_SDW_SIDECAR_AMPS),
{}
};
diff --git a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
index 060955825fe0..e297c8ecedb7 100644
--- a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
@@ -442,7 +442,7 @@ static const struct snd_soc_acpi_adr_device rt1320_2_group2_adr[] = {
.adr = 0x000230025D132001ull,
.num_endpoints = 1,
.endpoints = &spk_r_endpoint,
- .name_prefix = "rt1320-1"
+ .name_prefix = "rt1320-2"
}
};
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 3f7999317f4d..719ec727efd4 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -197,7 +197,8 @@ static bool is_rust_noreturn(const struct symbol *func)
* as well as changes to the source code itself between versions (since
* these come from the Rust standard library).
*/
- return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
+ return str_ends_with(func->name, "_4core3num22from_ascii_radix_panic") ||
+ str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
str_ends_with(func->name, "_4core6option13expect_failed") ||
str_ends_with(func->name, "_4core6option13unwrap_failed") ||
str_ends_with(func->name, "_4core6result13unwrap_failed") ||
diff --git a/tools/objtool/disas.c b/tools/objtool/disas.c
index 2b5059f55e40..26f08d41f2b1 100644
--- a/tools/objtool/disas.c
+++ b/tools/objtool/disas.c
@@ -108,6 +108,8 @@ static int sprint_name(char *str, const char *name, unsigned long offset)
#define DINFO_FPRINTF(dinfo, ...) \
((*(dinfo)->fprintf_func)((dinfo)->stream, __VA_ARGS__))
+#define bfd_vma_fmt \
+ __builtin_choose_expr(sizeof(bfd_vma) == sizeof(unsigned long), "%#lx <%s>", "%#llx <%s>")
static int disas_result_fprintf(struct disas_context *dctx,
const char *fmt, va_list ap)
@@ -170,10 +172,10 @@ static void disas_print_addr_sym(struct section *sec, struct symbol *sym,
if (sym) {
sprint_name(symstr, sym->name, addr - sym->offset);
- DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, symstr);
+ DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, symstr);
} else {
str = offstr(sec, addr);
- DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, str);
+ DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, str);
free(str);
}
}
@@ -252,7 +254,7 @@ static void disas_print_addr_reloc(bfd_vma addr, struct disassemble_info *dinfo)
* example: "lea 0x0(%rip),%rdi". The kernel can reference
* the next IP with _THIS_IP_ macro.
*/
- DINFO_FPRINTF(dinfo, "0x%lx <_THIS_IP_>", addr);
+ DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, "_THIS_IP_");
return;
}
@@ -264,11 +266,11 @@ static void disas_print_addr_reloc(bfd_vma addr, struct disassemble_info *dinfo)
*/
if (reloc->sym->type == STT_SECTION) {
str = offstr(reloc->sym->sec, reloc->sym->offset + offset);
- DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, str);
+ DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, str);
free(str);
} else {
sprint_name(symstr, reloc->sym->name, offset);
- DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, symstr);
+ DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, symstr);
}
}
@@ -311,7 +313,7 @@ static void disas_print_address(bfd_vma addr, struct disassemble_info *dinfo)
*/
sym = insn_call_dest(insn);
if (sym && (sym->offset == addr || (sym->offset == 0 && is_reloc))) {
- DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, sym->name);
+ DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, sym->name);
return;
}
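
[editor's note] The disas.c change picks the printf format for bfd_vma at compile time with __builtin_choose_expr, because bfd_vma is unsigned long with some binutils builds and unsigned long long with others. A small standalone illustration of that GCC/Clang construct follows, using a hypothetical my_vma typedef in place of bfd_vma.

#include <stdio.h>

/* Stand-in for bfd_vma; flip the typedef to exercise the other branch. */
typedef unsigned long long my_vma;

/*
 * __builtin_choose_expr() selects one of the two format strings at compile
 * time, so the chosen specifier matches the width of my_vma on both 32-bit
 * and 64-bit definitions and the unused literal is simply discarded.
 */
#define my_vma_fmt \
	__builtin_choose_expr(sizeof(my_vma) == sizeof(unsigned long), \
			      "addr: %#lx <%s>\n", "addr: %#llx <%s>\n")

int main(void)
{
	my_vma addr = 0xffffffff81000000ULL;

	printf(my_vma_fmt, addr, "_stext");
	return 0;
}
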
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 6a8ed9c62323..2c02c7b49265 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -18,15 +18,14 @@
#include <errno.h>
#include <libgen.h>
#include <ctype.h>
+#include <linux/align.h>
+#include <linux/kernel.h>
#include <linux/interval_tree_generic.h>
+#include <linux/log2.h>
#include <objtool/builtin.h>
#include <objtool/elf.h>
#include <objtool/warn.h>
-#define ALIGN_UP(x, align_to) (((x) + ((align_to)-1)) & ~((align_to)-1))
-#define ALIGN_UP_POW2(x) (1U << ((8 * sizeof(x)) - __builtin_clz((x) - 1U)))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-
static inline u32 str_hash(const char *str)
{
return jhash(str, strlen(str), 0);
@@ -1336,7 +1335,7 @@ unsigned int elf_add_string(struct elf *elf, struct section *strtab, const char
return -1;
}
- offset = ALIGN_UP(strtab->sh.sh_size, strtab->sh.sh_addralign);
+ offset = ALIGN(strtab->sh.sh_size, strtab->sh.sh_addralign);
if (!elf_add_data(elf, strtab, str, strlen(str) + 1))
return -1;
@@ -1378,7 +1377,7 @@ void *elf_add_data(struct elf *elf, struct section *sec, const void *data, size_
sec->data->d_size = size;
sec->data->d_align = 1;
- offset = ALIGN_UP(sec->sh.sh_size, sec->sh.sh_addralign);
+ offset = ALIGN(sec->sh.sh_size, sec->sh.sh_addralign);
sec->sh.sh_size = offset + size;
mark_sec_changed(elf, sec, true);
@@ -1502,7 +1501,7 @@ static int elf_alloc_reloc(struct elf *elf, struct section *rsec)
rsec->data->d_size = nr_relocs_new * elf_rela_size(elf);
rsec->sh.sh_size = rsec->data->d_size;
- nr_alloc = MAX(64, ALIGN_UP_POW2(nr_relocs_new));
+ nr_alloc = max(64UL, roundup_pow_of_two(nr_relocs_new));
if (nr_alloc <= rsec->nr_alloc_relocs)
return 0;
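
[editor's note] The elf.c cleanup drops the hand-rolled ALIGN_UP/ALIGN_UP_POW2/MAX macros in favour of the shared linux/align.h and linux/log2.h helpers. The arithmetic behind them is worth a quick recap: for a power-of-two alignment a, align-up is (x + a - 1) & ~(a - 1). The tiny hedged check below verifies that identity plus next-power-of-two rounding; next_pow2 is a local helper for the sketch, not the kernel's roundup_pow_of_two().

#include <assert.h>
#include <stdio.h>

/* Align x up to the next multiple of a; a must be a power of two. */
static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

/* Smallest power of two >= x, for x >= 1 (loop form, for clarity only). */
static unsigned long next_pow2(unsigned long x)
{
	unsigned long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	assert(align_up(13, 8) == 16);
	assert(align_up(16, 8) == 16);   /* already aligned: unchanged */
	assert(next_pow2(65) == 128);
	assert(next_pow2(64) == 64);
	printf("alignment identities hold\n");
	return 0;
}
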
diff --git a/tools/objtool/klp-diff.c b/tools/objtool/klp-diff.c
index 4d1f9e9977eb..d94531e3f64e 100644
--- a/tools/objtool/klp-diff.c
+++ b/tools/objtool/klp-diff.c
@@ -1425,9 +1425,6 @@ static int clone_special_sections(struct elfs *e)
{
struct section *patched_sec;
- if (create_fake_symbols(e->patched))
- return -1;
-
for_each_sec(e->patched, patched_sec) {
if (is_special_section(patched_sec)) {
if (clone_special_section(e, patched_sec))
@@ -1704,6 +1701,17 @@ int cmd_klp_diff(int argc, const char **argv)
if (!e.out)
return -1;
+ /*
+ * Special section fake symbols are needed so that individual special
+ * section entries can be extracted by clone_special_sections().
+ *
+ * Note the fake symbols are also needed by clone_included_functions()
+ * because __WARN_printf() call sites add references to bug table
+ * entries in the calling functions.
+ */
+ if (create_fake_symbols(e.patched))
+ return -1;
+
if (clone_included_functions(&e))
return -1;
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index ba5c2b643efa..d45bf4ccb3bf 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -251,6 +251,7 @@ LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-Wno-gnu-variable-sized-type-not-at-end -MD -MP -DCONFIG_64BIT \
+ -U_FORTIFY_SOURCE \
-fno-builtin-memcmp -fno-builtin-memcpy \
-fno-builtin-memset -fno-builtin-strnlen \
-fno-stack-protector -fno-PIE -fno-strict-aliasing \
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
index a39fdc4aa2ff..9b722c1e4b0f 100755
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -162,6 +162,39 @@ run_test() {
echo " ok"
}
+run_test_csum() {
+ local -r msg="$1"
+ local -r dst="$2"
+ local csum_error_filter=UdpInCsumErrors
+ local csum_errors
+
+ printf "%-40s" "$msg"
+
+ is_ipv6 "$dst" && csum_error_filter=Udp6InCsumErrors
+
+ ip netns exec "$NS_DST" iperf3 -s -1 >/dev/null &
+ wait_local_port_listen "$NS_DST" 5201 tcp
+ local spid="$!"
+ ip netns exec "$NS_SRC" iperf3 -c "$dst" -t 2 >/dev/null
+ local retc="$?"
+ wait "$spid"
+ local rets="$?"
+ if [ "$rets" -ne 0 ] || [ "$retc" -ne 0 ]; then
+ echo " fail client exit code $retc, server $rets"
+ ret=1
+ return
+ fi
+
+ csum_errors=$(ip netns exec "$NS_DST" nstat -as "$csum_error_filter" |
+ grep "$csum_error_filter" | awk '{print $2}')
+ if [ -n "$csum_errors" ] && [ "$csum_errors" -gt 0 ]; then
+ echo " fail - csum error on receive $csum_errors, expected 0"
+ ret=1
+ return
+ fi
+ echo " ok"
+}
+
run_bench() {
local -r msg=$1
local -r dst=$2
@@ -260,6 +293,37 @@ for family in 4 6; do
ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 10 10 $OL_NET$DST
cleanup
+
+ # force segmentation and re-aggregation
+ create_vxlan_pair
+ ip netns exec "$NS_DST" ethtool -K veth"$DST" generic-receive-offload on
+ ip netns exec "$NS_SRC" ethtool -K veth"$SRC" tso off
+ ip -n "$NS_SRC" link set dev veth"$SRC" mtu 1430
+
+ # forward to a 2nd veth pair
+ ip -n "$NS_DST" link add br0 type bridge
+ ip -n "$NS_DST" link set dev veth"$DST" master br0
+
+ # segment the aggregated TSO packet, without csum offload
+ ip -n "$NS_DST" link add veth_segment type veth peer veth_rx
+ for FEATURE in tso tx-udp-segmentation tx-checksumming; do
+ ip netns exec "$NS_DST" ethtool -K veth_segment "$FEATURE" off
+ done
+ ip -n "$NS_DST" link set dev veth_segment master br0 up
+ ip -n "$NS_DST" link set dev br0 up
+ ip -n "$NS_DST" link set dev veth_rx up
+
+ # move the lower layer IP in the last added veth
+ for ADDR in "$BM_NET_V4$DST/24" "$BM_NET_V6$DST/64"; do
+	# the dad argument will let iproute emit a harmless warning
+ # with ipv4 addresses
+ ip -n "$NS_DST" addr del dev veth"$DST" "$ADDR"
+ ip -n "$NS_DST" addr add dev veth_rx "$ADDR" \
+ nodad 2>/dev/null
+ done
+
+ run_test_csum "GSO after GRO" "$OL_NET$DST"
+ cleanup
done
exit $ret
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 0e8b5277be3b..a369b20d47f0 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -157,21 +157,28 @@ irqfd_shutdown(struct work_struct *work)
}
-/* assumes kvm->irqfds.lock is held */
-static bool
-irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
+static bool irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
+ /*
+ * Assert that either irqfds.lock or SRCU is held, as irqfds.lock must
+ * be held to prevent false positives (on the irqfd being active), and
+ * while false negatives are impossible as irqfds are never added back
+ * to the list once they're deactivated, the caller must at least hold
+ * SRCU to guard against routing changes if the irqfd is deactivated.
+ */
+ lockdep_assert_once(lockdep_is_held(&irqfd->kvm->irqfds.lock) ||
+ srcu_read_lock_held(&irqfd->kvm->irq_srcu));
+
return list_empty(&irqfd->list) ? false : true;
}
/*
* Mark the irqfd as inactive and schedule it for removal
- *
- * assumes kvm->irqfds.lock is held
*/
-static void
-irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
+static void irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
+ lockdep_assert_held(&irqfd->kvm->irqfds.lock);
+
BUG_ON(!irqfd_is_active(irqfd));
list_del_init(&irqfd->list);
@@ -217,8 +224,15 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
seq = read_seqcount_begin(&irqfd->irq_entry_sc);
irq = irqfd->irq_entry;
} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
- /* An event has been signaled, inject an interrupt */
- if (kvm_arch_set_irq_inatomic(&irq, kvm,
+
+ /*
+ * An event has been signaled, inject an interrupt unless the
+ * irqfd is being deassigned (isn't active), in which case the
+ * routing information may be stale (once the irqfd is removed
+ * from the list, it will stop receiving routing updates).
+ */
+ if (unlikely(!irqfd_is_active(irqfd)) ||
+ kvm_arch_set_irq_inatomic(&irq, kvm,
KVM_USERSPACE_IRQ_SOURCE_ID, 1,
false) == -EWOULDBLOCK)
schedule_work(&irqfd->inject);
@@ -585,18 +599,8 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
spin_lock_irq(&kvm->irqfds.lock);
list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
- if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
- /*
- * This clearing of irq_entry.type is needed for when
- * another thread calls kvm_irq_routing_update before
- * we flush workqueue below (we synchronize with
- * kvm_irq_routing_update using irqfds.lock).
- */
- write_seqcount_begin(&irqfd->irq_entry_sc);
- irqfd->irq_entry.type = 0;
- write_seqcount_end(&irqfd->irq_entry_sc);
+ if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi)
irqfd_deactivate(irqfd);
- }
}
spin_unlock_irq(&kvm->irqfds.lock);