195 files changed, 1496 insertions, 619 deletions
@@ -644,6 +644,7 @@ Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
 Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
 Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
+Rae Moar <raemoar63@gmail.com> <rmoar@google.com>
 Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
 Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
 Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org>
diff --git a/Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml b/Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml
index 23624f32ac30..769e4cb5b99b 100644
--- a/Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml
@@ -32,7 +32,7 @@ properties:
     $ref: /schemas/types.yaml#/definitions/uint32-array
     minItems: 2
-    maxItems: 2
+    maxItems: 4
     items:
       enum: [1, 2, 3, 4]
@@ -48,7 +48,7 @@ properties:
     $ref: /schemas/types.yaml#/definitions/uint32-array
     minItems: 2
-    maxItems: 2
+    maxItems: 5
     items:
       enum: [1, 2, 3, 4, 5]
diff --git a/MAINTAINERS b/MAINTAINERS
index 92e9cd1a363b..ca428cd68b04 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12522,6 +12522,7 @@ F: include/linux/avf/virtchnl.h
 F: include/linux/net/intel/*/
 
 INTEL ETHERNET PROTOCOL DRIVER FOR RDMA
+M: Krzysztof Czurylo <krzysztof.czurylo@intel.com>
 M: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
 L: linux-rdma@vger.kernel.org
 S: Supported
@@ -12862,7 +12863,8 @@ F: tools/testing/selftests/sgx/*
 K: \bSGX_
 
 INTEL SKYLAKE INT3472 ACPI DEVICE DRIVER
-M: Daniel Scally <djrscally@gmail.com>
+M: Daniel Scally <dan.scally@ideasonboard.com>
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
 S: Maintained
 F: drivers/platform/x86/intel/int3472/
 F: include/linux/platform_data/x86/int3472.h
@@ -13426,9 +13428,12 @@ F: mm/kasan/
 F: scripts/Makefile.kasan
 
 KCONFIG
+M: Nathan Chancellor <nathan@kernel.org>
+M: Nicolas Schier <nsc@kernel.org>
 L: linux-kbuild@vger.kernel.org
-S: Orphan
+S: Odd Fixes
 Q: https://patchwork.kernel.org/project/linux-kbuild/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux.git
 F: Documentation/kbuild/kconfig*
 F: scripts/Kconfig.include
 F: scripts/kconfig/
@@ -13613,7 +13618,7 @@ F: fs/smb/server/
 KERNEL UNIT TESTING FRAMEWORK (KUnit)
 M: Brendan Higgins <brendan.higgins@linux.dev>
 M: David Gow <davidgow@google.com>
-R: Rae Moar <rmoar@google.com>
+R: Rae Moar <raemoar63@gmail.com>
 L: linux-kselftest@vger.kernel.org
 L: kunit-dev@googlegroups.com
 S: Maintained
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Baby Opossum Posse
 
 # *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 74ff01133532..61130b88964b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -917,6 +917,13 @@ config ARCH_USES_CFI_TRAPS
	  An architecture should select this option if it requires the
	  .kcfi_traps section for KCFI trap handling.
 
+config ARCH_USES_CFI_GENERIC_LLVM_PASS
+	bool
+	help
+	  An architecture should select this option if it uses the generic
+	  KCFIPass in LLVM to expand kCFI bundles instead of architecture-specific
+	  lowering.
+
 config CFI
	bool "Use Kernel Control Flow Integrity (kCFI)"
	default CFI_CLANG
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2e3f93b690f4..4fb985b76e97 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -44,6 +44,8 @@ config ARM
	select ARCH_USE_BUILTIN_BSWAP
	select ARCH_USE_CMPXCHG_LOCKREF
	select ARCH_USE_MEMTEST
+	# https://github.com/llvm/llvm-project/commit/d130f402642fba3d065aacb506cb061c899558de
+	select ARCH_USES_CFI_GENERIC_LLVM_PASS if CLANG_VERSION < 220000
	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
	select ARCH_WANT_GENERAL_HUGETLB
	select ARCH_WANT_IPC_PARSE_VERSION
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index ab83089c3d8f..0c9a50a1e73e 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1213,6 +1213,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
	u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
+	const u8 tmp3 = bpf2a64[TMP_REG_3];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
	const u8 priv_sp = bpf2a64[PRIVATE_SP];
@@ -1757,8 +1758,8 @@ emit_cond_jmp:
	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
	case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
		if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
-			emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
-			dst = tmp2;
+			emit(A64_ADD(1, tmp3, dst, arena_vm_base), ctx);
+			dst = tmp3;
		}
		if (dst == fp) {
			dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index dc5bd3f1b8d2..96ca1a688984 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -109,7 +109,7 @@ endif
 ifdef CONFIG_RUSTC_HAS_ANNOTATE_TABLEJUMP
 KBUILD_RUSTFLAGS += -Cllvm-args=--loongarch-annotate-tablejump
 else
-KBUILD_RUSTFLAGS += -Zno-jump-tables # keep compatibility with older compilers
+KBUILD_RUSTFLAGS += $(if $(call rustc-min-version,109300),-Cjump-tables=n,-Zno-jump-tables) # keep compatibility with older compilers
 endif
 ifdef CONFIG_LTO_CLANG
 # The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.
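The ARCH_USES_CFI_GENERIC_LLVM_PASS plumbing above only changes how the compiler lowers kCFI bundles; the property being enforced is the same either way: an indirect call may only land on a function whose prototype matches the call site. Below is a minimal standalone sketch of the kind of mismatch kCFI rejects. Building it with a kCFI-capable Clang (the -fsanitize=kcfi spelling is an assumption of this sketch) is not something the patch itself sets up.

/*
 * Plain C sketch: the call through 'bad' uses a function-pointer type that
 * does not match add()'s prototype. Under a kCFI-instrumented build the
 * type check at the call site would trap; without instrumentation it is
 * simply undefined behaviour.
 */
#include <stdio.h>

typedef int (*two_arg_fn)(int, int);
typedef long (*one_arg_fn)(long);

static int add(int a, int b)
{
	return a + b;
}

int main(void)
{
	two_arg_fn good = add;			/* matching type: check passes */
	one_arg_fn bad = (one_arg_fn)add;	/* mismatched type: kCFI would trap */

	printf("good: %d\n", good(2, 3));
	printf("bad:  %ld\n", bad(2));
	return 0;
}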
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c4145672ca34..df22b10d9141 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -158,7 +158,6 @@ config S390 select ARCH_WANT_IRQS_OFF_ACTIVATE_MM select ARCH_WANT_KERNEL_PMD_MKWRITE select ARCH_WANT_LD_ORPHAN_WARN - select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP select ARCH_WANTS_THP_SWAP select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS2 diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index b31c1df90257..8433f769f7e1 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -101,6 +101,7 @@ CONFIG_SLUB_STATS=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y +CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_CMA_DEBUGFS=y CONFIG_CMA_SYSFS=y @@ -123,12 +124,12 @@ CONFIG_TLS_DEVICE=y CONFIG_TLS_TOE=y CONFIG_XFRM_USER=m CONFIG_NET_KEY=m -CONFIG_XDP_SOCKETS=y -CONFIG_XDP_SOCKETS_DIAG=m -CONFIG_DIBS=y -CONFIG_DIBS_LO=y CONFIG_SMC=m CONFIG_SMC_DIAG=m +CONFIG_DIBS=y +CONFIG_DIBS_LO=y +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y @@ -472,6 +473,7 @@ CONFIG_SCSI_DH_EMC=m CONFIG_SCSI_DH_ALUA=m CONFIG_MD=y CONFIG_BLK_DEV_MD=y +CONFIG_MD_LLBITMAP=y # CONFIG_MD_BITMAP_FILE is not set CONFIG_MD_LINEAR=m CONFIG_MD_CLUSTER=m @@ -654,9 +656,12 @@ CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y CONFIG_JFS_STATISTICS=y CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set CONFIG_XFS_DEBUG=y CONFIG_GFS2_FS=m CONFIG_GFS2_FS_LOCKING_DLM=y @@ -666,7 +671,6 @@ CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_BTRFS_DEBUG=y CONFIG_BTRFS_ASSERT=y CONFIG_NILFS2_FS=m -CONFIG_FS_DAX=y CONFIG_EXPORTFS_BLOCK_OPS=y CONFIG_FS_ENCRYPTION=y CONFIG_FS_VERITY=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 161dad7ef211..4414dabd04a6 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -94,6 +94,7 @@ CONFIG_SLAB_BUCKETS=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y +CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_CMA_SYSFS=y CONFIG_CMA_AREAS=7 @@ -114,12 +115,12 @@ CONFIG_TLS_DEVICE=y CONFIG_TLS_TOE=y CONFIG_XFRM_USER=m CONFIG_NET_KEY=m -CONFIG_XDP_SOCKETS=y -CONFIG_XDP_SOCKETS_DIAG=m -CONFIG_DIBS=y -CONFIG_DIBS_LO=y CONFIG_SMC=m CONFIG_SMC_DIAG=m +CONFIG_DIBS=y +CONFIG_DIBS_LO=y +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y @@ -462,6 +463,7 @@ CONFIG_SCSI_DH_EMC=m CONFIG_SCSI_DH_ALUA=m CONFIG_MD=y CONFIG_BLK_DEV_MD=y +CONFIG_MD_LLBITMAP=y # CONFIG_MD_BITMAP_FILE is not set CONFIG_MD_LINEAR=m CONFIG_MD_CLUSTER=m @@ -644,16 +646,18 @@ CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y CONFIG_JFS_STATISTICS=y CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set CONFIG_GFS2_FS=m CONFIG_GFS2_FS_LOCKING_DLM=y CONFIG_OCFS2_FS=m CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m -CONFIG_FS_DAX=y CONFIG_EXPORTFS_BLOCK_OPS=y CONFIG_FS_ENCRYPTION=y CONFIG_FS_VERITY=y diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index ed0b137353ad..b5478267d6a7 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -33,7 +33,6 @@ CONFIG_NET=y CONFIG_DEVTMPFS=y 
CONFIG_DEVTMPFS_SAFE=y CONFIG_BLK_DEV_RAM=y -# CONFIG_DCSSBLK is not set # CONFIG_DASD is not set CONFIG_ENCLOSURE_SERVICES=y CONFIG_SCSI=y diff --git a/arch/s390/crypto/phmac_s390.c b/arch/s390/crypto/phmac_s390.c index 7ecfdc4fba2d..89f3e6d8fd89 100644 --- a/arch/s390/crypto/phmac_s390.c +++ b/arch/s390/crypto/phmac_s390.c @@ -169,11 +169,18 @@ struct kmac_sha2_ctx { u64 buflen[2]; }; +enum async_op { + OP_NOP = 0, + OP_UPDATE, + OP_FINAL, + OP_FINUP, +}; + /* phmac request context */ struct phmac_req_ctx { struct hash_walk_helper hwh; struct kmac_sha2_ctx kmac_ctx; - bool final; + enum async_op async_op; }; /* @@ -610,6 +617,7 @@ static int phmac_update(struct ahash_request *req) * using engine to serialize requests. */ if (rc == 0 || rc == -EKEYEXPIRED) { + req_ctx->async_op = OP_UPDATE; atomic_inc(&tfm_ctx->via_engine_ctr); rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); if (rc != -EINPROGRESS) @@ -647,8 +655,7 @@ static int phmac_final(struct ahash_request *req) * using engine to serialize requests. */ if (rc == 0 || rc == -EKEYEXPIRED) { - req->nbytes = 0; - req_ctx->final = true; + req_ctx->async_op = OP_FINAL; atomic_inc(&tfm_ctx->via_engine_ctr); rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); if (rc != -EINPROGRESS) @@ -676,13 +683,16 @@ static int phmac_finup(struct ahash_request *req) if (rc) goto out; + req_ctx->async_op = OP_FINUP; + /* Try synchronous operations if no active engine usage */ if (!atomic_read(&tfm_ctx->via_engine_ctr)) { rc = phmac_kmac_update(req, false); if (rc == 0) - req->nbytes = 0; + req_ctx->async_op = OP_FINAL; } - if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) { + if (!rc && req_ctx->async_op == OP_FINAL && + !atomic_read(&tfm_ctx->via_engine_ctr)) { rc = phmac_kmac_final(req, false); if (rc == 0) goto out; @@ -694,7 +704,7 @@ static int phmac_finup(struct ahash_request *req) * using engine to serialize requests. */ if (rc == 0 || rc == -EKEYEXPIRED) { - req_ctx->final = true; + /* req->async_op has been set to either OP_FINUP or OP_FINAL */ atomic_inc(&tfm_ctx->via_engine_ctr); rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); if (rc != -EINPROGRESS) @@ -855,15 +865,16 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq) /* * Three kinds of requests come in here: - * update when req->nbytes > 0 and req_ctx->final is false - * final when req->nbytes = 0 and req_ctx->final is true - * finup when req->nbytes > 0 and req_ctx->final is true - * For update and finup the hwh walk needs to be prepared and - * up to date but the actual nr of bytes in req->nbytes may be - * any non zero number. For final there is no hwh walk needed. + * 1. req->async_op == OP_UPDATE with req->nbytes > 0 + * 2. req->async_op == OP_FINUP with req->nbytes > 0 + * 3. req->async_op == OP_FINAL + * For update and finup the hwh walk has already been prepared + * by the caller. For final there is no hwh walk needed. 
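The rewritten comment above describes the new request bookkeeping in phmac: instead of overloading req->nbytes plus a bool, each request records an enum async_op and the engine path dispatches on it, with a finup request falling through into the final step once its update half is done. A small standalone sketch of that dispatch shape (all names are local to the sketch, not the driver's internals):

#include <stdio.h>

enum async_op { OP_NOP = 0, OP_UPDATE, OP_FINAL, OP_FINUP };

/* Stand-ins for the real update/final steps. */
static int do_update(const char *buf) { printf("update(%s)\n", buf); return 0; }
static int do_final(void)             { printf("final\n");          return 0; }

static int dispatch(enum async_op op, const char *buf)
{
	int rc;

	switch (op) {
	case OP_UPDATE:
	case OP_FINUP:
		rc = do_update(buf);
		if (rc)
			return rc;
		if (op == OP_UPDATE)
			break;
		/* FINUP: update done, finish like a plain final request. */
		/* fall through */
	case OP_FINAL:
		rc = do_final();
		break;
	default:
		rc = -1;	/* unknown/unsupported op */
		break;
	}
	return rc;
}

int main(void)
{
	dispatch(OP_UPDATE, "part1");
	dispatch(OP_FINUP, "part2");	/* runs update, then final */
	return 0;
}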
*/ - if (req->nbytes) { + switch (req_ctx->async_op) { + case OP_UPDATE: + case OP_FINUP: rc = phmac_kmac_update(req, true); if (rc == -EKEYEXPIRED) { /* @@ -880,10 +891,11 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq) hwh_advance(hwh, rc); goto out; } - req->nbytes = 0; - } - - if (req_ctx->final) { + if (req_ctx->async_op == OP_UPDATE) + break; + req_ctx->async_op = OP_FINAL; + fallthrough; + case OP_FINAL: rc = phmac_kmac_final(req, true); if (rc == -EKEYEXPIRED) { /* @@ -897,10 +909,14 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq) cond_resched(); return -ENOSPC; } + break; + default: + /* unknown/unsupported/unimplemented asynch op */ + return -EOPNOTSUPP; } out: - if (rc || req_ctx->final) + if (rc || req_ctx->async_op == OP_FINAL) memzero_explicit(kmac_ctx, sizeof(*kmac_ctx)); pr_debug("request complete with rc=%d\n", rc); local_bh_disable(); diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 6890925d5587..a32f465ecf73 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -145,7 +145,6 @@ struct zpci_dev { u8 has_resources : 1; u8 is_physfn : 1; u8 util_str_avail : 1; - u8 irqs_registered : 1; u8 tid_avail : 1; u8 rtr_avail : 1; /* Relaxed translation allowed */ unsigned int devfn; /* DEVFN part of the RID*/ diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index 9af2aae0a515..528d7c70979f 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -291,16 +291,14 @@ static int ptdump_cmp(const void *a, const void *b) static int add_marker(unsigned long start, unsigned long end, const char *name) { - size_t oldsize, newsize; - - oldsize = markers_cnt * sizeof(*markers); - newsize = oldsize + 2 * sizeof(*markers); - if (!oldsize) - markers = kvmalloc(newsize, GFP_KERNEL); - else - markers = kvrealloc(markers, newsize, GFP_KERNEL); - if (!markers) - goto error; + struct addr_marker *new; + size_t newsize; + + newsize = (markers_cnt + 2) * sizeof(*markers); + new = kvrealloc(markers, newsize, GFP_KERNEL); + if (!new) + return -ENOMEM; + markers = new; markers[markers_cnt].is_start = 1; markers[markers_cnt].start_address = start; markers[markers_cnt].size = end - start; @@ -312,9 +310,6 @@ static int add_marker(unsigned long start, unsigned long end, const char *name) markers[markers_cnt].name = name; markers_cnt++; return 0; -error: - markers_cnt = 0; - return -ENOMEM; } static int pt_dump_init(void) diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index b95376041501..27db1e72c623 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -188,7 +188,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev) * is unbound or probed and that userspace can't access its * configuration space while we perform recovery. 
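The add_marker() rework above (arch/s390/mm/dump_pagetables.c) is the standard realloc idiom: keep the kvrealloc() result in a temporary so a failed resize neither leaks nor overwrites the existing array, and the markers already recorded stay usable. A minimal userspace sketch of the same idiom with plain realloc() (names local to the sketch):

#include <stdlib.h>

struct marker {
	unsigned long start;
	const char *name;
};

static struct marker *markers;
static size_t markers_cnt;

/* Grow by one entry; on failure the existing entries remain intact. */
static int add_marker(unsigned long start, const char *name)
{
	size_t newsize = (markers_cnt + 1) * sizeof(*markers);
	struct marker *new = realloc(markers, newsize);

	if (!new)
		return -1;	/* markers is still the old, valid allocation */
	markers = new;

	markers[markers_cnt].start = start;
	markers[markers_cnt].name = name;
	markers_cnt++;
	return 0;
}

int main(void)
{
	add_marker(0x1000, "kernel image");
	add_marker(0x2000, "vmalloc area");
	free(markers);
	return 0;
}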
*/ - pci_dev_lock(pdev); + device_lock(&pdev->dev); if (pdev->error_state == pci_channel_io_perm_failure) { ers_res = PCI_ERS_RESULT_DISCONNECT; goto out_unlock; @@ -257,7 +257,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev) driver->err_handler->resume(pdev); pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED); out_unlock: - pci_dev_unlock(pdev); + device_unlock(&pdev->dev); zpci_report_status(zdev, "recovery", status_str); return ers_res; diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c index 84482a921332..e73be96ce5fe 100644 --- a/arch/s390/pci/pci_irq.c +++ b/arch/s390/pci/pci_irq.c @@ -107,9 +107,6 @@ static int zpci_set_irq(struct zpci_dev *zdev) else rc = zpci_set_airq(zdev); - if (!rc) - zdev->irqs_registered = 1; - return rc; } @@ -123,9 +120,6 @@ static int zpci_clear_irq(struct zpci_dev *zdev) else rc = zpci_clear_airq(zdev); - if (!rc) - zdev->irqs_registered = 0; - return rc; } @@ -427,8 +421,7 @@ bool arch_restore_msi_irqs(struct pci_dev *pdev) { struct zpci_dev *zdev = to_zpci(pdev); - if (!zdev->irqs_registered) - zpci_set_irq(zdev); + zpci_set_irq(zdev); return true; } diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 4db7e4bf69f5..1a27efcf3c20 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -75,7 +75,7 @@ export BITS # # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383 # -KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx +KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx -mno-sse4a KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2 @@ -98,7 +98,7 @@ ifeq ($(CONFIG_X86_KERNEL_IBT),y) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104816 # KBUILD_CFLAGS += $(call cc-option,-fcf-protection=branch -fno-jump-tables) -KBUILD_RUSTFLAGS += -Zcf-protection=branch -Zno-jump-tables +KBUILD_RUSTFLAGS += -Zcf-protection=branch $(if $(call rustc-min-version,109300),-Cjump-tables=n,-Zno-jump-tables) else KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) endif diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 28f5468a6ea3..fe65be0b9d9c 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -7596,6 +7596,7 @@ __init int intel_pmu_init(void) break; case INTEL_PANTHERLAKE_L: + case INTEL_WILDCATLAKE_L: pr_cont("Pantherlake Hybrid events, "); name = "pantherlake_hybrid"; goto lnl_common; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index c0b7ac1c7594..01bc59e9286c 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -317,7 +317,8 @@ static u64 __grt_latency_data(struct perf_event *event, u64 status, { u64 val; - WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big); + WARN_ON_ONCE(is_hybrid() && + hybrid_pmu(event->pmu)->pmu_type == hybrid_big); dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK; val = hybrid_var(event->pmu, pebs_data_source)[dse]; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index a762f7f5b161..d6c945cc5d07 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1895,6 +1895,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_MATCH_VFM(INTEL_ARROWLAKE_H, &mtl_uncore_init), X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_uncore_init), X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &ptl_uncore_init), + X86_MATCH_VFM(INTEL_WILDCATLAKE_L, &ptl_uncore_init), X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_uncore_init), 
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_uncore_init),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_uncore_init),
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index f32a0eca2ae5..950bfd006905 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -150,12 +150,12 @@
 #define INTEL_LUNARLAKE_M IFM(6, 0xBD) /* Lion Cove / Skymont */
 
-#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Crestmont */
+#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Darkmont */
 
 #define INTEL_WILDCATLAKE_L IFM(6, 0xD5)
 
-#define INTEL_NOVALAKE IFM(18, 0x01)
-#define INTEL_NOVALAKE_L IFM(18, 0x03)
+#define INTEL_NOVALAKE IFM(18, 0x01) /* Coyote Cove / Arctic Wolf */
+#define INTEL_NOVALAKE_L IFM(18, 0x03) /* Coyote Cove / Arctic Wolf */
 
 /* "Small Core" Processors (Atom/E-Core) */
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 015d23f3e01f..53f4089333f2 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -43,6 +43,9 @@ extern unsigned long __phys_addr_symbol(unsigned long);
 void clear_page_orig(void *page);
 void clear_page_rep(void *page);
 void clear_page_erms(void *page);
+KCFI_REFERENCE(clear_page_orig);
+KCFI_REFERENCE(clear_page_rep);
+KCFI_REFERENCE(clear_page_erms);
 
 static inline void clear_page(void *page)
 {
diff --git a/arch/x86/include/asm/runtime-const.h b/arch/x86/include/asm/runtime-const.h
index 8d983cfd06ea..e5a13dc8816e 100644
--- a/arch/x86/include/asm/runtime-const.h
+++ b/arch/x86/include/asm/runtime-const.h
@@ -2,6 +2,10 @@
 #ifndef _ASM_RUNTIME_CONST_H
 #define _ASM_RUNTIME_CONST_H
 
+#ifdef MODULE
+  #error "Cannot use runtime-const infrastructure from modules"
+#endif
+
 #ifdef __ASSEMBLY__
 
 .macro RUNTIME_CONST_PTR sym reg
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index c8a5ae35c871..641f45c22f9d 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -12,12 +12,12 @@
 #include <asm/cpufeatures.h>
 #include <asm/page.h>
 #include <asm/percpu.h>
-#include <asm/runtime-const.h>
 
-/*
- * Virtual variable: there's no actual backing store for this,
- * it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)'
- */
+#ifdef MODULE
+  #define runtime_const_ptr(sym) (sym)
+#else
+  #include <asm/runtime-const.h>
+#endif
 extern unsigned long USER_PTR_MAX;
 
 #ifdef CONFIG_ADDRESS_MASKING
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ccaa51ce63f6..8e36964a7721 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -516,7 +516,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
		setup_force_cpu_cap(X86_FEATURE_ZEN5);
		break;
	case 0x50 ... 0x5f:
-	case 0x90 ... 0xaf:
+	case 0x80 ... 0xaf:
	case 0xc0 ... 0xcf:
		setup_force_cpu_cap(X86_FEATURE_ZEN6);
		break;
@@ -1035,8 +1035,18 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
	}
 }
 
+static const struct x86_cpu_id zen5_rdseed_microcode[] = {
+	ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a),
+	ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054),
+};
+
 static void init_amd_zen5(struct cpuinfo_x86 *c)
 {
+	if (!x86_match_min_microcode_rev(zen5_rdseed_microcode)) {
+		clear_cpu_cap(c, X86_FEATURE_RDSEED);
+		msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
+		pr_emerg_once("RDSEED32 is broken. Disabling the corresponding CPUID bit.\n");
+	}
 }
 
 static void init_amd(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c7d3512914ca..02d97834a1d4 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -78,6 +78,10 @@
 DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
+/* Used for modules: built-in code uses runtime constants */
+unsigned long USER_PTR_MAX;
+EXPORT_SYMBOL(USER_PTR_MAX);
+
 u32 elf_hwcap2 __read_mostly;
 
 /* Number of siblings per CPU package */
@@ -2579,7 +2583,7 @@ void __init arch_cpu_finalize_init(void)
	alternative_instructions();
 
	if (IS_ENABLED(CONFIG_X86_64)) {
-		unsigned long USER_PTR_MAX = TASK_SIZE_MAX;
+		USER_PTR_MAX = TASK_SIZE_MAX;
 
		/*
		 * Enable this when LAM is gated on LASS support
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 28ed8c089024..b7c797dc94f4 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -233,13 +233,31 @@ static bool need_sha_check(u32 cur_rev)
	return true;
 }
 
+static bool cpu_has_entrysign(void)
+{
+	unsigned int fam = x86_family(bsp_cpuid_1_eax);
+	unsigned int model = x86_model(bsp_cpuid_1_eax);
+
+	if (fam == 0x17 || fam == 0x19)
+		return true;
+
+	if (fam == 0x1a) {
+		if (model <= 0x2f ||
+		    (0x40 <= model && model <= 0x4f) ||
+		    (0x60 <= model && model <= 0x6f))
+			return true;
+	}
+
+	return false;
+}
+
 static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
 {
	struct patch_digest *pd = NULL;
	u8 digest[SHA256_DIGEST_SIZE];
	int i;
 
-	if (x86_family(bsp_cpuid_1_eax) < 0x17)
+	if (!cpu_has_entrysign())
		return true;
 
	if (!need_sha_check(cur_rev))
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 1f71cc135e9a..e88eacb1b5bb 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -825,6 +825,9 @@ void fpu__clear_user_states(struct fpu *fpu)
	    !fpregs_state_valid(fpu, smp_processor_id()))
		os_xrstor_supervisor(fpu->fpstate);
 
+	/* Ensure XFD state is in sync before reloading XSTATE */
+	xfd_update_state(fpu->fpstate);
+
	/* Reset user states in registers.
*/ restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index d4c93d9e73e4..de5083cb1d37 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -2701,7 +2701,7 @@ emit_jmp: /* Update cleanup_addr */ ctx->cleanup_addr = proglen; if (bpf_prog_was_classic(bpf_prog) && - !capable(CAP_SYS_ADMIN)) { + !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) { u8 *ip = image + addrs[i - 1]; if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog)) diff --git a/block/blk-crypto.c b/block/blk-crypto.c index 4b1ad84d1b5a..3e7bf1974cbd 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -292,7 +292,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr) } if (!bio_crypt_check_alignment(bio)) { - bio->bi_status = BLK_STS_IOERR; + bio->bi_status = BLK_STS_INVAL; goto fail; } diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c index 47ea3ccc2142..a6dbf623e557 100644 --- a/drivers/acpi/acpi_mrrm.c +++ b/drivers/acpi/acpi_mrrm.c @@ -63,6 +63,9 @@ static __init int acpi_parse_mrrm(struct acpi_table_header *table) if (!mrrm) return -ENODEV; + if (mrrm->header.revision != 1) + return -EINVAL; + if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS) return -EOPNOTSUPP; diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 103f29661576..be8e7e18abca 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -1959,8 +1959,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video) struct acpi_video_device *dev; mutex_lock(&video->device_list_lock); - list_for_each_entry(dev, &video->video_device_list, entry) + list_for_each_entry(dev, &video->video_device_list, entry) { acpi_video_dev_remove_notify_handler(dev); + cancel_delayed_work_sync(&dev->switch_brightness_work); + } mutex_unlock(&video->device_list_lock); acpi_video_bus_stop_devices(video); diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 0a7026040188..3c6dd9b4ba0a 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -619,8 +619,10 @@ static int acpi_button_add(struct acpi_device *device) input_set_drvdata(input, device); error = input_register_device(input); - if (error) + if (error) { + input_free_device(input); goto err_remove_fs; + } switch (device->device_type) { case ACPI_BUS_TYPE_POWER_BUTTON: diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h index 8a28a72a7c6a..bedbab0e8e4e 100644 --- a/drivers/acpi/fan.h +++ b/drivers/acpi/fan.h @@ -49,6 +49,7 @@ struct acpi_fan_fst { }; struct acpi_fan { + acpi_handle handle; bool acpi4; bool has_fst; struct acpi_fan_fif fif; @@ -59,14 +60,14 @@ struct acpi_fan { struct device_attribute fine_grain_control; }; -int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst); +int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst); int acpi_fan_create_attributes(struct acpi_device *device); void acpi_fan_delete_attributes(struct acpi_device *device); #if IS_REACHABLE(CONFIG_HWMON) -int devm_acpi_fan_create_hwmon(struct acpi_device *device); +int devm_acpi_fan_create_hwmon(struct device *dev); #else -static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; }; +static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; }; #endif #endif diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c index c1afb7b5ed3d..9b7fa52f3c2a 100644 --- a/drivers/acpi/fan_attr.c +++ b/drivers/acpi/fan_attr.c @@ -55,7 +55,7 @@ static ssize_t show_fan_speed(struct 
device *dev, struct device_attribute *attr, struct acpi_fan_fst fst; int status; - status = acpi_fan_get_fst(acpi_dev, &fst); + status = acpi_fan_get_fst(acpi_dev->handle, &fst); if (status) return status; diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c index 04ff608f2ff0..46e7fe7a506d 100644 --- a/drivers/acpi/fan_core.c +++ b/drivers/acpi/fan_core.c @@ -44,25 +44,30 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long return 0; } -int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst) +int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; acpi_status status; int ret = 0; - status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer); - if (ACPI_FAILURE(status)) { - dev_err(&device->dev, "Get fan state failed\n"); - return -ENODEV; - } + status = acpi_evaluate_object(handle, "_FST", NULL, &buffer); + if (ACPI_FAILURE(status)) + return -EIO; obj = buffer.pointer; - if (!obj || obj->type != ACPI_TYPE_PACKAGE || - obj->package.count != 3 || - obj->package.elements[1].type != ACPI_TYPE_INTEGER) { - dev_err(&device->dev, "Invalid _FST data\n"); - ret = -EINVAL; + if (!obj) + return -ENODATA; + + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) { + ret = -EPROTO; + goto err; + } + + if (obj->package.elements[0].type != ACPI_TYPE_INTEGER || + obj->package.elements[1].type != ACPI_TYPE_INTEGER || + obj->package.elements[2].type != ACPI_TYPE_INTEGER) { + ret = -EPROTO; goto err; } @@ -81,7 +86,7 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state) struct acpi_fan_fst fst; int status, i; - status = acpi_fan_get_fst(device, &fst); + status = acpi_fan_get_fst(device->handle, &fst); if (status) return status; @@ -311,11 +316,16 @@ static int acpi_fan_probe(struct platform_device *pdev) struct acpi_device *device = ACPI_COMPANION(&pdev->dev); char *name; + if (!device) + return -ENODEV; + fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL); if (!fan) { dev_err(&device->dev, "No memory for fan\n"); return -ENOMEM; } + + fan->handle = device->handle; device->driver_data = fan; platform_set_drvdata(pdev, fan); @@ -337,7 +347,7 @@ static int acpi_fan_probe(struct platform_device *pdev) } if (fan->has_fst) { - result = devm_acpi_fan_create_hwmon(device); + result = devm_acpi_fan_create_hwmon(&pdev->dev); if (result) return result; diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c index e8d90605106e..4b2c2007f2d7 100644 --- a/drivers/acpi/fan_hwmon.c +++ b/drivers/acpi/fan_hwmon.c @@ -93,13 +93,12 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_ static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { - struct acpi_device *adev = to_acpi_device(dev->parent); struct acpi_fan *fan = dev_get_drvdata(dev); struct acpi_fan_fps *fps; struct acpi_fan_fst fst; int ret; - ret = acpi_fan_get_fst(adev, &fst); + ret = acpi_fan_get_fst(fan->handle, &fst); if (ret < 0) return ret; @@ -167,12 +166,12 @@ static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = { .info = acpi_fan_hwmon_info, }; -int devm_acpi_fan_create_hwmon(struct acpi_device *device) +int devm_acpi_fan_create_hwmon(struct device *dev) { - struct acpi_fan *fan = acpi_driver_data(device); + struct acpi_fan *fan = dev_get_drvdata(dev); struct device *hdev; - hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", 
fan, - &acpi_fan_hwmon_chip_info, NULL); + hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan, &acpi_fan_hwmon_chip_info, + NULL); return PTR_ERR_OR_ZERO(hdev); } diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index d4d52d5e9016..73cb933fdc89 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -155,7 +155,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console) * Baud Rate field. If this field is zero or not present, Configured * Baud Rate is used. */ - if (table->precise_baudrate) + if (table->header.revision >= 4 && table->precise_baudrate) baud_rate = table->precise_baudrate; else switch (table->baud_rate) { case 0: diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c index 54eb7d227cf4..e523fae73004 100644 --- a/drivers/base/regmap/regmap-slimbus.c +++ b/drivers/base/regmap/regmap-slimbus.c @@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus, if (IS_ERR(bus)) return ERR_CAST(bus); - return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config, - lock_key, lock_name); + return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name); } EXPORT_SYMBOL_GPL(__regmap_init_slimbus); @@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus, if (IS_ERR(bus)) return ERR_CAST(bus); - return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config, - lock_key, lock_name); + return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name); } EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus); diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index f982027e8c85..0ee55f889cfd 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1949,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev) .logical_block_size = dev->blocksize, .physical_block_size = dev->blocksize, .max_hw_sectors = dev->max_sectors, + .dma_alignment = dev->blocksize - 1, }; struct nullb *nullb; diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 7d21fb5a72f4..23239b0c04f9 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -318,10 +318,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, /* * Use a physical idle state, not busy polling, unless a timer - * is going to trigger soon enough. + * is going to trigger soon enough or the exit latency of the + * idle state in question is greater than the predicted idle + * duration. 
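The menu governor hunk above adds a second bound to the guard: besides the state's target residency fitting before the next timer event, its exit latency must now also fit inside the predicted idle duration. A standalone restatement of that guard with illustrative numbers (names local to the sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Both bounds must hold before the candidate state is taken. */
static bool state_fits(uint64_t target_residency_ns, uint64_t next_timer_ns,
		       uint64_t exit_latency_ns, uint64_t predicted_ns)
{
	return target_residency_ns <= next_timer_ns &&
	       exit_latency_ns <= predicted_ns;
}

int main(void)
{
	/* Previously only the first comparison was made; a state whose exit
	 * latency exceeds the predicted idle time is now rejected. */
	printf("%d\n", state_fits(100, 500, 50, 200));	/* 1: both bounds hold */
	printf("%d\n", state_fits(100, 500, 300, 200));	/* 0: exit latency too high */
	return 0;
}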
*/ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && - s->target_residency_ns <= data->next_timer_ns) { + s->target_residency_ns <= data->next_timer_ns && + s->exit_latency_ns <= predicted_ns) { predicted_ns = s->target_residency_ns; idx = i; break; diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c index 8d1c79aaca07..5993bcba9716 100644 --- a/drivers/crypto/aspeed/aspeed-acry.c +++ b/drivers/crypto/aspeed/aspeed-acry.c @@ -787,7 +787,6 @@ static int aspeed_acry_probe(struct platform_device *pdev) err_engine_rsa_start: crypto_engine_exit(acry_dev->crypt_engine_rsa); clk_exit: - clk_disable_unprepare(acry_dev->clk); return rc; } @@ -799,7 +798,6 @@ static void aspeed_acry_remove(struct platform_device *pdev) aspeed_acry_unregister(acry_dev); crypto_engine_exit(acry_dev->crypt_engine_rsa); tasklet_kill(&acry_dev->done_task); - clk_disable_unprepare(acry_dev->clk); } MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 3f78c56b58dc..39e6f93dc310 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -1141,7 +1141,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence) "RCU protection is required for safe access to returned string"); if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return fence->ops->get_driver_name(fence); + return fence->ops->get_timeline_name(fence); else return "signaled-timeline"; } diff --git a/drivers/edac/versalnet_edac.c b/drivers/edac/versalnet_edac.c index 7c5db8bf0595..1ded4c3f0213 100644 --- a/drivers/edac/versalnet_edac.c +++ b/drivers/edac/versalnet_edac.c @@ -433,7 +433,7 @@ static void handle_error(struct mc_priv *priv, struct ecc_status *stat, phys_addr_t pfn; int err; - if (WARN_ON_ONCE(ctl_num > NUM_CONTROLLERS)) + if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS)) return; mci = priv->mci[ctl_num]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c index ef996493115f..425a3e564360 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: MIT /* * Copyright 2025 Advanced Micro Devices, Inc. * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h index bcb97d245673..353421807387 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2025 Advanced Micro Devices, Inc. 
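The versalnet_edac change above is the usual off-by-one in a bounds check: with NUM_CONTROLLERS array slots the last valid index is NUM_CONTROLLERS - 1, so the sanity check has to reject index == NUM_CONTROLLERS as well. A minimal sketch (the array size here is arbitrary, not the driver's value):

#include <stdbool.h>
#include <stdio.h>

#define NUM_CONTROLLERS 8	/* arbitrary size for the sketch */

static int mci[NUM_CONTROLLERS];

static bool ctl_num_valid(unsigned int ctl_num)
{
	/* 'ctl_num > NUM_CONTROLLERS' would still accept the value
	 * NUM_CONTROLLERS itself and index one slot past the end of mci[]. */
	return ctl_num < NUM_CONTROLLERS;
}

int main(void)
{
	printf("%d\n", ctl_num_valid(NUM_CONTROLLERS - 1));	/* 1: last valid index */
	printf("%d\n", ctl_num_valid(NUM_CONTROLLERS));		/* 0: one past the end */
	(void)mci;
	return 0;
}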
* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index 474bfe36c0c2..aa78c2ee9e21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block) return 0; } +static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev) +{ + switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { + case IP_VERSION(6, 1, 1): + return adev->pm.fw_version < 0x0a640500; + default: + return false; + } +} + +static int vpe_get_dpm_level(struct amdgpu_device *adev) +{ + struct amdgpu_vpe *vpe = &adev->vpe; + + if (!adev->pm.dpm_enabled) + return 0; + + return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv)); +} + static void vpe_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = @@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work) unsigned int fences = 0; fences += amdgpu_fence_count_emitted(&adev->vpe.ring); + if (fences) + goto reschedule; - if (fences == 0) - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); - else - schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); + if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0) + goto reschedule; + + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); + return; + +reschedule: + schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); } static int vpe_common_init(struct amdgpu_vpe *vpe) diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c index 96616a865aac..ed1e25661706 100644 --- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: MIT /* * Copyright 2018 Advanced Micro Devices, Inc. 
* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 1ec9d03ad747..38f9ea313dcb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) struct vblank_control_work *vblank_work = container_of(work, struct vblank_control_work, work); struct amdgpu_display_manager *dm = vblank_work->dm; + struct amdgpu_device *adev = drm_to_adev(dm->ddev); + int r; mutex_lock(&dm->dc_lock); @@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) if (dm->active_vblank_irq_count == 0) { dc_post_update_surfaces_to_stream(dm->dc); + + r = amdgpu_dpm_pause_power_profile(adev, true); + if (r) + dev_warn(adev->dev, "failed to set default power profile mode\n"); + dc_allow_idle_optimizations(dm->dc, true); + + r = amdgpu_dpm_pause_power_profile(adev, false); + if (r) + dev_warn(adev->dev, "failed to restore the power profile mode\n"); } mutex_unlock(&dm->dc_lock); @@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) int irq_type; int rc = 0; - if (acrtc->otg_inst == -1) - goto skip; + if (enable && !acrtc->base.enabled) { + drm_dbg_vbl(crtc->dev, + "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n", + acrtc->crtc_id, acrtc->base.enabled); + return -EINVAL; + } irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); @@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) return rc; } #endif -skip: + if (amdgpu_in_reset(adev)) return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index fe100e4c9801..cc21337a182f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct edid_caps->panel_patch.remove_sink_ext_caps = true; break; case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154): + case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171): drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.disable_colorimetry = true; break; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index 09be2a90cc79..4f569cd8a5d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -578,9 +578,6 @@ static void dpp3_power_on_blnd_lut( dpp_base->ctx->dc->optimized_required = true; dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true; } - } else { - REG_SET(CM_MEM_PWR_CTRL, 0, - BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1); } } diff --git a/drivers/gpu/drm/amd/include/amd_cper.h b/drivers/gpu/drm/amd/include/amd_cper.h index 086869264425..a252ee4c7874 100644 --- a/drivers/gpu/drm/amd/include/amd_cper.h +++ b/drivers/gpu/drm/amd/include/amd_cper.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2025 Advanced Micro Devices, Inc. 
* diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h index 64b553e7de1a..e7fdcee22a71 100644 --- a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h +++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved. diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c index d2dbd90bb427..0a876c840c79 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c @@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr) table->VoltageResponseTime = 0; table->PhaseResponseTime = 0; table->MemoryThermThrottleEnable = 1; - table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ + table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); table->PCIeGenInterval = 1; table->VRConfig = 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c index 1f50f1e74c48..aa3ae9b115c4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c @@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr) table->VoltageResponseTime = 0; table->PhaseResponseTime = 0; table->MemoryThermThrottleEnable = 1; - table->PCIeBootLinkLevel = 0; + table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); table->PCIeGenInterval = 1; result = iceland_populate_smc_svi2_config(hwmgr, table); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index f532f7c69259..a8961a8f5c42 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -969,7 +969,7 @@ int smu_cmn_update_table(struct smu_context *smu, table_index); uint32_t table_size; int ret = 0; - if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) + if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0) return -EINVAL; table_size = smu_table->tables[table_index].size; diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index c15aef014f69..d41bd876167c 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -282,13 +282,13 @@ static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val) __ast_write8(addr, reg + 1, val); } -static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask, +static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask, u8 val) { - u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask); + u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask); - tmp |= val; - __ast_write8_i(addr, reg, index, tmp); + val &= ~preserve_mask; + __ast_write8_i(addr, reg, index, tmp | val); } static inline u32 ast_read32(struct ast_device *ast, u32 reg) diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml index d502d146b177..56638814bb28 100644 --- a/drivers/gpu/drm/ci/gitlab-ci.yml +++ b/drivers/gpu/drm/ci/gitlab-ci.yml @@ -280,7 +280,7 @@ sanity: GIT_STRATEGY: none script: # ci-fairy check-commits --junit-xml=check-commits.xml - - ci-fairy check-merge-request --require-allow-collaboration 
--junit-xml=check-merge-request.xml + # - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml - | set -eu image_tags=( diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index ebf305fb24f0..6fb55601252f 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -310,8 +310,12 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state); void __drm_gem_reset_shadow_plane(struct drm_plane *plane, struct drm_shadow_plane_state *shadow_plane_state) { - __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); - drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); + if (shadow_plane_state) { + __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); + drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); + } else { + __drm_atomic_helper_plane_reset(plane, NULL); + } } EXPORT_SYMBOL(__drm_gem_reset_shadow_plane); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index b13a17276d07..88385dc3b30d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -347,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, u32 link_target, link_dwords; bool switch_context = gpu->exec_state != exec_state; bool switch_mmu_context = gpu->mmu_context != mmu_context; - unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq); + unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq); bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq; bool has_blt = !!(gpu->identity.minor_features5 & chipMinorFeatures5_BLT_ENGINE); diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 77a0199f9ea5..4a4cace1f879 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -546,6 +546,36 @@ static bool is_event_handler(struct intel_display *display, REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id; } +static bool fixup_dmc_evt(struct intel_display *display, + enum intel_dmc_id dmc_id, + i915_reg_t reg_ctl, u32 *data_ctl, + i915_reg_t reg_htp, u32 *data_htp) +{ + if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl)) + return false; + + if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp)) + return false; + + /* make sure reg_ctl and reg_htp are for the same event */ + if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) != + i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0))) + return false; + + /* + * On ADL-S the HRR event handler is not restored after DC6. + * Clear it to zero from the beginning to avoid mismatches later. 
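fixup_dmc_evt() above only treats a CTL/HTP pair as belonging together when both registers sit at the same offset from their respective bases, i.e. they index the same event slot. A standalone sketch of that pairing check; the base addresses and slot size below are invented for the sketch, not the real DMC register layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTL_BASE  0x1000u	/* invented bank bases, one slot per event */
#define HTP_BASE  0x2000u
#define SLOT_SIZE 4u

static bool same_event_slot(uint32_t ctl_reg, uint32_t htp_reg)
{
	/* Consistent only if both registers are the same distance from
	 * their bank's base, i.e. they refer to the same event index. */
	return ctl_reg - CTL_BASE == htp_reg - HTP_BASE;
}

int main(void)
{
	printf("%d\n", same_event_slot(CTL_BASE + 2 * SLOT_SIZE,
				       HTP_BASE + 2 * SLOT_SIZE));	/* 1: same event */
	printf("%d\n", same_event_slot(CTL_BASE + 2 * SLOT_SIZE,
				       HTP_BASE + 3 * SLOT_SIZE));	/* 0: mismatched */
	return 0;
}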
+ */ + if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN && + is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) { + *data_ctl = 0; + *data_htp = 0; + return true; + } + + return false; +} + static bool disable_dmc_evt(struct intel_display *display, enum intel_dmc_id dmc_id, i915_reg_t reg, u32 data) @@ -1064,9 +1094,32 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, for (i = 0; i < mmio_count; i++) { dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]); dmc_info->mmiodata[i] = mmiodata[i]; + } + + for (i = 0; i < mmio_count - 1; i++) { + u32 orig_mmiodata[2] = { + dmc_info->mmiodata[i], + dmc_info->mmiodata[i+1], + }; + + if (!fixup_dmc_evt(display, dmc_id, + dmc_info->mmioaddr[i], &dmc_info->mmiodata[i], + dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1])) + continue; + + drm_dbg_kms(display->drm, + " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n", + i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), + orig_mmiodata[0], dmc_info->mmiodata[i]); + drm_dbg_kms(display->drm, + " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n", + i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]), + orig_mmiodata[1], dmc_info->mmiodata[i+1]); + } + for (i = 0; i < mmio_count; i++) { drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n", - i, mmioaddr[i], mmiodata[i], + i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i], is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" : is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "", disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i], diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 6d8325c76697..7fc6af703307 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -25,19 +25,18 @@ struct imx_parallel_display_encoder { struct drm_encoder encoder; - struct drm_bridge bridge; - struct imx_parallel_display *pd; }; struct imx_parallel_display { struct device *dev; u32 bus_format; struct drm_bridge *next_bridge; + struct drm_bridge bridge; }; static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b) { - return container_of(b, struct imx_parallel_display_encoder, bridge)->pd; + return container_of(b, struct imx_parallel_display, bridge); } static const u32 imx_pd_bus_fmts[] = { @@ -195,15 +194,13 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) if (IS_ERR(imxpd_encoder)) return PTR_ERR(imxpd_encoder); - imxpd_encoder->pd = imxpd; encoder = &imxpd_encoder->encoder; - bridge = &imxpd_encoder->bridge; + bridge = &imxpd->bridge; ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node); if (ret) return ret; - bridge->funcs = &imx_pd_bridge_funcs; drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); connector = drm_bridge_connector_init(drm, encoder); @@ -228,9 +225,10 @@ static int imx_pd_probe(struct platform_device *pdev) u32 bus_format = 0; const char *fmt; - imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL); - if (!imxpd) - return -ENOMEM; + imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge, + &imx_pd_bridge_funcs); + if (IS_ERR(imxpd)) + return PTR_ERR(imxpd); /* port@1 is the output port */ imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0); @@ -258,6 +256,8 @@ static int imx_pd_probe(struct platform_device *pdev) platform_set_drvdata(pdev, imxpd); + devm_drm_bridge_add(dev, &imxpd->bridge); + return component_add(dev, &imx_pd_ops); } diff --git 
a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index eb5537f0ac90..31ff2922758a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -686,10 +686,6 @@ err_free: for (i = 0; i < private->data->mmsys_dev_num; i++) private->all_drm_private[i]->drm = NULL; err_put_dev: - for (i = 0; i < private->data->mmsys_dev_num; i++) { - /* For device_find_child in mtk_drm_get_all_priv() */ - put_device(private->all_drm_private[i]->dev); - } put_device(private->mutex_dev); return ret; } @@ -697,18 +693,12 @@ err_put_dev: static void mtk_drm_unbind(struct device *dev) { struct mtk_drm_private *private = dev_get_drvdata(dev); - int i; /* for multi mmsys dev, unregister drm dev in mmsys master */ if (private->drm_master) { drm_dev_unregister(private->drm); mtk_drm_kms_deinit(private->drm); drm_dev_put(private->drm); - - for (i = 0; i < private->data->mmsys_dev_num; i++) { - /* For device_find_child in mtk_drm_get_all_priv() */ - put_device(private->all_drm_private[i]->dev); - } put_device(private->mutex_dev); } private->mtk_drm_bound = false; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index fc62fef2fed8..4e6dc16e4a4c 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -780,6 +780,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk) return true; } +#define NEXT_BLK(blk) \ + ((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size)) + static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) { struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); @@ -811,7 +814,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) for (blk = (const struct block_header *) fw_image->data; (const u8*) blk < fw_image->data + fw_image->size; - blk = (const struct block_header *) &blk->data[blk->size >> 2]) { + blk = NEXT_BLK(blk)) { if (blk->size == 0) continue; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index afaa3cfefd35..4b5a4edd0702 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -348,13 +348,6 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, return 0; } -static bool -adreno_smmu_has_prr(struct msm_gpu *gpu) -{ - struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); - return adreno_smmu && adreno_smmu->set_prr_addr; -} - int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx, uint32_t param, uint64_t *value, uint32_t *len) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 4b970a59deaf..2f8156051d9b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1545,6 +1545,9 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc, adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock, dpu_kms->perf.perf_cfg); + if (dpu_kms->catalog->caps->has_3d_merge) + adjusted_mode_clk /= 2; + /* * The given mode, adjusted for the perf clock factor, should not exceed * the max core clock rate diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 6641455c4ec6..9f8d1bba9139 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -267,8 +267,8 @@ static const u32 wb2_formats_rgb_yuv[] = { .base = 0x200, 
.len = 0xa0,}, \ .csc_blk = {.name = "csc", \ .base = 0x320, .len = 0x100,}, \ - .format_list = plane_formats_yuv, \ - .num_formats = ARRAY_SIZE(plane_formats_yuv), \ + .format_list = plane_formats, \ + .num_formats = ARRAY_SIZE(plane_formats), \ .rotation_cfg = NULL, \ } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index f54cf0faa1c7..905524ceeb1f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -500,13 +500,15 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg, int i; for (i = 0; i < DPU_MAX_PLANES; i++) { + uint32_t w = src_w, h = src_h; + if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) { - src_w /= chroma_subsmpl_h; - src_h /= chroma_subsmpl_v; + w /= chroma_subsmpl_h; + h /= chroma_subsmpl_v; } - pixel_ext->num_ext_pxls_top[i] = src_h; - pixel_ext->num_ext_pxls_left[i] = src_w; + pixel_ext->num_ext_pxls_top[i] = h; + pixel_ext->num_ext_pxls_left[i] = w; } } @@ -740,7 +742,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, * We already have verified scaling against platform limitations. * Now check if the SSPP supports scaling at all. */ - if (!sblk->scaler_blk.len && + if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) && ((drm_rect_width(&new_plane_state->src) >> 16 != drm_rect_width(&new_plane_state->dst)) || (drm_rect_height(&new_plane_state->src) >> 16 != @@ -1278,7 +1280,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state, state, plane_state, prev_adjacent_plane_state); if (ret) - break; + return ret; prev_adjacent_plane_state = plane_state; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 2c77c74fac0f..d9c3b0a1d091 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -842,7 +842,7 @@ struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm, if (!reqs->scale && !reqs->yuv) hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA); - if (!hw_sspp && reqs->scale) + if (!hw_sspp && !reqs->yuv) hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB); if (!hw_sspp) hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c index cd73468e369a..7545c0293efb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c @@ -72,6 +72,9 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector, DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n", fb->width, dpu_wb_conn->maxlinewidth); return -EINVAL; + } else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { + DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier); + return -EINVAL; } return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index e391505fdaf0..3cbf08231492 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -109,7 +109,6 @@ struct msm_dsi_phy { struct msm_dsi_dphy_timing timing; const struct msm_dsi_phy_cfg *cfg; void *tuning_cfg; - void *pll_data; enum msm_dsi_phy_usecase usecase; bool regulator_ldo_mode; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 32f06edd21a9..c5e1d2016bcc 100644 --- 
a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -426,11 +426,8 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll) u32 data; spin_lock_irqsave(&pll->pll_enable_lock, flags); - if (pll->pll_enable_cnt++) { - spin_unlock_irqrestore(&pll->pll_enable_lock, flags); - WARN_ON(pll->pll_enable_cnt == INT_MAX); - return; - } + pll->pll_enable_cnt++; + WARN_ON(pll->pll_enable_cnt == INT_MAX); data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0); data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB; @@ -876,7 +873,6 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy) spin_lock_init(&pll_7nm->pll_enable_lock); pll_7nm->phy = phy; - phy->pll_data = pll_7nm; ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws); if (ret) { @@ -965,10 +961,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, u32 const delay_us = 5; u32 const timeout_us = 1000; struct msm_dsi_dphy_timing *timing = &phy->timing; - struct dsi_pll_7nm *pll = phy->pll_data; void __iomem *base = phy->base; bool less_than_1500_mhz; - unsigned long flags; u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0; u32 glbl_pemph_ctrl_0; u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0; @@ -1090,13 +1084,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, glbl_rescode_bot_ctrl = 0x3c; } - spin_lock_irqsave(&pll->pll_enable_lock, flags); - pll->pll_enable_cnt = 1; /* de-assert digital and pll power down */ data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B | DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB; writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0); - spin_unlock_irqrestore(&pll->pll_enable_lock, flags); /* Assert PLL core reset */ writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL); @@ -1209,9 +1200,7 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable) static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy) { - struct dsi_pll_7nm *pll = phy->pll_data; void __iomem *base = phy->base; - unsigned long flags; u32 data; DBG(""); @@ -1238,11 +1227,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy) writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0); writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0); - spin_lock_irqsave(&pll->pll_enable_lock, flags); - pll->pll_enable_cnt = 0; /* Turn off all PHY blocks */ writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0); - spin_unlock_irqrestore(&pll->pll_enable_lock, flags); /* make sure phy is turned off */ wmb(); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 07d8cdd6bb2e..9f7fbe577abb 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -1120,12 +1120,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj) put_pages(obj); } - if (obj->resv != &obj->_resv) { + /* + * In error paths, we could end up here before msm_gem_new_handle() + * has changed obj->resv to point to the shared resv. In this case, + * we don't want to drop a ref to the shared r_obj that we haven't + * taken yet. 
+ */ + if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) { struct drm_gem_object *r_obj = container_of(obj->resv, struct drm_gem_object, _resv); - WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE)); - /* Drop reference we hold to shared resv obj: */ drm_gem_object_put(r_obj); } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 3ab3b27134f9..75d9f3574370 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -414,6 +414,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit) submit->user_fence, DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP); + + last_fence = vm->last_fence; + vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); + dma_fence_put(last_fence); + return; } @@ -427,10 +432,6 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit) dma_resv_add_fence(obj->resv, submit->user_fence, DMA_RESV_USAGE_READ); } - - last_fence = vm->last_fence; - vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); - dma_fence_put(last_fence); } static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 8316af1723c2..89a95977f41e 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -971,6 +971,7 @@ static int lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op) { struct drm_device *dev = job->vm->drm; + struct msm_drm_private *priv = dev->dev_private; int i = job->nr_ops++; int ret = 0; @@ -1017,6 +1018,11 @@ lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op) break; } + if ((op->op == MSM_VM_BIND_OP_MAP_NULL) && + !adreno_smmu_has_prr(priv->gpu)) { + ret = UERR(EINVAL, dev, "PRR not supported\n"); + } + return ret; } @@ -1421,7 +1427,7 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file) * Maybe we could allow just UNMAP ops? OTOH userspace should just * immediately close the device file and all will be torn down. 
*/ - if (to_msm_vm(ctx->vm)->unusable) + if (to_msm_vm(msm_context_vm(dev, ctx))->unusable) return UERR(EPIPE, dev, "context is unusable"); /* diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index a597f2bee30b..2894fc118485 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -299,6 +299,17 @@ static inline struct msm_gpu *dev_to_gpu(struct device *dev) return container_of(adreno_smmu, struct msm_gpu, adreno_smmu); } +static inline bool +adreno_smmu_has_prr(struct msm_gpu *gpu) +{ + struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); + + if (!adreno_smmu) + return false; + + return adreno_smmu && adreno_smmu->set_prr_addr; +} + /* It turns out that all targets use the same ringbuffer size */ #define MSM_GPU_RINGBUFFER_SZ SZ_32K #define MSM_GPU_RINGBUFFER_BLKSIZE 32 diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 0e18619f96cb..a188617653e8 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -338,6 +338,8 @@ msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_preall ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages); if (ret != p->count) { + kfree(p->pages); + p->pages = NULL; p->count = ret; return -ENOMEM; } @@ -351,6 +353,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo struct kmem_cache *pt_cache = get_pt_cache(mmu); uint32_t remaining_pt_count = p->count - p->ptr; + if (!p->pages) + return; + if (p->count > 0) trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count); diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index e60f7892f5ce..a7bf539e5d86 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -482,6 +482,17 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm, return 0; } +static bool +nouveau_sched_job_list_empty(struct nouveau_sched *sched) +{ + bool empty; + + spin_lock(&sched->job.list.lock); + empty = list_empty(&sched->job.list.head); + spin_unlock(&sched->job.list.lock); + + return empty; +} static void nouveau_sched_fini(struct nouveau_sched *sched) @@ -489,8 +500,7 @@ nouveau_sched_fini(struct nouveau_sched *sched) struct drm_gpu_scheduler *drm_sched = &sched->base; struct drm_sched_entity *entity = &sched->entity; - rmb(); /* for list_empty to work without lock */ - wait_event(sched->job.wq, list_empty(&sched->job.list.head)); + wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched)); drm_sched_entity_fini(entity); drm_sched_fini(drm_sched); diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c index 2fc7b0779b37..893af9b16756 100644 --- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c @@ -359,7 +359,7 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi) dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | - MIPI_DSI_MODE_LPM; + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base, &kingdisplay_panel_funcs, diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index 04d91929eedd..d5f821d6b23c 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ 
b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -249,6 +249,11 @@ static const struct drm_display_mode default_mode = { .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }; +/* + * The mode data for this panel has been reverse engineered without access + * to the panel datasheet / manual. Using DRM_MODE_FLAG_PHSYNC like all + * other panels results in garbage data on the display. + */ static const struct drm_display_mode t28cp45tn89_mode = { .clock = 6008, .hdisplay = 240, @@ -261,7 +266,7 @@ static const struct drm_display_mode t28cp45tn89_mode = { .vtotal = 320 + 8 + 4 + 4, .width_mm = 43, .height_mm = 57, - .flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC, }; static const struct drm_display_mode et028013dma_mode = { diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 88e821d67af7..9c8907bc61d9 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -314,17 +314,17 @@ static int radeon_pci_probe(struct pci_dev *pdev, ret = pci_enable_device(pdev); if (ret) - goto err_free; + return ret; pci_set_drvdata(pdev, ddev); ret = radeon_driver_load_kms(ddev, flags); if (ret) - goto err_agp; + goto err; ret = drm_dev_register(ddev, flags); if (ret) - goto err_agp; + goto err; if (rdev->mc.real_vram_size <= (8 * 1024 * 1024)) format = drm_format_info(DRM_FORMAT_C8); @@ -337,30 +337,14 @@ static int radeon_pci_probe(struct pci_dev *pdev, return 0; -err_agp: +err: pci_disable_device(pdev); -err_free: - drm_dev_put(ddev); return ret; } static void -radeon_pci_remove(struct pci_dev *pdev) -{ - struct drm_device *dev = pci_get_drvdata(pdev); - - drm_put_dev(dev); -} - -static void radeon_pci_shutdown(struct pci_dev *pdev) { - /* if we are running in a VM, make sure the device - * torn down properly on reboot/shutdown - */ - if (radeon_device_is_virtual()) - radeon_pci_remove(pdev); - #if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64) /* * Some adapters need to be suspended before a @@ -613,7 +597,6 @@ static struct pci_driver radeon_kms_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, .probe = radeon_pci_probe, - .remove = radeon_pci_remove, .shutdown = radeon_pci_shutdown, .driver.pm = &radeon_pm_ops, }; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 645e33bf7947..ba1446acd703 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -84,7 +84,6 @@ void radeon_driver_unload_kms(struct drm_device *dev) rdev->agp = NULL; done_free: - kfree(rdev); dev->dev_private = NULL; } diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 5a4697f636f2..c8e949f4a568 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -70,6 +70,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, entity->guilty = guilty; entity->num_sched_list = num_sched_list; entity->priority = priority; + entity->last_user = current->group_leader; /* * It's perfectly valid to initialize an entity without having a valid * scheduler attached. It's just not valid to use the scheduler before it @@ -302,7 +303,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) /* For a killed process disallow further enqueueing of jobs. 
*/ last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); - if ((!last_user || last_user == current->group_leader) && + if (last_user == current->group_leader && (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) drm_sched_entity_kill(entity); @@ -552,10 +553,11 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) drm_sched_rq_remove_entity(entity->rq, entity); entity->rq = rq; } - spin_unlock(&entity->lock); if (entity->num_sched_list == 1) entity->sched_list = NULL; + + spin_unlock(&entity->lock); } /** diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 3e0ad7e5b5df..6d3db5e55d98 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -813,12 +813,16 @@ static int gt_reset(struct xe_gt *gt) unsigned int fw_ref; int err; - if (xe_device_wedged(gt_to_xe(gt))) - return -ECANCELED; + if (xe_device_wedged(gt_to_xe(gt))) { + err = -ECANCELED; + goto err_pm_put; + } /* We only support GT resets with GuC submission */ - if (!xe_device_uc_enabled(gt_to_xe(gt))) - return -ENODEV; + if (!xe_device_uc_enabled(gt_to_xe(gt))) { + err = -ENODEV; + goto err_pm_put; + } xe_gt_info(gt, "reset started\n"); @@ -826,8 +830,6 @@ static int gt_reset(struct xe_gt *gt) if (!err) xe_gt_warn(gt, "reset block failed to get lifted"); - xe_pm_runtime_get(gt_to_xe(gt)); - if (xe_fault_inject_gt_reset()) { err = -ECANCELED; goto err_fail; @@ -874,6 +876,7 @@ err_fail: xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err)); xe_device_declare_wedged(gt_to_xe(gt)); +err_pm_put: xe_pm_runtime_put(gt_to_xe(gt)); return err; @@ -895,7 +898,9 @@ void xe_gt_reset_async(struct xe_gt *gt) return; xe_gt_info(gt, "reset queued\n"); - queue_work(gt->ordered_wq, &gt->reset.worker); + xe_pm_runtime_get_noresume(gt_to_xe(gt)); + if (!queue_work(gt->ordered_wq, &gt->reset.worker)) + xe_pm_runtime_put(gt_to_xe(gt)); } void xe_gt_suspend_prepare(struct xe_gt *gt) diff --git a/drivers/gpu/drm/xe/xe_validation.h b/drivers/gpu/drm/xe/xe_validation.h index fec331d791e7..b2d09c596714 100644 --- a/drivers/gpu/drm/xe/xe_validation.h +++ b/drivers/gpu/drm/xe/xe_validation.h @@ -166,10 +166,10 @@ xe_validation_device_init(struct xe_validation_device *val) */ DEFINE_CLASS(xe_validation, struct xe_validation_ctx *, if (_T) xe_validation_ctx_fini(_T);, - ({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags); - _ret ? NULL : _ctx; }), + ({*_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags); + *_ret ? NULL : _ctx; }), struct xe_validation_ctx *_ctx, struct xe_validation_device *_val, - struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret); + struct drm_exec *_exec, const struct xe_val_flags _flags, int *_ret); static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T) {return *_T; } #define class_xe_validation_is_conditional true @@ -186,7 +186,7 @@ static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T) * exhaustive eviction.
*/ #define xe_validation_guard(_ctx, _val, _exec, _flags, _ret) \ - scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret) \ + scoped_guard(xe_validation, _ctx, _val, _exec, _flags, &_ret) \ drm_exec_until_all_locked(_exec) #endif diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c index 37cd37556510..fab5d914029d 100644 --- a/drivers/infiniband/core/uverbs_std_types_cq.c +++ b/drivers/infiniband/core/uverbs_std_types_cq.c @@ -206,6 +206,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)( return ret; err_free: + ib_umem_release(umem); rdma_restrack_put(&cq->res); kfree(cq); err_event_file: diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 4dab5ca7362b..84ce3fce2826 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -913,7 +913,7 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, spin_unlock_irqrestore(&qp->scq->cq_lock, flags); } -static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) +static void bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) { struct bnxt_re_qp *gsi_sqp; struct bnxt_re_ah *gsi_sah; @@ -933,10 +933,9 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n"); rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp); - if (rc) { + if (rc) ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed"); - goto fail; - } + bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp); /* remove from active qp list */ @@ -951,10 +950,6 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) rdev->gsi_ctx.gsi_sqp = NULL; rdev->gsi_ctx.gsi_sah = NULL; rdev->gsi_ctx.sqp_tbl = NULL; - - return 0; -fail: - return rc; } static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev) diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index d9a12681f843..22d3e25c3b9d 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -1216,13 +1216,13 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, if (umem->length < cq->size) { ibdev_dbg(&dev->ibdev, "External memory too small\n"); err = -EINVAL; - goto err_free_mem; + goto err_out; } if (!ib_umem_is_contiguous(umem)) { ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n"); err = -EINVAL; - goto err_free_mem; + goto err_out; } cq->cpu_addr = NULL; @@ -1251,7 +1251,7 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, err = efa_com_create_cq(&dev->edev, &params, &result); if (err) - goto err_free_mem; + goto err_free_mapped; resp.db_off = result.db_off; resp.cq_idx = result.cq_idx; @@ -1299,12 +1299,10 @@ err_remove_mmap: efa_cq_user_mmap_entries_remove(cq); err_destroy_cq: efa_destroy_cq_idx(dev, cq->cq_idx); -err_free_mem: - if (umem) - ib_umem_release(umem); - else - efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE); - +err_free_mapped: + if (!umem) + efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, + DMA_FROM_DEVICE); err_out: atomic64_inc(&dev->stats.create_cq_err); return err; } diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 3a5c93c9fb3e..6aa82fe9dd3d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -30,6 +30,7 @@ * SOFTWARE.
*/ +#include <linux/pci.h> #include <rdma/ib_umem.h> #include <rdma/uverbs_ioctl.h> #include "hns_roce_device.h" @@ -37,6 +38,43 @@ #include "hns_roce_hem.h" #include "hns_roce_common.h" +void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device); + struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; + + if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) + return; + + mutex_lock(&cq_table->bank_mutex); + cq_table->ctx_num[uctx->cq_bank_id]--; + mutex_unlock(&cq_table->bank_mutex); +} + +void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device); + struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; + u32 least_load = cq_table->ctx_num[0]; + u8 bankid = 0; + u8 i; + + if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) + return; + + mutex_lock(&cq_table->bank_mutex); + for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) { + if (cq_table->ctx_num[i] < least_load) { + least_load = cq_table->ctx_num[i]; + bankid = i; + } + } + cq_table->ctx_num[bankid]++; + mutex_unlock(&cq_table->bank_mutex); + + uctx->cq_bank_id = bankid; +} + static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank) { u32 least_load = bank[0].inuse; @@ -55,7 +93,21 @@ static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank) return bankid; } -static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) +static u8 select_cq_bankid(struct hns_roce_dev *hr_dev, + struct hns_roce_bank *bank, struct ib_udata *udata) +{ + struct hns_roce_ucontext *uctx = udata ? + rdma_udata_to_drv_context(udata, struct hns_roce_ucontext, + ibucontext) : NULL; + + if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) + return uctx ? 
uctx->cq_bank_id : 0; + + return get_least_load_bankid_for_cq(bank); +} + +static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, + struct ib_udata *udata) { struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; struct hns_roce_bank *bank; @@ -63,7 +115,7 @@ static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) int id; mutex_lock(&cq_table->bank_mutex); - bankid = get_least_load_bankid_for_cq(cq_table->bank); + bankid = select_cq_bankid(hr_dev, cq_table->bank, udata); bank = &cq_table->bank[bankid]; id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL); @@ -396,7 +448,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, goto err_cq_buf; } - ret = alloc_cqn(hr_dev, hr_cq); + ret = alloc_cqn(hr_dev, hr_cq, udata); if (ret) { ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret); goto err_cq_db; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 78ee04a48a74..06832c0ac055 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -217,6 +217,7 @@ struct hns_roce_ucontext { struct mutex page_mutex; struct hns_user_mmap_entry *db_mmap_entry; u32 config; + u8 cq_bank_id; }; struct hns_roce_pd { @@ -495,6 +496,7 @@ struct hns_roce_cq_table { struct hns_roce_hem_table table; struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM]; struct mutex bank_mutex; + u32 ctx_num[HNS_ROCE_CQ_BANK_NUM]; }; struct hns_roce_srq_table { @@ -1305,5 +1307,7 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, size_t length, enum hns_roce_mmap_type mmap_type); bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl); +void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx); +void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx); #endif /* _HNS_ROCE_DEVICE_H */ diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index f82bdd46a917..63052c0e7613 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -165,6 +165,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe, hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ, to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); hr_reg_clear(fseg, FRMR_BLK_MODE); + hr_reg_clear(fseg, FRMR_BLOCK_SIZE); + hr_reg_clear(fseg, FRMR_ZBVA); } static void set_atomic_seg(const struct ib_send_wr *wr, @@ -339,9 +341,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, int j = 0; int i; - hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX, - (*sge_ind) & (qp->sge.sge_cnt - 1)); - hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE, !!(wr->send_flags & IB_SEND_INLINE)); if (wr->send_flags & IB_SEND_INLINE) @@ -586,6 +585,9 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp, hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE, (wr->send_flags & IB_SEND_SIGNALED) ? 
1 : 0); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX, + curr_idx & (qp->sge.sge_cnt - 1)); + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { if (msg_len != ATOMIC_WR_LEN) @@ -734,6 +736,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, owner_bit = ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); + /* RC and UD share the same DirectWQE field layout */ + ((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0; + /* Corresponding to the QP type, wqe process separately */ if (ibqp->qp_type == IB_QPT_RC) ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit); @@ -7048,7 +7053,6 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) goto error_failed_roce_init; } - handle->priv = hr_dev; return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index d50f36f8a110..f3607fe107a7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -425,6 +425,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, if (ret) goto error_fail_copy_to_udata; + hns_roce_get_cq_bankid_for_uctx(context); + return 0; error_fail_copy_to_udata: @@ -447,6 +449,8 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext) struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext); struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device); + hns_roce_put_cq_bankid_for_uctx(context); + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) mutex_destroy(&context->page_mutex); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 6ff1b8ce580c..bdd879ac12dd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -662,7 +662,6 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev, hr_qp->sq.wqe_shift = ucmd->log_sq_stride; hr_qp->sq.wqe_cnt = cnt; - cap->max_send_sge = hr_qp->sq.max_gs; return 0; } @@ -744,7 +743,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev, /* sync the parameters of kernel QP to user's configuration */ cap->max_send_wr = cnt; - cap->max_send_sge = hr_qp->sq.max_gs; return 0; } diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c index 3091f9345f12..fa6325adaede 100644 --- a/drivers/infiniband/hw/irdma/pble.c +++ b/drivers/infiniband/hw/irdma/pble.c @@ -71,7 +71,7 @@ int irdma_hmc_init_pble(struct irdma_sc_dev *dev, static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, struct sd_pd_idx *idx) { - idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE; + idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE; idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE); idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD); } diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h index 4ae77cdde9dc..c1b8f81ea283 100644 --- a/drivers/infiniband/hw/irdma/type.h +++ b/drivers/infiniband/hw/irdma/type.h @@ -706,7 +706,7 @@ struct irdma_sc_dev { u32 vchnl_ver; u16 num_vfs; u16 hmc_fn_id; - u8 vf_id; + u16 vf_id; bool privileged:1; bool vchnl_up:1; bool ceq_valid:1; diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 76ce6137f2ba..c883c9ea5a83 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -2503,6 +2503,7 @@ static int irdma_create_cq(struct ib_cq 
*ibcq, spin_lock_init(&iwcq->lock); INIT_LIST_HEAD(&iwcq->resize_list); INIT_LIST_HEAD(&iwcq->cmpl_generated); + iwcq->cq_num = cq_num; info.dev = dev; ukinfo->cq_size = max(entries, 4); ukinfo->cq_id = cq_num; diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h index ed21c1b56e8e..ac8b38701835 100644 --- a/drivers/infiniband/hw/irdma/verbs.h +++ b/drivers/infiniband/hw/irdma/verbs.h @@ -140,7 +140,7 @@ struct irdma_srq { struct irdma_cq { struct ib_cq ibcq; struct irdma_sc_cq sc_cq; - u16 cq_num; + u32 cq_num; bool user_mode; atomic_t armed; enum irdma_cmpl_notify last_notify; diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index d911021c1bb0..83862d57b126 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -1010,6 +1010,11 @@ int vb2_ioctl_remove_bufs(struct file *file, void *priv, if (vb2_queue_is_busy(vdev->queue, file)) return -EBUSY; + if (vb2_fileio_is_active(vdev->queue)) { + dprintk(vdev->queue, 1, "file io in progress\n"); + return -EBUSY; + } + return vb2_core_remove_bufs(vdev->queue, d->index, d->count); } EXPORT_SYMBOL_GPL(vb2_ioctl_remove_bufs); diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c index b62fd12c93c1..74c59a94b2b0 100644 --- a/drivers/media/pci/cx18/cx18-driver.c +++ b/drivers/media/pci/cx18/cx18-driver.c @@ -1136,11 +1136,8 @@ int cx18_init_on_first_open(struct cx18 *cx) int video_input; int fw_retry_count = 3; struct v4l2_frequency vf; - struct cx18_open_id fh; v4l2_std_id std; - fh.cx = cx; - if (test_bit(CX18_F_I_FAILED, &cx->i_flags)) return -ENXIO; @@ -1220,14 +1217,14 @@ int cx18_init_on_first_open(struct cx18 *cx) video_input = cx->active_input; cx->active_input++; /* Force update of input */ - cx18_s_input(NULL, &fh, video_input); + cx18_do_s_input(cx, video_input); /* Let the VIDIOC_S_STD ioctl do all the work, keeps the code in one place. */ cx->std++; /* Force full standard initialization */ std = (cx->tuner_std == V4L2_STD_ALL) ? 
V4L2_STD_NTSC_M : cx->tuner_std; - cx18_s_std(NULL, &fh, std); - cx18_s_frequency(NULL, &fh, &vf); + cx18_do_s_std(cx, std); + cx18_do_s_frequency(cx, &vf); return 0; } diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c index 0f3019739d03..0d676a57e24e 100644 --- a/drivers/media/pci/cx18/cx18-ioctl.c +++ b/drivers/media/pci/cx18/cx18-ioctl.c @@ -521,10 +521,8 @@ static int cx18_g_input(struct file *file, void *fh, unsigned int *i) return 0; } -int cx18_s_input(struct file *file, void *fh, unsigned int inp) +int cx18_do_s_input(struct cx18 *cx, unsigned int inp) { - struct cx18_open_id *id = file2id(file); - struct cx18 *cx = id->cx; v4l2_std_id std = V4L2_STD_ALL; const struct cx18_card_video_input *card_input = cx->card->video_inputs + inp; @@ -558,6 +556,11 @@ int cx18_s_input(struct file *file, void *fh, unsigned int inp) return 0; } +static int cx18_s_input(struct file *file, void *fh, unsigned int inp) +{ + return cx18_do_s_input(file2id(file)->cx, inp); +} + static int cx18_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf) { @@ -570,11 +573,8 @@ static int cx18_g_frequency(struct file *file, void *fh, return 0; } -int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf) +int cx18_do_s_frequency(struct cx18 *cx, const struct v4l2_frequency *vf) { - struct cx18_open_id *id = file2id(file); - struct cx18 *cx = id->cx; - if (vf->tuner != 0) return -EINVAL; @@ -585,6 +585,12 @@ int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *v return 0; } +static int cx18_s_frequency(struct file *file, void *fh, + const struct v4l2_frequency *vf) +{ + return cx18_do_s_frequency(file2id(file)->cx, vf); +} + static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std) { struct cx18 *cx = file2id(file)->cx; @@ -593,11 +599,8 @@ static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std) return 0; } -int cx18_s_std(struct file *file, void *fh, v4l2_std_id std) +int cx18_do_s_std(struct cx18 *cx, v4l2_std_id std) { - struct cx18_open_id *id = file2id(file); - struct cx18 *cx = id->cx; - if ((std & V4L2_STD_ALL) == 0) return -EINVAL; @@ -642,6 +645,11 @@ int cx18_s_std(struct file *file, void *fh, v4l2_std_id std) return 0; } +static int cx18_s_std(struct file *file, void *fh, v4l2_std_id std) +{ + return cx18_do_s_std(file2id(file)->cx, std); +} + static int cx18_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt) { struct cx18_open_id *id = file2id(file); diff --git a/drivers/media/pci/cx18/cx18-ioctl.h b/drivers/media/pci/cx18/cx18-ioctl.h index 97cd9f99e22d..42a8acd69735 100644 --- a/drivers/media/pci/cx18/cx18-ioctl.h +++ b/drivers/media/pci/cx18/cx18-ioctl.h @@ -12,6 +12,8 @@ u16 cx18_service2vbi(int type); void cx18_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal); u16 cx18_get_service_set(struct v4l2_sliced_vbi_format *fmt); void cx18_set_funcs(struct video_device *vdev); -int cx18_s_std(struct file *file, void *fh, v4l2_std_id std); -int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf); -int cx18_s_input(struct file *file, void *fh, unsigned int inp); + +struct cx18; +int cx18_do_s_std(struct cx18 *cx, v4l2_std_id std); +int cx18_do_s_frequency(struct cx18 *cx, const struct v4l2_frequency *vf); +int cx18_do_s_input(struct cx18 *cx, unsigned int inp); diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index 72a8f76a41f4..459eb6cc370c 100644 --- 
a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c @@ -1247,15 +1247,12 @@ err: int ivtv_init_on_first_open(struct ivtv *itv) { - struct v4l2_frequency vf; /* Needed to call ioctls later */ - struct ivtv_open_id fh; + struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG]; + struct v4l2_frequency vf; int fw_retry_count = 3; int video_input; - fh.itv = itv; - fh.type = IVTV_ENC_STREAM_TYPE_MPG; - if (test_bit(IVTV_F_I_FAILED, &itv->i_flags)) return -ENXIO; @@ -1297,13 +1294,13 @@ int ivtv_init_on_first_open(struct ivtv *itv) video_input = itv->active_input; itv->active_input++; /* Force update of input */ - ivtv_s_input(NULL, &fh, video_input); + ivtv_do_s_input(itv, video_input); /* Let the VIDIOC_S_STD ioctl do all the work, keeps the code in one place. */ itv->std++; /* Force full standard initialization */ itv->std_out = itv->std; - ivtv_s_frequency(NULL, &fh, &vf); + ivtv_do_s_frequency(s, &vf); if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) { /* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c index 84c73bd22f2d..8d5ea3aec06f 100644 --- a/drivers/media/pci/ivtv/ivtv-ioctl.c +++ b/drivers/media/pci/ivtv/ivtv-ioctl.c @@ -974,9 +974,8 @@ static int ivtv_g_input(struct file *file, void *fh, unsigned int *i) return 0; } -int ivtv_s_input(struct file *file, void *fh, unsigned int inp) +int ivtv_do_s_input(struct ivtv *itv, unsigned int inp) { - struct ivtv *itv = file2id(file)->itv; v4l2_std_id std; int i; @@ -1017,6 +1016,11 @@ int ivtv_s_input(struct file *file, void *fh, unsigned int inp) return 0; } +static int ivtv_s_input(struct file *file, void *fh, unsigned int inp) +{ + return ivtv_do_s_input(file2id(file)->itv, inp); +} + static int ivtv_g_output(struct file *file, void *fh, unsigned int *i) { struct ivtv *itv = file2id(file)->itv; @@ -1065,10 +1069,9 @@ static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency * return 0; } -int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf) +int ivtv_do_s_frequency(struct ivtv_stream *s, const struct v4l2_frequency *vf) { - struct ivtv *itv = file2id(file)->itv; - struct ivtv_stream *s = &itv->streams[file2id(file)->type]; + struct ivtv *itv = s->itv; if (s->vdev.vfl_dir) return -ENOTTY; @@ -1082,6 +1085,15 @@ int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *v return 0; } +static int ivtv_s_frequency(struct file *file, void *fh, + const struct v4l2_frequency *vf) +{ + struct ivtv_open_id *id = file2id(file); + struct ivtv *itv = id->itv; + + return ivtv_do_s_frequency(&itv->streams[id->type], vf); +} + static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std) { struct ivtv *itv = file2id(file)->itv; diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.h b/drivers/media/pci/ivtv/ivtv-ioctl.h index 7f8c6f43d397..96ca7e2ef973 100644 --- a/drivers/media/pci/ivtv/ivtv-ioctl.h +++ b/drivers/media/pci/ivtv/ivtv-ioctl.h @@ -9,6 +9,8 @@ #ifndef IVTV_IOCTL_H #define IVTV_IOCTL_H +struct ivtv; + u16 ivtv_service2vbi(int type); void ivtv_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal); u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt); @@ -17,7 +19,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed); void ivtv_set_funcs(struct video_device *vdev); void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id std); void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std); -int ivtv_s_frequency(struct file *file, 
void *fh, const struct v4l2_frequency *vf); -int ivtv_s_input(struct file *file, void *fh, unsigned int inp); +int ivtv_do_s_frequency(struct ivtv_stream *s, const struct v4l2_frequency *vf); +int ivtv_do_s_input(struct ivtv *itv, unsigned int inp); #endif diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index fb6afb8e84f0..ee4f54d68349 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -167,13 +167,26 @@ static struct uvc_entity *uvc_entity_by_reference(struct uvc_device *dev, static struct uvc_streaming *uvc_stream_by_id(struct uvc_device *dev, int id) { - struct uvc_streaming *stream; + struct uvc_streaming *stream, *last_stream; + unsigned int count = 0; list_for_each_entry(stream, &dev->streams, list) { + count += 1; + last_stream = stream; if (stream->header.bTerminalLink == id) return stream; } + /* + * If the streaming entity is referenced by an invalid ID, notify the + * user and use heuristics to guess the correct entity. + */ + if (count == 1 && id == UVC_INVALID_ENTITY_ID) { + dev_warn(&dev->intf->dev, + "UVC non compliance: Invalid USB header. The streaming entity has an invalid ID, guessing the correct one."); + return last_stream; + } + return NULL; } diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index 1da953629010..25e66bf18f5f 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -2608,7 +2608,7 @@ EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming); int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd) { #if IS_REACHABLE(CONFIG_LEDS_CLASS) - sd->privacy_led = led_get(sd->dev, "privacy-led"); + sd->privacy_led = led_get(sd->dev, "privacy"); if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT) return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led), "getting privacy LED\n"); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index c916176bd9f0..72fb675a696f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1042,7 +1042,7 @@ static blk_status_t nvme_map_data(struct request *req) return nvme_pci_setup_data_prp(req, &iter); } -static blk_status_t nvme_pci_setup_meta_sgls(struct request *req) +static blk_status_t nvme_pci_setup_meta_iter(struct request *req) { struct nvme_queue *nvmeq = req->mq_hctx->driver_data; unsigned int entries = req->nr_integrity_segments; @@ -1072,8 +1072,12 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct request *req) * descriptor provides an explicit length, so we're relying on that * mechanism to catch any misunderstandings between the application and * device. + * + * P2P DMA also needs to use the blk_dma_iter method, so mptr setup + * leverages this routine when that happens. 
*/ - if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) { + if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) || + (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) { iod->cmd.common.metadata = cpu_to_le64(iter.addr); iod->meta_total_len = iter.len; iod->meta_dma = iter.addr; @@ -1114,6 +1118,9 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct request *req) struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct bio_vec bv = rq_integrity_vec(req); + if (is_pci_p2pdma_page(bv.bv_page)) + return nvme_pci_setup_meta_iter(req); + iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma)) return BLK_STS_IOERR; @@ -1128,7 +1135,7 @@ static blk_status_t nvme_map_metadata(struct request *req) if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) && nvme_pci_metadata_use_sgls(req)) - return nvme_pci_setup_meta_sgls(req); + return nvme_pci_setup_meta_iter(req); return nvme_pci_setup_meta_mptr(req); } diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index b340380f3892..ceba21684e82 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -298,7 +298,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, const char *hash_name; u8 *challenge = req->sq->dhchap_c1; struct nvme_dhchap_key *transformed_key; - u8 buf[4]; + u8 buf[4], sc_c = ctrl->concat ? 1 : 0; int ret; hash_name = nvme_auth_hmac_name(ctrl->shash_id); @@ -367,13 +367,14 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, ret = crypto_shash_update(shash, buf, 2); if (ret) goto out; - memset(buf, 0, 4); + *buf = sc_c; ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; ret = crypto_shash_update(shash, "HostHost", 8); if (ret) goto out; + memset(buf, 0, 4); ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn)); if (ret) goto out; diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 6948824642dc..c48a20602d7f 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -247,6 +247,7 @@ struct qcom_pcie_ops { int (*get_resources)(struct qcom_pcie *pcie); int (*init)(struct qcom_pcie *pcie); int (*post_init)(struct qcom_pcie *pcie); + void (*host_post_init)(struct qcom_pcie *pcie); void (*deinit)(struct qcom_pcie *pcie); void (*ltssm_enable)(struct qcom_pcie *pcie); int (*config_sid)(struct qcom_pcie *pcie); @@ -1038,6 +1039,25 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) return 0; } +static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata) +{ + /* + * Downstream devices need to be in D0 state before enabling PCI PM + * substates. 
+ */ + pci_set_power_state_locked(pdev, PCI_D0); + pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); + + return 0; +} + +static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie) +{ + struct dw_pcie_rp *pp = &pcie->pci->pp; + + pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL); +} + static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; @@ -1312,9 +1332,19 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp) pcie->cfg->ops->deinit(pcie); } +static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct qcom_pcie *pcie = to_qcom_pcie(pci); + + if (pcie->cfg->ops->host_post_init) + pcie->cfg->ops->host_post_init(pcie); +} + static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { .init = qcom_pcie_host_init, .deinit = qcom_pcie_host_deinit, + .post_init = qcom_pcie_host_post_init, }; /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ @@ -1376,6 +1406,7 @@ static const struct qcom_pcie_ops ops_1_9_0 = { .get_resources = qcom_pcie_get_resources_2_7_0, .init = qcom_pcie_init_2_7_0, .post_init = qcom_pcie_post_init_2_7_0, + .host_post_init = qcom_pcie_host_post_init_2_7_0, .deinit = qcom_pcie_deinit_2_7_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, .config_sid = qcom_pcie_config_sid_1_9_0, @@ -1386,6 +1417,7 @@ static const struct qcom_pcie_ops ops_1_21_0 = { .get_resources = qcom_pcie_get_resources_2_7_0, .init = qcom_pcie_init_2_7_0, .post_init = qcom_pcie_post_init_2_7_0, + .host_post_init = qcom_pcie_host_post_init_2_7_0, .deinit = qcom_pcie_deinit_2_7_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 4a8735b275e4..3645f392a9fd 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1604,7 +1604,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) pbus_size_io(bus, realloc_head ? 0 : additional_io_size, additional_io_size, realloc_head); - if (pref) { + if (pref && (pref->flags & IORESOURCE_PREFETCH)) { pbus_size_mem(bus, IORESOURCE_MEM | IORESOURCE_PREFETCH | (pref->flags & IORESOURCE_MEM_64), diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 46e62feeda3c..c122016d82f1 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -432,7 +432,7 @@ config WIRELESS_HOTKEY depends on INPUT help This driver provides supports for the wireless buttons found on some AMD, - HP, & Xioami laptops. + HP, & Xiaomi laptops. On such systems the driver should load automatically (via ACPI alias). To compile this driver as a module, choose M here: the module will diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c index 841a5414d28a..28076929d6af 100644 --- a/drivers/platform/x86/dell/dell-wmi-base.c +++ b/drivers/platform/x86/dell/dell-wmi-base.c @@ -365,6 +365,13 @@ static const struct key_entry dell_wmi_keymap_type_0012[] = { /* Backlight brightness change event */ { KE_IGNORE, 0x0003, { KEY_RESERVED } }, + /* + * Electronic privacy screen toggled, extended data gives state, + * separate entries for on/off see handling in dell_wmi_process_key(). 
+ */ + { KE_KEY, 0x000c, { KEY_EPRIVACY_SCREEN_OFF } }, + { KE_KEY, 0x000c, { KEY_EPRIVACY_SCREEN_ON } }, + /* Ultra-performance mode switch request */ { KE_IGNORE, 0x000d, { KEY_RESERVED } }, @@ -435,6 +442,11 @@ static int dell_wmi_process_key(struct wmi_device *wdev, int type, int code, u16 "Dell tablet mode switch", SW_TABLET_MODE, !buffer[0]); return 1; + } else if (type == 0x0012 && code == 0x000c && remaining > 0) { + /* Eprivacy toggle, switch to "on" key entry for on events */ + if (buffer[0] == 2) + key++; + used = 1; } else if (type == 0x0012 && code == 0x000d && remaining > 0) { value = (buffer[2] == 2); used = 1; diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c index 476ec24d3702..9e052b164a1a 100644 --- a/drivers/platform/x86/intel/int3472/clk_and_regulator.c +++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c @@ -245,15 +245,12 @@ int skl_int3472_register_regulator(struct int3472_discrete_device *int3472, if (IS_ERR(regulator->rdev)) return PTR_ERR(regulator->rdev); - int3472->regulators[int3472->n_regulator_gpios].ena_gpio = gpio; int3472->n_regulator_gpios++; return 0; } void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472) { - for (int i = 0; i < int3472->n_regulator_gpios; i++) { + for (int i = 0; i < int3472->n_regulator_gpios; i++) regulator_unregister(int3472->regulators[i].rdev); - gpiod_put(int3472->regulators[i].ena_gpio); - } } diff --git a/drivers/platform/x86/intel/int3472/led.c b/drivers/platform/x86/intel/int3472/led.c index f1d6d7b0cb75..b1d84b968112 100644 --- a/drivers/platform/x86/intel/int3472/led.c +++ b/drivers/platform/x86/intel/int3472/led.c @@ -43,7 +43,7 @@ int skl_int3472_register_pled(struct int3472_discrete_device *int3472, struct gp int3472->pled.lookup.provider = int3472->pled.name; int3472->pled.lookup.dev_id = int3472->sensor_name; - int3472->pled.lookup.con_id = "privacy-led"; + int3472->pled.lookup.con_id = "privacy"; led_add_lookup(&int3472->pled.lookup); return 0; diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c index 022d98f3c32a..ea9c4058ee6a 100644 --- a/drivers/regulator/bd718x7-regulator.c +++ b/drivers/regulator/bd718x7-regulator.c @@ -1613,6 +1613,8 @@ static int setup_feedback_loop(struct device *dev, struct device_node *np, step /= r1; new[j].min = min; + new[j].min_sel = desc->linear_ranges[j].min_sel; + new[j].max_sel = desc->linear_ranges[j].max_sel; new[j].step = step; dev_dbg(dev, "%s: old range min %d, step %d\n", diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index cc5d05dc395c..17173239301e 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -611,8 +611,9 @@ int scsi_host_busy(struct Scsi_Host *shost) { int cnt = 0; - blk_mq_tagset_busy_iter(&shost->tag_set, - scsi_host_check_in_flight, &cnt); + if (shost->tag_set.ops) + blk_mq_tagset_busy_iter(&shost->tag_set, + scsi_host_check_in_flight, &cnt); return cnt; } EXPORT_SYMBOL(scsi_host_busy); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 746ff6a1f309..1c13812a3f03 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -554,9 +554,9 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd) * happened, even if someone else gets the sense data. 
*/ if (sshdr.asc == 0x28) - scmd->device->ua_new_media_ctr++; + atomic_inc(&sdev->ua_new_media_ctr); else if (sshdr.asc == 0x29) - scmd->device->ua_por_ctr++; + atomic_inc(&sdev->ua_por_ctr); } if (scsi_sense_is_deferred(&sshdr)) diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c index 7765fb27c37c..b8c572394aac 100644 --- a/drivers/spi/spi-intel-pci.c +++ b/drivers/spi/spi-intel-pci.c @@ -80,6 +80,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0x5825), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x7723), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info }, diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 8339fec975b9..9ca27de4767a 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -4282,8 +4282,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, get, UIC_GET_ATTR_ID(attr_sel), UFS_UIC_COMMAND_RETRIES - retries); - if (mib_val && !ret) - *mib_val = uic_cmd.argument3; + if (mib_val) + *mib_val = ret == 0 ? uic_cmd.argument3 : 0; if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) && pwr_mode_change) @@ -4999,7 +4999,7 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_enable); static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) { - int tx_lanes = 0, i, err = 0; + int tx_lanes, i, err = 0; if (!peer) ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), @@ -6673,6 +6673,20 @@ static void ufshcd_err_handler(struct work_struct *work) hba->saved_uic_err, hba->force_reset, ufshcd_is_link_broken(hba) ? "; link is broken" : ""); + /* + * Use ufshcd_rpm_get_noresume() here to safely perform link recovery + * even if an error occurs during runtime suspend or runtime resume. + * This avoids potential deadlocks that could happen if we tried to + * resume the device while a PM operation is already in progress. 
+ */ + ufshcd_rpm_get_noresume(hba); + if (hba->pm_op_in_progress) { + ufshcd_link_recovery(hba); + ufshcd_rpm_put(hba); + return; + } + ufshcd_rpm_put(hba); + down(&hba->host_sem); spin_lock_irqsave(hba->host->host_lock, flags); if (ufshcd_err_handling_should_stop(hba)) { @@ -6684,14 +6698,6 @@ static void ufshcd_err_handler(struct work_struct *work) } spin_unlock_irqrestore(hba->host->host_lock, flags); - ufshcd_rpm_get_noresume(hba); - if (hba->pm_op_in_progress) { - ufshcd_link_recovery(hba); - ufshcd_rpm_put(hba); - return; - } - ufshcd_rpm_put(hba); - ufshcd_err_handling_prepare(hba); spin_lock_irqsave(hba->host->host_lock, flags); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 916cad80941c..5167bec14e36 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -38,6 +38,7 @@ #include <linux/workqueue.h> #include <linux/notifier.h> #include <linux/mm_inline.h> +#include <linux/overflow.h> #include "vfio.h" #define DRIVER_VERSION "0.2" @@ -167,12 +168,14 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu, { struct rb_node *node = iommu->dma_list.rb_node; + WARN_ON(!size); + while (node) { struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); - if (start + size <= dma->iova) + if (start + size - 1 < dma->iova) node = node->rb_left; - else if (start >= dma->iova + dma->size) + else if (start > dma->iova + dma->size - 1) node = node->rb_right; else return dma; @@ -182,16 +185,19 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu, } static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, - dma_addr_t start, u64 size) + dma_addr_t start, + dma_addr_t end) { struct rb_node *res = NULL; struct rb_node *node = iommu->dma_list.rb_node; struct vfio_dma *dma_res = NULL; + WARN_ON(end < start); + while (node) { struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); - if (start < dma->iova + dma->size) { + if (start <= dma->iova + dma->size - 1) { res = node; dma_res = dma; if (start >= dma->iova) @@ -201,7 +207,7 @@ static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, node = node->rb_right; } } - if (res && size && dma_res->iova >= start + size) + if (res && dma_res->iova > end) res = NULL; return res; } @@ -211,11 +217,13 @@ static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new) struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL; struct vfio_dma *dma; + WARN_ON(new->size != 0); + while (*link) { parent = *link; dma = rb_entry(parent, struct vfio_dma, node); - if (new->iova + new->size <= dma->iova) + if (new->iova <= dma->iova) link = &(*link)->rb_left; else link = &(*link)->rb_right; @@ -895,14 +903,20 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data, unsigned long remote_vaddr; struct vfio_dma *dma; bool do_accounting; + dma_addr_t iova_end; + size_t iova_size; - if (!iommu || !pages) + if (!iommu || !pages || npage <= 0) return -EINVAL; /* Supported for v2 version only */ if (!iommu->v2) return -EACCES; + if (check_mul_overflow(npage, PAGE_SIZE, &iova_size) || + check_add_overflow(user_iova, iova_size - 1, &iova_end)) + return -EOVERFLOW; + mutex_lock(&iommu->lock); if (WARN_ONCE(iommu->vaddr_invalid_count, @@ -1008,12 +1022,21 @@ static void vfio_iommu_type1_unpin_pages(void *iommu_data, { struct vfio_iommu *iommu = iommu_data; bool do_accounting; + dma_addr_t iova_end; + size_t iova_size; int i; /* Supported for v2 version only */ if (WARN_ON(!iommu->v2)) return; + if (WARN_ON(npage <= 0)) + return; + 
+ if (WARN_ON(check_mul_overflow(npage, PAGE_SIZE, &iova_size) || + check_add_overflow(user_iova, iova_size - 1, &iova_end))) + return; + mutex_lock(&iommu->lock); do_accounting = list_empty(&iommu->domain_list); @@ -1067,7 +1090,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain, #define VFIO_IOMMU_TLB_SYNC_MAX 512 static size_t unmap_unpin_fast(struct vfio_domain *domain, - struct vfio_dma *dma, dma_addr_t *iova, + struct vfio_dma *dma, dma_addr_t iova, size_t len, phys_addr_t phys, long *unlocked, struct list_head *unmapped_list, int *unmapped_cnt, @@ -1077,18 +1100,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain, struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (entry) { - unmapped = iommu_unmap_fast(domain->domain, *iova, len, + unmapped = iommu_unmap_fast(domain->domain, iova, len, iotlb_gather); if (!unmapped) { kfree(entry); } else { - entry->iova = *iova; + entry->iova = iova; entry->phys = phys; entry->len = unmapped; list_add_tail(&entry->list, unmapped_list); - *iova += unmapped; (*unmapped_cnt)++; } } @@ -1107,18 +1129,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain, } static size_t unmap_unpin_slow(struct vfio_domain *domain, - struct vfio_dma *dma, dma_addr_t *iova, + struct vfio_dma *dma, dma_addr_t iova, size_t len, phys_addr_t phys, long *unlocked) { - size_t unmapped = iommu_unmap(domain->domain, *iova, len); + size_t unmapped = iommu_unmap(domain->domain, iova, len); if (unmapped) { - *unlocked += vfio_unpin_pages_remote(dma, *iova, + *unlocked += vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT, unmapped >> PAGE_SHIFT, false); - *iova += unmapped; cond_resched(); } return unmapped; @@ -1127,12 +1148,12 @@ static size_t unmap_unpin_slow(struct vfio_domain *domain, static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, bool do_accounting) { - dma_addr_t iova = dma->iova, end = dma->iova + dma->size; struct vfio_domain *domain, *d; LIST_HEAD(unmapped_region_list); struct iommu_iotlb_gather iotlb_gather; int unmapped_region_cnt = 0; long unlocked = 0; + size_t pos = 0; if (!dma->size) return 0; @@ -1156,13 +1177,14 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, } iommu_iotlb_gather_init(&iotlb_gather); - while (iova < end) { + while (pos < dma->size) { size_t unmapped, len; phys_addr_t phys, next; + dma_addr_t iova = dma->iova + pos; phys = iommu_iova_to_phys(domain->domain, iova); if (WARN_ON(!phys)) { - iova += PAGE_SIZE; + pos += PAGE_SIZE; continue; } @@ -1171,7 +1193,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, * may require hardware cache flushing, try to find the * largest contiguous physical memory chunk to unmap. */ - for (len = PAGE_SIZE; iova + len < end; len += PAGE_SIZE) { + for (len = PAGE_SIZE; pos + len < dma->size; len += PAGE_SIZE) { next = iommu_iova_to_phys(domain->domain, iova + len); if (next != phys + len) break; @@ -1181,16 +1203,18 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, * First, try to use fast unmap/unpin. In case of failure, * switch to slow unmap/unpin path. 
*/ - unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys, + unmapped = unmap_unpin_fast(domain, dma, iova, len, phys, &unlocked, &unmapped_region_list, &unmapped_region_cnt, &iotlb_gather); if (!unmapped) { - unmapped = unmap_unpin_slow(domain, dma, &iova, len, + unmapped = unmap_unpin_slow(domain, dma, iova, len, phys, &unlocked); if (WARN_ON(!unmapped)) break; } + + pos += unmapped; } dma->iommu_mapped = false; @@ -1282,7 +1306,7 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, } static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, - dma_addr_t iova, size_t size, size_t pgsize) + dma_addr_t iova, dma_addr_t iova_end, size_t pgsize) { struct vfio_dma *dma; struct rb_node *n; @@ -1299,8 +1323,8 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, if (dma && dma->iova != iova) return -EINVAL; - dma = vfio_find_dma(iommu, iova + size - 1, 0); - if (dma && dma->iova + dma->size != iova + size) + dma = vfio_find_dma(iommu, iova_end, 1); + if (dma && dma->iova + dma->size - 1 != iova_end) return -EINVAL; for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { @@ -1309,7 +1333,7 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, if (dma->iova < iova) continue; - if (dma->iova > iova + size - 1) + if (dma->iova > iova_end) break; ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize); @@ -1374,7 +1398,8 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, int ret = -EINVAL, retries = 0; unsigned long pgshift; dma_addr_t iova = unmap->iova; - u64 size = unmap->size; + dma_addr_t iova_end; + size_t size = unmap->size; bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL; bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR; struct rb_node *n, *first_n; @@ -1387,6 +1412,11 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, goto unlock; } + if (iova != unmap->iova || size != unmap->size) { + ret = -EOVERFLOW; + goto unlock; + } + pgshift = __ffs(iommu->pgsize_bitmap); pgsize = (size_t)1 << pgshift; @@ -1396,10 +1426,15 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu, if (unmap_all) { if (iova || size) goto unlock; - size = U64_MAX; - } else if (!size || size & (pgsize - 1) || - iova + size - 1 < iova || size > SIZE_MAX) { - goto unlock; + iova_end = ~(dma_addr_t)0; + } else { + if (!size || size & (pgsize - 1)) + goto unlock; + + if (check_add_overflow(iova, size - 1, &iova_end)) { + ret = -EOVERFLOW; + goto unlock; + } } /* When dirty tracking is enabled, allow only min supported pgsize */ @@ -1446,17 +1481,17 @@ again: if (dma && dma->iova != iova) goto unlock; - dma = vfio_find_dma(iommu, iova + size - 1, 0); - if (dma && dma->iova + dma->size != iova + size) + dma = vfio_find_dma(iommu, iova_end, 1); + if (dma && dma->iova + dma->size - 1 != iova_end) goto unlock; } ret = 0; - n = first_n = vfio_find_dma_first_node(iommu, iova, size); + n = first_n = vfio_find_dma_first_node(iommu, iova, iova_end); while (n) { dma = rb_entry(n, struct vfio_dma, node); - if (dma->iova >= iova + size) + if (dma->iova > iova_end) break; if (!iommu->v2 && iova > dma->iova) @@ -1648,7 +1683,9 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, { bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR; dma_addr_t iova = map->iova; + dma_addr_t iova_end; unsigned long vaddr = map->vaddr; + unsigned long vaddr_end; size_t size = map->size; int ret = 0, prot = 0; size_t pgsize; @@ -1656,8 +1693,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, /* 
Verify that none of our __u64 fields overflow */ if (map->size != size || map->vaddr != vaddr || map->iova != iova) + return -EOVERFLOW; + + if (!size) return -EINVAL; + if (check_add_overflow(iova, size - 1, &iova_end) || + check_add_overflow(vaddr, size - 1, &vaddr_end)) + return -EOVERFLOW; + /* READ/WRITE from device perspective */ if (map->flags & VFIO_DMA_MAP_FLAG_WRITE) prot |= IOMMU_WRITE; @@ -1673,13 +1717,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, WARN_ON((pgsize - 1) & PAGE_MASK); - if (!size || (size | iova | vaddr) & (pgsize - 1)) { - ret = -EINVAL; - goto out_unlock; - } - - /* Don't allow IOVA or virtual address wrap */ - if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) { + if ((size | iova | vaddr) & (pgsize - 1)) { ret = -EINVAL; goto out_unlock; } @@ -1710,7 +1748,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, goto out_unlock; } - if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) { + if (!vfio_iommu_iova_dma_valid(iommu, iova, iova_end)) { ret = -EINVAL; goto out_unlock; } @@ -1783,12 +1821,12 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, for (; n; n = rb_next(n)) { struct vfio_dma *dma; - dma_addr_t iova; + size_t pos = 0; dma = rb_entry(n, struct vfio_dma, node); - iova = dma->iova; - while (iova < dma->iova + dma->size) { + while (pos < dma->size) { + dma_addr_t iova = dma->iova + pos; phys_addr_t phys; size_t size; @@ -1804,14 +1842,14 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, phys = iommu_iova_to_phys(d->domain, iova); if (WARN_ON(!phys)) { - iova += PAGE_SIZE; + pos += PAGE_SIZE; continue; } size = PAGE_SIZE; p = phys + size; i = iova + size; - while (i < dma->iova + dma->size && + while (pos + size < dma->size && p == iommu_iova_to_phys(d->domain, i)) { size += PAGE_SIZE; p += PAGE_SIZE; @@ -1819,9 +1857,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, } } else { unsigned long pfn; - unsigned long vaddr = dma->vaddr + - (iova - dma->iova); - size_t n = dma->iova + dma->size - iova; + unsigned long vaddr = dma->vaddr + pos; + size_t n = dma->size - pos; long npage; npage = vfio_pin_pages_remote(dma, vaddr, @@ -1852,7 +1889,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, goto unwind; } - iova += size; + pos += size; } } @@ -1869,29 +1906,29 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, unwind: for (; n; n = rb_prev(n)) { struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); - dma_addr_t iova; + size_t pos = 0; if (dma->iommu_mapped) { iommu_unmap(domain->domain, dma->iova, dma->size); continue; } - iova = dma->iova; - while (iova < dma->iova + dma->size) { + while (pos < dma->size) { + dma_addr_t iova = dma->iova + pos; phys_addr_t phys, p; size_t size; dma_addr_t i; phys = iommu_iova_to_phys(domain->domain, iova); if (!phys) { - iova += PAGE_SIZE; + pos += PAGE_SIZE; continue; } size = PAGE_SIZE; p = phys + size; i = iova + size; - while (i < dma->iova + dma->size && + while (pos + size < dma->size && p == iommu_iova_to_phys(domain->domain, i)) { size += PAGE_SIZE; p += PAGE_SIZE; @@ -2977,7 +3014,8 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu, struct vfio_iommu_type1_dirty_bitmap_get range; unsigned long pgshift; size_t data_size = dirty.argsz - minsz; - size_t iommu_pgsize; + size_t size, iommu_pgsize; + dma_addr_t iova, iova_end; if (!data_size || data_size < sizeof(range)) return -EINVAL; @@ -2986,14 +3024,24 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu, sizeof(range))) return -EFAULT; - if (range.iova + 
range.size < range.iova) + iova = range.iova; + size = range.size; + + if (iova != range.iova || size != range.size) + return -EOVERFLOW; + + if (!size) return -EINVAL; + + if (check_add_overflow(iova, size - 1, &iova_end)) + return -EOVERFLOW; + if (!access_ok((void __user *)range.bitmap.data, range.bitmap.size)) return -EINVAL; pgshift = __ffs(range.bitmap.pgsize); - ret = verify_bitmap_size(range.size >> pgshift, + ret = verify_bitmap_size(size >> pgshift, range.bitmap.size); if (ret) return ret; @@ -3007,19 +3055,18 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu, ret = -EINVAL; goto out_unlock; } - if (range.iova & (iommu_pgsize - 1)) { + if (iova & (iommu_pgsize - 1)) { ret = -EINVAL; goto out_unlock; } - if (!range.size || range.size & (iommu_pgsize - 1)) { + if (size & (iommu_pgsize - 1)) { ret = -EINVAL; goto out_unlock; } if (iommu->dirty_page_tracking) ret = vfio_iova_dirty_bitmap(range.bitmap.data, - iommu, range.iova, - range.size, + iommu, iova, iova_end, range.bitmap.pgsize); else ret = -EINVAL; diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index 210fd3ac18a4..56ef1d88e003 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -2614,8 +2614,12 @@ static int aty_init(struct fb_info *info) pr_cont("\n"); } #endif - if (par->pll_ops->init_pll) - par->pll_ops->init_pll(info, &par->pll); + if (par->pll_ops->init_pll) { + ret = par->pll_ops->init_pll(info, &par->pll); + if (ret) + return ret; + } + if (par->pll_ops->resume_pll) par->pll_ops->resume_pll(info, &par->pll); diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index a9ec7f488522..dc5ad3fcc7be 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -79,12 +79,16 @@ static inline void bit_putcs_aligned(struct vc_data *vc, struct fb_info *info, struct fb_image *image, u8 *buf, u8 *dst) { u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; + unsigned int charcnt = vc->vc_font.charcount; u32 idx = vc->vc_font.width >> 3; u8 *src; while (cnt--) { - src = vc->vc_font.data + (scr_readw(s++)& - charmask)*cellsize; + u16 ch = scr_readw(s++) & charmask; + + if (ch >= charcnt) + ch = 0; + src = vc->vc_font.data + (unsigned int)ch * cellsize; if (attr) { update_attr(buf, src, attr, vc); @@ -112,14 +116,18 @@ static inline void bit_putcs_unaligned(struct vc_data *vc, u8 *dst) { u16 charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; + unsigned int charcnt = vc->vc_font.charcount; u32 shift_low = 0, mod = vc->vc_font.width % 8; u32 shift_high = 8; u32 idx = vc->vc_font.width >> 3; u8 *src; while (cnt--) { - src = vc->vc_font.data + (scr_readw(s++)& - charmask)*cellsize; + u16 ch = scr_readw(s++) & charmask; + + if (ch >= charcnt) + ch = 0; + src = vc->vc_font.data + (unsigned int)ch * cellsize; if (attr) { update_attr(buf, src, attr, vc); diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 96cc9b389246..9bd3c3814b5c 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2810,6 +2810,25 @@ int fbcon_mode_deleted(struct fb_info *info, return found; } +static void fbcon_delete_mode(struct fb_videomode *m) +{ + struct fbcon_display *p; + + for (int i = first_fb_vc; i <= last_fb_vc; i++) { + p = &fb_display[i]; + if (p->mode == m) + p->mode = NULL; + } +} + +void fbcon_delete_modelist(struct list_head *head) +{ + struct fb_modelist *modelist; + + list_for_each_entry(modelist, head, list) + fbcon_delete_mode(&modelist->mode); +} + #ifdef CONFIG_VT_HW_CONSOLE_BINDING static void fbcon_unbind(void) { diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 53f1719b1ae1..eff757ebbed1 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -544,6 +544,7 @@ static void do_unregister_framebuffer(struct fb_info *fb_info) fb_info->pixmap.addr = NULL; } + fbcon_delete_modelist(&fb_info->modelist); fb_destroy_modelist(&fb_info->modelist); registered_fb[fb_info->node] = NULL; num_registered_fb--; diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c index cbdb1caf61bd..0b8d23c12b77 100644 --- a/drivers/video/fbdev/pvr2fb.c +++ b/drivers/video/fbdev/pvr2fb.c @@ -192,7 +192,7 @@ static unsigned long pvr2fb_map; #ifdef CONFIG_PVR2_DMA static unsigned int shdma = PVR2_CASCADE_CHAN; -static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS; +static unsigned int pvr2dma = CONFIG_NR_ONCHIP_DMA_CHANNELS; #endif static struct fb_videomode pvr2_modedb[] = { diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c index 91d070ef6989..6ff059ee1694 100644 --- a/drivers/video/fbdev/valkyriefb.c +++ b/drivers/video/fbdev/valkyriefb.c @@ -329,11 +329,13 @@ static int __init valkyriefb_init(void) if (of_address_to_resource(dp, 0, &r)) { printk(KERN_ERR "can't find address for valkyrie\n"); + of_node_put(dp); return 0; } frame_buffer_phys = r.start; cmap_regs_phys = r.start + 0x304000; + of_node_put(dp); } #endif /* ppc (!CONFIG_MAC) */ diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 755ec6dfd51c..23273d0e6f22 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2228,6 +2228,14 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb, wbc_account_cgroup_owner(wbc, folio, range_len); folio_unlock(folio); } + /* + * If the fs is already in error status, do not submit any writeback + * but immediately finish it. 
+ */ + if (unlikely(BTRFS_FS_ERROR(fs_info))) { + btrfs_bio_end_io(bbio, errno_to_blk_status(BTRFS_FS_ERROR(fs_info))); + return; + } btrfs_submit_bbio(bbio, 0); } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 7efd1f8a1912..fa82def46e39 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2854,12 +2854,22 @@ static int btrfs_fallocate_update_isize(struct inode *inode, { struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(inode)->root; + u64 range_start; + u64 range_end; int ret; int ret2; if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode)) return 0; + range_start = round_down(i_size_read(inode), root->fs_info->sectorsize); + range_end = round_up(end, root->fs_info->sectorsize); + + ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start, + range_end - range_start); + if (ret) + return ret; + trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) return PTR_ERR(trans); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3b1b3a0553ee..3df5f36185a0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6873,7 +6873,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, BTRFS_I(inode)->dir_index = 0ULL; inode_inc_iversion(inode); inode_set_ctime_current(inode); - set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), &fname.disk_name, 1, index); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 1175b8192cd7..31ad8580322a 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1539,8 +1539,10 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst ASSERT(prealloc); /* Check the level of src and dst first */ - if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) + if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) { + kfree(prealloc); return -EINVAL; + } mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) { diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 621e0df097e3..c90b2d2cb08f 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -7910,6 +7910,9 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans, bool log_pinned = false; int ret; + /* The inode has a new name (ref/extref), so make sure we log it. */ + set_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags); + btrfs_init_log_ctx(&ctx, inode); ctx.logging_new_name = true; diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c index 5dee7c498bc8..ed6e926226b5 100644 --- a/fs/crypto/inline_crypt.c +++ b/fs/crypto/inline_crypt.c @@ -333,8 +333,7 @@ static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh, inode = mapping->host; *inode_ret = inode; - *lblk_num_ret = ((u64)folio->index << (PAGE_SHIFT - inode->i_blkbits)) + - (bh_offset(bh) >> inode->i_blkbits); + *lblk_num_ret = (folio_pos(folio) + bh_offset(bh)) >> inode->i_blkbits; return true; } diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index 4f959f1e08d2..185ac41bd7e9 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -173,7 +173,7 @@ module_param(enable_oplocks, bool, 0644); MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1"); module_param(enable_gcm_256, bool, 0644); -MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0"); +MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. 
Default: y/Y/1"); module_param(require_gcm_256, bool, 0644); MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0"); diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index fb1813cbe0eb..3528c365a452 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -616,6 +616,8 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16, extern struct TCP_Server_Info * cifs_find_tcp_session(struct smb3_fs_context *ctx); +struct cifs_tcon *cifs_setup_ipc(struct cifs_ses *ses, bool seal); + void __cifs_put_smb_ses(struct cifs_ses *ses); extern struct cifs_ses * diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index dd12f3eb61dc..55cb4b0cbd48 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -310,6 +310,8 @@ cifs_abort_connection(struct TCP_Server_Info *server) server->ssocket->flags); sock_release(server->ssocket); server->ssocket = NULL; + } else if (cifs_rdma_enabled(server)) { + smbd_destroy(server); } server->sequence_number = 0; server->session_estab = false; @@ -338,12 +340,6 @@ cifs_abort_connection(struct TCP_Server_Info *server) mid_execute_callback(mid); release_mid(mid); } - - if (cifs_rdma_enabled(server)) { - cifs_server_lock(server); - smbd_destroy(server); - cifs_server_unlock(server); - } } static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets) @@ -2015,39 +2011,31 @@ static int match_session(struct cifs_ses *ses, /** * cifs_setup_ipc - helper to setup the IPC tcon for the session * @ses: smb session to issue the request on - * @ctx: the superblock configuration context to use for building the - * new tree connection for the IPC (interprocess communication RPC) + * @seal: if encryption is requested * * A new IPC connection is made and stored in the session * tcon_ipc. The IPC tcon has the same lifetime as the session. */ -static int -cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx) +struct cifs_tcon *cifs_setup_ipc(struct cifs_ses *ses, bool seal) { int rc = 0, xid; struct cifs_tcon *tcon; char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0}; - bool seal = false; struct TCP_Server_Info *server = ses->server; /* * If the mount request that resulted in the creation of the * session requires encryption, force IPC to be encrypted too. 
*/ - if (ctx->seal) { - if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) - seal = true; - else { - cifs_server_dbg(VFS, - "IPC: server doesn't support encryption\n"); - return -EOPNOTSUPP; - } + if (seal && !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) { + cifs_server_dbg(VFS, "IPC: server doesn't support encryption\n"); + return ERR_PTR(-EOPNOTSUPP); } /* no need to setup directory caching on IPC share, so pass in false */ tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_ipc); if (tcon == NULL) - return -ENOMEM; + return ERR_PTR(-ENOMEM); spin_lock(&server->srv_lock); scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname); @@ -2057,13 +2045,13 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx) tcon->ses = ses; tcon->ipc = true; tcon->seal = seal; - rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls); + rc = server->ops->tree_connect(xid, ses, unc, tcon, ses->local_nls); free_xid(xid); if (rc) { - cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc); + cifs_server_dbg(VFS | ONCE, "failed to connect to IPC (rc=%d)\n", rc); tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc_fail); - goto out; + return ERR_PTR(rc); } cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid); @@ -2071,9 +2059,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx) spin_lock(&tcon->tc_lock); tcon->status = TID_GOOD; spin_unlock(&tcon->tc_lock); - ses->tcon_ipc = tcon; -out: - return rc; + return tcon; } static struct cifs_ses * @@ -2347,6 +2333,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) { struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; + struct cifs_tcon *ipc; struct cifs_ses *ses; unsigned int xid; int retries = 0; @@ -2525,7 +2512,12 @@ retry_new_session: list_add(&ses->smb_ses_list, &server->smb_ses_list); spin_unlock(&cifs_tcp_ses_lock); - cifs_setup_ipc(ses, ctx); + ipc = cifs_setup_ipc(ses, ctx->seal); + spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); + ses->tcon_ipc = !IS_ERR(ipc) ? 
ipc : NULL; + spin_unlock(&ses->ses_lock); + spin_unlock(&cifs_tcp_ses_lock); free_xid(xid); diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c index 4dada26d56b5..f2ad0ccd08a7 100644 --- a/fs/smb/client/dfs_cache.c +++ b/fs/smb/client/dfs_cache.c @@ -1120,24 +1120,63 @@ static bool target_share_equal(struct cifs_tcon *tcon, const char *s1) return match; } -static bool is_ses_good(struct cifs_ses *ses) +static bool is_ses_good(struct cifs_tcon *tcon, struct cifs_ses *ses) { struct TCP_Server_Info *server = ses->server; - struct cifs_tcon *tcon = ses->tcon_ipc; + struct cifs_tcon *ipc = NULL; bool ret; + spin_lock(&cifs_tcp_ses_lock); spin_lock(&ses->ses_lock); spin_lock(&ses->chan_lock); + ret = !cifs_chan_needs_reconnect(ses, server) && - ses->ses_status == SES_GOOD && - !tcon->need_reconnect; + ses->ses_status == SES_GOOD; + spin_unlock(&ses->chan_lock); + + if (!ret) + goto out; + + if (likely(ses->tcon_ipc)) { + if (ses->tcon_ipc->need_reconnect) { + ret = false; + goto out; + } + } else { + spin_unlock(&ses->ses_lock); + spin_unlock(&cifs_tcp_ses_lock); + + ipc = cifs_setup_ipc(ses, tcon->seal); + + spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); + if (!IS_ERR(ipc)) { + if (!ses->tcon_ipc) { + ses->tcon_ipc = ipc; + ipc = NULL; + } + } else { + ret = false; + ipc = NULL; + } + } + +out: spin_unlock(&ses->ses_lock); + spin_unlock(&cifs_tcp_ses_lock); + if (ipc && server->ops->tree_disconnect) { + unsigned int xid = get_xid(); + + (void)server->ops->tree_disconnect(xid, ipc); + _free_xid(xid); + } + tconInfoFree(ipc, netfs_trace_tcon_ref_free_ipc); return ret; } /* Refresh dfs referral of @ses */ -static void refresh_ses_referral(struct cifs_ses *ses) +static void refresh_ses_referral(struct cifs_tcon *tcon, struct cifs_ses *ses) { struct cache_entry *ce; unsigned int xid; @@ -1153,7 +1192,7 @@ static void refresh_ses_referral(struct cifs_ses *ses) } ses = CIFS_DFS_ROOT_SES(ses); - if (!is_ses_good(ses)) { + if (!is_ses_good(tcon, ses)) { cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__); goto out; @@ -1241,7 +1280,7 @@ static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh) up_read(&htable_rw_lock); ses = CIFS_DFS_ROOT_SES(ses); - if (!is_ses_good(ses)) { + if (!is_ses_good(tcon, ses)) { cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__); goto out; @@ -1309,7 +1348,7 @@ void dfs_cache_refresh(struct work_struct *work) tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work); list_for_each_entry(ses, &tcon->dfs_ses_list, dlist) - refresh_ses_referral(ses); + refresh_ses_referral(tcon, ses); refresh_tcon_referral(tcon, false); queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work, diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 0f9130ef2e7d..1e39f2165e42 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -2799,11 +2799,12 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid fid; int rc; __le16 *utf16_path; - struct cached_fid *cfid = NULL; + struct cached_fid *cfid; int retries = 0, cur_sleep = 1; replay_again: /* reinitialize for possible replay */ + cfid = NULL; flags = CIFS_CP_CREATE_CLOSE_OP; oplock = SMB2_OPLOCK_LEVEL_NONE; server = cifs_pick_channel(ses); diff --git a/fs/xfs/libxfs/xfs_rtgroup.h b/fs/xfs/libxfs/xfs_rtgroup.h index d36a6ae0abe5..d4fcf591e63d 100644 --- a/fs/xfs/libxfs/xfs_rtgroup.h +++ b/fs/xfs/libxfs/xfs_rtgroup.h @@ -50,6 +50,12 @@ struct xfs_rtgroup { uint8_t *rtg_rsum_cache; 
struct xfs_open_zone *rtg_open_zone; }; + + /* + * Count of outstanding GC operations for zoned XFS. Any RTG with a + * non-zero rtg_gccount will not be picked as new GC victim. + */ + atomic_t rtg_gccount; }; /* diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c index 23cdab4515bb..040402240807 100644 --- a/fs/xfs/xfs_zone_alloc.c +++ b/fs/xfs/xfs_zone_alloc.c @@ -246,6 +246,14 @@ xfs_zoned_map_extent( * If a data write raced with this GC write, keep the existing data in * the data fork, mark our newly written GC extent as reclaimable, then * move on to the next extent. + * + * Note that this can also happen when racing with operations that do + * not actually invalidate the data, but just move it to a different + * inode (XFS_IOC_EXCHANGE_RANGE), or to a different offset inside the + * inode (FALLOC_FL_COLLAPSE_RANGE / FALLOC_FL_INSERT_RANGE). If the + * data was just moved around, GC fails to free the zone, but the zone + * becomes a GC candidate again as soon as all previous GC I/O has + * finished and these blocks will be moved out eventually. */ if (old_startblock != NULLFSBLOCK && old_startblock != data.br_startblock) diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c index 109877d9a6bf..4ade54445532 100644 --- a/fs/xfs/xfs_zone_gc.c +++ b/fs/xfs/xfs_zone_gc.c @@ -114,6 +114,8 @@ struct xfs_gc_bio { /* Open Zone being written to */ struct xfs_open_zone *oz; + struct xfs_rtgroup *victim_rtg; + /* Bio used for reads and writes, including the bvec used by it */ struct bio_vec bv; struct bio bio; /* must be last */ @@ -264,6 +266,7 @@ xfs_zone_gc_iter_init( iter->rec_count = 0; iter->rec_idx = 0; iter->victim_rtg = victim_rtg; + atomic_inc(&victim_rtg->rtg_gccount); } /* @@ -362,6 +365,7 @@ xfs_zone_gc_query( return 0; done: + atomic_dec(&iter->victim_rtg->rtg_gccount); xfs_rtgroup_rele(iter->victim_rtg); iter->victim_rtg = NULL; return 0; @@ -451,6 +455,20 @@ xfs_zone_gc_pick_victim_from( if (!rtg) continue; + /* + * If the zone is already undergoing GC, don't pick it again. + * + * This prevents us from picking one of the zones for which we + * already submitted GC I/O, but for which the remapping hasn't + * concluded yet. This won't cause data corruption, but + * increases write amplification and slows down GC, so this is + * a bad thing. 
+ */ + if (atomic_read(&rtg->rtg_gccount)) { + xfs_rtgroup_rele(rtg); + continue; + } + /* skip zones that are just waiting for a reset */ if (rtg_rmap(rtg)->i_used_blocks == 0 || rtg_rmap(rtg)->i_used_blocks >= victim_used) { @@ -688,6 +706,9 @@ xfs_zone_gc_start_chunk( chunk->scratch = &data->scratch[data->scratch_idx]; chunk->data = data; chunk->oz = oz; + chunk->victim_rtg = iter->victim_rtg; + atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref); + atomic_inc(&chunk->victim_rtg->rtg_gccount); bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock); bio->bi_end_io = xfs_zone_gc_end_io; @@ -710,6 +731,8 @@ static void xfs_zone_gc_free_chunk( struct xfs_gc_bio *chunk) { + atomic_dec(&chunk->victim_rtg->rtg_gccount); + xfs_rtgroup_rele(chunk->victim_rtg); list_del(&chunk->entry); xfs_open_zone_put(chunk->oz); xfs_irele(chunk->ip); @@ -770,6 +793,10 @@ xfs_zone_gc_split_write( split_chunk->oz = chunk->oz; atomic_inc(&chunk->oz->oz_ref); + split_chunk->victim_rtg = chunk->victim_rtg; + atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref); + atomic_inc(&chunk->victim_rtg->rtg_gccount); + chunk->offset += split_len; chunk->len -= split_len; chunk->old_startblock += XFS_B_TO_FSB(data->mp, split_len); diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 8a9a2e732a65..e04d56a5332e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -832,7 +832,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) /* Required sections not related to debugging. */ #define ELF_DETAILS \ - .modinfo : { *(.modinfo) } \ + .modinfo : { *(.modinfo) . = ALIGN(8); } \ .comment 0 : { *(.comment) } \ .symtab 0 : { *(.symtab) } \ .strtab 0 : { *(.strtab) } \ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 8e8d1cc8b06c..44c30183ecc3 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -341,15 +341,15 @@ enum req_op { /* write the zero filled sector many times */ REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9, /* Open a zone */ - REQ_OP_ZONE_OPEN = (__force blk_opf_t)10, + REQ_OP_ZONE_OPEN = (__force blk_opf_t)11, /* Close a zone */ - REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11, + REQ_OP_ZONE_CLOSE = (__force blk_opf_t)13, /* Transition a zone to full */ - REQ_OP_ZONE_FINISH = (__force blk_opf_t)13, + REQ_OP_ZONE_FINISH = (__force blk_opf_t)15, /* reset a zone write pointer */ - REQ_OP_ZONE_RESET = (__force blk_opf_t)15, + REQ_OP_ZONE_RESET = (__force blk_opf_t)17, /* reset all the zone present on the device */ - REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)17, + REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)19, /* Driver private requests */ REQ_OP_DRV_IN = (__force blk_opf_t)34, @@ -478,6 +478,7 @@ static inline bool op_is_zone_mgmt(enum req_op op) { switch (op & REQ_OP_MASK) { case REQ_OP_ZONE_RESET: + case REQ_OP_ZONE_RESET_ALL: case REQ_OP_ZONE_OPEN: case REQ_OP_ZONE_CLOSE: case REQ_OP_ZONE_FINISH: diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 59288a2c1ad2..1414be493738 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -461,6 +461,12 @@ struct ftrace_likely_data { # define __nocfi #endif +#if defined(CONFIG_ARCH_USES_CFI_GENERIC_LLVM_PASS) +# define __nocfi_generic __nocfi +#else +# define __nocfi_generic +#endif + /* * Any place that could be marked with the "alloc_size" attribute is also * a place to be marked with the "malloc" attribute, except those that may diff --git a/include/linux/fbcon.h 
b/include/linux/fbcon.h index 81f0e698acbf..f206370060e1 100644 --- a/include/linux/fbcon.h +++ b/include/linux/fbcon.h @@ -18,6 +18,7 @@ void fbcon_suspended(struct fb_info *info); void fbcon_resumed(struct fb_info *info); int fbcon_mode_deleted(struct fb_info *info, struct fb_videomode *mode); +void fbcon_delete_modelist(struct list_head *head); void fbcon_new_modelist(struct fb_info *info); void fbcon_get_requirement(struct fb_info *info, struct fb_blit_caps *caps); @@ -38,6 +39,7 @@ static inline void fbcon_suspended(struct fb_info *info) {} static inline void fbcon_resumed(struct fb_info *info) {} static inline int fbcon_mode_deleted(struct fb_info *info, struct fb_videomode *mode) { return 0; } +static inline void fbcon_delete_modelist(struct list_head *head) {} static inline void fbcon_new_modelist(struct fb_info *info) {} static inline void fbcon_get_requirement(struct fb_info *info, struct fb_blit_caps *caps) {} diff --git a/include/linux/platform_data/x86/int3472.h b/include/linux/platform_data/x86/int3472.h index 1571e9157fa5..b1b837583d54 100644 --- a/include/linux/platform_data/x86/int3472.h +++ b/include/linux/platform_data/x86/int3472.h @@ -100,7 +100,6 @@ struct int3472_gpio_regulator { struct regulator_consumer_supply supply_map[GPIO_REGULATOR_SUPPLY_MAP_COUNT * 2]; char supply_name_upper[GPIO_SUPPLY_NAME_LENGTH]; char regulator_name[GPIO_REGULATOR_NAME_LENGTH]; - struct gpio_desc *ena_gpio; struct regulator_dev *rdev; struct regulator_desc rdesc; }; diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 4e1ac1fbcec4..55343795644b 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1643,7 +1643,7 @@ struct regmap_irq_chip_data; * @status_invert: Inverted status register: cleared bits are active interrupts. * @status_is_level: Status register is actuall signal level: Xor status * register with previous value to get active interrupts. - * @wake_invert: Inverted wake register: cleared bits are wake enabled. + * @wake_invert: Inverted wake register: cleared bits are wake disabled. * @type_in_mask: Use the mask registers for controlling irq type. Use this if * the hardware provides separate bits for rising/falling edge * or low/high level interrupts and they should be combined into diff --git a/include/linux/sched.h b/include/linux/sched.h index cbb7340c5866..b469878de25c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2407,12 +2407,12 @@ static inline void __migrate_enable(void) { } * be defined in kernel/sched/core.c. */ #ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE -static inline void migrate_disable(void) +static __always_inline void migrate_disable(void) { __migrate_disable(); } -static inline void migrate_enable(void) +static __always_inline void migrate_enable(void) { __migrate_enable(); } diff --git a/include/net/libeth/xdp.h b/include/net/libeth/xdp.h index bc3507edd589..898723ab62e8 100644 --- a/include/net/libeth/xdp.h +++ b/include/net/libeth/xdp.h @@ -513,7 +513,7 @@ struct libeth_xdp_tx_desc { * can't fail, but can send less frames if there's no enough free descriptors * available. The actual free space is returned by @prep from the driver. 
*/ -static __always_inline u32 +static __always_inline __nocfi_generic u32 libeth_xdp_tx_xmit_bulk(const struct libeth_xdp_tx_frame *bulk, void *xdpsq, u32 n, bool unroll, u64 priv, u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq), diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 6d6500148c4b..993008cdea65 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -252,8 +252,8 @@ struct scsi_device { unsigned int queue_stopped; /* request queue is quiesced */ bool offline_already; /* Device offline message logged */ - unsigned int ua_new_media_ctr; /* Counter for New Media UNIT ATTENTIONs */ - unsigned int ua_por_ctr; /* Counter for Power On / Reset UAs */ + atomic_t ua_new_media_ctr; /* Counter for New Media UNIT ATTENTIONs */ + atomic_t ua_por_ctr; /* Counter for Power On / Reset UAs */ atomic_t disk_events_disable_depth; /* disable depth for disk events */ @@ -693,10 +693,8 @@ static inline int scsi_device_busy(struct scsi_device *sdev) } /* Macros to access the UNIT ATTENTION counters */ -#define scsi_get_ua_new_media_ctr(sdev) \ - ((const unsigned int)(sdev->ua_new_media_ctr)) -#define scsi_get_ua_por_ctr(sdev) \ - ((const unsigned int)(sdev->ua_por_ctr)) +#define scsi_get_ua_new_media_ctr(sdev) atomic_read(&sdev->ua_new_media_ctr) +#define scsi_get_ua_por_ctr(sdev) atomic_read(&sdev->ua_por_ctr) #define MODULE_ALIAS_SCSI_DEVICE(type) \ MODULE_ALIAS("scsi:t-" __stringify(type) "*") diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h index cde8f173f566..22acaaec7b1c 100644 --- a/include/uapi/linux/fb.h +++ b/include/uapi/linux/fb.h @@ -319,7 +319,7 @@ enum { #define FB_VBLANK_HAVE_VCOUNT 0x020 /* the vcount field is valid */ #define FB_VBLANK_HAVE_HCOUNT 0x040 /* the hcount field is valid */ #define FB_VBLANK_VSYNCING 0x080 /* currently in a vsync */ -#define FB_VBLANK_HAVE_VSYNC 0x100 /* verical syncs can be detected */ +#define FB_VBLANK_HAVE_VSYNC 0x100 /* vertical syncs can be detected */ struct fb_vblank { __u32 flags; /* FB_VBLANK flags */ diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 4a9fbf42aa9f..9cd89bcc1d9c 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -631,6 +631,18 @@ #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ +/* + * Keycodes for hotkeys toggling the electronic privacy screen found on some + * laptops on/off. Note when the embedded-controller turns on/off the eprivacy + * screen itself then the state should be reported through drm connecter props: + * https://www.kernel.org/doc/html/latest/gpu/drm-kms.html#standard-connector-properties + * Except when implementing the drm connecter properties API is not possible + * because e.g. the firmware does not allow querying the presence and/or status + * of the eprivacy screen at boot. 
+ */ +#define KEY_EPRIVACY_SCREEN_ON 0x252 +#define KEY_EPRIVACY_SCREEN_OFF 0x253 + #define KEY_KBDINPUTASSIST_PREV 0x260 #define KEY_KBDINPUTASSIST_NEXT 0x261 #define KEY_KBDINPUTASSIST_PREVGROUP 0x262 diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 8eb117c52817..eb25e70e0bdc 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -4345,6 +4345,7 @@ BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLE BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_local_irq_save) BTF_ID_FLAGS(func, bpf_local_irq_restore) +#ifdef CONFIG_BPF_EVENTS BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr) BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr) BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr) @@ -4353,6 +4354,7 @@ BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) +#endif #ifdef CONFIG_DMA_SHARED_BUFFER BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index 719d73299397..d706c4b7f532 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -216,6 +216,8 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr) static void bpf_ringbuf_free(struct bpf_ringbuf *rb) { + irq_work_sync(&rb->work); + /* copy pages pointer and nr_pages to local variable, as we are going * to unmap rb itself with vunmap() below */ diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 14e85ff23551..53166ef86ba4 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -706,7 +706,6 @@ static void power_down(void) #ifdef CONFIG_SUSPEND if (hibernation_mode == HIBERNATION_SUSPEND) { - pm_restore_gfp_mask(); error = suspend_devices_and_enter(mem_sleep_current); if (!error) goto exit; @@ -746,9 +745,6 @@ static void power_down(void) cpu_relax(); exit: - /* Match the pm_restore_gfp_mask() call in hibernate(). */ - pm_restrict_gfp_mask(); - /* Restore swap signature. */ error = swsusp_unmark(); if (error) diff --git a/kernel/power/main.c b/kernel/power/main.c index 3cf2d7e72567..549f51ca3a1e 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -31,23 +31,35 @@ * held, unless the suspend/hibernate code is guaranteed not to run in parallel * with that modification). 
*/ +static unsigned int saved_gfp_count; static gfp_t saved_gfp_mask; void pm_restore_gfp_mask(void) { WARN_ON(!mutex_is_locked(&system_transition_mutex)); - if (saved_gfp_mask) { - gfp_allowed_mask = saved_gfp_mask; - saved_gfp_mask = 0; - } + + if (WARN_ON(!saved_gfp_count) || --saved_gfp_count) + return; + + gfp_allowed_mask = saved_gfp_mask; + saved_gfp_mask = 0; + + pm_pr_dbg("GFP mask restored\n"); } void pm_restrict_gfp_mask(void) { WARN_ON(!mutex_is_locked(&system_transition_mutex)); - WARN_ON(saved_gfp_mask); + + if (saved_gfp_count++) { + WARN_ON((saved_gfp_mask & ~(__GFP_IO | __GFP_FS)) != gfp_allowed_mask); + return; + } + saved_gfp_mask = gfp_allowed_mask; gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); + + pm_pr_dbg("GFP mask restricted\n"); } unsigned int lock_system_sleep(void) diff --git a/kernel/power/process.c b/kernel/power/process.c index 8ff68ebaa1e0..dc0dfc349f22 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -132,6 +132,7 @@ int freeze_processes(void) if (!pm_freezing) static_branch_inc(&freezer_active); + pm_wakeup_clear(0); pm_freezing = true; error = try_to_freeze_tasks(true); if (!error) diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 4bb4686c1c08..b4ca17c2fecf 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -595,7 +595,6 @@ static int enter_state(suspend_state_t state) } pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]); - pm_wakeup_clear(0); pm_suspend_clear_flags(); error = suspend_prepare(state); if (error) diff --git a/lib/Kconfig.kmsan b/lib/Kconfig.kmsan index 7251b6b59e69..cae1ddcc18e1 100644 --- a/lib/Kconfig.kmsan +++ b/lib/Kconfig.kmsan @@ -3,7 +3,7 @@ config HAVE_ARCH_KMSAN bool config HAVE_KMSAN_COMPILER - def_bool CC_IS_CLANG + def_bool $(cc-option,-fsanitize=kernel-memory) config KMSAN bool "KMSAN: detector of uninitialized values use" diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 8886055e938f..16859c6226dd 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -64,7 +64,7 @@ config CRYPTO_LIB_CURVE25519 config CRYPTO_LIB_CURVE25519_ARCH bool depends on CRYPTO_LIB_CURVE25519 && !UML && !KMSAN - default y if ARM && KERNEL_MODE_NEON + default y if ARM && KERNEL_MODE_NEON && !CPU_BIG_ENDIAN default y if PPC64 && CPU_LITTLE_ENDIAN default y if X86_64 diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index bded351aeace..d2845b214585 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -90,7 +90,7 @@ else libcurve25519-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += curve25519-fiat32.o endif # clang versions prior to 18 may blow out the stack with KASAN -ifeq ($(call clang-min-version, 180000),) +ifeq ($(CONFIG_CC_IS_CLANG)_$(call clang-min-version, 180000),y_) KASAN_SANITIZE_curve25519-hacl64.o := n endif diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 8c01eabd4eaf..63130a48e237 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -739,7 +739,7 @@ static struct kunit_case kunit_current_test_cases[] = { static void test_dev_action(void *priv) { - *(void **)priv = (void *)1; + *(long *)priv = 1; } static void kunit_device_test(struct kunit *test) diff --git a/lib/kunit/test.c b/lib/kunit/test.c index bb66ea1a3eac..62eb529824c6 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -745,7 +745,8 @@ int kunit_run_tests(struct kunit_suite *suite) .param_index = ++test.param_index, .parent = &test, }; - kunit_init_test(¶m_test, test_case->name, test_case->log); + kunit_init_test(¶m_test, test_case->name, NULL); + 
param_test.log = test_case->log; kunit_run_case_catch_errors(suite, test_case, ¶m_test); if (param_desc[0] == '\0') { diff --git a/net/core/filter.c b/net/core/filter.c index 76628df1fc82..fa06c5a08e22 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3877,7 +3877,8 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, u32 new_len = skb->len + head_room; int ret; - if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) || + if (unlikely(flags || (int)head_room < 0 || + (!skb_is_gso(skb) && new_len > max_len) || new_len < skb->len)) return -EINVAL; diff --git a/rust/Makefile b/rust/Makefile index 23c7ae905bd2..3e545c1a0ff4 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -69,6 +69,9 @@ core-edition := $(if $(call rustc-min-version,108700),2024,2021) # the time being (https://github.com/rust-lang/rust/issues/144521). rustdoc_modifiers_workaround := $(if $(call rustc-min-version,108800),-Cunsafe-allow-abi-mismatch=fixed-x18) +# Similarly, for doctests (https://github.com/rust-lang/rust/issues/146465). +doctests_modifiers_workaround := $(rustdoc_modifiers_workaround)$(if $(call rustc-min-version,109100),$(comma)sanitizer) + # `rustc` recognizes `--remap-path-prefix` since 1.26.0, but `rustdoc` only # since Rust 1.81.0. Moreover, `rustdoc` ICEs on out-of-tree builds since Rust # 1.82.0 (https://github.com/rust-lang/rust/issues/138520). Thus workaround both @@ -127,9 +130,14 @@ rustdoc-core: private rustc_target_flags = --edition=$(core-edition) $(core-cfgs rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs rustdoc-clean FORCE +$(call if_changed,rustdoc) +# Even if `rustdoc` targets are not kernel objects, they should still be +# treated as such so that we pass the same flags. Otherwise, for instance, +# `rustdoc` will complain about missing sanitizer flags causing an ABI mismatch. +rustdoc-compiler_builtins: private is-kernel-object := y rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE +$(call if_changed,rustdoc) +rustdoc-ffi: private is-kernel-object := y rustdoc-ffi: $(src)/ffi.rs rustdoc-core FORCE +$(call if_changed,rustdoc) @@ -147,6 +155,7 @@ rustdoc-pin_init: $(src)/pin-init/src/lib.rs rustdoc-pin_init_internal \ rustdoc-macros FORCE +$(call if_changed,rustdoc) +rustdoc-kernel: private is-kernel-object := y rustdoc-kernel: private rustc_target_flags = --extern ffi --extern pin_init \ --extern build_error --extern macros \ --extern bindings --extern uapi @@ -230,7 +239,7 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $< --extern bindings --extern uapi \ --no-run --crate-name kernel -Zunstable-options \ --sysroot=/dev/null \ - $(rustdoc_modifiers_workaround) \ + $(doctests_modifiers_workaround) \ --test-builder $(objtree)/scripts/rustdoc_test_builder \ $< $(rustdoc_test_kernel_quiet); \ $(objtree)/scripts/rustdoc_test_gen @@ -522,6 +531,10 @@ $(obj)/pin_init.o: $(src)/pin-init/src/lib.rs $(obj)/compiler_builtins.o \ $(obj)/$(libpin_init_internal_name) $(obj)/$(libmacros_name) FORCE +$(call if_changed_rule,rustc_library) +# Even if normally `build_error` is not a kernel object, it should still be +# treated as such so that we pass the same flags. Otherwise, for instance, +# `rustc` will complain about missing sanitizer flags causing an ABI mismatch. 
+$(obj)/build_error.o: private is-kernel-object := y $(obj)/build_error.o: private skip_gendwarfksyms = 1 $(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE +$(call if_changed_rule,rustc_library) diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs index 10a6a1789854..2392c281459e 100644 --- a/rust/kernel/devres.rs +++ b/rust/kernel/devres.rs @@ -103,7 +103,7 @@ struct Inner<T: Send> { /// /// # Invariants /// -/// [`Self::inner`] is guaranteed to be initialized and is always accessed read-only. +/// `Self::inner` is guaranteed to be initialized and is always accessed read-only. #[pin_data(PinnedDrop)] pub struct Devres<T: Send> { dev: ARef<Device>, diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs index c6ec64295c9f..aa5b9a7a726d 100644 --- a/rust/kernel/sync/condvar.rs +++ b/rust/kernel/sync/condvar.rs @@ -36,7 +36,7 @@ pub use new_condvar; /// spuriously. /// /// Instances of [`CondVar`] need a lock class and to be pinned. The recommended way to create such -/// instances is with the [`pin_init`](crate::pin_init!) and [`new_condvar`] macros. +/// instances is with the [`pin_init`](pin_init::pin_init!) and [`new_condvar`] macros. /// /// # Examples /// diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c index 84ea9215c0a7..b8b7bba84a65 100644 --- a/scripts/kconfig/mconf.c +++ b/scripts/kconfig/mconf.c @@ -12,6 +12,7 @@ #include <errno.h> #include <fcntl.h> #include <limits.h> +#include <locale.h> #include <stdarg.h> #include <stdlib.h> #include <string.h> @@ -931,6 +932,8 @@ int main(int ac, char **av) signal(SIGINT, sig_handler); + setlocale(LC_ALL, ""); + if (ac > 1 && strcmp(av[1], "-s") == 0) { silent = 1; /* Silence conf_read() until the real callback is set up */ diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c index ae1fe5f60327..521700ed7152 100644 --- a/scripts/kconfig/nconf.c +++ b/scripts/kconfig/nconf.c @@ -7,6 +7,7 @@ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif +#include <locale.h> #include <string.h> #include <strings.h> #include <stdlib.h> @@ -1478,6 +1479,8 @@ int main(int ac, char **av) int lines, columns; char *mode; + setlocale(LC_ALL, ""); + if (ac > 1 && strcmp(av[1], "-s") == 0) { /* Silence conf_read() until the real callback is set up */ conf_set_message_callback(NULL); diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build index b96538787f3d..054fdf45cc37 100755 --- a/scripts/package/install-extmod-build +++ b/scripts/package/install-extmod-build @@ -63,7 +63,7 @@ if [ "${CC}" != "${HOSTCC}" ]; then # Clear VPATH and srcroot because the source files reside in the output # directory. # shellcheck disable=SC2016 # $(MAKE) and $(build) will be expanded by Make - "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-base=. "${destdir}")"/scripts + "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-to=. 
"${destdir}")"/scripts rm -f "${destdir}/scripts/Kbuild" fi diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c index 8ad5febd822a..4aec5067c59d 100644 --- a/sound/hda/codecs/realtek/alc269.c +++ b/sound/hda/codecs/realtek/alc269.c @@ -3736,6 +3736,7 @@ enum { ALC285_FIXUP_ASUS_GA605K_I2C_SPEAKER2_TO_DAC1, ALC269_FIXUP_POSITIVO_P15X_HEADSET_MIC, ALC289_FIXUP_ASUS_ZEPHYRUS_DUAL_SPK, + ALC256_FIXUP_VAIO_RPL_MIC_NO_PRESENCE, }; /* A special fixup for Lenovo C940 and Yoga Duet 7; @@ -6172,6 +6173,16 @@ static const struct hda_fixup alc269_fixups[] = { { 0x1e, 0x90170150 }, /* Internal Speaker */ { } }, + }, + [ALC256_FIXUP_VAIO_RPL_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a1113c }, /* use as headset mic, without its own jack detect */ + { 0x1a, 0x22a190a0 }, /* dock mic */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST } }; @@ -6578,6 +6589,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8c16, "HP Spectre x360 2-in-1 Laptop 16-aa0xxx", ALC245_FIXUP_HP_SPECTRE_X360_16_AA0XXX), SND_PCI_QUIRK(0x103c, 0x8c17, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8c21, "HP Pavilion Plus Laptop 14-ey0XXX", ALC245_FIXUP_HP_X360_MUTE_LEDS), + SND_PCI_QUIRK(0x103c, 0x8c2d, "HP Victus 15-fa1xxx (MB 8C2D)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), SND_PCI_QUIRK(0x103c, 0x8c30, "HP Victus 15-fb1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), @@ -6959,6 +6971,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL5[03]RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa554, "VAIO VJFH52", ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0xa559, "VAIO RPL", ALC256_FIXUP_VAIO_RPL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), @@ -7080,6 +7093,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x38ab, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x38b4, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2), + HDA_CODEC_QUIRK(0x17aa, 0x391c, "Lenovo Yoga 7 2-in-1 14AKP10", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), SND_PCI_QUIRK(0x17aa, 0x38b5, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x38b6, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x38b7, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2), diff --git a/sound/soc/amd/acp/amd-acp70-acpi-match.c b/sound/soc/amd/acp/amd-acp70-acpi-match.c index dcecac792e6d..871b4f054a84 100644 --- a/sound/soc/amd/acp/amd-acp70-acpi-match.c +++ b/sound/soc/amd/acp/amd-acp70-acpi-match.c @@ -30,6 +30,20 @@ static const struct snd_soc_acpi_endpoint spk_r_endpoint = { .group_id = 1 }; +static const struct snd_soc_acpi_endpoint spk_2_endpoint = { + .num = 0, + .aggregated = 1, + .group_position = 2, + 
.group_id = 1 +}; + +static const struct snd_soc_acpi_endpoint spk_3_endpoint = { + .num = 0, + .aggregated = 1, + .group_position = 3, + .group_id = 1 +}; + static const struct snd_soc_acpi_adr_device rt711_rt1316_group_adr[] = { { .adr = 0x000030025D071101ull, @@ -112,6 +126,134 @@ static const struct snd_soc_acpi_adr_device rt1320_1_single_adr[] = { } }; +static const struct snd_soc_acpi_endpoint cs42l43_endpoints[] = { + { /* Jack Playback Endpoint */ + .num = 0, + .aggregated = 0, + .group_position = 0, + .group_id = 0, + }, + { /* DMIC Capture Endpoint */ + .num = 1, + .aggregated = 0, + .group_position = 0, + .group_id = 0, + }, + { /* Jack Capture Endpoint */ + .num = 2, + .aggregated = 0, + .group_position = 0, + .group_id = 0, + }, + { /* Speaker Playback Endpoint */ + .num = 3, + .aggregated = 0, + .group_position = 0, + .group_id = 0, + }, +}; + +static const struct snd_soc_acpi_adr_device cs42l43_0_adr[] = { + { + .adr = 0x00003001FA424301ull, + .num_endpoints = ARRAY_SIZE(cs42l43_endpoints), + .endpoints = cs42l43_endpoints, + .name_prefix = "cs42l43" + } +}; + +static const struct snd_soc_acpi_adr_device cs42l43_1_cs35l56x4_1_adr[] = { + { + .adr = 0x00013001FA424301ull, + .num_endpoints = ARRAY_SIZE(cs42l43_endpoints), + .endpoints = cs42l43_endpoints, + .name_prefix = "cs42l43" + }, + { + .adr = 0x00013001FA355601ull, + .num_endpoints = 1, + .endpoints = &spk_l_endpoint, + .name_prefix = "AMP1" + }, + { + .adr = 0x00013101FA355601ull, + .num_endpoints = 1, + .endpoints = &spk_r_endpoint, + .name_prefix = "AMP2" + }, + { + .adr = 0x00013201FA355601ull, + .num_endpoints = 1, + .endpoints = &spk_2_endpoint, + .name_prefix = "AMP3" + }, + { + .adr = 0x00013301FA355601ull, + .num_endpoints = 1, + .endpoints = &spk_3_endpoint, + .name_prefix = "AMP4" + }, +}; + +static const struct snd_soc_acpi_adr_device cs35l56x4_1_adr[] = { + { + .adr = 0x00013301FA355601ull, + .num_endpoints = 1, + .endpoints = &spk_l_endpoint, + .name_prefix = "AMP1" + }, + { + .adr = 0x00013201FA355601ull, + .num_endpoints = 1, + .endpoints = &spk_r_endpoint, + .name_prefix = "AMP2" + }, + { + .adr = 0x00013101FA355601ull, + .num_endpoints = 1, + .endpoints = &spk_2_endpoint, + .name_prefix = "AMP3" + }, + { + .adr = 0x00013001FA355601ull, + .num_endpoints = 1, + .endpoints = &spk_3_endpoint, + .name_prefix = "AMP4" + }, +}; + +static const struct snd_soc_acpi_link_adr acp70_cs42l43_l1_cs35l56x4_l1[] = { + { + .mask = BIT(1), + .num_adr = ARRAY_SIZE(cs42l43_1_cs35l56x4_1_adr), + .adr_d = cs42l43_1_cs35l56x4_1_adr, + }, + {} +}; + +static const struct snd_soc_acpi_link_adr acp70_cs42l43_l0_cs35l56x4_l1[] = { + { + .mask = BIT(0), + .num_adr = ARRAY_SIZE(cs42l43_0_adr), + .adr_d = cs42l43_0_adr, + }, + { + .mask = BIT(1), + .num_adr = ARRAY_SIZE(cs35l56x4_1_adr), + .adr_d = cs35l56x4_1_adr, + }, + {} +}; + +static const struct snd_soc_acpi_link_adr acp70_cs35l56x4_l1[] = { + { + .mask = BIT(1), + .num_adr = ARRAY_SIZE(cs35l56x4_1_adr), + .adr_d = cs35l56x4_1_adr, + }, + {} +}; + static const struct snd_soc_acpi_link_adr acp70_rt722_only[] = { { .mask = BIT(0), @@ -151,6 +293,21 @@ struct snd_soc_acpi_mach snd_soc_acpi_amd_acp70_sdw_machines[] = { .links = acp70_4_in_1_sdca, .drv_name = "amd_sdw", }, + { + .link_mask = BIT(0) | BIT(1), + .links = acp70_cs42l43_l0_cs35l56x4_l1, + .drv_name = "amd_sdw", + }, + { + .link_mask = BIT(1), + .links = acp70_cs42l43_l1_cs35l56x4_l1, + .drv_name = "amd_sdw", + }, + { + .link_mask = BIT(1), + .links = acp70_cs35l56x4_l1, + .drv_name = "amd_sdw", + }, {}, }; 
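The new ACP7.0 entries above key each machine configuration on a SoundWire link mask plus per-link _ADR descriptor tables. As a rough illustration of how such a table is consumed, here is a self-contained C sketch that returns the first entry whose link mask matches what firmware enumerated; the real snd_soc_acpi matching also verifies the _ADR lists on each link before accepting an entry, and every name below is made up for the example.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sdw_machine {
	uint32_t link_mask;	/* SoundWire links that must carry the codecs */
	const char *config;
};

/* Ordered like the table above: multi-link (more specific) entries first. */
static const struct sdw_machine machines[] = {
	{ .link_mask = (1u << 0) | (1u << 1), .config = "cs42l43 on link0 + 4x cs35l56 on link1" },
	{ .link_mask = 1u << 1,               .config = "cs42l43 + 4x cs35l56, all on link1" },
	{ .link_mask = 1u << 1,               .config = "4x cs35l56 on link1" },
	{ 0 }
};

static const struct sdw_machine *pick_machine(uint32_t enumerated_mask)
{
	for (size_t i = 0; machines[i].link_mask; i++)
		if (machines[i].link_mask == enumerated_mask)
			return &machines[i];
	return NULL;
}

int main(void)
{
	const struct sdw_machine *m = pick_machine((1u << 0) | (1u << 1));

	printf("%s\n", m ? m->config : "no match");
	return 0;
}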
EXPORT_SYMBOL(snd_soc_acpi_amd_acp70_sdw_machines); diff --git a/sound/soc/codecs/cs-amp-lib-test.c b/sound/soc/codecs/cs-amp-lib-test.c index 2fde84309338..3406887cdfa2 100644 --- a/sound/soc/codecs/cs-amp-lib-test.c +++ b/sound/soc/codecs/cs-amp-lib-test.c @@ -7,6 +7,7 @@ #include <kunit/resource.h> #include <kunit/test.h> +#include <kunit/test-bug.h> #include <kunit/static_stub.h> #include <linux/device/faux.h> #include <linux/firmware/cirrus/cs_dsp.h> diff --git a/sound/soc/codecs/cs530x.c b/sound/soc/codecs/cs530x.c index b9eff240b929..535387cd7aa3 100644 --- a/sound/soc/codecs/cs530x.c +++ b/sound/soc/codecs/cs530x.c @@ -793,7 +793,7 @@ static int cs530x_set_sysclk(struct snd_soc_component *component, int clk_id, case CS530X_SYSCLK_SRC_PLL: break; default: - dev_err(component->dev, "Invalid clock id %d\n", clk_id); + dev_err(component->dev, "Invalid sysclk source: %d\n", source); return -EINVAL; } diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index cb1508fc99f8..5aff5a459a43 100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c @@ -1239,6 +1239,8 @@ static const struct snd_soc_dapm_widget max98091_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("DMIC4_ENA", M98090_REG_DIGITAL_MIC_ENABLE, M98090_DIGMIC4_SHIFT, 0, max98090_shdn_event, SND_SOC_DAPM_POST_PMU), + SND_SOC_DAPM_SUPPLY("DMIC34_HPF", M98090_REG_FILTER_CONFIG, + M98090_FLT_DMIC34HPF_SHIFT, 0, NULL, 0), }; static const struct snd_soc_dapm_route max98090_dapm_routes[] = { @@ -1427,8 +1429,8 @@ static const struct snd_soc_dapm_route max98091_dapm_routes[] = { /* DMIC inputs */ {"DMIC3", NULL, "DMIC3_ENA"}, {"DMIC4", NULL, "DMIC4_ENA"}, - {"DMIC3", NULL, "AHPF"}, - {"DMIC4", NULL, "AHPF"}, + {"DMIC3", NULL, "DMIC34_HPF"}, + {"DMIC4", NULL, "DMIC34_HPF"}, }; static int max98090_add_widgets(struct snd_soc_component *component) diff --git a/sound/soc/codecs/rt721-sdca.c b/sound/soc/codecs/rt721-sdca.c index a4bd29d7220b..5f7b505d5414 100644 --- a/sound/soc/codecs/rt721-sdca.c +++ b/sound/soc/codecs/rt721-sdca.c @@ -281,6 +281,10 @@ static void rt721_sdca_jack_preset(struct rt721_sdca_priv *rt721) rt_sdca_index_write(rt721->mbq_regmap, RT721_BOOST_CTRL, RT721_BST_4CH_TOP_GATING_CTRL1, 0x002a); regmap_write(rt721->regmap, 0x2f58, 0x07); + + regmap_write(rt721->regmap, 0x2f51, 0x00); + rt_sdca_index_write(rt721->mbq_regmap, RT721_HDA_SDCA_FLOAT, + RT721_MISC_CTL, 0x0004); } static void rt721_sdca_jack_init(struct rt721_sdca_priv *rt721) diff --git a/sound/soc/codecs/rt721-sdca.h b/sound/soc/codecs/rt721-sdca.h index 71fac9cd8739..24ce188562ba 100644 --- a/sound/soc/codecs/rt721-sdca.h +++ b/sound/soc/codecs/rt721-sdca.h @@ -137,6 +137,7 @@ struct rt721_sdca_dmic_kctrl_priv { #define RT721_HDA_LEGACY_UAJ_CTL 0x02 #define RT721_HDA_LEGACY_CTL1 0x05 #define RT721_HDA_LEGACY_RESET_CTL 0x06 +#define RT721_MISC_CTL 0x07 #define RT721_XU_REL_CTRL 0x0c #define RT721_GE_REL_CTRL1 0x0d #define RT721_HDA_LEGACY_GPIO_WAKE_EN_CTL 0x0e diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c index aabd90a8b3ec..cac26ba0aa4b 100644 --- a/sound/soc/fsl/fsl_micfil.c +++ b/sound/soc/fsl/fsl_micfil.c @@ -131,7 +131,7 @@ static struct fsl_micfil_soc_data fsl_micfil_imx943 = { .fifos = 8, .fifo_depth = 32, .dataline = 0xf, - .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_DSD_U32_BE, + .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_DSD_U32_LE, .use_edma = true, .use_verid = true, .volume_sx = false, @@ -823,7 +823,7 @@ static int fsl_micfil_hw_params(struct snd_pcm_substream *substream, break; } 
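The fsl_micfil hunks here swap the advertised DSD format from big- to little-endian and make the hw_params() bypass check test for that same format; the two must agree because each SNDRV_PCM_FMTBIT_* mask is just 1ULL shifted by the corresponding SNDRV_PCM_FORMAT_* value. A small standalone sketch of that relationship follows, using made-up enum values rather than the real ALSA numbers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for SNDRV_PCM_FORMAT_* values, not the real ones. */
enum pcm_format { FMT_S32_LE = 10, FMT_DSD_U32_LE = 50, FMT_DSD_U32_BE = 52 };

#define FMTBIT(fmt)	(1ULL << (fmt))

/* What the driver advertises, mirroring the .formats mask above. */
static const uint64_t advertised = FMTBIT(FMT_S32_LE) | FMTBIT(FMT_DSD_U32_LE);

static bool format_supported(enum pcm_format fmt)
{
	return advertised & FMTBIT(fmt);
}

int main(void)
{
	/* Only the LE variant passes now that only its bit is advertised. */
	printf("DSD_U32_LE supported: %d, DSD_U32_BE supported: %d\n",
	       format_supported(FMT_DSD_U32_LE), format_supported(FMT_DSD_U32_BE));
	return 0;
}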
- if (format == SNDRV_PCM_FORMAT_DSD_U32_BE) { + if (format == SNDRV_PCM_FORMAT_DSD_U32_LE) { micfil->dec_bypass = true; /* * According to equation 29 in RM: diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index 757e7868e322..72bfc91e21b9 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -353,7 +353,6 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai, break; case SND_SOC_DAIFMT_PDM: val_cr2 |= FSL_SAI_CR2_BCP; - val_cr4 &= ~FSL_SAI_CR4_MF; sai->is_pdm_mode = true; break; case SND_SOC_DAIFMT_RIGHT_J: @@ -638,7 +637,7 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream, val_cr5 |= FSL_SAI_CR5_WNW(slot_width); val_cr5 |= FSL_SAI_CR5_W0W(slot_width); - if (sai->is_lsb_first || sai->is_pdm_mode) + if (sai->is_lsb_first) val_cr5 |= FSL_SAI_CR5_FBT(0); else val_cr5 |= FSL_SAI_CR5_FBT(word_width - 1); @@ -653,12 +652,12 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream, val_cr4 |= FSL_SAI_CR4_CHMOD; /* - * For SAI provider mode, when Tx(Rx) sync with Rx(Tx) clock, Rx(Tx) will - * generate bclk and frame clock for Tx(Rx), we should set RCR4(TCR4), - * RCR5(TCR5) for playback(capture), or there will be sync error. + * When Tx(Rx) sync with Rx(Tx) clock, Rx(Tx) will provide bclk and + * frame clock for Tx(Rx). We should set RCR4(TCR4), RCR5(TCR5) + * for playback(capture), or there will be sync error. */ - if (!sai->is_consumer_mode[tx] && fsl_sai_dir_is_synced(sai, adir)) { + if (fsl_sai_dir_is_synced(sai, adir)) { regmap_update_bits(sai->regmap, FSL_SAI_xCR4(!tx, ofs), FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK | FSL_SAI_CR4_CHMOD_MASK, diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c index d31058e2de5b..80c001120cdd 100644 --- a/sound/soc/intel/avs/pcm.c +++ b/sound/soc/intel/avs/pcm.c @@ -651,6 +651,7 @@ static void avs_dai_fe_shutdown(struct snd_pcm_substream *substream, struct snd_ data = snd_soc_dai_get_dma_data(dai, substream); + disable_work_sync(&data->period_elapsed_work); snd_hdac_ext_stream_release(data->host_stream, HDAC_EXT_STREAM_TYPE_HOST); avs_dai_shutdown(substream, dai); } @@ -754,6 +755,8 @@ static int avs_dai_fe_prepare(struct snd_pcm_substream *substream, struct snd_so data = snd_soc_dai_get_dma_data(dai, substream); host_stream = data->host_stream; + if (runtime->state == SNDRV_PCM_STATE_XRUN) + hdac_stream(host_stream)->prepared = false; if (hdac_stream(host_stream)->prepared) return 0; diff --git a/sound/soc/intel/avs/probes.c b/sound/soc/intel/avs/probes.c index 693ecfe68fd0..74096236984a 100644 --- a/sound/soc/intel/avs/probes.c +++ b/sound/soc/intel/avs/probes.c @@ -14,8 +14,8 @@ #include "debug.h" #include "messages.h" -static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id node_id, - size_t buffer_size) +static int avs_dsp_init_probe(struct avs_dev *adev, struct snd_compr_params *params, int bps, + union avs_connector_node_id node_id, size_t buffer_size) { struct avs_probe_cfg cfg = {{0}}; struct avs_module_entry mentry; @@ -27,12 +27,16 @@ static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id return ret; /* - * Probe module uses no cycles, audio data format and input and output - * frame sizes are unused. It is also not owned by any pipeline. + * Probe module uses no cycles, input and output frame sizes are unused. + * It is also not owned by any pipeline. */ cfg.base.ibs = 1; /* BSS module descriptor is always segment of index=2. 
*/ cfg.base.is_pages = mentry.segments[2].flags.length; + cfg.base.audio_fmt.sampling_freq = params->codec.sample_rate; + cfg.base.audio_fmt.bit_depth = bps; + cfg.base.audio_fmt.num_channels = params->codec.ch_out; + cfg.base.audio_fmt.valid_bit_depth = bps; cfg.gtw_cfg.node_id = node_id; cfg.gtw_cfg.dma_buffer_size = buffer_size; @@ -128,8 +132,6 @@ static int avs_probe_compr_set_params(struct snd_compr_stream *cstream, struct hdac_ext_stream *host_stream = avs_compr_get_host_stream(cstream); struct snd_compr_runtime *rtd = cstream->runtime; struct avs_dev *adev = to_avs_dev(dai->dev); - /* compr params do not store bit depth, default to S32_LE. */ - snd_pcm_format_t format = SNDRV_PCM_FORMAT_S32_LE; unsigned int format_val; int bps, ret; @@ -142,7 +144,7 @@ static int avs_probe_compr_set_params(struct snd_compr_stream *cstream, ret = snd_compr_malloc_pages(cstream, rtd->buffer_size); if (ret < 0) return ret; - bps = snd_pcm_format_physical_width(format); + bps = snd_pcm_format_physical_width(params->codec.format); if (bps < 0) return bps; format_val = snd_hdac_stream_format(params->codec.ch_out, bps, params->codec.sample_rate); @@ -166,7 +168,7 @@ static int avs_probe_compr_set_params(struct snd_compr_stream *cstream, node_id.vindex = hdac_stream(host_stream)->stream_tag - 1; node_id.dma_type = AVS_DMA_HDA_HOST_INPUT; - ret = avs_dsp_init_probe(adev, node_id, rtd->dma_bytes); + ret = avs_dsp_init_probe(adev, params, bps, node_id, rtd->dma_bytes); if (ret < 0) { dev_err(dai->dev, "probe init failed: %d\n", ret); avs_dsp_enable_d0ix(adev); diff --git a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c index 3c8b10e21ceb..4853f4f31786 100644 --- a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c @@ -227,33 +227,6 @@ static const struct snd_soc_acpi_endpoint cs42l43_amp_spkagg_endpoints[] = { }, }; -static const struct snd_soc_acpi_endpoint cs42l43_endpoints[] = { - { /* Jack Playback Endpoint */ - .num = 0, - .aggregated = 0, - .group_position = 0, - .group_id = 0, - }, - { /* DMIC Capture Endpoint */ - .num = 1, - .aggregated = 0, - .group_position = 0, - .group_id = 0, - }, - { /* Jack Capture Endpoint */ - .num = 2, - .aggregated = 0, - .group_position = 0, - .group_id = 0, - }, - { /* Speaker Playback Endpoint */ - .num = 3, - .aggregated = 0, - .group_position = 0, - .group_id = 0, - }, -}; - static const struct snd_soc_acpi_adr_device cs42l43_2_adr[] = { { .adr = 0x00023001fa424301ull, @@ -305,15 +278,6 @@ static const struct snd_soc_acpi_adr_device cs35l56_3_3amp_adr[] = { } }; -static const struct snd_soc_acpi_adr_device cs42l43_3_adr[] = { - { - .adr = 0x00033001FA424301ull, - .num_endpoints = ARRAY_SIZE(cs42l43_endpoints), - .endpoints = cs42l43_endpoints, - .name_prefix = "cs42l43" - } -}; - static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = { { .adr = 0x000030025D071101ull, @@ -486,15 +450,6 @@ static const struct snd_soc_acpi_link_adr ptl_cs42l43_l2_cs35l56x6_l13[] = { {} }; -static const struct snd_soc_acpi_link_adr ptl_cs42l43_l3[] = { - { - .mask = BIT(3), - .num_adr = ARRAY_SIZE(cs42l43_3_adr), - .adr_d = cs42l43_3_adr, - }, - {} -}; - static const struct snd_soc_acpi_link_adr ptl_rt721_l0[] = { { .mask = BIT(0), @@ -714,13 +669,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_sdw_machines[] = { }, { .link_mask = BIT(3), - .links = ptl_cs42l43_l3, - .drv_name = "sof_sdw", - .sof_tplg_filename = "sof-ptl-cs42l43-l3.tplg", - .get_function_tplg_files 
= sof_sdw_get_tplg_files, - }, - { - .link_mask = BIT(3), .links = ptl_sdw_rt712_vb_l3_rt1320_l3, .drv_name = "sof_sdw", .machine_check = snd_soc_acpi_intel_sdca_is_device_rt712_vb, diff --git a/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c b/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c index 5d025ad72263..c63b3444bc17 100644 --- a/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c +++ b/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c @@ -3176,7 +3176,6 @@ err_pm_put: static void mt8195_afe_pcm_dev_remove(struct platform_device *pdev) { - pm_runtime_disable(&pdev->dev); if (!pm_runtime_status_suspended(&pdev->dev)) mt8195_afe_runtime_suspend(&pdev->dev); } diff --git a/sound/soc/mediatek/mt8365/mt8365-afe-pcm.c b/sound/soc/mediatek/mt8365/mt8365-afe-pcm.c index 10793bbe9275..d48252cd96ac 100644 --- a/sound/soc/mediatek/mt8365/mt8365-afe-pcm.c +++ b/sound/soc/mediatek/mt8365/mt8365-afe-pcm.c @@ -2238,7 +2238,6 @@ static void mt8365_afe_pcm_dev_remove(struct platform_device *pdev) mt8365_afe_disable_top_cg(afe, MT8365_TOP_CG_AFE); - pm_runtime_disable(&pdev->dev); if (!pm_runtime_status_suspended(&pdev->dev)) mt8365_afe_runtime_suspend(&pdev->dev); } diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c index 06a802f9dba5..67e9ca18883c 100644 --- a/sound/soc/qcom/qdsp6/q6asm.c +++ b/sound/soc/qcom/qdsp6/q6asm.c @@ -377,9 +377,9 @@ static void q6asm_audio_client_free_buf(struct audio_client *ac, spin_lock_irqsave(&ac->lock, flags); port->num_periods = 0; + spin_unlock_irqrestore(&ac->lock, flags); kfree(port->buf); port->buf = NULL; - spin_unlock_irqrestore(&ac->lock, flags); } /** diff --git a/sound/soc/renesas/rz-ssi.c b/sound/soc/renesas/rz-ssi.c index e00940814157..81b883e8ac92 100644 --- a/sound/soc/renesas/rz-ssi.c +++ b/sound/soc/renesas/rz-ssi.c @@ -85,6 +85,7 @@ struct rz_ssi_stream { struct snd_pcm_substream *substream; int fifo_sample_size; /* sample capacity of SSI FIFO */ int dma_buffer_pos; /* The address for the next DMA descriptor */ + int completed_dma_buf_pos; /* The address of the last completed DMA descriptor. 
*/ int period_counter; /* for keeping track of periods transferred */ int sample_width; int buffer_pos; /* current frame position in the buffer */ @@ -215,6 +216,7 @@ static void rz_ssi_stream_init(struct rz_ssi_stream *strm, rz_ssi_set_substream(strm, substream); strm->sample_width = samples_to_bytes(runtime, 1); strm->dma_buffer_pos = 0; + strm->completed_dma_buf_pos = 0; strm->period_counter = 0; strm->buffer_pos = 0; @@ -437,6 +439,10 @@ static void rz_ssi_pointer_update(struct rz_ssi_stream *strm, int frames) snd_pcm_period_elapsed(strm->substream); strm->period_counter = current_period; } + + strm->completed_dma_buf_pos += runtime->period_size; + if (strm->completed_dma_buf_pos >= runtime->buffer_size) + strm->completed_dma_buf_pos = 0; } static int rz_ssi_pio_recv(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm) @@ -778,10 +784,14 @@ no_dma: return -ENODEV; } -static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi) +static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm) { + struct snd_pcm_substream *substream = strm->substream; + struct snd_pcm_runtime *runtime = substream->runtime; int ret; + strm->dma_buffer_pos = strm->completed_dma_buf_pos + runtime->period_size; + if (rz_ssi_is_stream_running(&ssi->playback) || rz_ssi_is_stream_running(&ssi->capture)) return 0; @@ -794,16 +804,6 @@ static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi) ssi->hw_params_cache.channels); } -static void rz_ssi_streams_suspend(struct rz_ssi_priv *ssi) -{ - if (rz_ssi_is_stream_running(&ssi->playback) || - rz_ssi_is_stream_running(&ssi->capture)) - return; - - ssi->playback.dma_buffer_pos = 0; - ssi->capture.dma_buffer_pos = 0; -} - static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { @@ -813,7 +813,7 @@ static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd, switch (cmd) { case SNDRV_PCM_TRIGGER_RESUME: - ret = rz_ssi_trigger_resume(ssi); + ret = rz_ssi_trigger_resume(ssi, strm); if (ret) return ret; @@ -852,7 +852,6 @@ static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd, case SNDRV_PCM_TRIGGER_SUSPEND: rz_ssi_stop(ssi, strm); - rz_ssi_streams_suspend(ssi); break; case SNDRV_PCM_TRIGGER_STOP: diff --git a/sound/soc/sdw_utils/soc_sdw_utils.c b/sound/soc/sdw_utils/soc_sdw_utils.c index 270c66b90228..f7c8c16308de 100644 --- a/sound/soc/sdw_utils/soc_sdw_utils.c +++ b/sound/soc/sdw_utils/soc_sdw_utils.c @@ -638,7 +638,6 @@ struct asoc_sdw_codec_info codec_info_list[] = { { .direction = {true, false}, .dai_name = "cs42l43-dp6", - .component_name = "cs42l43", .dai_type = SOC_SDW_DAI_TYPE_AMP, .dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_UNUSED_DAI_ID}, .init = asoc_sdw_cs42l43_spk_init, diff --git a/sound/usb/mixer_s1810c.c b/sound/usb/mixer_s1810c.c index 15960d25e748..6e09e074c0e7 100644 --- a/sound/usb/mixer_s1810c.c +++ b/sound/usb/mixer_s1810c.c @@ -178,7 +178,7 @@ snd_sc1810c_get_status_field(struct usb_device *dev, pkt_out.fields[SC1810C_STATE_F1_IDX] = SC1810C_SET_STATE_F1; pkt_out.fields[SC1810C_STATE_F2_IDX] = SC1810C_SET_STATE_F2; - ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), + ret = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), SC1810C_SET_STATE_REQ, SC1810C_SET_STATE_REQTYPE, (*seqnum), 0, &pkt_out, sizeof(pkt_out)); @@ -597,15 +597,6 @@ int snd_sc1810_init_mixer(struct usb_mixer_interface *mixer) if (!list_empty(&chip->mixer_list)) return 0; - dev_info(&dev->dev, - "Presonus Studio 1810c, device_setup: %u\n", chip->setup); - if (chip->setup == 1) - 
dev_info(&dev->dev, "(8out/18in @ 48kHz)\n"); - else if (chip->setup == 2) - dev_info(&dev->dev, "(6out/8in @ 192kHz)\n"); - else - dev_info(&dev->dev, "(8out/14in @ 96kHz)\n"); - ret = snd_s1810c_init_mixer_maps(chip); if (ret < 0) return ret; @@ -634,16 +625,28 @@ int snd_sc1810_init_mixer(struct usb_mixer_interface *mixer) if (ret < 0) return ret; - // The 1824c has a Mono Main switch instead of a - // A/B select switch. - if (mixer->chip->usb_id == USB_ID(0x194f, 0x010d)) { - ret = snd_s1810c_switch_init(mixer, &snd_s1824c_mono_sw); + switch (chip->usb_id) { + case USB_ID(0x194f, 0x010c): /* Presonus Studio 1810c */ + dev_info(&dev->dev, + "Presonus Studio 1810c, device_setup: %u\n", chip->setup); + if (chip->setup == 1) + dev_info(&dev->dev, "(8out/18in @ 48kHz)\n"); + else if (chip->setup == 2) + dev_info(&dev->dev, "(6out/8in @ 192kHz)\n"); + else + dev_info(&dev->dev, "(8out/14in @ 96kHz)\n"); + + ret = snd_s1810c_switch_init(mixer, &snd_s1810c_ab_sw); if (ret < 0) return ret; - } else if (mixer->chip->usb_id == USB_ID(0x194f, 0x010c)) { - ret = snd_s1810c_switch_init(mixer, &snd_s1810c_ab_sw); + + break; + case USB_ID(0x194f, 0x010d): /* Presonus Studio 1824c */ + ret = snd_s1810c_switch_init(mixer, &snd_s1824c_mono_sw); if (ret < 0) return ret; + + break; } return ret; diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index a8f6cd4841b0..dbe32a5d02cd 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -311,7 +311,7 @@ struct pt_regs___arm64 { #define __PT_RET_REG regs[31] #define __PT_FP_REG __unsupported__ #define __PT_RC_REG gpr[3] -#define __PT_SP_REG sp +#define __PT_SP_REG gpr[1] #define __PT_IP_REG nip #elif defined(bpf_target_sparc) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 620854fdaaf6..9004fbc06769 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -3516,8 +3516,11 @@ static bool skip_alt_group(struct instruction *insn) { struct instruction *alt_insn = insn->alts ? 
insn->alts->insn : NULL; + if (!insn->alt_group) + return false; + /* ANNOTATE_IGNORE_ALTERNATIVE */ - if (insn->alt_group && insn->alt_group->ignore) + if (insn->alt_group->ignore) return true; /* diff --git a/tools/testing/selftests/cachestat/.gitignore b/tools/testing/selftests/cachestat/.gitignore index d6c30b43a4bb..abbb13b6e96b 100644 --- a/tools/testing/selftests/cachestat/.gitignore +++ b/tools/testing/selftests/cachestat/.gitignore @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only test_cachestat +tmpshmcstat diff --git a/tools/testing/selftests/cachestat/test_cachestat.c b/tools/testing/selftests/cachestat/test_cachestat.c index c952640f163b..ab838bcb9ec5 100644 --- a/tools/testing/selftests/cachestat/test_cachestat.c +++ b/tools/testing/selftests/cachestat/test_cachestat.c @@ -226,7 +226,7 @@ bool run_cachestat_test(enum file_type type) int syscall_ret; size_t compute_len = PS * 512; struct cachestat_range cs_range = { PS, compute_len }; - char *filename = "tmpshmcstat"; + char *filename = "tmpshmcstat", *map; struct cachestat cs; bool ret = true; int fd; @@ -257,7 +257,7 @@ bool run_cachestat_test(enum file_type type) } break; case FILE_MMAP: - char *map = mmap(NULL, filesize, PROT_READ | PROT_WRITE, + map = mmap(NULL, filesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (map == MAP_FAILED) { diff --git a/tools/testing/selftests/vfio/lib/include/vfio_util.h b/tools/testing/selftests/vfio/lib/include/vfio_util.h index ed31606e01b7..240409bf5f8a 100644 --- a/tools/testing/selftests/vfio/lib/include/vfio_util.h +++ b/tools/testing/selftests/vfio/lib/include/vfio_util.h @@ -206,10 +206,29 @@ struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_ void vfio_pci_device_cleanup(struct vfio_pci_device *device); void vfio_pci_device_reset(struct vfio_pci_device *device); -void vfio_pci_dma_map(struct vfio_pci_device *device, - struct vfio_dma_region *region); -void vfio_pci_dma_unmap(struct vfio_pci_device *device, - struct vfio_dma_region *region); +int __vfio_pci_dma_map(struct vfio_pci_device *device, + struct vfio_dma_region *region); +int __vfio_pci_dma_unmap(struct vfio_pci_device *device, + struct vfio_dma_region *region, + u64 *unmapped); +int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped); + +static inline void vfio_pci_dma_map(struct vfio_pci_device *device, + struct vfio_dma_region *region) +{ + VFIO_ASSERT_EQ(__vfio_pci_dma_map(device, region), 0); +} + +static inline void vfio_pci_dma_unmap(struct vfio_pci_device *device, + struct vfio_dma_region *region) +{ + VFIO_ASSERT_EQ(__vfio_pci_dma_unmap(device, region, NULL), 0); +} + +static inline void vfio_pci_dma_unmap_all(struct vfio_pci_device *device) +{ + VFIO_ASSERT_EQ(__vfio_pci_dma_unmap_all(device, NULL), 0); +} void vfio_pci_config_access(struct vfio_pci_device *device, bool write, size_t config, size_t size, void *data); diff --git a/tools/testing/selftests/vfio/lib/vfio_pci_device.c b/tools/testing/selftests/vfio/lib/vfio_pci_device.c index 0921b2451ba5..a381fd253aa7 100644 --- a/tools/testing/selftests/vfio/lib/vfio_pci_device.c +++ b/tools/testing/selftests/vfio/lib/vfio_pci_device.c @@ -2,6 +2,7 @@ #include <dirent.h> #include <fcntl.h> #include <libgen.h> +#include <stdint.h> #include <stdlib.h> #include <string.h> #include <unistd.h> @@ -141,7 +142,7 @@ static void vfio_pci_irq_get(struct vfio_pci_device *device, u32 index, ioctl_assert(device->fd, VFIO_DEVICE_GET_IRQ_INFO, irq_info); } -static void vfio_iommu_dma_map(struct vfio_pci_device *device, 
+static int vfio_iommu_dma_map(struct vfio_pci_device *device, struct vfio_dma_region *region) { struct vfio_iommu_type1_dma_map args = { @@ -152,10 +153,13 @@ static void vfio_iommu_dma_map(struct vfio_pci_device *device, .size = region->size, }; - ioctl_assert(device->container_fd, VFIO_IOMMU_MAP_DMA, &args); + if (ioctl(device->container_fd, VFIO_IOMMU_MAP_DMA, &args)) + return -errno; + + return 0; } -static void iommufd_dma_map(struct vfio_pci_device *device, +static int iommufd_dma_map(struct vfio_pci_device *device, struct vfio_dma_region *region) { struct iommu_ioas_map args = { @@ -169,54 +173,108 @@ static void iommufd_dma_map(struct vfio_pci_device *device, .ioas_id = device->ioas_id, }; - ioctl_assert(device->iommufd, IOMMU_IOAS_MAP, &args); + if (ioctl(device->iommufd, IOMMU_IOAS_MAP, &args)) + return -errno; + + return 0; } -void vfio_pci_dma_map(struct vfio_pci_device *device, +int __vfio_pci_dma_map(struct vfio_pci_device *device, struct vfio_dma_region *region) { + int ret; + if (device->iommufd) - iommufd_dma_map(device, region); + ret = iommufd_dma_map(device, region); else - vfio_iommu_dma_map(device, region); + ret = vfio_iommu_dma_map(device, region); + + if (ret) + return ret; list_add(&region->link, &device->dma_regions); + + return 0; } -static void vfio_iommu_dma_unmap(struct vfio_pci_device *device, - struct vfio_dma_region *region) +static int vfio_iommu_dma_unmap(int fd, u64 iova, u64 size, u32 flags, + u64 *unmapped) { struct vfio_iommu_type1_dma_unmap args = { .argsz = sizeof(args), - .iova = region->iova, - .size = region->size, + .iova = iova, + .size = size, + .flags = flags, }; - ioctl_assert(device->container_fd, VFIO_IOMMU_UNMAP_DMA, &args); + if (ioctl(fd, VFIO_IOMMU_UNMAP_DMA, &args)) + return -errno; + + if (unmapped) + *unmapped = args.size; + + return 0; } -static void iommufd_dma_unmap(struct vfio_pci_device *device, - struct vfio_dma_region *region) +static int iommufd_dma_unmap(int fd, u64 iova, u64 length, u32 ioas_id, + u64 *unmapped) { struct iommu_ioas_unmap args = { .size = sizeof(args), - .iova = region->iova, - .length = region->size, - .ioas_id = device->ioas_id, + .iova = iova, + .length = length, + .ioas_id = ioas_id, }; - ioctl_assert(device->iommufd, IOMMU_IOAS_UNMAP, &args); + if (ioctl(fd, IOMMU_IOAS_UNMAP, &args)) + return -errno; + + if (unmapped) + *unmapped = args.length; + + return 0; } -void vfio_pci_dma_unmap(struct vfio_pci_device *device, - struct vfio_dma_region *region) +int __vfio_pci_dma_unmap(struct vfio_pci_device *device, + struct vfio_dma_region *region, u64 *unmapped) { + int ret; + + if (device->iommufd) + ret = iommufd_dma_unmap(device->iommufd, region->iova, + region->size, device->ioas_id, + unmapped); + else + ret = vfio_iommu_dma_unmap(device->container_fd, region->iova, + region->size, 0, unmapped); + + if (ret) + return ret; + + list_del_init(&region->link); + + return 0; +} + +int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped) +{ + int ret; + struct vfio_dma_region *curr, *next; + if (device->iommufd) - iommufd_dma_unmap(device, region); + ret = iommufd_dma_unmap(device->iommufd, 0, UINT64_MAX, + device->ioas_id, unmapped); else - vfio_iommu_dma_unmap(device, region); + ret = vfio_iommu_dma_unmap(device->container_fd, 0, 0, + VFIO_DMA_UNMAP_FLAG_ALL, unmapped); + + if (ret) + return ret; + + list_for_each_entry_safe(curr, next, &device->dma_regions, link) + list_del_init(&curr->link); - list_del(&region->link); + return 0; } static void vfio_pci_region_get(struct vfio_pci_device *device,
int index, diff --git a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c index ab19c54a774d..4f1ea79a200c 100644 --- a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c +++ b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c @@ -112,6 +112,8 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0); FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB); FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB); +#undef FIXTURE_VARIANT_ADD_IOMMU_MODE + FIXTURE_SETUP(vfio_dma_mapping_test) { self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode); @@ -129,6 +131,7 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap) struct vfio_dma_region region; struct iommu_mapping mapping; u64 mapping_size = size; + u64 unmapped; int rc; region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0); @@ -184,7 +187,9 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap) } unmap: - vfio_pci_dma_unmap(self->device, &region); + rc = __vfio_pci_dma_unmap(self->device, &region, &unmapped); + ASSERT_EQ(rc, 0); + ASSERT_EQ(unmapped, region.size); printf("Unmapped IOVA 0x%lx\n", region.iova); ASSERT_EQ(INVALID_IOVA, __to_iova(self->device, region.vaddr)); ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping)); @@ -192,6 +197,94 @@ unmap: ASSERT_TRUE(!munmap(region.vaddr, size)); } +FIXTURE(vfio_dma_map_limit_test) { + struct vfio_pci_device *device; + struct vfio_dma_region region; + size_t mmap_size; +}; + +FIXTURE_VARIANT(vfio_dma_map_limit_test) { + const char *iommu_mode; +}; + +#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode) \ +FIXTURE_VARIANT_ADD(vfio_dma_map_limit_test, _iommu_mode) { \ + .iommu_mode = #_iommu_mode, \ +} + +FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(); + +#undef FIXTURE_VARIANT_ADD_IOMMU_MODE + +FIXTURE_SETUP(vfio_dma_map_limit_test) +{ + struct vfio_dma_region *region = &self->region; + u64 region_size = getpagesize(); + + /* + * Over-allocate mmap by double the size to provide enough backing vaddr + * for overflow tests + */ + self->mmap_size = 2 * region_size; + + self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode); + region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + ASSERT_NE(region->vaddr, MAP_FAILED); + + /* One page prior to the end of address space */ + region->iova = ~(iova_t)0 & ~(region_size - 1); + region->size = region_size; +} + +FIXTURE_TEARDOWN(vfio_dma_map_limit_test) +{ + vfio_pci_device_cleanup(self->device); + ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0); +} + +TEST_F(vfio_dma_map_limit_test, unmap_range) +{ + struct vfio_dma_region *region = &self->region; + u64 unmapped; + int rc; + + vfio_pci_dma_map(self->device, region); + ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr)); + + rc = __vfio_pci_dma_unmap(self->device, region, &unmapped); + ASSERT_EQ(rc, 0); + ASSERT_EQ(unmapped, region->size); +} + +TEST_F(vfio_dma_map_limit_test, unmap_all) +{ + struct vfio_dma_region *region = &self->region; + u64 unmapped; + int rc; + + vfio_pci_dma_map(self->device, region); + ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr)); + + rc = __vfio_pci_dma_unmap_all(self->device, &unmapped); + ASSERT_EQ(rc, 0); + ASSERT_EQ(unmapped, region->size); +} + +TEST_F(vfio_dma_map_limit_test, overflow) +{ + struct vfio_dma_region *region = &self->region; + int rc; + + region->size = self->mmap_size; + + rc = 
__vfio_pci_dma_map(self->device, region); + ASSERT_EQ(rc, -EOVERFLOW); + + rc = __vfio_pci_dma_unmap(self->device, region, NULL); + ASSERT_EQ(rc, -EOVERFLOW); +} + int main(int argc, char *argv[]) { device_bdf = vfio_selftests_get_bdf(&argc, argv);
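Note on the reworked VFIO selftest DMA helpers above: vfio_pci_dma_map()/vfio_pci_dma_unmap() are now thin asserting wrappers around the error-returning __vfio_pci_dma_map()/__vfio_pci_dma_unmap()/__vfio_pci_dma_unmap_all() variants, which return 0 or -errno and can report how many bytes were actually unmapped. A minimal usage sketch in C, using only the declarations added to vfio_util.h in this diff; the dma_roundtrip() helper is hypothetical and assumes the device and the region's vaddr/iova/size were prepared elsewhere:

static void dma_roundtrip(struct vfio_pci_device *device,
			  struct vfio_dma_region *region)
{
	u64 unmapped = 0;

	/* Asserting wrapper: fails the test on any mapping error. */
	vfio_pci_dma_map(device, region);

	/*
	 * Raw variant: returns 0 or -errno and, when a pointer is passed,
	 * reports how many bytes the kernel actually unmapped.
	 */
	VFIO_ASSERT_EQ(__vfio_pci_dma_unmap(device, region, &unmapped), 0);
	VFIO_ASSERT_EQ(unmapped, region->size);
}

The vfio_dma_map_limit_test cases above follow the same pattern, reaching for __vfio_pci_dma_unmap_all() when every region tracked for the device should be dropped in one call.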

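One C detail behind the cachestat selftest change above: char *map moved from the FILE_MMAP case into the declaration list at the top of run_cachestat_test() because, prior to C23, a case label must be followed by a statement and a declaration is not a statement, so writing "case FILE_MMAP: char *map = mmap(...)" does not compile. A standalone sketch of the pattern, not taken from the kernel tree (the demo() function and its strings are hypothetical):

#include <stdio.h>

enum file_type { FILE_SHMEM, FILE_MMAP };

static void demo(enum file_type type)
{
	char *map;	/* declared up front, as the fix above does */

	switch (type) {
	case FILE_MMAP:
		map = "mmap path";	/* the label is now followed by a statement */
		printf("%s\n", map);
		break;
	default:
		printf("other path\n");
		break;
	}
}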