From ed61378b4dc63efe76cb8c23a36b228043332da3 Mon Sep 17 00:00:00 2001 From: Brian Foster Date: Mon, 8 Dec 2025 09:05:48 -0500 Subject: iomap: replace folio_batch allocation with stack allocation Zhang Yi points out that the dynamic folio_batch allocation in iomap_fill_dirty_folios() is problematic for the ext4 on iomap work that is under development because it doesn't sufficiently handle the allocation failure case (by allowing a retry, for example). We've also seen lockdep (via syzbot) complain recently about the scope of the allocation. The dynamic allocation was initially added for simplicity and to help indicate whether the batch was used or not by the calling fs. To address these issues, put the batch on the stack of iomap_zero_range() and use a flag to control whether the batch should be used in the iomap folio lookup path. This keeps things simple and eliminates allocation issues with lockdep and for ext4 on iomap. While here, also clean up the fill helper signature to be more consistent with the underlying filemap helper. Pass through the return value of the filemap helper (folio count) and update the lookup offset via an out param. Fixes: 395ed1ef0012 ("iomap: optional zero range dirty folio processing") Signed-off-by: Brian Foster Link: https://patch.msgid.link/20251208140548.373411-1-bfoster@redhat.com Acked-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Darrick J. Wong Signed-off-by: Christian Brauner --- include/linux/iomap.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 520e967cb501..6bb941707d12 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -88,6 +88,9 @@ struct vm_fault; /* * Flags set by the core iomap code during operations: * + * IOMAP_F_FOLIO_BATCH indicates that the folio batch mechanism is active + * for this operation, set by iomap_fill_dirty_folios(). + * * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size * has changed as the result of this write operation. * @@ -95,6 +98,7 @@ struct vm_fault; * range it covers needs to be remapped by the high level before the operation * can proceed. */ +#define IOMAP_F_FOLIO_BATCH (1U << 13) #define IOMAP_F_SIZE_CHANGED (1U << 14) #define IOMAP_F_STALE (1U << 15) @@ -352,8 +356,8 @@ bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio); int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, const struct iomap_ops *ops, const struct iomap_write_ops *write_ops); -loff_t iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset, - loff_t length); +unsigned int iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t *start, + loff_t end, unsigned int *iomap_flags); int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, const struct iomap_ops *ops, const struct iomap_write_ops *write_ops, void *private); -- cgit v1.2.3 From 12965a190eaea614bb49e22041e8fc0d03d0310f Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 4 Dec 2025 08:48:33 -0500 Subject: filelock: allow lease_managers to dictate what qualifies as a conflict Requesting a delegation on a file from the userland fcntl() interface currently succeeds when there are conflicting opens present. This is because the lease handling code ignores conflicting opens for FL_LAYOUT and FL_DELEG leases. This was a hack put in place long ago, because nfsd already checks for conflicts in its own way. 
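For reference, the conflict semantics at issue are the ones userland already
sees for ordinary leases via fcntl(2). A minimal userspace sketch (plain
F_SETLEASE, not the new delegation interface):

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <errno.h>
  #include <stdio.h>

  /* A read lease is refused with EAGAIN while the inode is open for
   * writing; a write lease additionally requires that nobody else has
   * the inode open at all. */
  static int try_read_lease(int fd)
  {
          if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1 && errno == EAGAIN) {
                  fprintf(stderr, "conflicting open, lease denied\n");
                  return -1;
          }
          return 0;
  }
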
The kernel needs to perform this check for userland delegations the same way it is done for leases, however. Make this dependent on the lease_manager by adding a new ->lm_open_conflict() lease_manager operation and have generic_add_lease() call that instead of check_conflicting_open(). Morph check_conflicting_open() into a ->lm_open_conflict() op that is only called for userland leases/delegations. Set the ->lm_open_conflict() operations for nfsd to trivial functions that always return 0. Reviewed-by: Chuck Lever Signed-off-by: Jeff Layton Link: https://patch.msgid.link/20251204-dir-deleg-ro-v2-2-22d37f92ce2c@kernel.org Signed-off-by: Christian Brauner --- Documentation/filesystems/locking.rst | 1 + fs/locks.c | 90 ++++++++++++++++------------------- fs/nfsd/nfs4layouts.c | 23 ++++++++- fs/nfsd/nfs4state.c | 19 ++++++++ include/linux/filelock.h | 1 + 5 files changed, 84 insertions(+), 50 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index 77704fde9845..04c7691e50e0 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -416,6 +416,7 @@ lm_change yes no no lm_breaker_owns_lease: yes no no lm_lock_expirable yes no no lm_expire_lock no no yes +lm_open_conflict yes no no ====================== ============= ================= ========= buffer_head diff --git a/fs/locks.c b/fs/locks.c index be0b79286da8..e75c8084d937 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -585,10 +585,50 @@ lease_setup(struct file_lease *fl, void **priv) __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0); } +/** + * lease_open_conflict - see if the given file points to an inode that has + * an existing open that would conflict with the + * desired lease. + * @filp: file to check + * @arg: type of lease that we're trying to acquire + * + * Check to see if there's an existing open fd on this file that would + * conflict with the lease we're trying to set. + */ +static int +lease_open_conflict(struct file *filp, const int arg) +{ + struct inode *inode = file_inode(filp); + int self_wcount = 0, self_rcount = 0; + + if (arg == F_RDLCK) + return inode_is_open_for_write(inode) ? -EAGAIN : 0; + else if (arg != F_WRLCK) + return 0; + + /* + * Make sure that only read/write count is from lease requestor. + * Note that this will result in denying write leases when i_writecount + * is negative, which is what we want. (We shouldn't grant write leases + * on files open for execution.) + */ + if (filp->f_mode & FMODE_WRITE) + self_wcount = 1; + else if (filp->f_mode & FMODE_READ) + self_rcount = 1; + + if (atomic_read(&inode->i_writecount) != self_wcount || + atomic_read(&inode->i_readcount) != self_rcount) + return -EAGAIN; + + return 0; +} + static const struct lease_manager_operations lease_manager_ops = { .lm_break = lease_break_callback, .lm_change = lease_modify, .lm_setup = lease_setup, + .lm_open_conflict = lease_open_conflict, }; /* @@ -1754,52 +1794,6 @@ int fcntl_getdeleg(struct file *filp, struct delegation *deleg) return 0; } -/** - * check_conflicting_open - see if the given file points to an inode that has - * an existing open that would conflict with the - * desired lease. - * @filp: file to check - * @arg: type of lease that we're trying to acquire - * @flags: current lock flags - * - * Check to see if there's an existing open fd on this file that would - * conflict with the lease we're trying to set. 
- */ -static int -check_conflicting_open(struct file *filp, const int arg, int flags) -{ - struct inode *inode = file_inode(filp); - int self_wcount = 0, self_rcount = 0; - - if (flags & FL_LAYOUT) - return 0; - if (flags & FL_DELEG) - /* We leave these checks to the caller */ - return 0; - - if (arg == F_RDLCK) - return inode_is_open_for_write(inode) ? -EAGAIN : 0; - else if (arg != F_WRLCK) - return 0; - - /* - * Make sure that only read/write count is from lease requestor. - * Note that this will result in denying write leases when i_writecount - * is negative, which is what we want. (We shouldn't grant write leases - * on files open for execution.) - */ - if (filp->f_mode & FMODE_WRITE) - self_wcount = 1; - else if (filp->f_mode & FMODE_READ) - self_rcount = 1; - - if (atomic_read(&inode->i_writecount) != self_wcount || - atomic_read(&inode->i_readcount) != self_rcount) - return -EAGAIN; - - return 0; -} - static int generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **priv) { @@ -1836,7 +1830,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **pr percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); - error = check_conflicting_open(filp, arg, lease->c.flc_flags); + error = lease->fl_lmops->lm_open_conflict(filp, arg); if (error) goto out; @@ -1893,7 +1887,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **pr * precedes these checks. */ smp_mb(); - error = check_conflicting_open(filp, arg, lease->c.flc_flags); + error = lease->fl_lmops->lm_open_conflict(filp, arg); if (error) { locks_unlink_lock_ctx(&lease->c); goto out; diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 683bd1130afe..ad7af8cfcf1f 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -764,9 +764,28 @@ nfsd4_layout_lm_change(struct file_lease *onlist, int arg, return lease_modify(onlist, arg, dispose); } +/** + * nfsd4_layout_lm_open_conflict - see if the given file points to an inode that has + * an existing open that would conflict with the + * desired lease. + * @filp: file to check + * @arg: type of lease that we're trying to acquire + * + * The kernel will call into this operation to determine whether there + * are conflicting opens that may prevent the layout from being granted. + * For nfsd, that check is done at a higher level, so this trivially + * returns 0. + */ +static int +nfsd4_layout_lm_open_conflict(struct file *filp, int arg) +{ + return 0; +} + static const struct lease_manager_operations nfsd4_layouts_lm_ops = { - .lm_break = nfsd4_layout_lm_break, - .lm_change = nfsd4_layout_lm_change, + .lm_break = nfsd4_layout_lm_break, + .lm_change = nfsd4_layout_lm_change, + .lm_open_conflict = nfsd4_layout_lm_open_conflict, }; int diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 808c24fb5c9a..19d6d6db107f 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -5552,10 +5552,29 @@ nfsd_change_deleg_cb(struct file_lease *onlist, int arg, return -EAGAIN; } +/** + * nfsd4_deleg_lm_open_conflict - see if the given file points to an inode that has + * an existing open that would conflict with the + * desired lease. + * @filp: file to check + * @arg: type of lease that we're trying to acquire + * + * The kernel will call into this operation to determine whether there + * are conflicting opens that may prevent the deleg from being granted. + * For nfsd, that check is done at a higher level, so this trivially + * returns 0. 
+ */ +static int +nfsd4_deleg_lm_open_conflict(struct file *filp, int arg) +{ + return 0; +} + static const struct lease_manager_operations nfsd_lease_mng_ops = { .lm_breaker_owns_lease = nfsd_breaker_owns_lease, .lm_break = nfsd_break_deleg_cb, .lm_change = nfsd_change_deleg_cb, + .lm_open_conflict = nfsd4_deleg_lm_open_conflict, }; static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid) diff --git a/include/linux/filelock.h b/include/linux/filelock.h index 54b824c05299..2f5e5588ee07 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -49,6 +49,7 @@ struct lease_manager_operations { int (*lm_change)(struct file_lease *, int, struct list_head *); void (*lm_setup)(struct file_lease *, void **); bool (*lm_breaker_owns_lease)(struct file_lease *); + int (*lm_open_conflict)(struct file *, int); }; struct lock_manager { -- cgit v1.2.3 From f157dd661339fc6f5f2b574fe2429c43bd309534 Mon Sep 17 00:00:00 2001 From: Miquel Sabaté Solà Date: Tue, 21 Oct 2025 11:11:25 +0200 Subject: btrfs: fix NULL dereference on root when tracing inode eviction MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When evicting an inode the first thing we do is to setup tracing for it, which implies fetching the root's id. But in btrfs_evict_inode() the root might be NULL, as implied in the next check that we do in btrfs_evict_inode(). Hence, we either should set the ->root_objectid to 0 in case the root is NULL, or we move tracing setup after checking that the root is not NULL. Setting the rootid to 0 at least gives us the possibility to trace this call even in the case when the root is NULL, so that's the solution taken here. Fixes: 1abe9b8a138c ("Btrfs: add initial tracepoint support for btrfs") Reported-by: syzbot+d991fea1b4b23b1f6bf8@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=d991fea1b4b23b1f6bf8 Signed-off-by: Miquel Sabaté Solà Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/trace/events/btrfs.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 7e418f065b94..125bdc166bfe 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -224,7 +224,8 @@ DECLARE_EVENT_CLASS(btrfs__inode, __entry->generation = BTRFS_I(inode)->generation; __entry->last_trans = BTRFS_I(inode)->last_trans; __entry->logged_trans = BTRFS_I(inode)->logged_trans; - __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root); + __entry->root_objectid = BTRFS_I(inode)->root ? + btrfs_root_id(BTRFS_I(inode)->root) : 0; ), TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%llu blocks=%llu " -- cgit v1.2.3 From 2dc675f614850b80deab7cf6d12902636ed8a7f4 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 8 Dec 2025 14:33:05 +0100 Subject: RDMA/ucma: Fix rdma_ucm_query_ib_service_resp struct padding On a few 32-bit architectures, the newly added ib_user_service_rec structure is not 64-bit aligned the way it is on most regular ones. Add explicit padding into the rdma_ucm_query_ib_service_resp and rdma_ucm_resolve_ib_service structures that embed it, so that the layout is compatible across all of them. This is an ABI change on i386, aligning it with x86_64 and the other 64-bit architectures to avoid having to use a compat ioctl handler. 
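To illustrate the layout issue (a hedged userspace sketch with made-up
struct names, not part of the patch): on i386 a bare __u64 is only 4-byte
aligned, so a u32 followed by a u64 packs differently than on 64-bit
architectures unless the padding is made explicit:

  #include <stdint.h>
  #include <stddef.h>

  struct bad {            /* service_id at offset 4 on i386, 8 on x86_64 */
          uint32_t id;
          uint64_t service_id;
  };

  struct good {           /* identical layout on every architecture */
          uint32_t id;
          uint32_t reserved;
          uint64_t service_id __attribute__((aligned(8)));  /* ~ __aligned_u64 */
  };

  _Static_assert(offsetof(struct good, service_id) == 8, "stable ABI offset");
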
Fixes: 810f874eda8e ("RDMA/ucma: Support query resolved service records") Link: https://patch.msgid.link/r/20251208133311.313977-1-arnd@kernel.org Signed-off-by: Arnd Bergmann Signed-off-by: Jason Gunthorpe --- include/uapi/rdma/rdma_user_cm.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h index 5ded174687ee..838f8d460256 100644 --- a/include/uapi/rdma/rdma_user_cm.h +++ b/include/uapi/rdma/rdma_user_cm.h @@ -192,6 +192,7 @@ struct rdma_ucm_query_path_resp { struct rdma_ucm_query_ib_service_resp { __u32 num_service_recs; + __u32 reserved; struct ib_user_service_rec recs[]; }; @@ -354,7 +355,7 @@ enum { #define RDMA_USER_CM_IB_SERVICE_NAME_SIZE 64 struct rdma_ucm_ib_service { - __u64 service_id; + __aligned_u64 service_id; __u8 service_name[RDMA_USER_CM_IB_SERVICE_NAME_SIZE]; __u32 flags; __u32 reserved; @@ -362,6 +363,7 @@ struct rdma_ucm_ib_service { struct rdma_ucm_resolve_ib_service { __u32 id; + __u32 reserved; struct rdma_ucm_ib_service ibs; }; -- cgit v1.2.3 From d95e99a74eaf35c070f5939295331e5d7857c723 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 8 Dec 2025 14:38:44 +0100 Subject: RDMA/irdma: Fix irdma_alloc_ucontext_resp padding A recent commit modified struct irdma_alloc_ucontext_resp by adding a member with implicit padding in front of it, though this does not change the offset of the data members other than m68k. Reported by scripts/check-uapi.sh: ==== ABI differences detected in include/rdma/irdma-abi.h from 1dd7bde2e91c -> HEAD ==== [C] 'struct irdma_alloc_ucontext_resp' changed: type size changed from 704 to 640 (in bits) 1 data member deletion: '__u8 rsvd3[2]', at offset 640 (in bits) at irdma-abi.h:61:1 1 data member insertion: '__u8 revd3[2]', at offset 592 (in bits) at irdma-abi.h:60:1 Change the size back to the previous version, and remove the implicit padding by making it explicit and matching what x86-64 would do by placing max_hw_srq_quanta member into a naturally aligned location. Fixes: 563e1feb5f6e ("RDMA/irdma: Add SRQ support") Link: https://patch.msgid.link/r/20251208133849.315451-1-arnd@kernel.org Signed-off-by: Arnd Bergmann Reviewed-by: Geert Uytterhoeven Tested-by: Jacob Moroni Signed-off-by: Jason Gunthorpe --- include/uapi/rdma/irdma-abi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h index f7788d33376b..36f20802bcc8 100644 --- a/include/uapi/rdma/irdma-abi.h +++ b/include/uapi/rdma/irdma-abi.h @@ -57,8 +57,8 @@ struct irdma_alloc_ucontext_resp { __u8 rsvd2; __aligned_u64 comp_mask; __u16 min_hw_wq_size; + __u8 revd3[2]; __u32 max_hw_srq_quanta; - __u8 rsvd3[2]; }; struct irdma_alloc_pd_resp { -- cgit v1.2.3 From 4a824c3128998158a093eaadd776a79abe3a601a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 4 Dec 2025 15:31:27 +0000 Subject: entry: Always inline local_irq_{enable,disable}_exit_to_user() clang needs __always_inline instead of inline, even for tiny helpers. 
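For illustration (a generic sketch, not the actual header): plain `inline`
is only a hint that the compiler is free to ignore, e.g. at -O0 or when its
cost model says no, whereas __always_inline maps to the always_inline
attribute and forces the body into every caller:

  /* clang may still emit this as an out-of-line call: */
  static inline void irq_enable_hint(void)
  {
          local_irq_enable();
  }

  /* expands to inline __attribute__((__always_inline__)); always folded: */
  static __always_inline void irq_enable_forced(void)
  {
          local_irq_enable();
  }
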
This saves some cycles in system call fast path, and saves 195 bytes on x86_64 build: $ size vmlinux.before vmlinux.after text data bss dec hex filename 34652814 22291961 5875180 62819955 3be8e73 vmlinux.before 34652619 22291961 5875180 62819760 3be8db0 vmlinux.after Signed-off-by: Eric Dumazet Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251204153127.1321824-1-edumazet@google.com --- include/linux/irq-entry-common.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/irq-entry-common.h b/include/linux/irq-entry-common.h index 6ab913e57da0..d26d1b1bcbfb 100644 --- a/include/linux/irq-entry-common.h +++ b/include/linux/irq-entry-common.h @@ -110,7 +110,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs) static inline void local_irq_enable_exit_to_user(unsigned long ti_work); #ifndef local_irq_enable_exit_to_user -static inline void local_irq_enable_exit_to_user(unsigned long ti_work) +static __always_inline void local_irq_enable_exit_to_user(unsigned long ti_work) { local_irq_enable(); } @@ -125,7 +125,7 @@ static inline void local_irq_enable_exit_to_user(unsigned long ti_work) static inline void local_irq_disable_exit_to_user(void); #ifndef local_irq_disable_exit_to_user -static inline void local_irq_disable_exit_to_user(void) +static __always_inline void local_irq_disable_exit_to_user(void) { local_irq_disable(); } -- cgit v1.2.3 From 20e20b147cf7cb6780a5b95da2a0e37c52cd1015 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 15 Dec 2025 22:38:00 -0800 Subject: platform/x86/intel/vsec: correct kernel-doc comments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix kernel-doc warnings in intel_vsec.h to eliminate all kernel-doc warnings: Warning: include/linux/intel_vsec.h:92 struct member 'read_telem' not described in 'pmt_callbacks' Warning: include/linux/intel_vsec.h:146 expecting prototype for struct intel_sec_device. Prototype was for struct intel_vsec_device instead Warning: include/linux/intel_vsec.h:146 struct member 'priv_data_size' not described in 'intel_vsec_device' In struct pmt_callbacks, correct the kernel-doc for @read_telem. kernel-doc doesn't support documenting callback function parameters, so drop the '@' signs on those and use "* *" to make them somewhat readable in the produced documentation output. Signed-off-by: Randy Dunlap Link: https://patch.msgid.link/20251216063801.2896495-1-rdunlap@infradead.org Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen --- include/linux/intel_vsec.h | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h index 53f6fe88e369..1a0f357c2427 100644 --- a/include/linux/intel_vsec.h +++ b/include/linux/intel_vsec.h @@ -80,13 +80,13 @@ enum intel_vsec_quirks { /** * struct pmt_callbacks - Callback infrastructure for PMT devices - * ->read_telem() when specified, called by client driver to access PMT data (instead - * of direct copy). - * @pdev: PCI device reference for the callback's use - * @guid: ID of data to acccss - * @data: buffer for the data to be copied - * @off: offset into the requested buffer - * @count: size of buffer + * @read_telem: when specified, called by client driver to access PMT + * data (instead of direct copy). 
+ * * pdev: PCI device reference for the callback's use + * * guid: ID of data to acccss + * * data: buffer for the data to be copied + * * off: offset into the requested buffer + * * count: size of buffer */ struct pmt_callbacks { int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, loff_t off, u32 count); @@ -120,7 +120,7 @@ struct intel_vsec_platform_info { }; /** - * struct intel_sec_device - Auxbus specific device information + * struct intel_vsec_device - Auxbus specific device information * @auxdev: auxbus device struct for auxbus access * @pcidev: pci device associated with the device * @resource: any resources shared by the parent @@ -128,6 +128,7 @@ struct intel_vsec_platform_info { * @num_resources: number of resources * @id: xarray id * @priv_data: any private data needed + * @priv_data_size: size of private data area * @quirks: specified quirks * @base_addr: base address of entries (if specified) * @cap_id: the enumerated id of the vsec feature -- cgit v1.2.3 From c31f4aa8fed048fa70e742c4bb49bb48dc489ab3 Mon Sep 17 00:00:00 2001 From: David Gow Date: Fri, 19 Dec 2025 16:52:58 +0800 Subject: kunit: Enforce task execution in {soft,hard}irq contexts The kunit_run_irq_test() helper allows a function to be run in hardirq and softirq contexts (in addition to the task context). It does this by running the user-provided function concurrently in the three contexts, until either a timeout has expired or a number of iterations have completed in the normal task context. However, on setups where the initialisation of the hardirq and softirq contexts (or, indeed, the scheduling of those tasks) is significantly slower than the function execution, it's possible for that number of iterations to be exceeded before any runs in irq contexts actually occur. This occurs with the polyval.test_polyval_preparekey_in_irqs test, which runs 20000 iterations of the relatively fast preparekey function, and therefore fails often under many UML, 32-bit arm, m68k and other environments. Instead, ensure that the max_iterations limit counts executions in all three contexts, and requires at least one of each. This will cause the test to continue iterating until at least the irq contexts have been tested, or the 1s wall-clock limit has been exceeded. This causes the test to pass in all of my environments. In so doing, we also update the task counters to atomic ints, to better match both the 'int' max_iterations input, and to ensure they are correctly updated across contexts. Finally, we also fix a few potential assertion messages to be less-specific to the original crypto usecases. 
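For context, a usage sketch of the helper under test. The callback, state
type, and comparison helper here are hypothetical; the signature is the one
visible in the hunk below (test, callback, max_iterations, callback state):

  static bool recompute_digest(void *arg)
  {
          struct my_vec *v = arg;         /* hypothetical test vector */

          /* Must be safe to run in task, softirq and hardirq context. */
          return hash_and_compare(v);     /* hypothetical helper */
  }

  static void my_irq_test(struct kunit *test)
  {
          struct my_vec vec = {};

          /* Runs recompute_digest() concurrently in all three contexts,
           * now until each context has executed at least once. */
          kunit_run_irq_test(test, recompute_digest, 20000, &vec);
  }
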
Fixes: 950a81224e8b ("lib/crypto: tests: Add hash-test-template.h and gen-hash-testvecs.py") Signed-off-by: David Gow Link: https://lore.kernel.org/r/20251219085259.1163048-1-davidgow@google.com Signed-off-by: Eric Biggers --- include/kunit/run-in-irq-context.h | 53 ++++++++++++++++++++++++-------------- 1 file changed, 33 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/include/kunit/run-in-irq-context.h b/include/kunit/run-in-irq-context.h index 108e96433ea4..c89b1b1b12dd 100644 --- a/include/kunit/run-in-irq-context.h +++ b/include/kunit/run-in-irq-context.h @@ -20,8 +20,8 @@ struct kunit_irq_test_state { bool task_func_reported_failure; bool hardirq_func_reported_failure; bool softirq_func_reported_failure; - unsigned long hardirq_func_calls; - unsigned long softirq_func_calls; + atomic_t hardirq_func_calls; + atomic_t softirq_func_calls; struct hrtimer timer; struct work_struct bh_work; }; @@ -32,7 +32,7 @@ static enum hrtimer_restart kunit_irq_test_timer_func(struct hrtimer *timer) container_of(timer, typeof(*state), timer); WARN_ON_ONCE(!in_hardirq()); - state->hardirq_func_calls++; + atomic_inc(&state->hardirq_func_calls); if (!state->func(state->test_specific_state)) state->hardirq_func_reported_failure = true; @@ -48,7 +48,7 @@ static void kunit_irq_test_bh_work_func(struct work_struct *work) container_of(work, typeof(*state), bh_work); WARN_ON_ONCE(!in_serving_softirq()); - state->softirq_func_calls++; + atomic_inc(&state->softirq_func_calls); if (!state->func(state->test_specific_state)) state->softirq_func_reported_failure = true; @@ -59,7 +59,10 @@ static void kunit_irq_test_bh_work_func(struct work_struct *work) * hardirq context concurrently, and reports a failure to KUnit if any * invocation of @func in any context returns false. @func is passed * @test_specific_state as its argument. At most 3 invocations of @func will - * run concurrently: one in each of task, softirq, and hardirq context. + * run concurrently: one in each of task, softirq, and hardirq context. @func + * will continue running until either @max_iterations calls have been made (so + * long as at least one each runs in task, softirq, and hardirq contexts), or + * one second has passed. * * The main purpose of this interrupt context testing is to validate fallback * code paths that run in contexts where the normal code path cannot be used, @@ -85,6 +88,8 @@ static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *), .test_specific_state = test_specific_state, }; unsigned long end_jiffies; + int hardirq_calls, softirq_calls; + bool allctx = false; /* * Set up a hrtimer (the way we access hardirq context) and a work @@ -94,14 +99,25 @@ static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *), CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); INIT_WORK_ONSTACK(&state.bh_work, kunit_irq_test_bh_work_func); - /* Run for up to max_iterations or 1 second, whichever comes first. */ + /* + * Run for up to max_iterations (including at least one task, softirq, + * and hardirq), or 1 second, whichever comes first. 
+ */ end_jiffies = jiffies + HZ; hrtimer_start(&state.timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL, HRTIMER_MODE_REL_HARD); - for (int i = 0; i < max_iterations && !time_after(jiffies, end_jiffies); - i++) { + for (int task_calls = 0, calls = 0; + ((calls < max_iterations) || !allctx) && + !time_after(jiffies, end_jiffies); + task_calls++) { if (!func(test_specific_state)) state.task_func_reported_failure = true; + + hardirq_calls = atomic_read(&state.hardirq_func_calls); + softirq_calls = atomic_read(&state.softirq_func_calls); + calls = task_calls + hardirq_calls + softirq_calls; + allctx = (task_calls > 0) && (hardirq_calls > 0) && + (softirq_calls > 0); } /* Cancel the timer and work. */ @@ -109,21 +125,18 @@ static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *), flush_work(&state.bh_work); /* Sanity check: the timer and BH functions should have been run. */ - KUNIT_EXPECT_GT_MSG(test, state.hardirq_func_calls, 0, + KUNIT_EXPECT_GT_MSG(test, atomic_read(&state.hardirq_func_calls), 0, "Timer function was not called"); - KUNIT_EXPECT_GT_MSG(test, state.softirq_func_calls, 0, + KUNIT_EXPECT_GT_MSG(test, atomic_read(&state.softirq_func_calls), 0, "BH work function was not called"); - /* Check for incorrect hash values reported from any context. */ - KUNIT_EXPECT_FALSE_MSG( - test, state.task_func_reported_failure, - "Incorrect hash values reported from task context"); - KUNIT_EXPECT_FALSE_MSG( - test, state.hardirq_func_reported_failure, - "Incorrect hash values reported from hardirq context"); - KUNIT_EXPECT_FALSE_MSG( - test, state.softirq_func_reported_failure, - "Incorrect hash values reported from softirq context"); + /* Check for failure reported from any context. */ + KUNIT_EXPECT_FALSE_MSG(test, state.task_func_reported_failure, + "Failure reported from task context"); + KUNIT_EXPECT_FALSE_MSG(test, state.hardirq_func_reported_failure, + "Failure reported from hardirq context"); + KUNIT_EXPECT_FALSE_MSG(test, state.softirq_func_reported_failure, + "Failure reported from softirq context"); } #endif /* _KUNIT_RUN_IN_IRQ_CONTEXT_H */ -- cgit v1.2.3 From 754c23238438600e9236719f7e67aff2c4d02093 Mon Sep 17 00:00:00 2001 From: Thomas Hellström Date: Fri, 19 Dec 2025 12:32:59 +0100 Subject: drm/pagemap, drm/xe: Ensure that the devmem allocation is idle before use MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In situations where no system memory is migrated to devmem, and in upcoming patches where another GPU is performing the migration to the newly allocated devmem buffer, there is nothing to ensure any ongoing clear to the devmem allocation or async eviction from the devmem allocation is complete. Address that by passing a struct dma_fence down to the copy functions, and ensure it is waited for before migration is marked complete. v3: - New patch. v4: - Update the logic used for determining when to wait for the pre_migrate_fence. - Update the logic used for determining when to warn for the pre_migrate_fence since the scheduler fences apparently can signal out-of-order. v5: - Fix a UAF (CI) - Remove references to source P2P migration (Himal) - Put the pre_migrate_fence after migration. v6: - Pipeline the pre_migrate_fence dependency (Matt Brost) Fixes: c5b3eb5a906c ("drm/xe: Add GPUSVM device memory copy vfunc functions") Cc: Matthew Brost Cc: # v6.15+ Signed-off-by: Thomas Hellström Reviewed-by: Matthew Brost Acked-by: Maarten Lankhorst # For merging through drm-xe. 
Link: https://patch.msgid.link/20251219113320.183860-4-thomas.hellstrom@linux.intel.com (cherry picked from commit 16b5ad31952476fb925c401897fc171cd37f536b) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/drm_pagemap.c | 17 ++++++++++---- drivers/gpu/drm/xe/xe_migrate.c | 25 ++++++++++++++++----- drivers/gpu/drm/xe/xe_migrate.h | 6 +++-- drivers/gpu/drm/xe/xe_svm.c | 49 +++++++++++++++++++++++++++++++---------- include/drm/drm_pagemap.h | 17 +++++++++++--- 5 files changed, 88 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c index 37d7cfbbb3e8..06c1bd8fc4d1 100644 --- a/drivers/gpu/drm/drm_pagemap.c +++ b/drivers/gpu/drm/drm_pagemap.c @@ -3,6 +3,7 @@ * Copyright © 2024-2025 Intel Corporation */ +#include #include #include #include @@ -408,10 +409,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation, drm_pagemap_get_devmem_page(page, zdd); } - err = ops->copy_to_devmem(pages, pagemap_addr, npages); + err = ops->copy_to_devmem(pages, pagemap_addr, npages, + devmem_allocation->pre_migrate_fence); if (err) goto err_finalize; + dma_fence_put(devmem_allocation->pre_migrate_fence); + devmem_allocation->pre_migrate_fence = NULL; + /* Upon success bind devmem allocation to range and zdd */ devmem_allocation->timeslice_expiration = get_jiffies_64() + msecs_to_jiffies(timeslice_ms); @@ -596,7 +601,7 @@ retry: for (i = 0; i < npages; ++i) pages[i] = migrate_pfn_to_page(src[i]); - err = ops->copy_to_ram(pages, pagemap_addr, npages); + err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL); if (err) goto err_finalize; @@ -732,7 +737,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas, for (i = 0; i < npages; ++i) pages[i] = migrate_pfn_to_page(migrate.src[i]); - err = ops->copy_to_ram(pages, pagemap_addr, npages); + err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL); if (err) goto err_finalize; @@ -813,11 +818,14 @@ EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get); * @ops: Pointer to the operations structure for GPU SVM device memory * @dpagemap: The struct drm_pagemap we're allocating from. * @size: Size of device memory allocation + * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts. + * (May be NULL). 
*/ void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation, struct device *dev, struct mm_struct *mm, const struct drm_pagemap_devmem_ops *ops, - struct drm_pagemap *dpagemap, size_t size) + struct drm_pagemap *dpagemap, size_t size, + struct dma_fence *pre_migrate_fence) { init_completion(&devmem_allocation->detached); devmem_allocation->dev = dev; @@ -825,6 +833,7 @@ void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation, devmem_allocation->ops = ops; devmem_allocation->dpagemap = dpagemap; devmem_allocation->size = size; + devmem_allocation->pre_migrate_fence = pre_migrate_fence; } EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init); diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 2184af413b91..5a95b08a4723 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -2062,6 +2062,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m, unsigned long sram_offset, struct drm_pagemap_addr *sram_addr, u64 vram_addr, + struct dma_fence *deps, const enum xe_migrate_copy_dir dir) { struct xe_gt *gt = m->tile->primary_gt; @@ -2150,6 +2151,14 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m, xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB); + if (deps && !dma_fence_is_signaled(deps)) { + dma_fence_get(deps); + err = drm_sched_job_add_dependency(&job->drm, deps); + if (err) + dma_fence_wait(deps, false); + err = 0; + } + mutex_lock(&m->job_mutex); xe_sched_job_arm(job); fence = dma_fence_get(&job->drm.s_fence->finished); @@ -2175,6 +2184,8 @@ err: * @npages: Number of pages to migrate. * @src_addr: Array of DMA information (source of migrate) * @dst_addr: Device physical address of VRAM (destination of migrate) + * @deps: struct dma_fence representing the dependencies that need + * to be signaled before migration. * * Copy from an array dma addresses to a VRAM device physical address * @@ -2184,10 +2195,11 @@ err: struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m, unsigned long npages, struct drm_pagemap_addr *src_addr, - u64 dst_addr) + u64 dst_addr, + struct dma_fence *deps) { return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr, - XE_MIGRATE_COPY_TO_VRAM); + deps, XE_MIGRATE_COPY_TO_VRAM); } /** @@ -2196,6 +2208,8 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m, * @npages: Number of pages to migrate. * @src_addr: Device physical address of VRAM (source of migrate) * @dst_addr: Array of DMA information (destination of migrate) + * @deps: struct dma_fence representing the dependencies that need + * to be signaled before migration. * * Copy from a VRAM device physical address to an array dma addresses * @@ -2205,10 +2219,11 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m, struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m, unsigned long npages, u64 src_addr, - struct drm_pagemap_addr *dst_addr) + struct drm_pagemap_addr *dst_addr, + struct dma_fence *deps) { return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr, - XE_MIGRATE_COPY_TO_SRAM); + deps, XE_MIGRATE_COPY_TO_SRAM); } static void xe_migrate_dma_unmap(struct xe_device *xe, @@ -2384,7 +2399,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, __fence = xe_migrate_vram(m, current_bytes, (unsigned long)buf & ~PAGE_MASK, &pagemap_addr[current_page], - vram_addr, write ? + vram_addr, NULL, write ? 
XE_MIGRATE_COPY_TO_VRAM : XE_MIGRATE_COPY_TO_SRAM); if (IS_ERR(__fence)) { diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h index 260e298e5dd7..b76441f062b4 100644 --- a/drivers/gpu/drm/xe/xe_migrate.h +++ b/drivers/gpu/drm/xe/xe_migrate.h @@ -116,12 +116,14 @@ int xe_migrate_init(struct xe_migrate *m); struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m, unsigned long npages, struct drm_pagemap_addr *src_addr, - u64 dst_addr); + u64 dst_addr, + struct dma_fence *deps); struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m, unsigned long npages, u64 src_addr, - struct drm_pagemap_addr *dst_addr); + struct drm_pagemap_addr *dst_addr, + struct dma_fence *deps); struct dma_fence *xe_migrate_copy(struct xe_migrate *m, struct xe_bo *src_bo, diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 894e8f092e3f..f97e0af6a9b0 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -476,7 +476,8 @@ static void xe_svm_copy_us_stats_incr(struct xe_gt *gt, static int xe_svm_copy(struct page **pages, struct drm_pagemap_addr *pagemap_addr, - unsigned long npages, const enum xe_svm_copy_dir dir) + unsigned long npages, const enum xe_svm_copy_dir dir, + struct dma_fence *pre_migrate_fence) { struct xe_vram_region *vr = NULL; struct xe_gt *gt = NULL; @@ -565,7 +566,8 @@ static int xe_svm_copy(struct page **pages, __fence = xe_migrate_from_vram(vr->migrate, i - pos + incr, vram_addr, - &pagemap_addr[pos]); + &pagemap_addr[pos], + pre_migrate_fence); } else { vm_dbg(&xe->drm, "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld", @@ -574,13 +576,14 @@ static int xe_svm_copy(struct page **pages, __fence = xe_migrate_to_vram(vr->migrate, i - pos + incr, &pagemap_addr[pos], - vram_addr); + vram_addr, + pre_migrate_fence); } if (IS_ERR(__fence)) { err = PTR_ERR(__fence); goto err_out; } - + pre_migrate_fence = NULL; dma_fence_put(fence); fence = __fence; } @@ -603,20 +606,22 @@ static int xe_svm_copy(struct page **pages, vram_addr, (u64)pagemap_addr[pos].addr, 1); __fence = xe_migrate_from_vram(vr->migrate, 1, vram_addr, - &pagemap_addr[pos]); + &pagemap_addr[pos], + pre_migrate_fence); } else { vm_dbg(&xe->drm, "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d", (u64)pagemap_addr[pos].addr, vram_addr, 1); __fence = xe_migrate_to_vram(vr->migrate, 1, &pagemap_addr[pos], - vram_addr); + vram_addr, + pre_migrate_fence); } if (IS_ERR(__fence)) { err = PTR_ERR(__fence); goto err_out; } - + pre_migrate_fence = NULL; dma_fence_put(fence); fence = __fence; } @@ -629,6 +634,8 @@ err_out: dma_fence_wait(fence, false); dma_fence_put(fence); } + if (pre_migrate_fence) + dma_fence_wait(pre_migrate_fence, false); /* * XXX: We can't derive the GT here (or anywhere in this functions, but @@ -645,16 +652,20 @@ err_out: static int xe_svm_copy_to_devmem(struct page **pages, struct drm_pagemap_addr *pagemap_addr, - unsigned long npages) + unsigned long npages, + struct dma_fence *pre_migrate_fence) { - return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM); + return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM, + pre_migrate_fence); } static int xe_svm_copy_to_ram(struct page **pages, struct drm_pagemap_addr *pagemap_addr, - unsigned long npages) + unsigned long npages, + struct dma_fence *pre_migrate_fence) { - return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM); + return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM, + pre_migrate_fence); } static struct xe_bo *to_xe_bo(struct 
drm_pagemap_devmem *devmem_allocation) @@ -667,6 +678,7 @@ static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation) struct xe_bo *bo = to_xe_bo(devmem_allocation); struct xe_device *xe = xe_bo_device(bo); + dma_fence_put(devmem_allocation->pre_migrate_fence); xe_bo_put_async(bo); xe_pm_runtime_put(xe); } @@ -861,6 +873,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap, unsigned long timeslice_ms) { struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap); + struct dma_fence *pre_migrate_fence = NULL; struct xe_device *xe = vr->xe; struct device *dev = xe->drm.dev; struct drm_buddy_block *block; @@ -887,8 +900,20 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap, break; } + /* Ensure that any clearing or async eviction will complete before migration. */ + if (!dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) { + err = dma_resv_get_singleton(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, + &pre_migrate_fence); + if (err) + dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, + false, MAX_SCHEDULE_TIMEOUT); + else if (pre_migrate_fence) + dma_fence_enable_sw_signaling(pre_migrate_fence); + } + drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm, - &dpagemap_devmem_ops, dpagemap, end - start); + &dpagemap_devmem_ops, dpagemap, end - start, + pre_migrate_fence); blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks; list_for_each_entry(block, blocks, link) diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h index f6e7e234c089..70a7991f784f 100644 --- a/include/drm/drm_pagemap.h +++ b/include/drm/drm_pagemap.h @@ -8,6 +8,7 @@ #define NR_PAGES(order) (1U << (order)) +struct dma_fence; struct drm_pagemap; struct drm_pagemap_zdd; struct device; @@ -174,6 +175,8 @@ struct drm_pagemap_devmem_ops { * @pages: Pointer to array of device memory pages (destination) * @pagemap_addr: Pointer to array of DMA information (source) * @npages: Number of pages to copy + * @pre_migrate_fence: dma-fence to wait for before migration start. + * May be NULL. * * Copy pages to device memory. If the order of a @pagemap_addr entry * is greater than 0, the entry is populated but subsequent entries @@ -183,13 +186,16 @@ struct drm_pagemap_devmem_ops { */ int (*copy_to_devmem)(struct page **pages, struct drm_pagemap_addr *pagemap_addr, - unsigned long npages); + unsigned long npages, + struct dma_fence *pre_migrate_fence); /** * @copy_to_ram: Copy to system RAM (required for migration) * @pages: Pointer to array of device memory pages (source) * @pagemap_addr: Pointer to array of DMA information (destination) * @npages: Number of pages to copy + * @pre_migrate_fence: dma-fence to wait for before migration start. + * May be NULL. * * Copy pages to system RAM. If the order of a @pagemap_addr entry * is greater than 0, the entry is populated but subsequent entries @@ -199,7 +205,8 @@ struct drm_pagemap_devmem_ops { */ int (*copy_to_ram)(struct page **pages, struct drm_pagemap_addr *pagemap_addr, - unsigned long npages); + unsigned long npages, + struct dma_fence *pre_migrate_fence); }; /** @@ -212,6 +219,8 @@ struct drm_pagemap_devmem_ops { * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to. * @size: Size of device memory allocation * @timeslice_expiration: Timeslice expiration in jiffies + * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts. + * (May be NULL). 
*/ struct drm_pagemap_devmem { struct device *dev; @@ -221,6 +230,7 @@ struct drm_pagemap_devmem { struct drm_pagemap *dpagemap; size_t size; u64 timeslice_expiration; + struct dma_fence *pre_migrate_fence; }; int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation, @@ -238,7 +248,8 @@ struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page); void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation, struct device *dev, struct mm_struct *mm, const struct drm_pagemap_devmem_ops *ops, - struct drm_pagemap *dpagemap, size_t size); + struct drm_pagemap *dpagemap, size_t size, + struct dma_fence *pre_migrate_fence); int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap, unsigned long start, unsigned long end, -- cgit v1.2.3 From 06e219f6a706c367c93051f408ac61417643d2f9 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 15 Dec 2025 17:02:35 +0200 Subject: net: dsa: properly keep track of conduit reference Problem description ------------------- DSA has a mumbo-jumbo of reference handling of the conduit net device and its kobject which, sadly, is just wrong and doesn't make sense. There are two distinct problems. 1. The OF path, which uses of_find_net_device_by_node(), never releases the elevated refcount on the conduit's kobject. Nominally, the OF and non-OF paths should result in objects having identical reference counts taken, and it is already suspicious that dsa_dev_to_net_device() has a put_device() call which is missing in dsa_port_parse_of(), but we can actually even verify that an issue exists. With CONFIG_DEBUG_KOBJECT_RELEASE=y, if we run this command "before" and "after" applying this patch: (unbind the conduit driver for net device eno2) echo 0000:00:00.2 > /sys/bus/pci/drivers/fsl_enetc/unbind we see these lines in the output diff which appear only with the patch applied: kobject: 'eno2' (ffff002009a3a6b8): kobject_release, parent 0000000000000000 (delayed 1000) kobject: '109' (ffff0020099d59a0): kobject_release, parent 0000000000000000 (delayed 1000) 2. After we find the conduit interface one way (OF) or another (non-OF), it can get unregistered at any time, and DSA remains with a long-lived, but in this case stale, cpu_dp->conduit pointer. Holding the net device's underlying kobject isn't actually of much help, it just prevents it from being freed (but we never need that kobject directly). What helps us to prevent the net device from being unregistered is the parallel netdev reference mechanism (dev_hold() and dev_put()). Actually we actually use that netdev tracker mechanism implicitly on user ports since commit 2f1e8ea726e9 ("net: dsa: link interfaces with the DSA master to get rid of lockdep warnings"), via netdev_upper_dev_link(). But time still passes at DSA switch probe time between the initial of_find_net_device_by_node() code and the user port creation time, time during which the conduit could unregister itself and DSA wouldn't know about it. So we have to run of_find_net_device_by_node() under rtnl_lock() to prevent that from happening, and release the lock only with the netdev tracker having acquired the reference. Do we need to keep the reference until dsa_unregister_switch() / dsa_switch_shutdown()? 1: Maybe yes. A switch device will still be registered even if all user ports failed to probe, see commit 86f8b1c01a0a ("net: dsa: Do not make user port errors fatal"), and the cpu_dp->conduit pointers remain valid. 
I haven't audited all call paths to see whether they will actually use the conduit in lack of any user port, but if they do, it seems safer to not rely on user ports for that reference. 2. Definitely yes. We support changing the conduit which a user port is associated to, and we can get into a situation where we've moved all user ports away from a conduit, thus no longer hold any reference to it via the net device tracker. But we shouldn't let it go nonetheless - see the next change in relation to dsa_tree_find_first_conduit() and LAG conduits which disappear. We have to be prepared to return to the physical conduit, so the CPU port must explicitly keep another reference to it. This is also to say: the user ports and their CPU ports may not always keep a reference to the same conduit net device, and both are needed. As for the conduit's kobject for the /sys/class/net/ entry, we don't care about it, we can release it as soon as we hold the net device object itself. History and blame attribution ----------------------------- The code has been refactored so many times, it is very difficult to follow and properly attribute a blame, but I'll try to make a short history which I hope to be correct. We have two distinct probing paths: - one for OF, introduced in 2016 in commit 83c0afaec7b7 ("net: dsa: Add new binding implementation") - one for non-OF, introduced in 2017 in commit 71e0bbde0d88 ("net: dsa: Add support for platform data") These are both complete rewrites of the original probing paths (which used struct dsa_switch_driver and other weird stuff, instead of regular devices on their respective buses for register access, like MDIO, SPI, I2C etc): - one for OF, introduced in 2013 in commit 5e95329b701c ("dsa: add device tree bindings to register DSA switches") - one for non-OF, introduced in 2008 in commit 91da11f870f0 ("net: Distributed Switch Architecture protocol support") except for tiny bits and pieces like dsa_dev_to_net_device() which were seemingly carried over since the original commit, and used to this day. The point is that the original probing paths received a fix in 2015 in the form of commit 679fb46c5785 ("net: dsa: Add missing master netdev dev_put() calls"), but the fix never made it into the "new" (dsa2) probing paths that can still be traced to today, and the fixed probing path was later deleted in 2019 in commit 93e86b3bc842 ("net: dsa: Remove legacy probing support"). That is to say, the new probing paths were never quite correct in this area. The existence of the legacy probing support which was deleted in 2019 explains why dsa_dev_to_net_device() returns a conduit with elevated refcount (because it was supposed to be released during dsa_remove_dst()). After the removal of the legacy code, the only user of dsa_dev_to_net_device() calls dev_put(conduit) immediately after this function returns. This pattern makes no sense today, and can only be interpreted historically to understand why dev_hold() was there in the first place. Change details -------------- Today we have a better netdev tracking infrastructure which we should use. Logically netdev_hold() belongs in common code (dsa_port_parse_cpu(), where dp->conduit is assigned), but there is a tradeoff to be made with the rtnl_lock() section which would become a bit too long if we did that - dsa_port_parse_cpu() also calls request_module(). So we duplicate a bit of logic in order for the callers of dsa_port_parse_cpu() to be the ones responsible of holding the conduit reference and releasing it on error. 
This shortens the rtnl_lock() section significantly. In the dsa_switch_probe() error path, dsa_switch_release_ports() will be called in a number of situations, one being where dsa_port_parse_cpu() maybe didn't get the chance to run at all (a different port failed earlier, etc). So we have to test for the conduit being NULL prior to calling netdev_put(). There have still been so many transformations to the code since the blamed commits (rename master -> conduit, commit 0650bf52b31f ("net: dsa: be compatible with masters which unregister on shutdown")), that it only makes sense to fix the code using the best methods available today and see how it can be backported to stable later. I suspect the fix cannot even be backported to kernels which lack dsa_switch_shutdown(), and I suspect this is also maybe why the long-lived conduit reference didn't make it into the new DSA probing paths at the time (problems during shutdown). Because dsa_dev_to_net_device() has a single call site and has to be changed anyway, the logic was just absorbed into the non-OF dsa_port_parse(). Tested on the ocelot/felix switch and on dsa_loop, both on the NXP LS1028A with CONFIG_DEBUG_KOBJECT_RELEASE=y. Reported-by: Ma Ke Closes: https://lore.kernel.org/netdev/20251214131204.4684-1-make24@iscas.ac.cn/ Fixes: 83c0afaec7b7 ("net: dsa: Add new binding implementation") Fixes: 71e0bbde0d88 ("net: dsa: Add support for platform data") Reviewed-by: Jonas Gorski Signed-off-by: Vladimir Oltean Link: https://patch.msgid.link/20251215150236.3931670-1-vladimir.oltean@nxp.com Signed-off-by: Paolo Abeni --- include/net/dsa.h | 1 + net/dsa/dsa.c | 59 ++++++++++++++++++++++++++++++++----------------------- 2 files changed, 35 insertions(+), 25 deletions(-) (limited to 'include') diff --git a/include/net/dsa.h b/include/net/dsa.h index cced1a866757..6b2b5ed64ea4 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -302,6 +302,7 @@ struct dsa_port { struct devlink_port devlink_port; struct phylink *pl; struct phylink_config pl_config; + netdevice_tracker conduit_tracker; struct dsa_lag *lag; struct net_device *hsr_dev; diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index a20efabe778f..50b3fceb5c04 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -1253,14 +1253,25 @@ static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn) if (ethernet) { struct net_device *conduit; const char *user_protocol; + int err; + rtnl_lock(); conduit = of_find_net_device_by_node(ethernet); of_node_put(ethernet); - if (!conduit) + if (!conduit) { + rtnl_unlock(); return -EPROBE_DEFER; + } + + netdev_hold(conduit, &dp->conduit_tracker, GFP_KERNEL); + put_device(&conduit->dev); + rtnl_unlock(); user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL); - return dsa_port_parse_cpu(dp, conduit, user_protocol); + err = dsa_port_parse_cpu(dp, conduit, user_protocol); + if (err) + netdev_put(conduit, &dp->conduit_tracker); + return err; } if (link) @@ -1393,37 +1404,30 @@ static struct device *dev_find_class(struct device *parent, char *class) return device_find_child(parent, class, dev_is_class); } -static struct net_device *dsa_dev_to_net_device(struct device *dev) -{ - struct device *d; - - d = dev_find_class(dev, "net"); - if (d != NULL) { - struct net_device *nd; - - nd = to_net_dev(d); - dev_hold(nd); - put_device(d); - - return nd; - } - - return NULL; -} - static int dsa_port_parse(struct dsa_port *dp, const char *name, struct device *dev) { if (!strcmp(name, "cpu")) { struct net_device *conduit; + struct device *d; + int err; - conduit = 
dsa_dev_to_net_device(dev); - if (!conduit) + rtnl_lock(); + d = dev_find_class(dev, "net"); + if (!d) { + rtnl_unlock(); return -EPROBE_DEFER; + } - dev_put(conduit); + conduit = to_net_dev(d); + netdev_hold(conduit, &dp->conduit_tracker, GFP_KERNEL); + put_device(d); + rtnl_unlock(); - return dsa_port_parse_cpu(dp, conduit, NULL); + err = dsa_port_parse_cpu(dp, conduit, NULL); + if (err) + netdev_put(conduit, &dp->conduit_tracker); + return err; } if (!strcmp(name, "dsa")) @@ -1491,6 +1495,9 @@ static void dsa_switch_release_ports(struct dsa_switch *ds) struct dsa_vlan *v, *n; dsa_switch_for_each_port_safe(dp, next, ds) { + if (dsa_port_is_cpu(dp) && dp->conduit) + netdev_put(dp->conduit, &dp->conduit_tracker); + /* These are either entries that upper layers lost track of * (probably due to bugs), or installed through interfaces * where one does not necessarily have to remove them, like @@ -1635,8 +1642,10 @@ void dsa_switch_shutdown(struct dsa_switch *ds) /* Disconnect from further netdevice notifiers on the conduit, * since netdev_uses_dsa() will now return false. */ - dsa_switch_for_each_cpu_port(dp, ds) + dsa_switch_for_each_cpu_port(dp, ds) { dp->conduit->dsa_ptr = NULL; + netdev_put(dp->conduit, &dp->conduit_tracker); + } rtnl_unlock(); out: -- cgit v1.2.3 From 5393802c94e0ab1295c04c94c57bcb00222d4674 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 27 Nov 2025 10:39:24 -0800 Subject: genalloc.h: fix htmldocs warning WARNING: include/linux/genalloc.h:52 function parameter 'start_addr' not described in 'genpool_algo_t' Fixes: 52fbf1134d47 ("lib/genalloc.c: fix allocation of aligned buffer from non-aligned chunk") Reported-by: Stephen Rothwell Closes: https://lkml.kernel.org/r/20251127130624.563597e3@canb.auug.org.au Acked-by: Randy Dunlap Tested-by: Randy Dunlap Cc: Alexey Skidanov Signed-off-by: Andrew Morton --- include/linux/genalloc.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 0bd581003cd5..60de63e46b33 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -44,6 +44,7 @@ struct gen_pool; * @nr: The number of zeroed bits we're looking for * @data: optional additional data used by the callback * @pool: the pool being allocated from + * @start_addr: start address of memory chunk */ typedef unsigned long (*genpool_algo_t)(unsigned long *map, unsigned long size, -- cgit v1.2.3 From 007f5da43b3d0ecff972e2616062b8da1f862f5e Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Thu, 4 Dec 2025 18:59:55 +0000 Subject: mm/kasan: fix incorrect unpoisoning in vrealloc for KASAN Patch series "kasan: vmalloc: Fixes for the percpu allocator and vrealloc", v3. Patches fix two issues related to KASAN and vmalloc. The first one, a KASAN tag mismatch, possibly resulting in a kernel panic, can be observed on systems with a tag-based KASAN enabled and with multiple NUMA nodes. Initially it was only noticed on x86 [1] but later a similar issue was also reported on arm64 [2]. Specifically the problem is related to how vm_structs interact with pcpu_chunks - both when they are allocated, assigned and when pcpu_chunk addresses are derived. When vm_structs are allocated they are unpoisoned, each with a different random tag, if vmalloc support is enabled along the KASAN mode. Later when first pcpu chunk is allocated it gets its 'base_addr' field set to the first allocated vm_struct. With that it inherits that vm_struct's tag. 
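Roughly, with the software tag living in the pointer's top byte, the
derivation described next looks like this (illustrative values and
simplified names):

  /* chunk->base_addr inherited vm_struct #0's tag, say 0xF1: */
  void *base = chunk->base_addr;          /* 0xf1ff8000.... */

  /* pcpu_chunk_addr()-style math just adds unit offsets, so the result
   * keeps tag 0xF1 even when it lands inside a vm_struct whose shadow
   * memory was unpoisoned with a different tag: */
  void *addr = base + pcpu_unit_offsets[cpu];     /* still tagged 0xF1 */
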
When pcpu_chunk addresses are later derived (by pcpu_chunk_addr(), for example in pcpu_alloc_noprof()) the base_addr field is used and offsets are added to it. If the initial conditions are satisfied then some of the offsets will point into memory allocated with a different vm_struct. So while the lower bits will get accurately derived the tag bits in the top of the pointer won't match the shadow memory contents. The solution (proposed at v2 of the x86 KASAN series [3]) is to unpoison the vm_structs with the same tag when allocating them for the per cpu allocator (in pcpu_get_vm_areas()). The second one reported by syzkaller [4] is related to vrealloc and happens because of random tag generation when unpoisoning memory without allocating new pages. This breaks shadow memory tracking and needs to reuse the existing tag instead of generating a new one. At the same time an inconsistency in used flags is corrected. This patch (of 3): Syzkaller reported a memory out-of-bounds bug [4]. This patch fixes two issues: 1. In vrealloc the KASAN_VMALLOC_VM_ALLOC flag is missing when unpoisoning the extended region. This flag is required to correctly associate the allocation with KASAN's vmalloc tracking. Note: In contrast, vzalloc (via __vmalloc_node_range_noprof) explicitly sets KASAN_VMALLOC_VM_ALLOC and calls kasan_unpoison_vmalloc() with it. vrealloc must behave consistently -- especially when reusing existing vmalloc regions -- to ensure KASAN can track allocations correctly. 2. When vrealloc reuses an existing vmalloc region (without allocating new pages) KASAN generates a new tag, which breaks tag-based memory access tracking. Introduce KASAN_VMALLOC_KEEP_TAG, a new KASAN flag that allows reusing the tag already attached to the pointer, ensuring consistent tag behavior during reallocation. Pass KASAN_VMALLOC_KEEP_TAG and KASAN_VMALLOC_VM_ALLOC to the kasan_unpoison_vmalloc inside vrealloc_node_align_noprof(). 
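From the caller's point of view, the broken sequence looked roughly like
this (sizes illustrative):

  void *p = vmalloc(64);     /* region unpoisoned with tag T1; p carries T1 */
  p = vrealloc(p, 128);      /* still fits in the page already backing p   */

  /* Before this patch, the grown tail [64, 128) was re-unpoisoned with a
   * fresh random tag T2 while p still carries T1, so the write below was
   * falsely flagged as out-of-bounds by tag-based KASAN: */
  memset(p, 0, 128);
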
Link: https://lkml.kernel.org/r/cover.1765978969.git.m.wieczorretman@pm.me Link: https://lkml.kernel.org/r/38dece0a4074c43e48150d1e242f8242c73bf1a5.1764874575.git.m.wieczorretman@pm.me Link: https://lore.kernel.org/all/e7e04692866d02e6d3b32bb43b998e5d17092ba4.1738686764.git.maciej.wieczor-retman@intel.com/ [1] Link: https://lore.kernel.org/all/aMUrW1Znp1GEj7St@MiWiFi-R3L-srv/ [2] Link: https://lore.kernel.org/all/CAPAsAGxDRv_uFeMYu9TwhBVWHCCtkSxoWY4xmFB_vowMbi8raw@mail.gmail.com/ [3] Link: https://syzkaller.appspot.com/bug?extid=997752115a851cb0cf36 [4] Fixes: a0309faf1cb0 ("mm: vmalloc: support more granular vrealloc() sizing") Signed-off-by: Jiayuan Chen Co-developed-by: Maciej Wieczor-Retman Signed-off-by: Maciej Wieczor-Retman Reported-by: syzbot+997752115a851cb0cf36@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/68e243a2.050a0220.1696c6.007d.GAE@google.com/T/ Reviewed-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Danilo Krummrich Cc: Dmitriy Vyukov Cc: Kees Cook Cc: Marco Elver Cc: "Uladzislau Rezki (Sony)" Cc: Vincenzo Frascino Cc: Signed-off-by: Andrew Morton --- include/linux/kasan.h | 1 + mm/kasan/hw_tags.c | 2 +- mm/kasan/shadow.c | 4 +++- mm/vmalloc.c | 4 +++- 4 files changed, 8 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index f335c1d7b61d..df3d8567dde9 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -28,6 +28,7 @@ typedef unsigned int __bitwise kasan_vmalloc_flags_t; #define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u) #define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u) #define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u) +#define KASAN_VMALLOC_KEEP_TAG ((__force kasan_vmalloc_flags_t)0x08u) #define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply exsiting page range */ #define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */ diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index 1c373cc4b3fa..cbef5e450954 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -361,7 +361,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size, return (void *)start; } - tag = kasan_random_tag(); + tag = (flags & KASAN_VMALLOC_KEEP_TAG) ? get_tag(start) : kasan_random_tag(); start = set_tag(start, tag); /* Unpoison and initialize memory up to size. 
*/ diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 29a751a8a08d..32fbdf759ea2 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -631,7 +631,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size, !(flags & KASAN_VMALLOC_PROT_NORMAL)) return (void *)start; - start = set_tag(start, kasan_random_tag()); + if (unlikely(!(flags & KASAN_VMALLOC_KEEP_TAG))) + start = set_tag(start, kasan_random_tag()); + kasan_unpoison(start, size, false); return (void *)start; } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ecbac900c35f..94c0a9262a46 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -4331,7 +4331,9 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align */ if (size <= alloced_size) { kasan_unpoison_vmalloc(p + old_size, size - old_size, - KASAN_VMALLOC_PROT_NORMAL); + KASAN_VMALLOC_PROT_NORMAL | + KASAN_VMALLOC_VM_ALLOC | + KASAN_VMALLOC_KEEP_TAG); /* * No need to zero memory here, as unused memory will have * already been zeroed at initial allocation time or during -- cgit v1.2.3 From 6f13db031e27e88213381039032a9cc061578ea6 Mon Sep 17 00:00:00 2001 From: Maciej Wieczor-Retman Date: Thu, 4 Dec 2025 19:00:04 +0000 Subject: kasan: refactor pcpu kasan vmalloc unpoison A KASAN tag mismatch, possibly causing a kernel panic, can be observed on systems with a tag-based KASAN enabled and with multiple NUMA nodes. It was reported on arm64 and reproduced on x86. It can be explained in the following points: 1. There can be more than one virtual memory chunk. 2. Chunk's base address has a tag. 3. The base address points at the first chunk and thus inherits the tag of the first chunk. 4. The subsequent chunks will be accessed with the tag from the first chunk. 5. Thus, the subsequent chunks need to have their tag set to match that of the first chunk. Refactor code by reusing __kasan_unpoison_vmalloc in a new helper in preparation for the actual fix. 
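A simplified model of the derivation in points 3 and 4, condensed from pcpu_chunk_addr() in mm/percpu.c (treat the details as an approximation):

	static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
					     unsigned int cpu, int page_idx)
	{
		/*
		 * Every per-CPU address is an offset from base_addr, so the
		 * tag embedded in base_addr is what all derived pointers
		 * carry - regardless of which vm_struct the offset lands in.
		 */
		return (unsigned long)chunk->base_addr +
		       pcpu_unit_page_offset(cpu, page_idx);
	}

The helper introduced below centralizes the unpoisoning of all areas of a pcpu allocation in one place, so the actual fix can then give them a single matching tag.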
Link: https://lkml.kernel.org/r/eb61d93b907e262eefcaa130261a08bcb6c5ce51.1764874575.git.m.wieczorretman@pm.me Fixes: 1d96320f8d53 ("kasan, vmalloc: add vmalloc tagging for SW_TAGS") Signed-off-by: Maciej Wieczor-Retman Reviewed-by: Andrey Konovalov Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Danilo Krummrich Cc: Dmitriy Vyukov Cc: Jiayuan Chen Cc: Kees Cook Cc: Marco Elver Cc: "Uladzislau Rezki (Sony)" Cc: Vincenzo Frascino Cc: [6.1+] Signed-off-by: Andrew Morton --- include/linux/kasan.h | 15 +++++++++++++++ mm/kasan/common.c | 17 +++++++++++++++++ mm/vmalloc.c | 4 +--- 3 files changed, 33 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index df3d8567dde9..9c6ac4b62eb9 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -631,6 +631,16 @@ static __always_inline void kasan_poison_vmalloc(const void *start, __kasan_poison_vmalloc(start, size); } +void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, + kasan_vmalloc_flags_t flags); +static __always_inline void +kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, + kasan_vmalloc_flags_t flags) +{ + if (kasan_enabled()) + __kasan_unpoison_vmap_areas(vms, nr_vms, flags); +} + #else /* CONFIG_KASAN_VMALLOC */ static inline void kasan_populate_early_vm_area_shadow(void *start, @@ -655,6 +665,11 @@ static inline void *kasan_unpoison_vmalloc(const void *start, static inline void kasan_poison_vmalloc(const void *start, unsigned long size) { } +static __always_inline void +kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, + kasan_vmalloc_flags_t flags) +{ } + #endif /* CONFIG_KASAN_VMALLOC */ #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 1d27f1bd260b..b2b40c59ce18 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "kasan.h" #include "../slab.h" @@ -575,3 +576,19 @@ bool __kasan_check_byte(const void *address, unsigned long ip) } return true; } + +#ifdef CONFIG_KASAN_VMALLOC +void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, + kasan_vmalloc_flags_t flags) +{ + unsigned long size; + void *addr; + int area; + + for (area = 0 ; area < nr_vms ; area++) { + size = vms[area]->size; + addr = vms[area]->addr; + vms[area]->addr = __kasan_unpoison_vmalloc(addr, size, flags); + } +} +#endif diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 94c0a9262a46..41dd01e8430c 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -5027,9 +5027,7 @@ retry: * With hardware tag-based KASAN, marking is skipped for * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). */ - for (area = 0; area < nr_vms; area++) - vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, - vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); + kasan_unpoison_vmap_areas(vms, nr_vms, KASAN_VMALLOC_PROT_NORMAL); kfree(vas); return vms; -- cgit v1.2.3 From 6ba776b533ca902631fa106b8a90811b3f40b08d Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 14 Dec 2025 12:15:17 -0800 Subject: mm: leafops.h: correct kernel-doc function param. 
names Modify the kernel-doc function parameter names to prevent kernel-doc warnings: Warning: include/linux/leafops.h:135 function parameter 'entry' not described in 'leafent_type' Warning: include/linux/leafops.h:540 function parameter 'pte' not described in 'pte_is_uffd_marker' Link: https://lkml.kernel.org/r/20251214201517.2187051-1-rdunlap@infradead.org Signed-off-by: Randy Dunlap Reviewed-by: Lorenzo Stoakes Cc: Liam Howlett Cc: Michal Hocko Cc: Mike Rapoport Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- include/linux/leafops.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/leafops.h b/include/linux/leafops.h index cfafe7a5e7b1..a9ff94b744f2 100644 --- a/include/linux/leafops.h +++ b/include/linux/leafops.h @@ -133,7 +133,7 @@ static inline bool softleaf_is_none(softleaf_t entry) /** * softleaf_type() - Identify the type of leaf entry. - * @enntry: Leaf entry. + * @entry: Leaf entry. * * Returns: the leaf entry type associated with @entry. */ @@ -534,7 +534,7 @@ static inline bool pte_is_uffd_wp_marker(pte_t pte) /** * pte_is_uffd_marker() - Does this PTE entry encode a userfault-specific marker * leaf entry? - * @entry: Leaf entry. + * @pte: PTE entry. * * It's useful to be able to determine which leaf entries encode UFFD-specific * markers so we can handle these correctly. -- cgit v1.2.3 From fe55ea85939efcbf0e6baa234f0d70acb79e7b58 Mon Sep 17 00:00:00 2001 From: Pingfan Liu Date: Tue, 16 Dec 2025 09:48:51 +0800 Subject: kernel/kexec: change the prototype of kimage_map_segment() The kexec segment index will be required to extract the corresponding information for that segment in kimage_map_segment(). Additionally, kexec_segment already holds the kexec relocation destination address and size. Therefore, the prototype of kimage_map_segment() can be changed. Link: https://lkml.kernel.org/r/20251216014852.8737-1-piliu@redhat.com Fixes: 07d24902977e ("kexec: enable CMA based contiguous allocation") Signed-off-by: Pingfan Liu Acked-by: Baoquan He Cc: Mimi Zohar Cc: Roberto Sassu Cc: Alexander Graf Cc: Steven Chen Cc: Signed-off-by: Andrew Morton --- include/linux/kexec.h | 4 ++-- kernel/kexec_core.c | 9 ++++++--- security/integrity/ima/ima_kexec.c | 4 +--- 3 files changed, 9 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/kexec.h b/include/linux/kexec.h index ff7e231b0485..8a22bc9b8c6c 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -530,7 +530,7 @@ extern bool kexec_file_dbg_print; #define kexec_dprintk(fmt, arg...) 
\ do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0) -extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size); +extern void *kimage_map_segment(struct kimage *image, int idx); extern void kimage_unmap_segment(void *buffer); #else /* !CONFIG_KEXEC_CORE */ struct pt_regs; @@ -540,7 +540,7 @@ static inline void __crash_kexec(struct pt_regs *regs) { } static inline void crash_kexec(struct pt_regs *regs) { } static inline int kexec_should_crash(struct task_struct *p) { return 0; } static inline int kexec_crash_loaded(void) { return 0; } -static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size) +static inline void *kimage_map_segment(struct kimage *image, int idx) { return NULL; } static inline void kimage_unmap_segment(void *buffer) { } #define kexec_in_progress false diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 0f92acdd354d..1a79c5b18d8f 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -953,17 +953,20 @@ int kimage_load_segment(struct kimage *image, int idx) return result; } -void *kimage_map_segment(struct kimage *image, - unsigned long addr, unsigned long size) +void *kimage_map_segment(struct kimage *image, int idx) { + unsigned long addr, size, eaddr; unsigned long src_page_addr, dest_page_addr = 0; - unsigned long eaddr = addr + size; kimage_entry_t *ptr, entry; struct page **src_pages; unsigned int npages; void *vaddr = NULL; int i; + addr = image->segment[idx].mem; + size = image->segment[idx].memsz; + eaddr = addr + size; + /* * Collect the source pages and map them in a contiguous VA range. */ diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c index 7362f68f2d8b..5beb69edd12f 100644 --- a/security/integrity/ima/ima_kexec.c +++ b/security/integrity/ima/ima_kexec.c @@ -250,9 +250,7 @@ void ima_kexec_post_load(struct kimage *image) if (!image->ima_buffer_addr) return; - ima_kexec_buffer = kimage_map_segment(image, - image->ima_buffer_addr, - image->ima_buffer_size); + ima_kexec_buffer = kimage_map_segment(image, image->ima_segment_index); if (!ima_kexec_buffer) { pr_err("Could not map measurements buffer.\n"); return; -- cgit v1.2.3 From e6dbcb7c0e7b508d443a9aa6f77f63a2f83b1ae4 Mon Sep 17 00:00:00 2001 From: Ankit Agrawal Date: Thu, 11 Dec 2025 07:06:01 +0000 Subject: mm: fixup pfnmap memory failure handling to use pgoff The memory failure handling implementation for PFNMAP memory with no struct pages is faulty. The VA of the mapping is determined based on the PFN. It should instead be based on the file mapping offset. At the occurrence of poison, memory_failure_pfn() is triggered on the poisoned PFN. Introduce a callback function that allows mm to translate the PFN to the corresponding file page offset. The kernel module using the registration API must implement the callback function and provide the translation. The translated value is then used to determine the VA information and to send SIGBUS to the usermode process mapped to the poisoned PFN. The callback is also useful for the driver to be notified of the poisoned PFN, which may then track it. Link: https://lkml.kernel.org/r/20251211070603.338701-2-ankita@nvidia.com Fixes: 2ec41967189c ("mm: handle poisoning of pfn without struct pages") Signed-off-by: Ankit Agrawal Suggested-by: Jason Gunthorpe Cc: Kevin Tian Cc: Matthew R.
Ochs Cc: Miaohe Lin Cc: Naoya Horiguchi Cc: Neo Jia Cc: Vikram Sethi Cc: Yishai Hadas Cc: Zhi Wang Signed-off-by: Andrew Morton --- include/linux/memory-failure.h | 2 ++ mm/memory-failure.c | 29 ++++++++++++++++++----------- 2 files changed, 20 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h index bc326503d2d2..7b5e11cf905f 100644 --- a/include/linux/memory-failure.h +++ b/include/linux/memory-failure.h @@ -9,6 +9,8 @@ struct pfn_address_space; struct pfn_address_space { struct interval_tree_node node; struct address_space *mapping; + int (*pfn_to_vma_pgoff)(struct vm_area_struct *vma, + unsigned long pfn, pgoff_t *pgoff); }; int register_pfn_address_space(struct pfn_address_space *pfn_space); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index fbc5a01260c8..c80c2907da33 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -2161,6 +2161,9 @@ int register_pfn_address_space(struct pfn_address_space *pfn_space) { guard(mutex)(&pfn_space_lock); + if (!pfn_space->pfn_to_vma_pgoff) + return -EINVAL; + if (interval_tree_iter_first(&pfn_space_itree, pfn_space->node.start, pfn_space->node.last)) @@ -2183,10 +2186,10 @@ void unregister_pfn_address_space(struct pfn_address_space *pfn_space) } EXPORT_SYMBOL_GPL(unregister_pfn_address_space); -static void add_to_kill_pfn(struct task_struct *tsk, - struct vm_area_struct *vma, - struct list_head *to_kill, - unsigned long pfn) +static void add_to_kill_pgoff(struct task_struct *tsk, + struct vm_area_struct *vma, + struct list_head *to_kill, + pgoff_t pgoff) { struct to_kill *tk; @@ -2197,12 +2200,12 @@ static void add_to_kill_pfn(struct task_struct *tsk, } /* Check for pgoff not backed by struct page */ - tk->addr = vma_address(vma, pfn, 1); + tk->addr = vma_address(vma, pgoff, 1); tk->size_shift = PAGE_SHIFT; if (tk->addr == -EFAULT) pr_info("Unable to find address %lx in %s\n", - pfn, tsk->comm); + pgoff, tsk->comm); get_task_struct(tsk); tk->tsk = tsk; @@ -2212,11 +2215,12 @@ static void add_to_kill_pfn(struct task_struct *tsk, /* * Collect processes when the error hit a PFN not backed by struct page. 
*/ -static void collect_procs_pfn(struct address_space *mapping, +static void collect_procs_pfn(struct pfn_address_space *pfn_space, unsigned long pfn, struct list_head *to_kill) { struct vm_area_struct *vma; struct task_struct *tsk; + struct address_space *mapping = pfn_space->mapping; i_mmap_lock_read(mapping); rcu_read_lock(); @@ -2226,9 +2230,12 @@ static void collect_procs_pfn(struct address_space *mapping, t = task_early_kill(tsk, true); if (!t) continue; - vma_interval_tree_foreach(vma, &mapping->i_mmap, pfn, pfn) { - if (vma->vm_mm == t->mm) - add_to_kill_pfn(t, vma, to_kill, pfn); + vma_interval_tree_foreach(vma, &mapping->i_mmap, 0, ULONG_MAX) { + pgoff_t pgoff; + + if (vma->vm_mm == t->mm && + !pfn_space->pfn_to_vma_pgoff(vma, pfn, &pgoff)) + add_to_kill_pgoff(t, vma, to_kill, pgoff); } } rcu_read_unlock(); @@ -2264,7 +2271,7 @@ static int memory_failure_pfn(unsigned long pfn, int flags) struct pfn_address_space *pfn_space = container_of(node, struct pfn_address_space, node); - collect_procs_pfn(pfn_space->mapping, pfn, &tokill); + collect_procs_pfn(pfn_space, pfn, &tokill); mf_handled = true; } -- cgit v1.2.3 From f183663901f21fe0fba8bd31ae894bc529709ee0 Mon Sep 17 00:00:00 2001 From: Bijan Tabatabai Date: Tue, 16 Dec 2025 14:07:27 -0600 Subject: mm: consider non-anon swap cache folios in folio_expected_ref_count() Currently, folio_expected_ref_count() only adds references for the swap cache if the folio is anonymous. However, according to the comment above the definition of PG_swapcache in enum pageflags, shmem folios can also have PG_swapcache set. This patch makes sure references for the swap cache are added if folio_test_swapcache(folio) is true. This issue was found when trying to hot-unplug memory in a QEMU/KVM virtual machine. When initiating hot-unplug while most of the guest memory is allocated, hot-unplug hangs partway through removal due to migration failures. The following message would be printed several times, and would be printed again about every five seconds: [ 49.641309] migrating pfn b12f25 failed ret:7 [ 49.641310] page: refcount:2 mapcount:0 mapping:0000000033bd8fe2 index:0x7f404d925 pfn:0xb12f25 [ 49.641311] aops:swap_aops [ 49.641313] flags: 0x300000000030508(uptodate|active|owner_priv_1|reclaim|swapbacked|node=0|zone=3) [ 49.641314] raw: 0300000000030508 ffffed312c4bc908 ffffed312c4bc9c8 0000000000000000 [ 49.641315] raw: 00000007f404d925 00000000000c823b 00000002ffffffff 0000000000000000 [ 49.641315] page dumped because: migration failure When debugging this, I found that these migration failures were due to __migrate_folio() returning -EAGAIN for a small set of folios because the expected reference count it calculates via folio_expected_ref_count() is one less than the actual reference count of the folios. Furthermore, none of the affected folios were anonymous, but all had the PG_swapcache flag set, inspiring this patch. After applying this patch, the memory hot-unplug behaves as expected. I tested this on a machine running Ubuntu 24.04 with kernel version 6.8.0-90-generic and 64GB of memory. The guest VM is managed by libvirt and runs Ubuntu 24.04 with kernel version 6.18 (though the head of the mm-unstable branch as of Dec 16, 2025 was also tested and behaves the same) and 48GB of memory. The libvirt XML definition for the VM can be found at [1]. CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE is set in the guest kernel so the hot-pluggable memory is automatically onlined.
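In code terms, the accounting after this change boils down to the following condensed sketch (hypothetical helper name; the hugetlb, PG_private and base-reference handling of the real function is elided):

	static int expected_refs_condensed(const struct folio *folio)
	{
		const unsigned int order = folio_order(folio);
		int ref_count = 0;

		/*
		 * One reference per page from the swap cache - now counted
		 * for any folio with PG_swapcache set, shmem included,
		 * rather than for anonymous folios only.
		 */
		ref_count += folio_test_swapcache(folio) << order;

		if (!folio_test_anon(folio))
			/* One reference per page from the page cache. */
			ref_count += !!folio->mapping << order;

		/* One reference per page table mapping. */
		return ref_count + folio_mapcount(folio);
	}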
Below are the steps to reproduce this behavior: 1) Define and start a virtual machine host$ virsh -c qemu:///system define ./test_vm.xml # test_vm.xml from [1] host$ virsh -c qemu:///system start test_vm 2) Set up swap in the guest guest$ sudo fallocate -l 32G /swapfile guest$ sudo chmod 0600 /swapfile guest$ sudo mkswap /swapfile guest$ sudo swapon /swapfile 3) Use alloc_data [2] to allocate most of the remaining guest memory guest$ ./alloc_data 45 4) In a separate guest terminal, monitor the amount of used memory guest$ watch -n1 free -h 5) When alloc_data has finished allocating, initiate the memory hot-unplug using the provided xml file [3] host$ virsh -c qemu:///system detach-device test_vm ./remove.xml --live After initiating the memory hot-unplug, you should see the amount of available memory in the guest decrease, and the amount of used swap data increase. If everything works as expected, when all of the memory is unplugged, there should be around 8.5-9GB of data in swap. If the unplugging is unsuccessful, the amount of used swap data will settle below that. If that happens, you should be able to see log messages in dmesg similar to the one posted above. Link: https://lkml.kernel.org/r/20251216200727.2360228-1-bijan311@gmail.com Link: https://github.com/BijanT/linux_patch_files/blob/main/test_vm.xml [1] Link: https://github.com/BijanT/linux_patch_files/blob/main/alloc_data.c [2] Link: https://github.com/BijanT/linux_patch_files/blob/main/remove.xml [3] Fixes: 86ebd50224c0 ("mm: add folio_expected_ref_count() for reference count calculation") Signed-off-by: Bijan Tabatabai Acked-by: David Hildenbrand (Red Hat) Acked-by: Zi Yan Reviewed-by: Baolin Wang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Michal Hocko Cc: Mike Rapoport Cc: Shivank Garg Cc: Suren Baghdasaryan Cc: Vlastimil Babka Cc: Kairui Song Cc: Signed-off-by: Andrew Morton --- include/linux/mm.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 15076261d0c2..6f959d8ca4b4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2459,10 +2459,10 @@ static inline int folio_expected_ref_count(const struct folio *folio) if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio))) return 0; - if (folio_test_anon(folio)) { - /* One reference per page from the swapcache. */ - ref_count += folio_test_swapcache(folio) << order; - } else { + /* One reference per page from the swapcache. */ + ref_count += folio_test_swapcache(folio) << order; + + if (!folio_test_anon(folio)) { /* One reference per page from the pagecache. */ ref_count += !!folio->mapping << order; /* One reference from PG_private. */ -- cgit v1.2.3 From dc85a46928c41423ad89869baf05a589e2975575 Mon Sep 17 00:00:00 2001 From: Kevin Tian Date: Thu, 18 Dec 2025 08:16:49 +0000 Subject: vfio/pci: Disable qword access to the PCI ROM bar Commit 2b938e3db335 ("vfio/pci: Enable iowrite64 and ioread64 for vfio pci") enables qword access to the PCI bar resources. However, certain devices (e.g. Intel X710) are observed to have problems upon qword accesses to the rom bar, e.g. triggering PCI AER errors. This is triggered by QEMU, which caches the rom content by simply doing a pread() of the remaining size until it gets the full contents. The other bars would only perform operations at the same access width as their guest drivers. Instead of trying to identify all broken devices, universally disable qword access to the rom bar, i.e.
going back to the old way which worked reliably for years. Reported-by: Farrah Chen Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220740 Fixes: 2b938e3db335 ("vfio/pci: Enable iowrite64 and ioread64 for vfio pci") Cc: stable@vger.kernel.org Signed-off-by: Kevin Tian Tested-by: Farrah Chen Link: https://lore.kernel.org/r/20251218081650.555015-2-kevin.tian@intel.com Signed-off-by: Alex Williamson --- drivers/vfio/pci/nvgrace-gpu/main.c | 4 ++-- drivers/vfio/pci/vfio_pci_rdwr.c | 25 ++++++++++++++++++------- include/linux/vfio_pci_core.h | 10 +++++++++- 3 files changed, 29 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c index 84d142a47ec6..b45a24d00387 100644 --- a/drivers/vfio/pci/nvgrace-gpu/main.c +++ b/drivers/vfio/pci/nvgrace-gpu/main.c @@ -561,7 +561,7 @@ nvgrace_gpu_map_and_read(struct nvgrace_gpu_pci_core_device *nvdev, ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false, nvdev->resmem.ioaddr, buf, offset, mem_count, - 0, 0, false); + 0, 0, false, VFIO_PCI_IO_WIDTH_8); } return ret; @@ -693,7 +693,7 @@ nvgrace_gpu_map_and_write(struct nvgrace_gpu_pci_core_device *nvdev, ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false, nvdev->resmem.ioaddr, (char __user *)buf, pos, mem_count, - 0, 0, true); + 0, 0, true, VFIO_PCI_IO_WIDTH_8); } return ret; diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 6192788c8ba3..25380b7dfe18 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -135,7 +135,8 @@ VFIO_IORDWR(64) ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, void __iomem *io, char __user *buf, loff_t off, size_t count, size_t x_start, - size_t x_end, bool iswrite) + size_t x_end, bool iswrite, + enum vfio_pci_io_width max_width) { ssize_t done = 0; int ret; @@ -150,20 +151,19 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, else fillable = 0; - if (fillable >= 8 && !(off % 8)) { + if (fillable >= 8 && !(off % 8) && max_width >= 8) { ret = vfio_pci_iordwr64(vdev, iswrite, test_mem, io, buf, off, &filled); if (ret) return ret; - } else - if (fillable >= 4 && !(off % 4)) { + } else if (fillable >= 4 && !(off % 4) && max_width >= 4) { ret = vfio_pci_iordwr32(vdev, iswrite, test_mem, io, buf, off, &filled); if (ret) return ret; - } else if (fillable >= 2 && !(off % 2)) { + } else if (fillable >= 2 && !(off % 2) && max_width >= 2) { ret = vfio_pci_iordwr16(vdev, iswrite, test_mem, io, buf, off, &filled); if (ret) @@ -234,6 +234,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, void __iomem *io; struct resource *res = &vdev->pdev->resource[bar]; ssize_t done; + enum vfio_pci_io_width max_width = VFIO_PCI_IO_WIDTH_8; if (pci_resource_start(pdev, bar)) end = pci_resource_len(pdev, bar); @@ -262,6 +263,16 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, if (!io) return -ENOMEM; x_end = end; + + /* + * Certain devices (e.g. Intel X710) don't support qword + * access to the ROM bar. Otherwise PCI AER errors might be + * triggered. + * + * Disable qword access to the ROM bar universally, which + * worked reliably for years before qword access is enabled. 
+ */ + max_width = VFIO_PCI_IO_WIDTH_4; } else { int ret = vfio_pci_core_setup_barmap(vdev, bar); if (ret) { @@ -278,7 +289,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, } done = vfio_pci_core_do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos, - count, x_start, x_end, iswrite); + count, x_start, x_end, iswrite, max_width); if (done >= 0) *ppos += done; @@ -352,7 +363,7 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, * to the memory enable bit in the command register. */ done = vfio_pci_core_do_io_rw(vdev, false, iomem, buf, off, count, - 0, 0, iswrite); + 0, 0, iswrite, VFIO_PCI_IO_WIDTH_8); vga_put(vdev->pdev, rsrc); diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index 706877f998ff..1ac86896875c 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -145,6 +145,13 @@ struct vfio_pci_core_device { struct list_head dmabufs; }; +enum vfio_pci_io_width { + VFIO_PCI_IO_WIDTH_1 = 1, + VFIO_PCI_IO_WIDTH_2 = 2, + VFIO_PCI_IO_WIDTH_4 = 4, + VFIO_PCI_IO_WIDTH_8 = 8, +}; + /* Will be exported for vfio pci drivers usage */ int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev, unsigned int type, unsigned int subtype, @@ -188,7 +195,8 @@ pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev, ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, void __iomem *io, char __user *buf, loff_t off, size_t count, size_t x_start, - size_t x_end, bool iswrite); + size_t x_end, bool iswrite, + enum vfio_pci_io_width max_width); bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev); bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt, loff_t reg_start, size_t reg_cnt, -- cgit v1.2.3 From 3dd57ddec9e3a98387196a3f53b8c036977d8c0f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 16 Dec 2025 08:19:39 +0000 Subject: get rid of bogus __user in struct xattr_args::value The first member of struct xattr_args is declared as __aligned_u64 __user value; which makes no sense whatsoever; __user is a qualifier and what that declaration says is "all struct xattr_args instances have .value _stored_ in user address space, no matter where the rest of the structure happens to be". Something like "int __user *p" stands for "value of p is a pointer to an instance of int that happens to live in user address space"; it says nothing about location of p itself, just as const char *p declares a pointer to unmodifiable char rather than an unmodifiable pointer to char. With xattr_args the intent clearly had been "the 64bit value represents a _pointer_ to object in user address space", but __user has nothing to do with that. All it gets us is a couple of bogus warnings in fs/xattr.c where (userland) instance of xattr_args is copied to local variable of that type (in kernel address space), followed by access to its members. Since we've told sparse that args.value must somehow be located in userland memory, we get warned that looking at that 64bit unsigned integer (in a variable already on kernel stack) is not allowed. Note that sparse has no way to express "this integer shall never be cast into a pointer to be dereferenced directly" and I don't see any way to assign a sane semantics to that. In any case, __user is not it. 
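The distinction is easier to see side by side. A short sketch (the struct and helper below are illustrative, not the actual xattr code):

	int __user *p;	/* p points to an int living in user memory */
	int *__user q;	/* q itself lives in user memory - rarely what is meant */

	/*
	 * A fixed-width field that merely *represents* a user pointer stays
	 * a plain integer and is converted at the access site:
	 */
	struct args {
		__aligned_u64 value;	/* no __user here */
		__u32 size;
	};

	static int peek_value(const struct args *a)
	{
		void __user *uptr = u64_to_user_ptr(a->value);
		char c;

		return copy_from_user(&c, uptr, 1) ? -EFAULT : 0;
	}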
Signed-off-by: Al Viro Link: https://patch.msgid.link/20251216081939.GQ1712166@ZenIV Signed-off-by: Christian Brauner --- include/uapi/linux/xattr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h index c7c85bb504ba..2e5aef48fa7e 100644 --- a/include/uapi/linux/xattr.h +++ b/include/uapi/linux/xattr.h @@ -23,7 +23,7 @@ #define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */ struct xattr_args { - __aligned_u64 __user value; + __aligned_u64 value; __u32 size; __u32 flags; }; -- cgit v1.2.3 From 5623eb1ed035f01dfa620366a82b667545b10c82 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 31 Dec 2025 08:12:46 -0700 Subject: io_uring/tctx: add separate lock for list of tctx's in ctx ctx->tctx_list holds the tasks using this ring, and it's currently protected by the normal ctx->uring_lock. However, this can cause a circular locking issue, as reported by syzbot, where cancelations off exec end up needing to remove an entry from this list: ====================================================== WARNING: possible circular locking dependency detected syzkaller #0 Tainted: G L ------------------------------------------------------ syz.0.9999/12287 is trying to acquire lock: ffff88805851c0a8 (&ctx->uring_lock){+.+.}-{4:4}, at: io_uring_del_tctx_node+0xf0/0x2c0 io_uring/tctx.c:179 but task is already holding lock: ffff88802db5a2e0 (&sig->cred_guard_mutex){+.+.}-{4:4}, at: prepare_bprm_creds fs/exec.c:1360 [inline] ffff88802db5a2e0 (&sig->cred_guard_mutex){+.+.}-{4:4}, at: bprm_execve+0xb9/0x1400 fs/exec.c:1733 which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #2 (&sig->cred_guard_mutex){+.+.}-{4:4}: __mutex_lock_common kernel/locking/mutex.c:614 [inline] __mutex_lock+0x187/0x1350 kernel/locking/mutex.c:776 proc_pid_attr_write+0x547/0x630 fs/proc/base.c:2837 vfs_write+0x27e/0xb30 fs/read_write.c:684 ksys_write+0x145/0x250 fs/read_write.c:738 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline] do_syscall_64+0xec/0xf80 arch/x86/entry/syscall_64.c:94 entry_SYSCALL_64_after_hwframe+0x77/0x7f -> #1 (sb_writers#3){.+.+}-{0:0}: percpu_down_read_internal include/linux/percpu-rwsem.h:53 [inline] percpu_down_read_freezable include/linux/percpu-rwsem.h:83 [inline] __sb_start_write include/linux/fs/super.h:19 [inline] sb_start_write+0x4d/0x1c0 include/linux/fs/super.h:125 mnt_want_write+0x41/0x90 fs/namespace.c:499 open_last_lookups fs/namei.c:4529 [inline] path_openat+0xadd/0x3dd0 fs/namei.c:4784 do_filp_open+0x1fa/0x410 fs/namei.c:4814 io_openat2+0x3e0/0x5c0 io_uring/openclose.c:143 __io_issue_sqe+0x181/0x4b0 io_uring/io_uring.c:1792 io_issue_sqe+0x165/0x1060 io_uring/io_uring.c:1815 io_queue_sqe io_uring/io_uring.c:2042 [inline] io_submit_sqe io_uring/io_uring.c:2320 [inline] io_submit_sqes+0xbf4/0x2140 io_uring/io_uring.c:2434 __do_sys_io_uring_enter io_uring/io_uring.c:3280 [inline] __se_sys_io_uring_enter+0x2e0/0x2b60 io_uring/io_uring.c:3219 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline] do_syscall_64+0xec/0xf80 arch/x86/entry/syscall_64.c:94 entry_SYSCALL_64_after_hwframe+0x77/0x7f -> #0 (&ctx->uring_lock){+.+.}-{4:4}: check_prev_add kernel/locking/lockdep.c:3165 [inline] check_prevs_add kernel/locking/lockdep.c:3284 [inline] validate_chain kernel/locking/lockdep.c:3908 [inline] __lock_acquire+0x15a6/0x2cf0 kernel/locking/lockdep.c:5237 lock_acquire+0x107/0x340 kernel/locking/lockdep.c:5868 __mutex_lock_common kernel/locking/mutex.c:614
[inline] __mutex_lock+0x187/0x1350 kernel/locking/mutex.c:776 io_uring_del_tctx_node+0xf0/0x2c0 io_uring/tctx.c:179 io_uring_clean_tctx+0xd4/0x1a0 io_uring/tctx.c:195 io_uring_cancel_generic+0x6ca/0x7d0 io_uring/cancel.c:646 io_uring_task_cancel include/linux/io_uring.h:24 [inline] begin_new_exec+0x10ed/0x2440 fs/exec.c:1131 load_elf_binary+0x9f8/0x2d70 fs/binfmt_elf.c:1010 search_binary_handler fs/exec.c:1669 [inline] exec_binprm fs/exec.c:1701 [inline] bprm_execve+0x92e/0x1400 fs/exec.c:1753 do_execveat_common+0x510/0x6a0 fs/exec.c:1859 do_execve fs/exec.c:1933 [inline] __do_sys_execve fs/exec.c:2009 [inline] __se_sys_execve fs/exec.c:2004 [inline] __x64_sys_execve+0x94/0xb0 fs/exec.c:2004 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline] do_syscall_64+0xec/0xf80 arch/x86/entry/syscall_64.c:94 entry_SYSCALL_64_after_hwframe+0x77/0x7f other info that might help us debug this: Chain exists of: &ctx->uring_lock --> sb_writers#3 --> &sig->cred_guard_mutex Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&sig->cred_guard_mutex); lock(sb_writers#3); lock(&sig->cred_guard_mutex); lock(&ctx->uring_lock); *** DEADLOCK *** 1 lock held by syz.0.9999/12287: #0: ffff88802db5a2e0 (&sig->cred_guard_mutex){+.+.}-{4:4}, at: prepare_bprm_creds fs/exec.c:1360 [inline] #0: ffff88802db5a2e0 (&sig->cred_guard_mutex){+.+.}-{4:4}, at: bprm_execve+0xb9/0x1400 fs/exec.c:1733 stack backtrace: CPU: 0 UID: 0 PID: 12287 Comm: syz.0.9999 Tainted: G L syzkaller #0 PREEMPT(full) Tainted: [L]=SOFTLOCKUP Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/25/2025 Call Trace: dump_stack_lvl+0xe8/0x150 lib/dump_stack.c:120 print_circular_bug+0x2e2/0x300 kernel/locking/lockdep.c:2043 check_noncircular+0x12e/0x150 kernel/locking/lockdep.c:2175 check_prev_add kernel/locking/lockdep.c:3165 [inline] check_prevs_add kernel/locking/lockdep.c:3284 [inline] validate_chain kernel/locking/lockdep.c:3908 [inline] __lock_acquire+0x15a6/0x2cf0 kernel/locking/lockdep.c:5237 lock_acquire+0x107/0x340 kernel/locking/lockdep.c:5868 __mutex_lock_common kernel/locking/mutex.c:614 [inline] __mutex_lock+0x187/0x1350 kernel/locking/mutex.c:776 io_uring_del_tctx_node+0xf0/0x2c0 io_uring/tctx.c:179 io_uring_clean_tctx+0xd4/0x1a0 io_uring/tctx.c:195 io_uring_cancel_generic+0x6ca/0x7d0 io_uring/cancel.c:646 io_uring_task_cancel include/linux/io_uring.h:24 [inline] begin_new_exec+0x10ed/0x2440 fs/exec.c:1131 load_elf_binary+0x9f8/0x2d70 fs/binfmt_elf.c:1010 search_binary_handler fs/exec.c:1669 [inline] exec_binprm fs/exec.c:1701 [inline] bprm_execve+0x92e/0x1400 fs/exec.c:1753 do_execveat_common+0x510/0x6a0 fs/exec.c:1859 do_execve fs/exec.c:1933 [inline] __do_sys_execve fs/exec.c:2009 [inline] __se_sys_execve fs/exec.c:2004 [inline] __x64_sys_execve+0x94/0xb0 fs/exec.c:2004 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline] do_syscall_64+0xec/0xf80 arch/x86/entry/syscall_64.c:94 entry_SYSCALL_64_after_hwframe+0x77/0x7f RIP: 0033:0x7ff3a8b8f749 Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 a8 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007ff3a9a97038 EFLAGS: 00000246 ORIG_RAX: 000000000000003b RAX: ffffffffffffffda RBX: 00007ff3a8de5fa0 RCX: 00007ff3a8b8f749 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000200000000400 RBP: 00007ff3a8c13f91 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 R13: 00007ff3a8de6038 R14: 
00007ff3a8de5fa0 R15: 00007ff3a8f0fa28 Add a separate lock just for the tctx_list, tctx_lock. This can nest under ->uring_lock, where necessary, and be used separately for list manipulation. For the cancelation off exec side, this removes the need to grab ->uring_lock, hence fixing the circular locking dependency. Reported-by: syzbot+b0e3b77ffaa8a4067ce5@syzkaller.appspotmail.com Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 8 +++++++- io_uring/cancel.c | 5 +++++ io_uring/io_uring.c | 5 +++++ io_uring/register.c | 2 ++ io_uring/tctx.c | 8 ++++---- 5 files changed, 23 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index e1adb0d20a0a..a3e8ddc9b380 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -424,11 +424,17 @@ struct io_ring_ctx { struct user_struct *user; struct mm_struct *mm_account; + /* + * List of tctx nodes for this ctx, protected by tctx_lock. For + * cancelation purposes, nests under uring_lock. + */ + struct list_head tctx_list; + struct mutex tctx_lock; + /* ctx exit and cancelation */ struct llist_head fallback_llist; struct delayed_work fallback_work; struct work_struct exit_work; - struct list_head tctx_list; struct completion ref_comp; /* io-wq management, e.g. thread count */ diff --git a/io_uring/cancel.c b/io_uring/cancel.c index ca12ac10c0ae..07b8d852218b 100644 --- a/io_uring/cancel.c +++ b/io_uring/cancel.c @@ -184,7 +184,9 @@ static int __io_async_cancel(struct io_cancel_data *cd, } while (1); /* slow path, try all io-wq's */ + __set_current_state(TASK_RUNNING); io_ring_submit_lock(ctx, issue_flags); + mutex_lock(&ctx->tctx_lock); ret = -ENOENT; list_for_each_entry(node, &ctx->tctx_list, ctx_node) { ret = io_async_cancel_one(node->task->io_uring, cd); @@ -194,6 +196,7 @@ static int __io_async_cancel(struct io_cancel_data *cd, nr++; } } + mutex_unlock(&ctx->tctx_lock); io_ring_submit_unlock(ctx, issue_flags); return all ? 
nr : ret; } @@ -484,6 +487,7 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) bool ret = false; mutex_lock(&ctx->uring_lock); + mutex_lock(&ctx->tctx_lock); list_for_each_entry(node, &ctx->tctx_list, ctx_node) { struct io_uring_task *tctx = node->task->io_uring; @@ -496,6 +500,7 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); ret |= (cret != IO_WQ_CANCEL_NOTFOUND); } + mutex_unlock(&ctx->tctx_lock); mutex_unlock(&ctx->uring_lock); return ret; diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 709943fedaf4..87a87396e940 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -340,6 +340,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_LIST_HEAD(&ctx->ltimeout_list); init_llist_head(&ctx->work_llist); INIT_LIST_HEAD(&ctx->tctx_list); + mutex_init(&ctx->tctx_lock); ctx->submit_state.free_list.next = NULL; INIT_HLIST_HEAD(&ctx->waitid_list); xa_init_flags(&ctx->zcrx_ctxs, XA_FLAGS_ALLOC); @@ -3045,6 +3046,7 @@ static __cold void io_ring_exit_work(struct work_struct *work) exit.ctx = ctx; mutex_lock(&ctx->uring_lock); + mutex_lock(&ctx->tctx_lock); while (!list_empty(&ctx->tctx_list)) { WARN_ON_ONCE(time_after(jiffies, timeout)); @@ -3056,6 +3058,7 @@ static __cold void io_ring_exit_work(struct work_struct *work) if (WARN_ON_ONCE(ret)) continue; + mutex_unlock(&ctx->tctx_lock); mutex_unlock(&ctx->uring_lock); /* * See comment above for @@ -3064,7 +3067,9 @@ static __cold void io_ring_exit_work(struct work_struct *work) */ wait_for_completion_interruptible(&exit.completion); mutex_lock(&ctx->uring_lock); + mutex_lock(&ctx->tctx_lock); } + mutex_unlock(&ctx->tctx_lock); mutex_unlock(&ctx->uring_lock); spin_lock(&ctx->completion_lock); spin_unlock(&ctx->completion_lock); diff --git a/io_uring/register.c b/io_uring/register.c index 62d39b3ff317..3d3822ff3fd9 100644 --- a/io_uring/register.c +++ b/io_uring/register.c @@ -320,6 +320,7 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, return 0; /* now propagate the restriction to all registered users */ + mutex_lock(&ctx->tctx_lock); list_for_each_entry(node, &ctx->tctx_list, ctx_node) { tctx = node->task->io_uring; if (WARN_ON_ONCE(!tctx->io_wq)) @@ -330,6 +331,7 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, /* ignore errors, it always returns zero anyway */ (void)io_wq_max_workers(tctx->io_wq, new_count); } + mutex_unlock(&ctx->tctx_lock); return 0; err: if (sqd) { diff --git a/io_uring/tctx.c b/io_uring/tctx.c index 5b66755579c0..6d6f44215ec8 100644 --- a/io_uring/tctx.c +++ b/io_uring/tctx.c @@ -136,9 +136,9 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx) return ret; } - mutex_lock(&ctx->uring_lock); + mutex_lock(&ctx->tctx_lock); list_add(&node->ctx_node, &ctx->tctx_list); - mutex_unlock(&ctx->uring_lock); + mutex_unlock(&ctx->tctx_lock); } return 0; } @@ -176,9 +176,9 @@ __cold void io_uring_del_tctx_node(unsigned long index) WARN_ON_ONCE(current != node->task); WARN_ON_ONCE(list_empty(&node->ctx_node)); - mutex_lock(&node->ctx->uring_lock); + mutex_lock(&node->ctx->tctx_lock); list_del(&node->ctx_node); - mutex_unlock(&node->ctx->uring_lock); + mutex_unlock(&node->ctx->tctx_lock); if (tctx->last == node->ctx) tctx->last = NULL; -- cgit v1.2.3 From c6c209ceb87f64a6ceebe61761951dcbbf4a0baa Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 9 Dec 2025 19:28:49 -0500 Subject: NFSD: Remove NFSERR_EAGAIN I haven't found 
an NFSERR_EAGAIN in RFCs 1094, 1813, 7530, or 8881. None of these RFCs have an NFS status code that matches the numeric value "11". Based on the meaning of the EAGAIN errno, I presume the use of this status in NFSD means NFS4ERR_DELAY. So replace the one usage of nfserr_eagain, and remove it from NFSD's NFS status conversion tables. As far as I can tell, NFSERR_EAGAIN has existed since the pre-git era, but was not actually used by any code until commit f4e44b393389 ("NFSD: delay unmount source's export after inter-server copy completed."), at which time it became possible for NFSD to return a status code of 11 (which is not valid NFS protocol). Fixes: f4e44b393389 ("NFSD: delay unmount source's export after inter-server copy completed.") Cc: stable@vger.kernel.org Reviewed-by: NeilBrown Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever --- fs/nfs_common/common.c | 1 - fs/nfsd/nfs4proc.c | 2 +- fs/nfsd/nfsd.h | 1 - include/trace/misc/nfs.h | 2 -- include/uapi/linux/nfs.h | 1 - 5 files changed, 1 insertion(+), 6 deletions(-) (limited to 'include') diff --git a/fs/nfs_common/common.c b/fs/nfs_common/common.c index af09aed09fd2..0778743ae2c2 100644 --- a/fs/nfs_common/common.c +++ b/fs/nfs_common/common.c @@ -17,7 +17,6 @@ static const struct { { NFSERR_NOENT, -ENOENT }, { NFSERR_IO, -EIO }, { NFSERR_NXIO, -ENXIO }, -/* { NFSERR_EAGAIN, -EAGAIN }, */ { NFSERR_ACCES, -EACCES }, { NFSERR_EXIST, -EEXIST }, { NFSERR_XDEV, -EXDEV }, diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 7f7e6bb23a90..42a6b914c0fe 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1506,7 +1506,7 @@ try_again: (schedule_timeout(20*HZ) == 0)) { finish_wait(&nn->nfsd_ssc_waitq, &wait); kfree(work); - return nfserr_eagain; + return nfserr_jukebox; } finish_wait(&nn->nfsd_ssc_waitq, &wait); goto try_again; diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 50be785f1d2c..b0283213a8f5 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -233,7 +233,6 @@ void nfsd_lockd_shutdown(void); #define nfserr_noent cpu_to_be32(NFSERR_NOENT) #define nfserr_io cpu_to_be32(NFSERR_IO) #define nfserr_nxio cpu_to_be32(NFSERR_NXIO) -#define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN) #define nfserr_acces cpu_to_be32(NFSERR_ACCES) #define nfserr_exist cpu_to_be32(NFSERR_EXIST) #define nfserr_xdev cpu_to_be32(NFSERR_XDEV) diff --git a/include/trace/misc/nfs.h b/include/trace/misc/nfs.h index c82233e950ac..a394b4d38e18 100644 --- a/include/trace/misc/nfs.h +++ b/include/trace/misc/nfs.h @@ -16,7 +16,6 @@ TRACE_DEFINE_ENUM(NFSERR_PERM); TRACE_DEFINE_ENUM(NFSERR_NOENT); TRACE_DEFINE_ENUM(NFSERR_IO); TRACE_DEFINE_ENUM(NFSERR_NXIO); -TRACE_DEFINE_ENUM(NFSERR_EAGAIN); TRACE_DEFINE_ENUM(NFSERR_ACCES); TRACE_DEFINE_ENUM(NFSERR_EXIST); TRACE_DEFINE_ENUM(NFSERR_XDEV); @@ -52,7 +51,6 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX); { NFSERR_NXIO, "NXIO" }, \ { ECHILD, "CHILD" }, \ { ETIMEDOUT, "TIMEDOUT" }, \ - { NFSERR_EAGAIN, "AGAIN" }, \ { NFSERR_ACCES, "ACCES" }, \ { NFSERR_EXIST, "EXIST" }, \ { NFSERR_XDEV, "XDEV" }, \ diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h index f356f2ba3814..71c7196d3281 100644 --- a/include/uapi/linux/nfs.h +++ b/include/uapi/linux/nfs.h @@ -49,7 +49,6 @@ NFSERR_NOENT = 2, /* v2 v3 v4 */ NFSERR_IO = 5, /* v2 v3 v4 */ NFSERR_NXIO = 6, /* v2 v3 v4 */ - NFSERR_EAGAIN = 11, /* v2 v3 */ NFSERR_ACCES = 13, /* v2 v3 v4 */ NFSERR_EXIST = 17, /* v2 v3 v4 */ NFSERR_XDEV = 18, /* v3 v4 */ -- cgit v1.2.3 From c1ef9a6cabb34dbc09e31417b0c0a672fe0de13a Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Fri, 5 Dec
2025 11:51:48 +0200 Subject: Revert "drm/atomic-helper: Re-order bridge chain pre-enable and post-disable" This reverts commit c9b1150a68d9362a0827609fc0dc1664c0d8bfe1. Changing the enable/disable sequence has caused regressions on multiple platforms: R-Car, MCDE, Rockchip. A series (see link below) was sent to fix these, but it was decided that it's better to revert the original patch and change the enable/disable sequence only in the tidss driver. Reverting this commit breaks tidss's DSI and OLDI outputs, which will be fixed in the following commits. Signed-off-by: Tomi Valkeinen Link: https://lore.kernel.org/all/20251202-mcde-drm-regression-thirdfix-v6-0-f1bffd4ec0fa%40kernel.org/ Fixes: c9b1150a68d9 ("drm/atomic-helper: Re-order bridge chain pre-enable and post-disable") Cc: stable@vger.kernel.org # v6.17+ Reviewed-by: Aradhya Bhatia Reviewed-by: Maxime Ripard Reviewed-by: Linus Walleij Tested-by: Linus Walleij Signed-off-by: Linus Walleij Link: https://patch.msgid.link/20251205-drm-seq-fix-v1-1-fda68fa1b3de@ideasonboard.com --- drivers/gpu/drm/drm_atomic_helper.c | 8 +- include/drm/drm_bridge.h | 249 ++++++++++-------------------------- 2 files changed, 70 insertions(+), 187 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 10adac9397cf..ef97f37560b2 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1341,9 +1341,9 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *state) { encoder_bridge_disable(dev, state); - crtc_disable(dev, state); - encoder_bridge_post_disable(dev, state); + + crtc_disable(dev, state); } /** @@ -1682,10 +1682,10 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state) void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, struct drm_atomic_state *state) { - encoder_bridge_pre_enable(dev, state); - crtc_enable(dev, state); + encoder_bridge_pre_enable(dev, state); + encoder_bridge_enable(dev, state); drm_atomic_helper_commit_writebacks(dev, state); diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 0ff7ab4aa868..dbafe136833f 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -176,33 +176,17 @@ struct drm_bridge_funcs { /** * @disable: * - * The @disable callback should disable the bridge. + * This callback should disable the bridge. It is called right before + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @disable vfunc. If the preceding element is a &drm_encoder + * it's called right before the &drm_encoder_helper_funcs.disable, + * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms + * hook. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is still running when this callback is called. 
* - * - * If the preceding element is a &drm_bridge, then this is called before - * that bridge is disabled via one of: - * - * - &drm_bridge_funcs.disable - * - &drm_bridge_funcs.atomic_disable - * - * If the preceding element of the bridge is a display controller, then - * this callback is called before the encoder is disabled via one of: - * - * - &drm_encoder_helper_funcs.atomic_disable - * - &drm_encoder_helper_funcs.prepare - * - &drm_encoder_helper_funcs.disable - * - &drm_encoder_helper_funcs.dpms - * - * and the CRTC is disabled via one of: - * - * - &drm_crtc_helper_funcs.prepare - * - &drm_crtc_helper_funcs.atomic_disable - * - &drm_crtc_helper_funcs.disable - * - &drm_crtc_helper_funcs.dpms. - * * The @disable callback is optional. * * NOTE: @@ -215,34 +199,17 @@ struct drm_bridge_funcs { /** * @post_disable: * - * The bridge must assume that the display pipe (i.e. clocks and timing - * signals) feeding this bridge is no longer running when the - * @post_disable is called. + * This callback should disable the bridge. It is called right after the + * preceding element in the display pipe is disabled. If the preceding + * element is a bridge this means it's called after that bridge's + * @post_disable function. If the preceding element is a &drm_encoder + * it's called right after the encoder's + * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare + * or &drm_encoder_helper_funcs.dpms hook. * - * This callback should perform all the actions required by the hardware - * after it has stopped receiving signals from the preceding element. - * - * If the preceding element is a &drm_bridge, then this is called after - * that bridge is post-disabled (unless marked otherwise by the - * @pre_enable_prev_first flag) via one of: - * - * - &drm_bridge_funcs.post_disable - * - &drm_bridge_funcs.atomic_post_disable - * - * If the preceding element of the bridge is a display controller, then - * this callback is called after the encoder is disabled via one of: - * - * - &drm_encoder_helper_funcs.atomic_disable - * - &drm_encoder_helper_funcs.prepare - * - &drm_encoder_helper_funcs.disable - * - &drm_encoder_helper_funcs.dpms - * - * and the CRTC is disabled via one of: - * - * - &drm_crtc_helper_funcs.prepare - * - &drm_crtc_helper_funcs.atomic_disable - * - &drm_crtc_helper_funcs.disable - * - &drm_crtc_helper_funcs.dpms + * The bridge must assume that the display pipe (i.e. clocks and timing + * signals) feeding it is no longer running when this callback is + * called. * * The @post_disable callback is optional. * @@ -285,30 +252,18 @@ struct drm_bridge_funcs { /** * @pre_enable: * - * The display pipe (i.e. clocks and timing signals) feeding this bridge - * will not yet be running when the @pre_enable is called. - * - * This callback should perform all the necessary actions to prepare the - * bridge to accept signals from the preceding element. - * - * If the preceding element is a &drm_bridge, then this is called before - * that bridge is pre-enabled (unless marked otherwise by - * @pre_enable_prev_first flag) via one of: - * - * - &drm_bridge_funcs.pre_enable - * - &drm_bridge_funcs.atomic_pre_enable - * - * If the preceding element of the bridge is a display controller, then - * this callback is called before the CRTC is enabled via one of: - * - * - &drm_crtc_helper_funcs.atomic_enable - * - &drm_crtc_helper_funcs.commit - * - * and the encoder is enabled via one of: + * This callback should enable the bridge. 
It is called right before + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @pre_enable function. If the preceding element is a + * &drm_encoder it's called right before the encoder's + * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or + * &drm_encoder_helper_funcs.dpms hook. * - * - &drm_encoder_helper_funcs.atomic_enable - * - &drm_encoder_helper_funcs.enable - * - &drm_encoder_helper_funcs.commit + * The display pipe (i.e. clocks and timing signals) feeding this bridge + * will not yet be running when this callback is called. The bridge must + * not enable the display link feeding the next bridge in the chain (if + * there is one) when this callback is called. * * The @pre_enable callback is optional. * @@ -322,31 +277,19 @@ struct drm_bridge_funcs { /** * @enable: * - * The @enable callback should enable the bridge. + * This callback should enable the bridge. It is called right after + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called after that + * bridge's @enable function. If the preceding element is a + * &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or + * &drm_encoder_helper_funcs.dpms hook. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is running when this callback is called. This * callback must enable the display link feeding the next bridge in the * chain if there is one. * - * If the preceding element is a &drm_bridge, then this is called after - * that bridge is enabled via one of: - * - * - &drm_bridge_funcs.enable - * - &drm_bridge_funcs.atomic_enable - * - * If the preceding element of the bridge is a display controller, then - * this callback is called after the CRTC is enabled via one of: - * - * - &drm_crtc_helper_funcs.atomic_enable - * - &drm_crtc_helper_funcs.commit - * - * and the encoder is enabled via one of: - * - * - &drm_encoder_helper_funcs.atomic_enable - * - &drm_encoder_helper_funcs.enable - * - drm_encoder_helper_funcs.commit - * * The @enable callback is optional. * * NOTE: @@ -359,30 +302,17 @@ struct drm_bridge_funcs { /** * @atomic_pre_enable: * - * The display pipe (i.e. clocks and timing signals) feeding this bridge - * will not yet be running when the @atomic_pre_enable is called. - * - * This callback should perform all the necessary actions to prepare the - * bridge to accept signals from the preceding element. - * - * If the preceding element is a &drm_bridge, then this is called before - * that bridge is pre-enabled (unless marked otherwise by - * @pre_enable_prev_first flag) via one of: - * - * - &drm_bridge_funcs.pre_enable - * - &drm_bridge_funcs.atomic_pre_enable + * This callback should enable the bridge. It is called right before + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @atomic_pre_enable or @pre_enable function. If the preceding + * element is a &drm_encoder it's called right before the encoder's + * &drm_encoder_helper_funcs.atomic_enable hook. 
* - * If the preceding element of the bridge is a display controller, then - * this callback is called before the CRTC is enabled via one of: - * - * - &drm_crtc_helper_funcs.atomic_enable - * - &drm_crtc_helper_funcs.commit - * - * and the encoder is enabled via one of: - * - * - &drm_encoder_helper_funcs.atomic_enable - * - &drm_encoder_helper_funcs.enable - * - &drm_encoder_helper_funcs.commit + * The display pipe (i.e. clocks and timing signals) feeding this bridge + * will not yet be running when this callback is called. The bridge must + * not enable the display link feeding the next bridge in the chain (if + * there is one) when this callback is called. * * The @atomic_pre_enable callback is optional. */ @@ -392,31 +322,18 @@ struct drm_bridge_funcs { /** * @atomic_enable: * - * The @atomic_enable callback should enable the bridge. + * This callback should enable the bridge. It is called right after + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called after that + * bridge's @atomic_enable or @enable function. If the preceding element + * is a &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.atomic_enable hook. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is running when this callback is called. This * callback must enable the display link feeding the next bridge in the * chain if there is one. * - * If the preceding element is a &drm_bridge, then this is called after - * that bridge is enabled via one of: - * - * - &drm_bridge_funcs.enable - * - &drm_bridge_funcs.atomic_enable - * - * If the preceding element of the bridge is a display controller, then - * this callback is called after the CRTC is enabled via one of: - * - * - &drm_crtc_helper_funcs.atomic_enable - * - &drm_crtc_helper_funcs.commit - * - * and the encoder is enabled via one of: - * - * - &drm_encoder_helper_funcs.atomic_enable - * - &drm_encoder_helper_funcs.enable - * - drm_encoder_helper_funcs.commit - * * The @atomic_enable callback is optional. */ void (*atomic_enable)(struct drm_bridge *bridge, @@ -424,32 +341,16 @@ struct drm_bridge_funcs { /** * @atomic_disable: * - * The @atomic_disable callback should disable the bridge. + * This callback should disable the bridge. It is called right before + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @atomic_disable or @disable vfunc. If the preceding element + * is a &drm_encoder it's called right before the + * &drm_encoder_helper_funcs.atomic_disable hook. * * The bridge can assume that the display pipe (i.e. clocks and timing * signals) feeding it is still running when this callback is called. * - * If the preceding element is a &drm_bridge, then this is called before - * that bridge is disabled via one of: - * - * - &drm_bridge_funcs.disable - * - &drm_bridge_funcs.atomic_disable - * - * If the preceding element of the bridge is a display controller, then - * this callback is called before the encoder is disabled via one of: - * - * - &drm_encoder_helper_funcs.atomic_disable - * - &drm_encoder_helper_funcs.prepare - * - &drm_encoder_helper_funcs.disable - * - &drm_encoder_helper_funcs.dpms - * - * and the CRTC is disabled via one of: - * - * - &drm_crtc_helper_funcs.prepare - * - &drm_crtc_helper_funcs.atomic_disable - * - &drm_crtc_helper_funcs.disable - * - &drm_crtc_helper_funcs.dpms. 
- * * The @atomic_disable callback is optional. */ void (*atomic_disable)(struct drm_bridge *bridge, @@ -458,34 +359,16 @@ struct drm_bridge_funcs { /** * @atomic_post_disable: * - * The bridge must assume that the display pipe (i.e. clocks and timing - * signals) feeding this bridge is no longer running when the - * @atomic_post_disable is called. - * - * This callback should perform all the actions required by the hardware - * after it has stopped receiving signals from the preceding element. + * This callback should disable the bridge. It is called right after the + * preceding element in the display pipe is disabled. If the preceding + * element is a bridge this means it's called after that bridge's + * @atomic_post_disable or @post_disable function. If the preceding + * element is a &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.atomic_disable hook. * - * If the preceding element is a &drm_bridge, then this is called after - * that bridge is post-disabled (unless marked otherwise by the - * @pre_enable_prev_first flag) via one of: - * - * - &drm_bridge_funcs.post_disable - * - &drm_bridge_funcs.atomic_post_disable - * - * If the preceding element of the bridge is a display controller, then - * this callback is called after the encoder is disabled via one of: - * - * - &drm_encoder_helper_funcs.atomic_disable - * - &drm_encoder_helper_funcs.prepare - * - &drm_encoder_helper_funcs.disable - * - &drm_encoder_helper_funcs.dpms - * - * and the CRTC is disabled via one of: - * - * - &drm_crtc_helper_funcs.prepare - * - &drm_crtc_helper_funcs.atomic_disable - * - &drm_crtc_helper_funcs.disable - * - &drm_crtc_helper_funcs.dpms + * The bridge must assume that the display pipe (i.e. clocks and timing + * signals) feeding it is no longer running when this callback is + * called. * * The @atomic_post_disable callback is optional. */ -- cgit v1.2.3 From d1c7dc57ff2400b141e6582a8d2dc5170108cf81 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 5 Dec 2025 11:51:50 +0200 Subject: drm/atomic-helper: Export and namespace some functions Export and namespace those not prefixed with drm_* so it becomes possible to write custom commit tail functions in individual drivers using the helper infrastructure. 
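As an illustration of what this enables, here is a minimal sketch of a driver-specific commit tail built from the helpers exported by this patch. foo_commit_tail() is a hypothetical name; the drm_atomic_helper_commit_*() calls are the functions exported below, ordered the same way the stock disable_outputs() and drm_atomic_helper_commit_modeset_enables() paths invoke them, and legacy-state bookkeeping and timestamping-constant updates are omitted for brevity:

	/* Hypothetical driver commit tail built from the exported helpers. */
	static void foo_commit_tail(struct drm_atomic_state *state)
	{
		struct drm_device *dev = state->dev;

		/* Tear-down half: bridges and encoders first, then the CRTCs. */
		drm_atomic_helper_commit_encoder_bridge_disable(dev, state);
		drm_atomic_helper_commit_encoder_bridge_post_disable(dev, state);
		drm_atomic_helper_commit_crtc_disable(dev, state);

		/* Program the new mode into the CRTCs and the bridge chain. */
		drm_atomic_helper_commit_crtc_set_mode(dev, state);

		drm_atomic_helper_commit_planes(dev, state, 0);

		/* Bring-up half: CRTCs, then the bridge chain, then writebacks. */
		drm_atomic_helper_commit_crtc_enable(dev, state);
		drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, state);
		drm_atomic_helper_commit_encoder_bridge_enable(dev, state);
		drm_atomic_helper_commit_writebacks(dev, state);

		drm_atomic_helper_commit_hw_done(state);
		drm_atomic_helper_wait_for_vblanks(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);
	}

A driver would point &drm_mode_config_helper_funcs.atomic_commit_tail at such a function when its hardware needs these steps reordered or interleaved with driver-specific work.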
Tested-by: Marek Vasut Reviewed-by: Maxime Ripard Signed-off-by: Tomi Valkeinen Cc: stable@vger.kernel.org # v6.17+ Fixes: c9b1150a68d9 ("drm/atomic-helper: Re-order bridge chain pre-enable and post-disable") Reviewed-by: Aradhya Bhatia Reviewed-by: Linus Walleij Tested-by: Linus Walleij Signed-off-by: Linus Walleij Link: https://patch.msgid.link/20251205-drm-seq-fix-v1-3-fda68fa1b3de@ideasonboard.com --- drivers/gpu/drm/drm_atomic_helper.c | 122 +++++++++++++++++++++++++++++------- include/drm/drm_atomic_helper.h | 22 +++++++ 2 files changed, 121 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index ef97f37560b2..5beea645035f 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1162,8 +1162,18 @@ crtc_needs_disable(struct drm_crtc_state *old_state, new_state->self_refresh_active; } -static void -encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state) +/** + * drm_atomic_helper_commit_encoder_bridge_disable - disable bridges and encoder + * @dev: DRM device + * @state: the driver state object + * + * Loops over all connectors in the current state and if the CRTC needs + * it, disables the bridge chain all the way, then disables the encoder + * afterwards. + */ +void +drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev, + struct drm_atomic_state *state) { struct drm_connector *connector; struct drm_connector_state *old_conn_state, *new_conn_state; @@ -1229,9 +1239,18 @@ encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state) } } } +EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_disable); -static void -crtc_disable(struct drm_device *dev, struct drm_atomic_state *state) +/** + * drm_atomic_helper_commit_crtc_disable - disable CRTCs + * @dev: DRM device + * @state: the driver state object + * + * Loops over all CRTCs in the current state and if the CRTC needs + * it, disables it. + */ +void +drm_atomic_helper_commit_crtc_disable(struct drm_device *dev, struct drm_atomic_state *state) { struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; @@ -1282,9 +1301,18 @@ crtc_disable(struct drm_device *dev, struct drm_atomic_state *state) drm_crtc_vblank_put(crtc); } } +EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_disable); -static void -encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state) +/** + * drm_atomic_helper_commit_encoder_bridge_post_disable - post-disable encoder bridges + * @dev: DRM device + * @state: the driver state object + * + * Loops over all connectors in the current state and if the CRTC needs + * it, post-disables all encoder bridges.
+ */ +void +drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state) { struct drm_connector *connector; struct drm_connector_state *old_conn_state, *new_conn_state; @@ -1335,15 +1363,16 @@ encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *sta drm_bridge_put(bridge); } } +EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_post_disable); static void disable_outputs(struct drm_device *dev, struct drm_atomic_state *state) { - encoder_bridge_disable(dev, state); + drm_atomic_helper_commit_encoder_bridge_disable(dev, state); - encoder_bridge_post_disable(dev, state); + drm_atomic_helper_commit_encoder_bridge_post_disable(dev, state); - crtc_disable(dev, state); + drm_atomic_helper_commit_crtc_disable(dev, state); } /** @@ -1446,8 +1475,17 @@ void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *stat } EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants); -static void -crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state) +/** + * drm_atomic_helper_commit_crtc_set_mode - set the new mode + * @dev: DRM device + * @state: the driver state object + * + * Loops over all CRTCs in the current state and, if the mode has + * changed, changes the mode of the CRTC, then calls down the bridge + * chain and changes the mode in all bridges as well. + */ +void +drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state) { struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; @@ -1508,6 +1546,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state) drm_bridge_put(bridge); } } +EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_set_mode); /** * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs @@ -1531,12 +1570,21 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, drm_atomic_helper_update_legacy_modeset_state(dev, state); drm_atomic_helper_calc_timestamping_constants(state); - crtc_set_mode(dev, state); + drm_atomic_helper_commit_crtc_set_mode(dev, state); } EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables); -static void drm_atomic_helper_commit_writebacks(struct drm_device *dev, - struct drm_atomic_state *state) +/** + * drm_atomic_helper_commit_writebacks - issue writebacks + * @dev: DRM device + * @state: atomic state object being committed + * + * This loops over the connectors, checks if the new state requires + * a writeback job to be issued and in that case issues an atomic + * commit on each connector. + */ +void drm_atomic_helper_commit_writebacks(struct drm_device *dev, + struct drm_atomic_state *state) { struct drm_connector *connector; struct drm_connector_state *new_conn_state; @@ -1555,9 +1603,18 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev, } } } +EXPORT_SYMBOL(drm_atomic_helper_commit_writebacks); -static void -encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state) +/** + * drm_atomic_helper_commit_encoder_bridge_pre_enable - pre-enable bridges + * @dev: DRM device + * @state: atomic state object being committed + * + * This loops over the connectors and if the CRTC needs it, pre-enables + * the entire bridge chain.
+ */ +void +drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state) { struct drm_connector *connector; struct drm_connector_state *new_conn_state; @@ -1588,9 +1645,18 @@ encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state drm_bridge_put(bridge); } } +EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_pre_enable); -static void -crtc_enable(struct drm_device *dev, struct drm_atomic_state *state) +/** + * drm_atomic_helper_commit_crtc_enable - enables the CRTCs + * @dev: DRM device + * @state: atomic state object being committed + * + * This loops over CRTCs in the new state, and if the CRTC needs + * it, enables it. + */ +void +drm_atomic_helper_commit_crtc_enable(struct drm_device *dev, struct drm_atomic_state *state) { struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; @@ -1619,9 +1685,18 @@ crtc_enable(struct drm_device *dev, struct drm_atomic_state *state) } } } +EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_enable); -static void -encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state) +/** + * drm_atomic_helper_commit_encoder_bridge_enable - enables the bridges + * @dev: DRM device + * @state: atomic state object being committed + * + * This loops over all connectors in the new state, and if the CRTC needs + * it, enables the entire bridge chain. + */ +void +drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state) { struct drm_connector *connector; struct drm_connector_state *new_conn_state; @@ -1664,6 +1739,7 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state) drm_bridge_put(bridge); } } +EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_enable); /** * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs @@ -1682,11 +1758,11 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state) void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, struct drm_atomic_state *state) { - crtc_enable(dev, state); + drm_atomic_helper_commit_crtc_enable(dev, state); - encoder_bridge_pre_enable(dev, state); + drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, state); - encoder_bridge_enable(dev, state); + drm_atomic_helper_commit_encoder_bridge_enable(dev, state); drm_atomic_helper_commit_writebacks(dev, state); } diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 53382fe93537..e154ee4f0696 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -60,6 +60,12 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, int drm_atomic_helper_check_planes(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state); +void drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_commit_crtc_disable(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev, + struct drm_atomic_state *state); int drm_atomic_helper_check(struct drm_device *dev, struct drm_atomic_state *state); void drm_atomic_helper_commit_tail(struct drm_atomic_state *state); @@ -89,8 +95,24 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state); +void
drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev, + struct drm_atomic_state *state); + void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, struct drm_atomic_state *state); + +void drm_atomic_helper_commit_writebacks(struct drm_device *dev, + struct drm_atomic_state *state); + +void drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev, + struct drm_atomic_state *state); + +void drm_atomic_helper_commit_crtc_enable(struct drm_device *dev, + struct drm_atomic_state *state); + +void drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev, + struct drm_atomic_state *state); + void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, struct drm_atomic_state *old_state); -- cgit v1.2.3 From 02d1e1a3f9239cdb3ecf2c6d365fb959d1bf39df Mon Sep 17 00:00:00 2001 From: Di Zhu Date: Wed, 24 Dec 2025 09:22:24 +0800 Subject: netdev: preserve NETIF_F_ALL_FOR_ALL across TSO updates Directly incrementing the TSO features incurs a side effect: it will also directly clear the flags in NETIF_F_ALL_FOR_ALL on the master device, which can cause issues such as the inability to enable the nocache copy feature on the bonding driver. The fix is to include NETIF_F_ALL_FOR_ALL in the update mask, thereby preventing it from being cleared. Fixes: b0ce3508b25e ("bonding: allow TSO being set on bonding master") Signed-off-by: Di Zhu Link: https://patch.msgid.link/20251224012224.56185-1-zhud@hygon.cn Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5870a9e514a5..d99b0fbc1942 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -5323,7 +5323,8 @@ netdev_features_t netdev_increment_features(netdev_features_t all, static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, netdev_features_t mask) { - return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); + return netdev_increment_features(features, NETIF_F_ALL_TSO | + NETIF_F_ALL_FOR_ALL, mask); } int __netdev_update_features(struct net_device *dev); -- cgit v1.2.3 From 1ca8677d9f3491e51395b0e6b9a2b7a75089dc6f Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Mon, 5 Jan 2026 11:17:05 +0100 Subject: ACPI: PCI: IRQ: Fix INTx GSIs signedness In ACPI, Global System Interrupts (GSIs) are described using a 32-bit value. ACPI/PCI legacy interrupts (INTx) parsing code treats GSIs as 'int', which poses issues if the GSI interrupt value is a 32-bit value with the MSB set (as required in some interrupt configurations, e.g. ARM64 GICv5 systems) because acpi_pci_link_allocate_irq() treats a negative gsi return value as a failed GSI allocation (and acpi_irq_get_penalty() would trigger an out-of-bounds array dereference if the 'irq' param is a negative value). Fix ACPI/PCI legacy INTx parsing by converting variables representing GSIs from 'int' to 'u32', bringing the code in line with the ACPI specification and fixing the current parsing issue. Signed-off-by: Lorenzo Pieralisi Reviewed-by: Bjorn Helgaas Link: https://patch.msgid.link/20260105101705.36703-1-lpieralisi@kernel.org Signed-off-by: Rafael J.
Wysocki --- drivers/acpi/pci_irq.c | 19 +++++++++++-------- drivers/acpi/pci_link.c | 39 +++++++++++++++++++++++++-------------- drivers/xen/acpi.c | 13 +++++++------ include/acpi/acpi_drivers.h | 2 +- 4 files changed, 44 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index ad81aa03fe2f..c416942ff3e2 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -188,7 +188,7 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev, * the IRQ value, which is hardwired to specific interrupt inputs on * the interrupt controller. */ - pr_debug("%04x:%02x:%02x[%c] -> %s[%d]\n", + pr_debug("%04x:%02x:%02x[%c] -> %s[%u]\n", entry->id.segment, entry->id.bus, entry->id.device, pin_name(entry->pin), prt->source, entry->index); @@ -384,7 +384,7 @@ static inline bool acpi_pci_irq_valid(struct pci_dev *dev, u8 pin) int acpi_pci_irq_enable(struct pci_dev *dev) { struct acpi_prt_entry *entry; - int gsi; + u32 gsi; u8 pin; int triggering = ACPI_LEVEL_SENSITIVE; /* @@ -422,18 +422,21 @@ int acpi_pci_irq_enable(struct pci_dev *dev) return 0; } + rc = -ENODEV; + if (entry) { if (entry->link) - gsi = acpi_pci_link_allocate_irq(entry->link, + rc = acpi_pci_link_allocate_irq(entry->link, entry->index, &triggering, &polarity, - &link); - else + &link, &gsi); + else { gsi = entry->index; - } else - gsi = -1; + rc = 0; + } + } - if (gsi < 0) { + if (rc < 0) { /* * No IRQ known to the ACPI subsystem - maybe the BIOS / * driver reported one, then use it. Exit in any case. diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index bed7dc85612e..b91b039a3d20 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -448,7 +448,7 @@ static int acpi_isa_irq_penalty[ACPI_MAX_ISA_IRQS] = { /* >IRQ15 */ }; -static int acpi_irq_pci_sharing_penalty(int irq) +static int acpi_irq_pci_sharing_penalty(u32 irq) { struct acpi_pci_link *link; int penalty = 0; @@ -474,7 +474,7 @@ static int acpi_irq_pci_sharing_penalty(int irq) return penalty; } -static int acpi_irq_get_penalty(int irq) +static int acpi_irq_get_penalty(u32 irq) { int penalty = 0; @@ -528,7 +528,7 @@ static int acpi_irq_balance = -1; /* 0: static, 1: balance */ static int acpi_pci_link_allocate(struct acpi_pci_link *link) { acpi_handle handle = link->device->handle; - int irq; + u32 irq; int i; if (link->irq.initialized) { @@ -598,44 +598,53 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) return 0; } -/* - * acpi_pci_link_allocate_irq - * success: return IRQ >= 0 - * failure: return -1 +/** + * acpi_pci_link_allocate_irq(): Retrieve a link device GSI + * + * @handle: Handle for the link device + * @index: GSI index + * @triggering: pointer to store the GSI trigger + * @polarity: pointer to store GSI polarity + * @name: pointer to store link device name + * @gsi: pointer to store GSI number + * + * Returns: + * 0 on success with @triggering, @polarity, @name, @gsi initialized. 
+ * -ENODEV on failure */ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, - int *polarity, char **name) + int *polarity, char **name, u32 *gsi) { struct acpi_device *device = acpi_fetch_acpi_dev(handle); struct acpi_pci_link *link; if (!device) { acpi_handle_err(handle, "Invalid link device\n"); - return -1; + return -ENODEV; } link = acpi_driver_data(device); if (!link) { acpi_handle_err(handle, "Invalid link context\n"); - return -1; + return -ENODEV; } /* TBD: Support multiple index (IRQ) entries per Link Device */ if (index) { acpi_handle_err(handle, "Invalid index %d\n", index); - return -1; + return -ENODEV; } mutex_lock(&acpi_link_lock); if (acpi_pci_link_allocate(link)) { mutex_unlock(&acpi_link_lock); - return -1; + return -ENODEV; } if (!link->irq.active) { mutex_unlock(&acpi_link_lock); acpi_handle_err(handle, "Link active IRQ is 0!\n"); - return -1; + return -ENODEV; } link->refcnt++; mutex_unlock(&acpi_link_lock); @@ -647,7 +656,9 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, if (name) *name = acpi_device_bid(link->device); acpi_handle_debug(handle, "Link is referenced\n"); - return link->irq.active; + *gsi = link->irq.active; + + return 0; } /* diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c index d2ee605c5ca1..eab28cfe9939 100644 --- a/drivers/xen/acpi.c +++ b/drivers/xen/acpi.c @@ -89,11 +89,11 @@ int xen_acpi_get_gsi_info(struct pci_dev *dev, int *trigger_out, int *polarity_out) { - int gsi; + u32 gsi; u8 pin; struct acpi_prt_entry *entry; int trigger = ACPI_LEVEL_SENSITIVE; - int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ? + int ret, polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ? ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW; if (!dev || !gsi_out || !trigger_out || !polarity_out) @@ -105,17 +105,18 @@ int xen_acpi_get_gsi_info(struct pci_dev *dev, entry = acpi_pci_irq_lookup(dev, pin); if (entry) { + ret = 0; if (entry->link) - gsi = acpi_pci_link_allocate_irq(entry->link, + ret = acpi_pci_link_allocate_irq(entry->link, entry->index, &trigger, &polarity, - NULL); + NULL, &gsi); else gsi = entry->index; } else - gsi = -1; + ret = -ENODEV; - if (gsi < 0) + if (ret < 0) return -EINVAL; *gsi_out = gsi; diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index b14d165632e7..402b97d12138 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h @@ -51,7 +51,7 @@ int acpi_irq_penalty_init(void); int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, - int *polarity, char **name); + int *polarity, char **name, u32 *gsi); int acpi_pci_link_free_irq(acpi_handle handle); /* ACPI PCI Device Binding */ -- cgit v1.2.3 From a7fc8c641cab855824c45e5e8877e40fd528b5df Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 2 Jan 2026 12:29:38 +0100 Subject: net: airoha: Fix npu rx DMA definitions Fix typos in npu rx DMA descriptor definitions. 
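To make the impact of the typo fix concrete, here is a hypothetical decode of one RX descriptor CTRL word using the corrected masks from the diff below. FIELD_GET() and GENMASK()/BIT() are the standard <linux/bitfield.h> and <linux/bits.h> helpers; npu_rx_desc_decode() is an illustrative name, not a function in the driver:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define NPU_RX_DMA_DESC_LAST_MASK	BIT(27)
	#define NPU_RX_DMA_DESC_LEN_MASK	GENMASK(26, 14)
	#define NPU_RX_DMA_DESC_CUR_LEN_MASK	GENMASK(13, 1)
	#define NPU_RX_DMA_DESC_DONE_MASK	BIT(0)

	/* Illustrative helper: extract the packet fields from one CTRL word. */
	static void npu_rx_desc_decode(u32 ctrl, bool *last, u32 *len,
				       u32 *cur_len, bool *done)
	{
		*last    = FIELD_GET(NPU_RX_DMA_DESC_LAST_MASK, ctrl);
		*len     = FIELD_GET(NPU_RX_DMA_DESC_LEN_MASK, ctrl);
		*cur_len = FIELD_GET(NPU_RX_DMA_DESC_CUR_LEN_MASK, ctrl);
		*done    = FIELD_GET(NPU_RX_DMA_DESC_DONE_MASK, ctrl);
	}

With the old definitions every field above the DONE bit was read from the wrong bit range, so the decoded lengths and the LAST flag would not have matched what the hardware wrote.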
Fixes: b3ef7bdec66fb ("net: airoha: Add airoha_offload.h header") Signed-off-by: Lorenzo Bianconi Link: https://patch.msgid.link/20260102-airoha-npu-dma-rx-def-fixes-v1-1-205fc6bf7d94@kernel.org Signed-off-by: Jakub Kicinski --- include/linux/soc/airoha/airoha_offload.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/soc/airoha/airoha_offload.h b/include/linux/soc/airoha/airoha_offload.h index 4d23cbb7d407..ab64ecdf39a0 100644 --- a/include/linux/soc/airoha/airoha_offload.h +++ b/include/linux/soc/airoha/airoha_offload.h @@ -71,12 +71,12 @@ static inline void airoha_ppe_dev_check_skb(struct airoha_ppe_dev *dev, #define NPU_RX1_DESC_NUM 512 /* CTRL */ -#define NPU_RX_DMA_DESC_LAST_MASK BIT(29) -#define NPU_RX_DMA_DESC_LEN_MASK GENMASK(28, 15) -#define NPU_RX_DMA_DESC_CUR_LEN_MASK GENMASK(14, 1) +#define NPU_RX_DMA_DESC_LAST_MASK BIT(27) +#define NPU_RX_DMA_DESC_LEN_MASK GENMASK(26, 14) +#define NPU_RX_DMA_DESC_CUR_LEN_MASK GENMASK(13, 1) #define NPU_RX_DMA_DESC_DONE_MASK BIT(0) /* INFO */ -#define NPU_RX_DMA_PKT_COUNT_MASK GENMASK(31, 28) +#define NPU_RX_DMA_PKT_COUNT_MASK GENMASK(31, 29) #define NPU_RX_DMA_PKT_ID_MASK GENMASK(28, 26) #define NPU_RX_DMA_SRC_PORT_MASK GENMASK(25, 21) #define NPU_RX_DMA_CRSN_MASK GENMASK(20, 16) -- cgit v1.2.3 From 5232196ff49be08350b27f1ba8e1fad87afc9cdf Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 2 Jan 2026 14:31:48 -0500 Subject: ftrace: Make ftrace_graph_ent depth field signed The code has integrity checks to make sure that depth never goes below zero. But the depth field has recently been converted to unsigned long from "int" (for alignment reasons). As unsigned long can never be less than zero, the integrity checks no longer work. Convert depth to long from unsigned long to allow the integrity checks to work again. Cc: stable@vger.kernel.org Cc: Mathieu Desnoyers Cc: pengdonglin Link: https://patch.msgid.link/20260102143148.251c2e16@gandalf.local.home Reported-by: Dan Carpenter Closes: https://lore.kernel.org/all/aS6kGi0maWBl-MjZ@stanley.mountain/ Fixes: f83ac7544fbf7 ("function_graph: Enable funcgraph-args and funcgraph-retaddr to work simultaneously") Signed-off-by: Steven Rostedt (Google) Acked-by: Masami Hiramatsu (Google) --- include/linux/ftrace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 770f0dc993cc..a3a8989e3268 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -1167,7 +1167,7 @@ static inline void ftrace_init(void) { } */ struct ftrace_graph_ent { unsigned long func; /* Current function */ - unsigned long depth; + long depth; /* signed to check for less than zero */ } __packed; /* -- cgit v1.2.3 From 5f1ef0dfcb5b7f4a91a9b0e0ba533efd9f7e2cdb Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 5 Jan 2026 20:31:41 -0500 Subject: tracing: Add recursion protection in kernel stack trace recording A bug was reported about an infinite recursion caused by tracing the rcu events with the kernel stack trace trigger enabled. The stack trace code called back into RCU which then called the stack trace again. Expand the ftrace recursion protection to add a set of bits to protect events from recursion. Each bit represents the context that the event is in (normal, softirq, interrupt and NMI). Have the stack trace code use the interrupt context to protect against recursion. Note, the bug showed an issue in both the RCU code as well as the tracing stacktrace code. 
This only handles the tracing stack trace side of the bug. The RCU fix will be handled separately. Link: https://lore.kernel.org/all/20260102122807.7025fc87@gandalf.local.home/ Cc: stable@vger.kernel.org Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Cc: Joel Fernandes Cc: "Paul E. McKenney" Cc: Boqun Feng Link: https://patch.msgid.link/20260105203141.515cd49f@gandalf.local.home Reported-by: Yao Kai Tested-by: Yao Kai Fixes: 5f5fa7ea89dc ("rcu: Don't use negative nesting depth in __rcu_read_unlock()") Signed-off-by: Steven Rostedt (Google) --- include/linux/trace_recursion.h | 9 +++++++++ kernel/trace/trace.c | 6 ++++++ 2 files changed, 15 insertions(+) (limited to 'include') diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h index ae04054a1be3..e6ca052b2a85 100644 --- a/include/linux/trace_recursion.h +++ b/include/linux/trace_recursion.h @@ -34,6 +34,13 @@ enum { TRACE_INTERNAL_SIRQ_BIT, TRACE_INTERNAL_TRANSITION_BIT, + /* Internal event use recursion bits */ + TRACE_INTERNAL_EVENT_BIT, + TRACE_INTERNAL_EVENT_NMI_BIT, + TRACE_INTERNAL_EVENT_IRQ_BIT, + TRACE_INTERNAL_EVENT_SIRQ_BIT, + TRACE_INTERNAL_EVENT_TRANSITION_BIT, + TRACE_BRANCH_BIT, /* * Abuse of the trace_recursion. @@ -58,6 +65,8 @@ enum { #define TRACE_LIST_START TRACE_INTERNAL_BIT +#define TRACE_EVENT_START TRACE_INTERNAL_EVENT_BIT + #define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1) /* diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 6f2148df14d9..aef9058537d5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3012,6 +3012,11 @@ static void __ftrace_trace_stack(struct trace_array *tr, struct ftrace_stack *fstack; struct stack_entry *entry; int stackidx; + int bit; + + bit = trace_test_and_set_recursion(_THIS_IP_, _RET_IP_, TRACE_EVENT_START); + if (bit < 0) + return; /* * Add one, for this function and the call to save_stack_trace() @@ -3080,6 +3085,7 @@ static void __ftrace_trace_stack(struct trace_array *tr, /* Again, don't let gcc optimize things here */ barrier(); __this_cpu_dec(ftrace_stack_reserve); + trace_clear_recursion(bit); } static inline void ftrace_trace_stack(struct trace_array *tr, -- cgit v1.2.3 From 2e4b28c48f88ce9e263957b1d944cf5349952f88 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 11 Jan 2026 16:53:48 +0100 Subject: treewide: Update email address In a vain attempt to consolidate the email zoo switch everything to the kernel.org account. 
Signed-off-by: Thomas Gleixner Signed-off-by: Linus Torvalds --- .mailmap | 1 + CREDITS | 2 +- .../ABI/stable/sysfs-kernel-time-aux-clocks | 2 +- Documentation/arch/x86/topology.rst | 2 +- Documentation/core-api/cpu_hotplug.rst | 2 +- Documentation/core-api/genericirq.rst | 2 +- Documentation/core-api/librs.rst | 2 +- .../devicetree/bindings/timer/mrvl,mmp-timer.yaml | 2 +- Documentation/driver-api/mtdnand.rst | 4 +-- .../translations/zh_CN/core-api/cpu_hotplug.rst | 2 +- .../translations/zh_CN/core-api/genericirq.rst | 2 +- MAINTAINERS | 36 +++++++++++----------- arch/sh/kernel/perf_event.c | 2 +- arch/sparc/kernel/perf_event.c | 2 +- arch/x86/events/core.c | 2 +- arch/x86/events/perf_event.h | 2 +- arch/x86/kernel/x86_init.c | 2 +- arch/x86/mm/pti.c | 2 +- drivers/mtd/nand/ecc-sw-hamming.c | 2 +- drivers/mtd/nand/raw/diskonchip.c | 2 +- drivers/mtd/nand/raw/nand_base.c | 4 +-- drivers/mtd/nand/raw/nand_bbt.c | 2 +- drivers/mtd/nand/raw/nand_ids.c | 2 +- drivers/mtd/nand/raw/nand_jedec.c | 2 +- drivers/mtd/nand/raw/nand_legacy.c | 2 +- drivers/mtd/nand/raw/nand_onfi.c | 2 +- drivers/mtd/nand/raw/ndfc.c | 2 +- drivers/uio/uio.c | 2 +- fs/jffs2/wbuf.c | 4 +-- include/linux/hrtimer.h | 2 +- include/linux/ktime.h | 2 +- include/linux/mtd/jedec.h | 2 +- include/linux/mtd/nand-ecc-sw-hamming.h | 2 +- include/linux/mtd/ndfc.h | 2 +- include/linux/mtd/onfi.h | 2 +- include/linux/mtd/platnand.h | 2 +- include/linux/mtd/rawnand.h | 2 +- include/linux/perf_event.h | 2 +- include/linux/plist.h | 2 +- include/linux/rslib.h | 2 +- include/linux/uio_driver.h | 2 +- include/uapi/linux/perf_event.h | 2 +- kernel/events/callchain.c | 2 +- kernel/events/core.c | 2 +- kernel/events/ring_buffer.c | 2 +- kernel/irq/debugfs.c | 2 +- kernel/irq/matrix.c | 2 +- kernel/sched/fair.c | 2 +- kernel/sched/pelt.c | 2 +- kernel/time/clockevents.c | 2 +- kernel/time/hrtimer.c | 2 +- kernel/time/tick-broadcast.c | 2 +- kernel/time/tick-common.c | 2 +- kernel/time/tick-oneshot.c | 2 +- kernel/time/tick-sched.c | 2 +- lib/debugobjects.c | 2 +- lib/plist.c | 2 +- lib/reed_solomon/decode_rs.c | 2 +- lib/reed_solomon/encode_rs.c | 2 +- lib/reed_solomon/reed_solomon.c | 2 +- scripts/spdxcheck.py | 2 +- tools/include/uapi/linux/perf_event.h | 2 +- tools/perf/builtin-list.c | 2 +- 63 files changed, 83 insertions(+), 82 deletions(-) (limited to 'include') diff --git a/.mailmap b/.mailmap index b23e0853d636..fa018b5bd533 100644 --- a/.mailmap +++ b/.mailmap @@ -801,6 +801,7 @@ Tanzir Hasan Tejun Heo Tomeu Vizoso Thomas Graf +Thomas Gleixner Thomas Körper Thomas Pedersen Thorsten Blum diff --git a/CREDITS b/CREDITS index ca75f110edb6..383809bc4b7a 100644 --- a/CREDITS +++ b/CREDITS @@ -1398,7 +1398,7 @@ D: SRM environment driver (for Alpha systems) P: 1024D/8399E1BB 250D 3BCF 7127 0D8C A444 A961 1DBD 5E75 8399 E1BB N: Thomas Gleixner -E: tglx@linutronix.de +E: tglx@kernel.org D: NAND flash hardware support, JFFS2 on NAND flash N: Jérôme Glisse diff --git a/Documentation/ABI/stable/sysfs-kernel-time-aux-clocks b/Documentation/ABI/stable/sysfs-kernel-time-aux-clocks index 825508f42af6..e1a894c8dd1b 100644 --- a/Documentation/ABI/stable/sysfs-kernel-time-aux-clocks +++ b/Documentation/ABI/stable/sysfs-kernel-time-aux-clocks @@ -1,5 +1,5 @@ What: /sys/kernel/time/aux_clocks//enable Date: May 2025 -Contact: Thomas Gleixner +Contact: Thomas Gleixner Description: Controls the enablement of auxiliary clock timekeepers. 
diff --git a/Documentation/arch/x86/topology.rst b/Documentation/arch/x86/topology.rst index 86bec8ac2c4d..f779a68875c5 100644 --- a/Documentation/arch/x86/topology.rst +++ b/Documentation/arch/x86/topology.rst @@ -17,7 +17,7 @@ with the generic one and look at this one in parallel for the x86 specifics. Needless to say, code should use the generic functions - this file is *only* here to *document* the inner workings of x86 topology. -Started by Thomas Gleixner and Borislav Petkov . +Started by Thomas Gleixner and Borislav Petkov . The main aim of the topology facilities is to present adequate interfaces to code which needs to know/query/use the structure of the running system wrt diff --git a/Documentation/core-api/cpu_hotplug.rst b/Documentation/core-api/cpu_hotplug.rst index e1b0eeabbb5e..9b4afca9fd09 100644 --- a/Documentation/core-api/cpu_hotplug.rst +++ b/Documentation/core-api/cpu_hotplug.rst @@ -8,7 +8,7 @@ CPU hotplug in the Kernel Srivatsa Vaddagiri , Ashok Raj , Joel Schopp , - Thomas Gleixner + Thomas Gleixner Introduction ============ diff --git a/Documentation/core-api/genericirq.rst b/Documentation/core-api/genericirq.rst index 582bde9bf5a9..b16d751d4b98 100644 --- a/Documentation/core-api/genericirq.rst +++ b/Documentation/core-api/genericirq.rst @@ -439,6 +439,6 @@ Credits The following people have contributed to this document: -1. Thomas Gleixner tglx@linutronix.de +1. Thomas Gleixner tglx@kernel.org 2. Ingo Molnar mingo@elte.hu diff --git a/Documentation/core-api/librs.rst b/Documentation/core-api/librs.rst index 6010f5bc5bf9..0d88893dbc03 100644 --- a/Documentation/core-api/librs.rst +++ b/Documentation/core-api/librs.rst @@ -209,4 +209,4 @@ testing. Thanks a lot. The following people have contributed to this document: -Thomas Gleixner\ tglx@linutronix.de +Thomas Gleixner\ tglx@kernel.org diff --git a/Documentation/devicetree/bindings/timer/mrvl,mmp-timer.yaml b/Documentation/devicetree/bindings/timer/mrvl,mmp-timer.yaml index fe6bc4173789..0643cfcc6bc7 100644 --- a/Documentation/devicetree/bindings/timer/mrvl,mmp-timer.yaml +++ b/Documentation/devicetree/bindings/timer/mrvl,mmp-timer.yaml @@ -8,7 +8,7 @@ title: Marvell MMP Timer maintainers: - Daniel Lezcano - - Thomas Gleixner + - Thomas Gleixner - Rob Herring properties: diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst index ce77e024c4f1..adf03983f1ba 100644 --- a/Documentation/driver-api/mtdnand.rst +++ b/Documentation/driver-api/mtdnand.rst @@ -996,11 +996,11 @@ The following people have contributed to the NAND driver: 2. David Woodhouse\ dwmw2@infradead.org -3. Thomas Gleixner\ tglx@linutronix.de +3. Thomas Gleixner\ tglx@kernel.org A lot of users have provided bugfixes, improvements and helping hands for testing. Thanks a lot. The following people have contributed to this document: -1. Thomas Gleixner\ tglx@linutronix.de +1. 
Thomas Gleixner\ tglx@kernel.org diff --git a/Documentation/translations/zh_CN/core-api/cpu_hotplug.rst b/Documentation/translations/zh_CN/core-api/cpu_hotplug.rst index bc0d7ea6d834..3447fbf0e695 100644 --- a/Documentation/translations/zh_CN/core-api/cpu_hotplug.rst +++ b/Documentation/translations/zh_CN/core-api/cpu_hotplug.rst @@ -22,7 +22,7 @@ Srivatsa Vaddagiri , Ashok Raj , Joel Schopp , - Thomas Gleixner + Thomas Gleixner 简介 ==== diff --git a/Documentation/translations/zh_CN/core-api/genericirq.rst b/Documentation/translations/zh_CN/core-api/genericirq.rst index 05ccb954c18d..d2c1bd94bb97 100644 --- a/Documentation/translations/zh_CN/core-api/genericirq.rst +++ b/Documentation/translations/zh_CN/core-api/genericirq.rst @@ -404,6 +404,6 @@ kernel/irq/chip.c 感谢以下人士对本文档作出的贡献: -1. Thomas Gleixner tglx@linutronix.de +1. Thomas Gleixner tglx@kernel.org 2. Ingo Molnar mingo@elte.hu diff --git a/MAINTAINERS b/MAINTAINERS index 32b5e41d9849..ee036e0a3ef6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6175,7 +6175,7 @@ F: include/linux/clk.h CLOCKSOURCE, CLOCKEVENT DRIVERS M: Daniel Lezcano -M: Thomas Gleixner +M: Thomas Gleixner L: linux-kernel@vger.kernel.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core @@ -6541,7 +6541,7 @@ S: Maintained F: drivers/cpufreq/virtual-cpufreq.c CPU HOTPLUG -M: Thomas Gleixner +M: Thomas Gleixner M: Peter Zijlstra L: linux-kernel@vger.kernel.org S: Maintained @@ -6968,7 +6968,7 @@ F: Documentation/scsi/dc395x.rst F: drivers/scsi/dc395x.* DEBUGOBJECTS: -M: Thomas Gleixner +M: Thomas Gleixner L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/debugobjects @@ -10371,7 +10371,7 @@ F: include/uapi/linux/fuse.h F: tools/testing/selftests/filesystems/fuse/ FUTEX SUBSYSTEM -M: Thomas Gleixner +M: Thomas Gleixner M: Ingo Molnar R: Peter Zijlstra R: Darren Hart @@ -10515,7 +10515,7 @@ F: drivers/base/arch_topology.c F: include/linux/arch_topology.h GENERIC ENTRY CODE -M: Thomas Gleixner +M: Thomas Gleixner M: Peter Zijlstra M: Andy Lutomirski L: linux-kernel@vger.kernel.org @@ -10628,7 +10628,7 @@ F: drivers/uio/uio_pci_generic.c GENERIC VDSO LIBRARY M: Andy Lutomirski -M: Thomas Gleixner +M: Thomas Gleixner M: Vincenzo Frascino L: linux-kernel@vger.kernel.org S: Maintained @@ -11241,7 +11241,7 @@ F: drivers/hid/hid-logitech-hidpp.c HIGH-RESOLUTION TIMERS, TIMER WHEEL, CLOCKEVENTS M: Anna-Maria Behnsen M: Frederic Weisbecker -M: Thomas Gleixner +M: Thomas Gleixner L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core @@ -11264,7 +11264,7 @@ R: Boqun Feng R: FUJITA Tomonori R: Frederic Weisbecker R: Lyude Paul -R: Thomas Gleixner +R: Thomas Gleixner R: Anna-Maria Behnsen R: John Stultz R: Stephen Boyd @@ -13334,7 +13334,7 @@ F: Documentation/devicetree/bindings/sound/irondevice,* F: sound/soc/codecs/sma* IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) -M: Thomas Gleixner +M: Thomas Gleixner S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core F: Documentation/core-api/irq/irq-domain.rst @@ -13344,7 +13344,7 @@ F: kernel/irq/irqdomain.c F: kernel/irq/msi.c IRQ SUBSYSTEM -M: Thomas Gleixner +M: Thomas Gleixner L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core @@ -13357,7 +13357,7 @@ F: kernel/irq/ F: lib/group_cpus.c IRQCHIP DRIVERS -M: Thomas Gleixner +M: Thomas Gleixner L: 
linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core @@ -14451,7 +14451,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-nonmm-unstab F: lib/* LICENSES and SPDX stuff -M: Thomas Gleixner +M: Thomas Gleixner M: Greg Kroah-Hartman L: linux-spdx@vger.kernel.org S: Maintained @@ -18576,7 +18576,7 @@ NOHZ, DYNTICKS SUPPORT M: Anna-Maria Behnsen M: Frederic Weisbecker M: Ingo Molnar -M: Thomas Gleixner +M: Thomas Gleixner L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/nohz @@ -20761,7 +20761,7 @@ F: drivers/platform/x86/portwell-ec.c POSIX CLOCKS and TIMERS M: Anna-Maria Behnsen M: Frederic Weisbecker -M: Thomas Gleixner +M: Thomas Gleixner L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core @@ -26272,7 +26272,7 @@ F: drivers/net/wireless/ti/ TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER M: John Stultz -M: Thomas Gleixner +M: Thomas Gleixner R: Stephen Boyd L: linux-kernel@vger.kernel.org S: Supported @@ -28203,7 +28203,7 @@ F: net/lapb/ F: net/x25/ X86 ARCHITECTURE (32-BIT AND 64-BIT) -M: Thomas Gleixner +M: Thomas Gleixner M: Ingo Molnar M: Borislav Petkov M: Dave Hansen @@ -28219,7 +28219,7 @@ F: tools/testing/selftests/x86 X86 CPUID DATABASE M: Borislav Petkov -M: Thomas Gleixner +M: Thomas Gleixner M: x86@kernel.org R: Ahmed S. Darwish L: x86-cpuid@lists.linux.dev @@ -28235,7 +28235,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/asm F: arch/x86/entry/ X86 HARDWARE VULNERABILITIES -M: Thomas Gleixner +M: Thomas Gleixner M: Borislav Petkov M: Peter Zijlstra M: Josh Poimboeuf diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 1d2507f22437..1fbb7d46e484 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -7,7 +7,7 @@ * Heavily based on the x86 and PowerPC implementations. 
* * x86: - * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index cae4d33002a5..0ce4ae343531 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -6,7 +6,7 @@ * This code is based almost entirely upon the x86 perf event * code, which is: * - * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 0c38a31d5fc7..576baa9a52c5 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1,7 +1,7 @@ /* * Performance events x86 architecture code * - * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 3161ec0a3416..62963022b517 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -1,7 +1,7 @@ /* * Performance events x86 architecture header * - * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar * Copyright (C) 2009 Jaswinder Singh Rajput * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 0a2bbd674a6d..ebefb77c37bb 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009 Thomas Gleixner + * Copyright (C) 2009 Linutronix GmbH, Thomas Gleixner * * For licencing details see kernel-base/COPYING */ diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index b10d4d131dce..f7546e9e8e89 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -15,7 +15,7 @@ * Signed-off-by: Michael Schwarz * * Major changes to the original code by: Dave Hansen - * Mostly rewritten by Thomas Gleixner and + * Mostly rewritten by Thomas Gleixner and * Andy Lutomirsky */ #include diff --git a/drivers/mtd/nand/ecc-sw-hamming.c b/drivers/mtd/nand/ecc-sw-hamming.c index f2d0effad9d2..bc62a71f9fdd 100644 --- a/drivers/mtd/nand/ecc-sw-hamming.c +++ b/drivers/mtd/nand/ecc-sw-hamming.c @@ -8,7 +8,7 @@ * * Completely replaces the previous ECC implementation which was written by: * Steven J. Hill (sjhill@realitydiluted.com) - * Thomas Gleixner (tglx@linutronix.de) + * Thomas Gleixner (tglx@kernel.org) * * Information on how this algorithm works and how it was developed * can be found in Documentation/driver-api/mtd/nand_ecc.rst diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index 70d6c2250f32..540b6baf8bb1 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -11,7 +11,7 @@ * Error correction code lifted from the old docecc code * Author: Fabrice Bellard (fabrice.bellard@netgem.com) * Copyright (C) 2000 Netgem S.A. 
- * converted to the generic Reed-Solomon library by Thomas Gleixner + * converted to the generic Reed-Solomon library by Thomas Gleixner * * Interface to generic NAND code for M-Systems DiskOnChip devices */ diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index ad6d66309597..f2322de93ab4 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -8,7 +8,7 @@ * http://www.linux-mtd.infradead.org/doc/nand.html * * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) - * 2002-2006 Thomas Gleixner (tglx@linutronix.de) + * 2002-2006 Thomas Gleixner (tglx@kernel.org) * * Credits: * David Woodhouse for adding multichip support @@ -6594,5 +6594,5 @@ EXPORT_SYMBOL_GPL(nand_cleanup); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steven J. Hill "); -MODULE_AUTHOR("Thomas Gleixner "); +MODULE_AUTHOR("Thomas Gleixner "); MODULE_DESCRIPTION("Generic NAND flash driver code"); diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c index a8fba5f39f59..3050ab7e6eb6 100644 --- a/drivers/mtd/nand/raw/nand_bbt.c +++ b/drivers/mtd/nand/raw/nand_bbt.c @@ -3,7 +3,7 @@ * Overview: * Bad block table support for the NAND driver * - * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de) + * Copyright © 2004 Thomas Gleixner (tglx@kernel.org) * * Description: * diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c index 650351c62af6..62a8cf86d9e2 100644 --- a/drivers/mtd/nand/raw/nand_ids.c +++ b/drivers/mtd/nand/raw/nand_ids.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de) + * Copyright (C) 2002 Thomas Gleixner (tglx@kernel.org) */ #include diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c index b3cc8f360529..89e6dd8ed1a8 100644 --- a/drivers/mtd/nand/raw/nand_jedec.c +++ b/drivers/mtd/nand/raw/nand_jedec.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) - * 2002-2006 Thomas Gleixner (tglx@linutronix.de) + * 2002-2006 Thomas Gleixner (tglx@kernel.org) * * Credits: * David Woodhouse for adding multichip support diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index 743792edf98d..97700f80d5b8 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) - * 2002-2006 Thomas Gleixner (tglx@linutronix.de) + * 2002-2006 Thomas Gleixner (tglx@kernel.org) * * Credits: * David Woodhouse for adding multichip support diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index 861975e44b55..11954440e4de 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2000 Steven J. 
Hill (sjhill@realitydiluted.com) - * 2002-2006 Thomas Gleixner (tglx@linutronix.de) + * 2002-2006 Thomas Gleixner (tglx@kernel.org) * * Credits: * David Woodhouse for adding multichip support diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c index 13365128194d..7ad8bc04be1a 100644 --- a/drivers/mtd/nand/raw/ndfc.c +++ b/drivers/mtd/nand/raw/ndfc.c @@ -272,5 +272,5 @@ static struct platform_driver ndfc_driver = { module_platform_driver(ndfc_driver); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Thomas Gleixner "); +MODULE_AUTHOR("Thomas Gleixner "); MODULE_DESCRIPTION("OF Platform driver for NDFC"); diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index d93ed4e86a17..fa0d4e6aee16 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -3,7 +3,7 @@ * drivers/uio/uio.c * * Copyright(C) 2005, Benedikt Spranger - * Copyright(C) 2005, Thomas Gleixner + * Copyright(C) 2005, Linutronix GmbH, Thomas Gleixner * Copyright(C) 2006, Hans J. Koch * Copyright(C) 2006, Greg Kroah-Hartman * diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index bb815a002984..3ab3f0ff7ebb 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c @@ -2,10 +2,10 @@ * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. - * Copyright © 2004 Thomas Gleixner + * Copyright © 2004 Thomas Gleixner * * Created by David Woodhouse - * Modified debugged and enhanced by Thomas Gleixner + * Modified debugged and enhanced by Thomas Gleixner * * For licensing information, see the file 'LICENCE' in this directory. * diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 2cf1bf65b225..0de12f14d6a4 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -2,7 +2,7 @@ /* * hrtimers - High-resolution kernel timers * - * Copyright(C) 2005, Thomas Gleixner + * Copyright(C) 2005, Linutronix GmbH, Thomas Gleixner * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar * * data type definitions, declarations, prototypes diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 383ed9985802..f247e564602f 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -3,7 +3,7 @@ * * ktime_t - nanosecond-resolution time format. * - * Copyright(C) 2005, Thomas Gleixner + * Copyright(C) 2005, Linutronix GmbH, Thomas Gleixner * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar * * data type definitions, declarations, prototypes and macros. diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h index 56047a4e54c9..255972f3d88d 100644 --- a/include/linux/mtd/jedec.h +++ b/include/linux/mtd/jedec.h @@ -2,7 +2,7 @@ /* * Copyright © 2000-2010 David Woodhouse * Steven J. Hill - * Thomas Gleixner + * Thomas Gleixner * * Contains all JEDEC related definitions */ diff --git a/include/linux/mtd/nand-ecc-sw-hamming.h b/include/linux/mtd/nand-ecc-sw-hamming.h index c6c71894c575..2aa2f8ef68d2 100644 --- a/include/linux/mtd/nand-ecc-sw-hamming.h +++ b/include/linux/mtd/nand-ecc-sw-hamming.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2000-2010 Steven J. Hill * David Woodhouse - * Thomas Gleixner + * Thomas Gleixner * * This file is the header for the NAND Hamming ECC implementation. 
*/ diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h index 98f075b86931..622891191e9c 100644 --- a/include/linux/mtd/ndfc.h +++ b/include/linux/mtd/ndfc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2006 Thomas Gleixner + * Copyright (c) 2006 Linutronix GmbH, Thomas Gleixner * * Info: * Contains defines, datastructures for ndfc nand controller diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h index 55ab2e4d62f9..09a5cbd8f232 100644 --- a/include/linux/mtd/onfi.h +++ b/include/linux/mtd/onfi.h @@ -2,7 +2,7 @@ /* * Copyright © 2000-2010 David Woodhouse * Steven J. Hill - * Thomas Gleixner + * Thomas Gleixner * * Contains all ONFI related definitions */ diff --git a/include/linux/mtd/platnand.h b/include/linux/mtd/platnand.h index bc11eb6b593b..2df6fba699f2 100644 --- a/include/linux/mtd/platnand.h +++ b/include/linux/mtd/platnand.h @@ -2,7 +2,7 @@ /* * Copyright © 2000-2010 David Woodhouse * Steven J. Hill - * Thomas Gleixner + * Thomas Gleixner * * Contains all platform NAND related definitions. */ diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index d30bdc3fcfd7..5c70e7bd3ed5 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -2,7 +2,7 @@ /* * Copyright © 2000-2010 David Woodhouse * Steven J. Hill - * Thomas Gleixner + * Thomas Gleixner * * Info: * Contains standard defines and IDs for NAND flash devices diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 9870d768db4c..9ded2e582c60 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1,7 +1,7 @@ /* * Performance events: * - * Copyright (C) 2008-2009, Thomas Gleixner + * Copyright (C) 2008-2009, Linutronix GmbH, Thomas Gleixner * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra * diff --git a/include/linux/plist.h b/include/linux/plist.h index 8c1c8adf7fe9..16cf4355b5c1 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -8,7 +8,7 @@ * 2001-2005 (c) MontaVista Software, Inc. * Daniel Walker * - * (C) 2005 Thomas Gleixner + * (C) 2005 Linutronix GmbH, Thomas Gleixner * * Simplifications of the original code by * Oleg Nesterov diff --git a/include/linux/rslib.h b/include/linux/rslib.h index a04dacbdc8ae..a2848f6907e3 100644 --- a/include/linux/rslib.h +++ b/include/linux/rslib.h @@ -2,7 +2,7 @@ /* * Generic Reed Solomon encoder / decoder library * - * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) + * Copyright (C) 2004 Thomas Gleixner (tglx@kernel.org) * * RS code lifted from reed solomon library written by Phil Karn * Copyright 2002 Phil Karn, KA9Q diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 18238dc8bfd3..334641e20fb1 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -3,7 +3,7 @@ * include/linux/uio_driver.h * * Copyright(C) 2005, Benedikt Spranger - * Copyright(C) 2005, Thomas Gleixner + * Copyright(C) 2005, Linutronix GmbH, Thomas Gleixner * Copyright(C) 2006, Hans J. 
Koch * Copyright(C) 2006, Greg Kroah-Hartman * diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index c44a8fb3e418..72f03153dd32 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -2,7 +2,7 @@ /* * Performance events: * - * Copyright (C) 2008-2009, Thomas Gleixner + * Copyright (C) 2008-2009, Linutronix GmbH, Thomas Gleixner * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra * diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index b9c7e00725d6..1f6589578703 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -2,7 +2,7 @@ /* * Performance events callchain code, extracted from core.c: * - * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra * Copyright © 2009 Paul Mackerras, IBM Corp. diff --git a/kernel/events/core.c b/kernel/events/core.c index dad0d3d2e85f..f5e9d30e4fa9 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2,7 +2,7 @@ /* * Performance events core code: * - * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra * Copyright © 2009 Paul Mackerras, IBM Corp. diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 20a905023736..3e7de2661417 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -2,7 +2,7 @@ /* * Performance events ring-buffer code: * - * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra * Copyright © 2009 Paul Mackerras, IBM Corp. 
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index 3527defd2890..5c5ebaee35f2 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -// Copyright 2017 Thomas Gleixner +// Copyright 2017 Linutronix GmbH, Thomas Gleixner #include #include diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 8f222d1cccec..a50f2305a8dc 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -// Copyright (C) 2017 Thomas Gleixner +// Copyright (C) 2017 Linutronix GmbH, Thomas Gleixner #include #include diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index da46c3164537..e71302282671 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -15,7 +15,7 @@ * Author: Srivatsa Vaddagiri * * Scaled math optimizations by Thomas Gleixner - * Copyright (C) 2007, Thomas Gleixner + * Copyright (C) 2007, Linutronix GmbH, Thomas Gleixner * * Adaptive scheduling granularity, math enhancements by Peter Zijlstra * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c index fa83bbaf4f3e..897790889ba3 100644 --- a/kernel/sched/pelt.c +++ b/kernel/sched/pelt.c @@ -15,7 +15,7 @@ * Author: Srivatsa Vaddagiri * * Scaled math optimizations by Thomas Gleixner - * Copyright (C) 2007, Thomas Gleixner + * Copyright (C) 2007, Linutronix GmbH, Thomas Gleixner * * Adaptive scheduling granularity, math enhancements by Peter Zijlstra * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index a59bc75ab7c5..eaae1ce9f060 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -2,7 +2,7 @@ /* * This file contains functions which manage clock event devices. * - * Copyright(C) 2005-2006, Thomas Gleixner + * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner */ diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index f8ea8c8fc895..bdb30cc5e873 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright(C) 2005-2006, Thomas Gleixner + * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner * diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 0207868c8b4d..f63c65881364 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -3,7 +3,7 @@ * This file contains functions which emulate a local clock-event * device via a broadcast event source. * - * Copyright(C) 2005-2006, Thomas Gleixner + * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner */ diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 7e33d3f2e889..d305d8521896 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -3,7 +3,7 @@ * This file contains the base functions to manage periodic tick * related events. 
* - * Copyright(C) 2005-2006, Thomas Gleixner + * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner */ diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index ffee943d796d..7472597f3225 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c @@ -3,7 +3,7 @@ * This file contains functions which manage high resolution tick * related events. * - * Copyright(C) 2005-2006, Thomas Gleixner + * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner */ diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 8ddf74e705d3..2f8a7923fa27 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright(C) 2005-2006, Thomas Gleixner + * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner * diff --git a/lib/debugobjects.c b/lib/debugobjects.c index ecf8e7f978e3..89a1d6745dc2 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -2,7 +2,7 @@ /* * Generic infrastructure for lifetime debugging of objects. * - * Copyright (C) 2008, Thomas Gleixner + * Copyright (C) 2008, Linutronix GmbH, Thomas Gleixner */ #define pr_fmt(fmt) "ODEBUG: " fmt diff --git a/lib/plist.c b/lib/plist.c index ba677c31e8f3..a5bef38add43 100644 --- a/lib/plist.c +++ b/lib/plist.c @@ -10,7 +10,7 @@ * 2001-2005 (c) MontaVista Software, Inc. * Daniel Walker * - * (C) 2005 Thomas Gleixner + * (C) 2005 Linutronix GmbH, Thomas Gleixner * * Simplifications of the original code by * Oleg Nesterov diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c index 805de84ae83d..ef86ee2aec58 100644 --- a/lib/reed_solomon/decode_rs.c +++ b/lib/reed_solomon/decode_rs.c @@ -5,7 +5,7 @@ * Copyright 2002, Phil Karn, KA9Q * May be used under the terms of the GNU General Public License (GPL) * - * Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de) + * Adaption to the kernel by Thomas Gleixner (tglx@kernel.org) * * Generic data width independent code which is included by the wrappers. */ diff --git a/lib/reed_solomon/encode_rs.c b/lib/reed_solomon/encode_rs.c index 9112d46e869e..1d9e51dcc83d 100644 --- a/lib/reed_solomon/encode_rs.c +++ b/lib/reed_solomon/encode_rs.c @@ -5,7 +5,7 @@ * Copyright 2002, Phil Karn, KA9Q * May be used under the terms of the GNU General Public License (GPL) * - * Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de) + * Adaption to the kernel by Thomas Gleixner (tglx@kernel.org) * * Generic data width independent code which is included by the wrappers. 
*/ diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c index bbc01bad3053..a9e2dcb6f2a7 100644 --- a/lib/reed_solomon/reed_solomon.c +++ b/lib/reed_solomon/reed_solomon.c @@ -2,7 +2,7 @@ /* * Generic Reed Solomon encoder / decoder library * - * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) + * Copyright (C) 2004 Thomas Gleixner (tglx@kernel.org) * * Reed Solomon code lifted from reed solomon library written by Phil Karn * Copyright 2002 Phil Karn, KA9Q diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py index 8d608f61bf37..908029e45ca2 100755 --- a/scripts/spdxcheck.py +++ b/scripts/spdxcheck.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0 -# Copyright Thomas Gleixner +# Copyright Linutronix GmbH, Thomas Gleixner from argparse import ArgumentParser from ply import lex, yacc diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index c44a8fb3e418..72f03153dd32 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -2,7 +2,7 @@ /* * Performance events: * - * Copyright (C) 2008-2009, Thomas Gleixner + * Copyright (C) 2008-2009, Linutronix GmbH, Thomas Gleixner * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra * diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c index 5cbca0bacd35..87a5491048ac 100644 --- a/tools/perf/builtin-list.c +++ b/tools/perf/builtin-list.c @@ -4,7 +4,7 @@ * * Builtin list command: list all event types * - * Copyright (C) 2009, Thomas Gleixner + * Copyright (C) 2009, Linutronix GmbH, Thomas Gleixner * Copyright (C) 2008-2009, Red Hat Inc, Ingo Molnar * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo */ -- cgit v1.2.3