author		Linus Torvalds <torvalds@linux-foundation.org>	2026-02-07 09:27:57 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2026-02-07 09:27:57 -0800
commit		b0e7d3f88e563b5ca793fca23c7d7fa1352c1079 (patch)
tree		3359d75a0233c5595182b6f953066ba65a8297ec
parent		dda5df9823630a26ed24ca9150b33a7f56ba4546 (diff)
parent		ec4ddc90d201d09ef4e4bef8a2c6d9624525ad68 (diff)
Merge tag 'char-misc-6.19-final' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull binder fixes from Greg KH:
 "Here are some small, last-minute binder C and Rust driver fixes for
  reported issues. They include a number of fixes for reported crashes
  and other problems.

  All of these have been in linux-next this week, and longer"

* tag 'char-misc-6.19-final' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
  binderfs: fix ida_alloc_max() upper bound
  rust_binderfs: fix ida_alloc_max() upper bound
  binder: fix BR_FROZEN_REPLY error log
  rust_binder: add additional alignment checks
  binder: fix UAF in binder_netlink_report()
  rust_binder: correctly handle FDA objects of length zero
-rw-r--r--	drivers/android/binder.c			19
-rw-r--r--	drivers/android/binder/rust_binderfs.c		8
-rw-r--r--	drivers/android/binder/thread.rs		109
-rw-r--r--	drivers/android/binderfs.c			8
4 files changed, 94 insertions, 50 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 535fc881c8da..b356c9b88254 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2991,6 +2991,10 @@ static void binder_set_txn_from_error(struct binder_transaction *t, int id,
* @t: the binder transaction that failed
* @data_size: the user provided data size for the transaction
* @error: enum binder_driver_return_protocol returned to sender
+ *
+ * Note that t->buffer is not safe to access here, as it may have been
+ * released (or not yet allocated). Callers should guarantee all the
+ * transaction items used here are safe to access.
*/
static void binder_netlink_report(struct binder_proc *proc,
struct binder_transaction *t,
@@ -3780,6 +3784,14 @@ static void binder_transaction(struct binder_proc *proc,
goto err_dead_proc_or_thread;
}
} else {
+ /*
+ * Make a transaction copy. It is not safe to access 't' after
+ * binder_proc_transaction() reports a pending frozen state: the
+ * target could thaw and consume the transaction at any point.
+ * Instead, pass the safe 't_copy' to binder_netlink_report().
+ */
+ struct binder_transaction t_copy = *t;
+
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
return_error = binder_proc_transaction(t, target_proc, NULL);
@@ -3790,7 +3802,7 @@ static void binder_transaction(struct binder_proc *proc,
*/
if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
- binder_netlink_report(proc, t, tr->data_size,
+ binder_netlink_report(proc, &t_copy, tr->data_size,
return_error);
}
binder_enqueue_thread_work(thread, tcomplete);
@@ -3812,8 +3824,9 @@ static void binder_transaction(struct binder_proc *proc,
return;
err_dead_proc_or_thread:
- binder_txn_error("%d:%d dead process or thread\n",
- thread->pid, proc->pid);
+ binder_txn_error("%d:%d %s process or thread\n",
+ proc->pid, thread->pid,
+ return_error == BR_FROZEN_REPLY ? "frozen" : "dead");
return_error_line = __LINE__;
binder_dequeue_work(proc, tcomplete);
err_translate_failed:
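
The binder.c hunks above cover two of the pulled fixes: binder_netlink_report() now receives a stack copy 't_copy' because, once binder_proc_transaction() returns BR_TRANSACTION_PENDING_FROZEN, the target may thaw, consume and free 't' at any time; and the err_dead_proc_or_thread label now distinguishes frozen from dead targets in the log. Below is a minimal userspace C sketch of the same copy-before-handoff pattern; it is not part of the patch and all names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

struct work_item {
	int debug_id;
	size_t data_size;
};

/* Stand-in for the hand-off to the consumer: after this call the
 * item belongs to (and may be freed by) the other side. */
static void hand_off(struct work_item *w)
{
	free(w);
}

static void submit_and_report(struct work_item *w)
{
	struct work_item snapshot = *w;	/* copy while 'w' is still ours */

	hand_off(w);			/* 'w' must not be dereferenced past this point */
	printf("pending: id=%d size=%zu\n", snapshot.debug_id, snapshot.data_size);
}

int main(void)
{
	struct work_item *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	w->debug_id = 42;
	w->data_size = 128;
	submit_and_report(w);
	return 0;
}
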
diff --git a/drivers/android/binder/rust_binderfs.c b/drivers/android/binder/rust_binderfs.c
index e36011e89116..5c1319d80036 100644
--- a/drivers/android/binder/rust_binderfs.c
+++ b/drivers/android/binder/rust_binderfs.c
@@ -132,8 +132,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
mutex_lock(&binderfs_minors_mutex);
if (++info->device_count <= info->mount_opts.max)
minor = ida_alloc_max(&binderfs_minors,
- use_reserve ? BINDERFS_MAX_MINOR :
- BINDERFS_MAX_MINOR_CAPPED,
+ use_reserve ? BINDERFS_MAX_MINOR - 1 :
+ BINDERFS_MAX_MINOR_CAPPED - 1,
GFP_KERNEL);
else
minor = -ENOSPC;
@@ -399,8 +399,8 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
/* Reserve a new minor number for the new device. */
mutex_lock(&binderfs_minors_mutex);
minor = ida_alloc_max(&binderfs_minors,
- use_reserve ? BINDERFS_MAX_MINOR :
- BINDERFS_MAX_MINOR_CAPPED,
+ use_reserve ? BINDERFS_MAX_MINOR - 1 :
+ BINDERFS_MAX_MINOR_CAPPED - 1,
GFP_KERNEL);
mutex_unlock(&binderfs_minors_mutex);
if (minor < 0) {
diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
index 1a8e6fdc0dc4..e0ea33ccfe58 100644
--- a/drivers/android/binder/thread.rs
+++ b/drivers/android/binder/thread.rs
@@ -39,6 +39,10 @@ use core::{
sync::atomic::{AtomicU32, Ordering},
};
+fn is_aligned(value: usize, to: usize) -> bool {
+ value % to == 0
+}
+
/// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
/// call and is discarded when it returns.
struct ScatterGatherState {
@@ -69,17 +73,24 @@ struct ScatterGatherEntry {
}
/// This entry specifies that a fixup should happen at `target_offset` of the
-/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
-/// and is applied later. Otherwise if `skip` is zero, then the size of the
-/// fixup is `sizeof::<u64>()` and `pointer_value` is written to the buffer.
-struct PointerFixupEntry {
- /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
- skip: usize,
- /// The translated pointer to write when `skip` is zero.
- pointer_value: u64,
- /// The offset at which the value should be written. The offset is relative
- /// to the original buffer.
- target_offset: usize,
+/// buffer.
+enum PointerFixupEntry {
+ /// A fixup for a `binder_buffer_object`.
+ Fixup {
+ /// The translated pointer to write.
+ pointer_value: u64,
+ /// The offset at which the value should be written. The offset is relative
+ /// to the original buffer.
+ target_offset: usize,
+ },
+ /// A skip for a `binder_fd_array_object`.
+ Skip {
+ /// The number of bytes to skip.
+ skip: usize,
+ /// The offset at which the skip should happen. The offset is relative
+ /// to the original buffer.
+ target_offset: usize,
+ },
}
/// Return type of `apply_and_validate_fixup_in_parent`.
@@ -762,8 +773,7 @@ impl Thread {
parent_entry.fixup_min_offset = info.new_min_offset;
parent_entry.pointer_fixups.push(
- PointerFixupEntry {
- skip: 0,
+ PointerFixupEntry::Fixup {
pointer_value: buffer_ptr_in_user_space,
target_offset: info.target_offset,
},
@@ -789,6 +799,10 @@ impl Thread {
let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
+ if !is_aligned(parent_offset, size_of::<u32>()) {
+ return Err(EINVAL.into());
+ }
+
let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
view.alloc.info_add_fd_reserve(num_fds)?;
@@ -803,13 +817,16 @@ impl Thread {
}
};
+ if !is_aligned(parent_entry.sender_uaddr, size_of::<u32>()) {
+ return Err(EINVAL.into());
+ }
+
parent_entry.fixup_min_offset = info.new_min_offset;
parent_entry
.pointer_fixups
.push(
- PointerFixupEntry {
+ PointerFixupEntry::Skip {
skip: fds_len,
- pointer_value: 0,
target_offset: info.target_offset,
},
GFP_KERNEL,
@@ -820,6 +837,7 @@ impl Thread {
.sender_uaddr
.checked_add(parent_offset)
.ok_or(EINVAL)?;
+
let mut fda_bytes = KVec::new();
UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
.read_all(&mut fda_bytes, GFP_KERNEL)?;
@@ -871,17 +889,21 @@ impl Thread {
let mut reader =
UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
for fixup in &mut sg_entry.pointer_fixups {
- let fixup_len = if fixup.skip == 0 {
- size_of::<u64>()
- } else {
- fixup.skip
+ let (fixup_len, fixup_offset) = match fixup {
+ PointerFixupEntry::Fixup { target_offset, .. } => {
+ (size_of::<u64>(), *target_offset)
+ }
+ PointerFixupEntry::Skip {
+ skip,
+ target_offset,
+ } => (*skip, *target_offset),
};
- let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
- if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
+ let target_offset_end = fixup_offset.checked_add(fixup_len).ok_or(EINVAL)?;
+ if fixup_offset < end_of_previous_fixup || offset_end < target_offset_end {
pr_warn!(
"Fixups oob {} {} {} {}",
- fixup.target_offset,
+ fixup_offset,
end_of_previous_fixup,
offset_end,
target_offset_end
@@ -890,13 +912,13 @@ impl Thread {
}
let copy_off = end_of_previous_fixup;
- let copy_len = fixup.target_offset - end_of_previous_fixup;
+ let copy_len = fixup_offset - end_of_previous_fixup;
if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
pr_warn!("Failed copying into alloc: {:?}", err);
return Err(err.into());
}
- if fixup.skip == 0 {
- let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
+ if let PointerFixupEntry::Fixup { pointer_value, .. } = fixup {
+ let res = alloc.write::<u64>(fixup_offset, pointer_value);
if let Err(err) = res {
pr_warn!("Failed copying ptr into alloc: {:?}", err);
return Err(err.into());
@@ -949,25 +971,30 @@ impl Thread {
let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
- let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
- let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
- let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
- let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
+ let offsets_size: usize = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
+ let buffers_size: usize = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
let aligned_secctx_size = match secctx.as_ref() {
Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
None => 0,
};
+ if !is_aligned(offsets_size, size_of::<u64>()) {
+ return Err(EINVAL.into());
+ }
+ if !is_aligned(buffers_size, size_of::<u64>()) {
+ return Err(EINVAL.into());
+ }
+
// This guarantees that at least `sizeof(usize)` bytes will be allocated.
let len = usize::max(
aligned_data_size
- .checked_add(aligned_offsets_size)
- .and_then(|sum| sum.checked_add(aligned_buffers_size))
+ .checked_add(offsets_size)
+ .and_then(|sum| sum.checked_add(buffers_size))
.and_then(|sum| sum.checked_add(aligned_secctx_size))
.ok_or(ENOMEM)?,
- size_of::<usize>(),
+ size_of::<u64>(),
);
- let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
+ let secctx_off = aligned_data_size + offsets_size + buffers_size;
let mut alloc =
match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
Ok(alloc) => alloc,
@@ -999,13 +1026,13 @@ impl Thread {
}
let offsets_start = aligned_data_size;
- let offsets_end = aligned_data_size + aligned_offsets_size;
+ let offsets_end = aligned_data_size + offsets_size;
// This state is used for BINDER_TYPE_PTR objects.
let sg_state = sg_state.insert(ScatterGatherState {
unused_buffer_space: UnusedBufferSpace {
offset: offsets_end,
- limit: len,
+ limit: offsets_end + buffers_size,
},
sg_entries: KVec::new(),
ancestors: KVec::new(),
@@ -1014,12 +1041,16 @@ impl Thread {
// Traverse the objects specified.
let mut view = AllocationView::new(&mut alloc, data_size);
for (index, index_offset) in (offsets_start..offsets_end)
- .step_by(size_of::<usize>())
+ .step_by(size_of::<u64>())
.enumerate()
{
- let offset = view.alloc.read(index_offset)?;
+ let offset: usize = view
+ .alloc
+ .read::<u64>(index_offset)?
+ .try_into()
+ .map_err(|_| EINVAL)?;
- if offset < end_of_previous_object {
+ if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
pr_warn!("Got transaction with invalid offset.");
return Err(EINVAL.into());
}
@@ -1051,7 +1082,7 @@ impl Thread {
}
// Update the indexes containing objects to clean up.
- let offset_after_object = index_offset + size_of::<usize>();
+ let offset_after_object = index_offset + size_of::<u64>();
view.alloc
.set_info_offsets(offsets_start..offset_after_object);
}
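
The thread.rs hunks above split PointerFixupEntry into explicit Fixup and Skip variants and add alignment validation: offsets_size and buffers_size must be multiples of the u64 entry size, and object offsets and FDA parent offsets/addresses must be u32-aligned, otherwise the transaction is rejected with EINVAL. Below is a small userspace C sketch of the same modulo-based check, mirroring the new is_aligned() helper; it is not part of the patch and the values are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_aligned(size_t value, size_t to)
{
	return value % to == 0;
}

int main(void)
{
	size_t offsets_size = 24;	/* array of u64 offsets: must be 8-byte aligned */
	size_t fda_parent_offset = 6;	/* fd slots are u32: must be 4-byte aligned */

	printf("offsets_size ok: %d\n", is_aligned(offsets_size, sizeof(uint64_t)));	/* 1 */
	printf("fda offset ok:   %d\n", is_aligned(fda_parent_offset, sizeof(uint32_t)));	/* 0 -> EINVAL */
	return 0;
}
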
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index b46bcb91072d..9f8a18c88d66 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -132,8 +132,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
mutex_lock(&binderfs_minors_mutex);
if (++info->device_count <= info->mount_opts.max)
minor = ida_alloc_max(&binderfs_minors,
- use_reserve ? BINDERFS_MAX_MINOR :
- BINDERFS_MAX_MINOR_CAPPED,
+ use_reserve ? BINDERFS_MAX_MINOR - 1 :
+ BINDERFS_MAX_MINOR_CAPPED - 1,
GFP_KERNEL);
else
minor = -ENOSPC;
@@ -408,8 +408,8 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
/* Reserve a new minor number for the new device. */
mutex_lock(&binderfs_minors_mutex);
minor = ida_alloc_max(&binderfs_minors,
- use_reserve ? BINDERFS_MAX_MINOR :
- BINDERFS_MAX_MINOR_CAPPED,
+ use_reserve ? BINDERFS_MAX_MINOR - 1 :
+ BINDERFS_MAX_MINOR_CAPPED - 1,
GFP_KERNEL);
mutex_unlock(&binderfs_minors_mutex);
if (minor < 0) {
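
binderfs.c receives the same ida_alloc_max() bound fix as rust_binderfs.c above: the function's max argument is inclusive, so the bound has to be the largest valid minor (count - 1), not the minor count itself. Below is a short kernel-C sketch of that inclusive-bound usage, using a hypothetical EXAMPLE_MAX_MINORS pool rather than the binderfs macros.

#include <linux/idr.h>
#include <linux/gfp.h>

#define EXAMPLE_MAX_MINORS 4	/* hypothetical pool size, not the binderfs macro */

static DEFINE_IDA(example_minors);

/* ida_alloc_max() treats 'max' as inclusive, so a pool of N minors
 * must pass N - 1; passing N would hand out one ID past the range. */
static int example_alloc_minor(void)
{
	return ida_alloc_max(&example_minors, EXAMPLE_MAX_MINORS - 1, GFP_KERNEL);
}

static void example_free_minor(int minor)
{
	ida_free(&example_minors, minor);
}
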