author     Todd Kjos <tkjos@google.com>   2019-06-10 09:14:25 -0700
committer  Todd Kjos <tkjos@google.com>   2019-11-04 10:57:11 -0800
commit     502a526abc2b77975063ab74c9c5446190ea14e9 (patch)
tree       7e0e25252eee337497df83f47d09759683196e9e
parent     3fcf2711397454533fc3beabb71eef59f5247f2b (diff)
binder: fix possible UAF when freeing buffer
tags: ASB-2019-12-05_4.9-q-release, ASB-2019-11-05_4.9-q-release
There is a race between the binder driver cleaning up a completed
transaction via binder_free_transaction() and a user calling
binder_ioctl(BC_FREE_BUFFER) to release a buffer. It does not matter
which runs first, but they must be protected from running concurrently;
otherwise the result can be a use-after-free (UAF).

Bug: 133758011
Change-Id: Ie1426ff3d00218d050d61ff77b333ddf8818b7c9
Signed-off-by: Todd Kjos <tkjos@google.com>
-rw-r--r--  drivers/android/binder.c | 42
1 file changed, 32 insertions(+), 10 deletions(-)
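
In userspace terms, the race and the fix look like the sketch below: a
minimal analogue using pthreads and C11, with hypothetical names
(free_transaction(), free_buffer(), the struct layouts) standing in for
the driver's internals. Without a shared lock, the cleanup path can write
through t->buffer after BC_FREE_BUFFER has already freed it; with both
paths serialized on one lock, the link is severed safely.

    /*
     * Minimal userspace sketch of the race and the fix; pthread mutexes
     * stand in for binder_inner_proc_lock(). All names are hypothetical,
     * not the driver's API.
     */
    #include <pthread.h>
    #include <stdlib.h>

    struct transaction;

    struct buffer {
            struct transaction *transaction;  /* cleared when either side dies */
    };

    struct transaction {
            struct buffer *buffer;
    };

    static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;

    /* binder_free_transaction() side: forget the buffer. */
    static void free_transaction(struct transaction *t)
    {
            pthread_mutex_lock(&inner_lock);        /* the fix: both paths lock */
            if (t->buffer)
                    t->buffer->transaction = NULL;  /* UAF here if done unlocked */
            pthread_mutex_unlock(&inner_lock);
            free(t);
    }

    /* BC_FREE_BUFFER side: detach and free the buffer. */
    static void free_buffer(struct buffer *b)
    {
            pthread_mutex_lock(&inner_lock);
            if (b->transaction) {
                    b->transaction->buffer = NULL;
                    b->transaction = NULL;
            }
            pthread_mutex_unlock(&inner_lock);
            free(b);
    }
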
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a61621f499ee..06b239ae4cca 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -527,7 +527,8 @@ struct binder_priority {
* @requested_threads_started: number binder threads started
* (protected by @inner_lock)
* @tmp_ref: temporary reference to indicate proc is in use
- * (protected by @inner_lock)
+ * (atomic since @proc->inner_lock cannot
+ * always be acquired)
* @default_priority: default scheduler priority
* (invariant after initialized)
* @debugfs_entry: debugfs node
@@ -561,7 +562,7 @@ struct binder_proc {
int max_threads;
int requested_threads;
int requested_threads_started;
- int tmp_ref;
+ atomic_t tmp_ref;
struct binder_priority default_priority;
struct dentry *debugfs_entry;
struct binder_alloc alloc;
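
These first two hunks convert tmp_ref from a lock-protected int into an
atomic_t because, per the updated kerneldoc, the count must sometimes be
taken where @inner_lock cannot be acquired. A C11 analogue of the change
(illustrative stand-ins, not the kernel's atomic_t API):

    #include <stdatomic.h>

    struct proc {
            /* was: int tmp_ref;  (protected by inner_lock) */
            atomic_int tmp_ref;     /* now safe to bump without the lock */
    };

    static void proc_inc_tmpref(struct proc *p)
    {
            /* mirrors the patch's atomic_inc(&proc->tmp_ref) */
            atomic_fetch_add(&p->tmp_ref, 1);
    }
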
@@ -2072,9 +2073,9 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
binder_inner_proc_lock(proc);
- proc->tmp_ref--;
+ atomic_dec(&proc->tmp_ref);
if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
- !proc->tmp_ref) {
+ !atomic_read(&proc->tmp_ref)) {
binder_inner_proc_unlock(proc);
binder_free_proc(proc);
return;
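
Note that binder_proc_dec_tmpref() still decrements and re-reads the count
with the inner lock held, so two releasers cannot both observe zero and
double-free the proc. The same dec-and-test-under-lock shape, sketched
with userspace stand-ins (free_proc() and is_dead are hypothetical; the
threads-tree check is omitted):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct proc {
            pthread_mutex_t inner_lock;
            atomic_int tmp_ref;
            bool is_dead;
    };

    static void free_proc(struct proc *p)
    {
            free(p);
    }

    static void proc_dec_tmpref(struct proc *p)
    {
            pthread_mutex_lock(&p->inner_lock);
            atomic_fetch_sub(&p->tmp_ref, 1);
            /* the lock makes the dec/read pair atomic w.r.t. other releasers */
            if (p->is_dead && atomic_load(&p->tmp_ref) == 0) {
                    pthread_mutex_unlock(&p->inner_lock);
                    free_proc(p);
                    return;
            }
            pthread_mutex_unlock(&p->inner_lock);
    }
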
@@ -2136,8 +2137,26 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
static void binder_free_transaction(struct binder_transaction *t)
{
- if (t->buffer)
- t->buffer->transaction = NULL;
+ struct binder_proc *target_proc;
+
+ spin_lock(&t->lock);
+ target_proc = t->to_proc;
+ if (target_proc) {
+ atomic_inc(&target_proc->tmp_ref);
+ spin_unlock(&t->lock);
+
+ binder_inner_proc_lock(target_proc);
+ if (t->buffer)
+ t->buffer->transaction = NULL;
+ binder_inner_proc_unlock(target_proc);
+ binder_proc_dec_tmpref(target_proc);
+ } else {
+ /*
+ * If the transaction has no target_proc, then
+ * t->buffer->transaction has already been cleared.
+ */
+ spin_unlock(&t->lock);
+ }
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
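
The new binder_free_transaction() body above follows a pin-then-lock
ordering: read to_proc under the transaction's own lock, take a tmp_ref so
the proc cannot vanish once that lock is dropped, then clear the
back-pointer under the proc's inner lock. A condensed sketch of the same
ordering, again with hypothetical userspace stand-ins:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct transaction;

    struct proc {
            pthread_mutex_t inner_lock;
            atomic_int tmp_ref;
    };

    struct buffer {
            struct transaction *transaction;
    };

    struct transaction {
            pthread_mutex_t lock;       /* guards to_proc, as t->lock does */
            struct proc *to_proc;       /* may be cleared concurrently */
            struct buffer *buffer;
    };

    static void proc_dec_tmpref(struct proc *p)
    {
            atomic_fetch_sub(&p->tmp_ref, 1);   /* full version sketched earlier */
    }

    static void free_transaction(struct transaction *t)
    {
            struct proc *target;

            pthread_mutex_lock(&t->lock);
            target = t->to_proc;        /* stable only while t->lock is held */
            if (target) {
                    /* pin: the proc must outlive the unlock window below */
                    atomic_fetch_add(&target->tmp_ref, 1);
                    pthread_mutex_unlock(&t->lock);

                    pthread_mutex_lock(&target->inner_lock);
                    if (t->buffer)
                            t->buffer->transaction = NULL;
                    pthread_mutex_unlock(&target->inner_lock);
                    proc_dec_tmpref(target);    /* may free the proc */
            } else {
                    /* no target proc: the link was already severed */
                    pthread_mutex_unlock(&t->lock);
            }
            free(t);
    }
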
@@ -2969,7 +2988,7 @@ static struct binder_node *binder_get_node_refs_for_txn(
target_node = node;
binder_inc_node_nilocked(node, 1, 0, NULL);
binder_inc_node_tmpref_ilocked(node);
- node->proc->tmp_ref++;
+ atomic_inc(&node->proc->tmp_ref);
*procp = node->proc;
} else
*error = BR_DEAD_REPLY;
@@ -3066,7 +3085,7 @@ static void binder_transaction(struct binder_proc *proc,
goto err_dead_binder;
}
target_proc = target_thread->proc;
- target_proc->tmp_ref++;
+ atomic_inc(&target_proc->tmp_ref);
binder_inner_proc_unlock(target_thread->proc);
} else {
if (tr->target.handle) {
@@ -3846,10 +3865,12 @@ static int binder_thread_write(struct binder_proc *proc,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
+ binder_inner_proc_lock(proc);
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
buffer->transaction = NULL;
}
+ binder_inner_proc_unlock(proc);
if (buffer->async_transaction && buffer->target_node) {
struct binder_node *buf_node;
struct binder_work *w;
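
This hunk is the BC_FREE_BUFFER side of the race: the check-and-clear of
buffer->transaction now runs under the same proc inner lock as
binder_free_transaction(), so neither path can observe a half-torn pair.
Sketched with the same kind of hypothetical stand-ins:

    #include <pthread.h>

    struct transaction;

    struct buffer {
            struct transaction *transaction;
    };

    struct transaction {
            struct buffer *buffer;
    };

    struct proc {
            pthread_mutex_t inner_lock;
    };

    /* Sever both directions of the t <-> buffer link in one critical
     * section so neither path can see a half-torn pair. */
    static void handle_free_buffer(struct proc *p, struct buffer *b)
    {
            pthread_mutex_lock(&p->inner_lock);
            if (b->transaction) {
                    b->transaction->buffer = NULL;  /* transaction forgets buffer */
                    b->transaction = NULL;          /* buffer forgets transaction */
            }
            pthread_mutex_unlock(&p->inner_lock);
            /* the buffer can now be freed without racing the cleanup path */
    }
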
@@ -4709,7 +4730,7 @@ static int binder_thread_release(struct binder_proc *proc,
* The corresponding dec is when we actually
* free the thread in binder_free_thread()
*/
- proc->tmp_ref++;
+ atomic_inc(&proc->tmp_ref);
/*
* take a ref on this thread to ensure it
* survives while we are releasing it
@@ -5204,6 +5225,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
return -ENOMEM;
spin_lock_init(&proc->inner_lock);
spin_lock_init(&proc->outer_lock);
+ atomic_set(&proc->tmp_ref, 0);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
mutex_init(&proc->files_lock);
@@ -5383,7 +5405,7 @@ static void binder_deferred_release(struct binder_proc *proc)
* Make sure proc stays alive after we
* remove all the threads
*/
- proc->tmp_ref++;
+ atomic_inc(&proc->tmp_ref);
proc->is_dead = true;
threads = 0;