about summary refs log tree commit diff
diff options
context:
space:
mode:
authorHarrison Lingren <hlingren@google.com>2019-09-11 20:57:56 -0700
committerHarrison Lingren <hlingren@google.com>2019-09-11 20:57:56 -0700
commit7cce48a52887cd9bbbb1c59c74e598f82ada4481 (patch)
tree7872a1df22e803c522273d64244c5080e0dcdaaa
parent3206e079fc69dda776bc2d89acc1d8c15d9f4ed3 (diff)
parenta8d94a80baa1da8927747a97beee7bfb6b02b121 (diff)
Merge branch 'android-msm-pixel-4.9-qt-security-next' into android-msm-pixel-4.9-qt (tags: android-10.0.0_r0.20, android-10.0.0_r0.19)
Nov 2019.1 Bug: 140747057 Bug: 140748328 Change-Id: Ia14269e758839fdef4c3e37fd0a1cd6aa6a57f79 Signed-off-by: Harrison Lingren <hlingren@google.com>
-rw-r--r--drivers/android/binder.c45
-rw-r--r--drivers/gpu/msm/adreno_snapshot.c15
-rw-r--r--fs/ext4/extents.c17
3 files changed, 63 insertions(+), 14 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 187eb124acdf..3b39c548661a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -527,7 +527,8 @@ struct binder_priority {
* @requested_threads_started: number binder threads started
* (protected by @inner_lock)
* @tmp_ref: temporary reference to indicate proc is in use
- * (protected by @inner_lock)
+ * (atomic since @proc->inner_lock cannot
+ * always be acquired)
* @default_priority: default scheduler priority
* (invariant after initialized)
* @debugfs_entry: debugfs node
@@ -561,7 +562,7 @@ struct binder_proc {
int max_threads;
int requested_threads;
int requested_threads_started;
- int tmp_ref;
+ atomic_t tmp_ref;
struct binder_priority default_priority;
struct dentry *debugfs_entry;
struct binder_alloc alloc;
@@ -2120,9 +2121,9 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
binder_inner_proc_lock(proc);
- proc->tmp_ref--;
+ atomic_dec(&proc->tmp_ref);
if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
- !proc->tmp_ref) {
+ !atomic_read(&proc->tmp_ref)) {
binder_inner_proc_unlock(proc);
binder_free_proc(proc);
return;
@@ -2184,8 +2185,26 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
static void binder_free_transaction(struct binder_transaction *t)
{
- if (t->buffer)
- t->buffer->transaction = NULL;
+ struct binder_proc *target_proc;
+
+ spin_lock(&t->lock);
+ target_proc = t->to_proc;
+ if (target_proc) {
+ atomic_inc(&target_proc->tmp_ref);
+ spin_unlock(&t->lock);
+
+ binder_inner_proc_lock(target_proc);
+ if (t->buffer)
+ t->buffer->transaction = NULL;
+ binder_inner_proc_unlock(target_proc);
+ binder_proc_dec_tmpref(target_proc);
+ } else {
+		/*
+		 * If the transaction has no target_proc, then
+		 * t->buffer->transaction has already been cleared.
+		 */
+ spin_unlock(&t->lock);
+ }
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
@@ -3017,7 +3036,7 @@ static struct binder_node *binder_get_node_refs_for_txn(
target_node = node;
binder_inc_node_nilocked(node, 1, 0, NULL);
binder_inc_node_tmpref_ilocked(node);
- node->proc->tmp_ref++;
+ atomic_inc(&node->proc->tmp_ref);
*procp = node->proc;
} else
*error = BR_DEAD_REPLY;
@@ -3114,7 +3133,7 @@ static void binder_transaction(struct binder_proc *proc,
goto err_dead_binder;
}
target_proc = target_thread->proc;
- target_proc->tmp_ref++;
+ atomic_inc(&target_proc->tmp_ref);
binder_inner_proc_unlock(target_thread->proc);
} else {
if (tr->target.handle) {
@@ -3380,7 +3399,8 @@ static void binder_transaction(struct binder_proc *proc,
buffer_offset = off_start_offset;
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
- sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
+ sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
+ ALIGN(secctx_sz, sizeof(u64));
off_min = 0;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
@@ -3893,10 +3913,12 @@ static int binder_thread_write(struct binder_proc *proc,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
+ binder_inner_proc_lock(proc);
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
buffer->transaction = NULL;
}
+ binder_inner_proc_unlock(proc);
if (buffer->async_transaction && buffer->target_node) {
struct binder_node *buf_node;
struct binder_work *w;
@@ -4756,7 +4778,7 @@ static int binder_thread_release(struct binder_proc *proc,
* The corresponding dec is when we actually
* free the thread in binder_free_thread()
*/
- proc->tmp_ref++;
+ atomic_inc(&proc->tmp_ref);
/*
* take a ref on this thread to ensure it
* survives while we are releasing it
@@ -5251,6 +5273,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
return -ENOMEM;
spin_lock_init(&proc->inner_lock);
spin_lock_init(&proc->outer_lock);
+ atomic_set(&proc->tmp_ref, 0);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
mutex_init(&proc->files_lock);
@@ -5430,7 +5453,7 @@ static void binder_deferred_release(struct binder_proc *proc)
* Make sure proc stays alive after we
* remove all the threads
*/
- proc->tmp_ref++;
+ atomic_inc(&proc->tmp_ref);
proc->is_dead = true;
threads = 0;
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index b5999e6fb6a2..ff199a26ba56 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017,2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -70,6 +70,19 @@ void kgsl_snapshot_push_object(struct kgsl_process_private *process,
for (index = 0; index < objbufptr; index++) {
if (objbuf[index].gpuaddr == gpuaddr &&
objbuf[index].entry->priv == process) {
+ /*
+ * Check if newly requested size is within the
+ * allocated range or not, otherwise continue
+ * with previous size.
+ */
+ if (!kgsl_gpuaddr_in_memdesc(
+ &objbuf[index].entry->memdesc,
+ gpuaddr, dwords << 2)) {
+ KGSL_CORE_ERR(
+ "snapshot: IB 0x%016llx size is not within the memdesc range\n",
+ gpuaddr);
+ return;
+ }
objbuf[index].size = max_t(uint64_t,
objbuf[index].size,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 94ed1b200012..2080bee85de4 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1047,6 +1047,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
__le32 border;
ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
int err = 0;
+ size_t ext_size = 0;
/* make decision: where to split? */
/* FIXME: now decision is simplest: at current extent */
@@ -1138,6 +1139,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
le16_add_cpu(&neh->eh_entries, m);
}
+ /* zero out unused area in the extent block */
+ ext_size = sizeof(struct ext4_extent_header) +
+ sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
+ memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
@@ -1217,6 +1222,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
sizeof(struct ext4_extent_idx) * m);
le16_add_cpu(&neh->eh_entries, m);
}
+ /* zero out unused area in the extent block */
+ ext_size = sizeof(struct ext4_extent_header) +
+ (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
+ memset(bh->b_data + ext_size, 0,
+ inode->i_sb->s_blocksize - ext_size);
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
@@ -1282,6 +1292,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
ext4_fsblk_t newblock, goal = 0;
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
int err = 0;
+ size_t ext_size = 0;
/* Try to prepend new index to old one */
if (ext_depth(inode))
@@ -1307,9 +1318,11 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
goto out;
}
+ ext_size = sizeof(EXT4_I(inode)->i_data);
/* move top-level index/leaf into new block */
- memmove(bh->b_data, EXT4_I(inode)->i_data,
- sizeof(EXT4_I(inode)->i_data));
+ memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
+ /* zero out unused area in the extent block */
+ memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
/* set size of new block */
neh = ext_block_hdr(bh);