author    Amit Pundir <amit.pundir@linaro.org>  2018-07-19 22:06:06 +0530
committer Amit Pundir <amit.pundir@linaro.org>  2018-07-19 22:08:41 +0530
commit    e2df7f1153f05733f6a00d0bc5756828ed9f7253 (patch)
tree      5a78908887e137863c2d3f8c44a50880bd9a9814
parent    9cffeb019816c7ab0fa608f87206284fc9d456c0 (diff)
parent    47bbcd6bf8f926e4e009c12b18f349ffa41bafd4 (diff)
Merge remote branch 'aosp/android-4.9' into linux-linaro-lsk-v4.9-android
(tags: lsk-v4.9-18.09-android, lsk-v4.9-18.07-android)

git remote aosp --> https://android.googlesource.com/kernel/common

* aosp/android-4.9:
  ANDROID: Fix massive cpufreq_times memory leaks
  ANDROID: Reduce use of #ifdef CONFIG_CPU_FREQ_TIMES
  UPSTREAM: binder: replace "%p" with "%pK"
  UPSTREAM: binder: free memory on error
  UPSTREAM: binder: fix proc->files use-after-free
  UPSTREAM: Revert "FROMLIST: binder: fix proc->files use-after-free"
  UPSTREAM: ANDROID: binder: change down_write to down_read
  UPSTREAM: ANDROID: binder: correct the cmd print for BINDER_WORK_RETURN_ERROR
  UPSTREAM: ANDROID: binder: remove 32-bit binder interface.
  UPSTREAM: ANDROID: binder: re-order some conditions
  UPSTREAM: android: binder: use VM_ALLOC to get vm area
  UPSTREAM: android: binder: Use true and false for boolean values
  UPSTREAM: android: binder: Use octal permissions
  UPSTREAM: android: binder: Prefer __func__ to using hardcoded function name
  UPSTREAM: ANDROID: binder: make binder_alloc_new_buf_locked static and indent its arguments
  UPSTREAM: android: binder: Check for errors in binder_alloc_shrinker_init().
  ANDROID: Add kconfig to make dm-verity check_at_most_once default enabled
  ANDROID: xt_qtaguid: Remove unnecessary null checks to device's name
  UPSTREAM: cpufreq: schedutil: use now as reference when aggregating shared policy requests
  ANDROID: sdcardfs: fix potential crash when reserved_mb is not zero

Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
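Note: one of the merged binder changes, 'UPSTREAM: binder: replace "%p" with "%pK"', switches debug output from the raw pointer format to the restricted one. As a rough illustration of the difference (a hedged sketch, not code from this merge; show_ptr() is a hypothetical helper), %pK honours the kptr_restrict sysctl and prints zeros to unprivileged readers, whereas plain %p on a v4.9 kernel prints the real kernel address:

    /* Hypothetical example -- not part of this patch. */
    #include <linux/printk.h>

    static void show_ptr(const void *obj)
    {
            /* %p: prints the raw kernel virtual address (on v4.9). */
            pr_debug("object at %p\n", obj);

            /*
             * %pK: prints all zeros when kptr_restrict is 1 and the reader
             * lacks CAP_SYSLOG (or unconditionally when kptr_restrict is 2),
             * so addresses are not leaked via debugfs or the kernel log.
             */
            pr_debug("object at %pK\n", obj);
    }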
-rw-r--r--  drivers/android/Kconfig            |  15
-rw-r--r--  drivers/android/binder.c           | 140
-rw-r--r--  drivers/android/binder_alloc.c     |  31
-rw-r--r--  drivers/android/binder_alloc.h     |   2
-rw-r--r--  drivers/cpufreq/cpufreq_times.c    |   9
-rw-r--r--  drivers/md/Kconfig                 |  20
-rw-r--r--  drivers/md/dm-verity-target.c      |   8
-rw-r--r--  fs/sdcardfs/inode.c                |   6
-rw-r--r--  include/linux/cpufreq_times.h      |   6
-rw-r--r--  kernel/exit.c                      |   4
-rw-r--r--  kernel/fork.c                      |   7
-rw-r--r--  kernel/sched/core.c                |   4
-rw-r--r--  kernel/sched/cpufreq_schedutil.c   |   7
-rw-r--r--  kernel/sched/cputime.c             |   5
-rw-r--r--  net/netfilter/xt_qtaguid.c         |   5
15 files changed, 160 insertions(+), 109 deletions(-)
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 01de42c8b74b..63ed9ceebf7b 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -9,7 +9,7 @@ if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
- depends on MMU
+ depends on MMU && !M68K
default n
---help---
Binder is used in Android for both communication between processes,
@@ -31,19 +31,6 @@ config ANDROID_BINDER_DEVICES
created. Each binder device has its own context manager, and is
therefore logically separated from the other devices.
-config ANDROID_BINDER_IPC_32BIT
- bool
- depends on !64BIT && ANDROID_BINDER_IPC
- default y
- ---help---
- The Binder API has been changed to support both 32 and 64bit
- applications in a mixed environment.
-
- Enable this to support an old 32-bit Android user-space (v4.4 and
- earlier).
-
- Note that enabling this will break newer Android user-space.
-
config ANDROID_BINDER_IPC_SELFTEST
bool "Android Binder IPC Driver Selftest"
depends on ANDROID_BINDER_IPC
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 957eb3cfab79..44530d62aa0d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -71,10 +71,6 @@
#include <linux/security.h>
#include <linux/spinlock.h>
-#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
-#define BINDER_IPC_32BIT 1
-#endif
-
#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"
@@ -142,7 +138,7 @@ enum {
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
-module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+module_param_named(debug_mask, binder_debug_mask, uint, 0644);
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);
@@ -161,7 +157,7 @@ static int binder_set_stop_on_user_error(const char *val,
return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
- param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+ param_get_int, &binder_stop_on_user_error, 0644);
#define binder_debug(mask, x...) \
do { \
@@ -250,7 +246,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
unsigned int cur = atomic_inc_return(&log->cur);
if (cur >= ARRAY_SIZE(log->entry))
- log->full = 1;
+ log->full = true;
e = &log->entry[cur % ARRAY_SIZE(log->entry)];
WRITE_ONCE(e->debug_id_done, 0);
/*
@@ -465,8 +461,9 @@ struct binder_ref {
};
enum binder_deferred_state {
- BINDER_DEFERRED_FLUSH = 0x01,
- BINDER_DEFERRED_RELEASE = 0x02,
+ BINDER_DEFERRED_PUT_FILES = 0x01,
+ BINDER_DEFERRED_FLUSH = 0x02,
+ BINDER_DEFERRED_RELEASE = 0x04,
};
/**
@@ -503,6 +500,9 @@ struct binder_priority {
* (invariant after initialized)
* @tsk task_struct for group_leader of process
* (invariant after initialized)
+ * @files files_struct for process
+ * (protected by @files_lock)
+ * @files_lock mutex to protect @files
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
@@ -547,6 +547,8 @@ struct binder_proc {
struct list_head waiting_threads;
int pid;
struct task_struct *tsk;
+ struct files_struct *files;
+ struct mutex files_lock;
struct hlist_node deferred_work_node;
int deferred_work;
bool is_dead;
@@ -941,33 +943,27 @@ static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
-struct files_struct *binder_get_files_struct(struct binder_proc *proc)
-{
- return get_files_struct(proc->tsk);
-}
-
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
- struct files_struct *files;
unsigned long rlim_cur;
unsigned long irqs;
int ret;
- files = binder_get_files_struct(proc);
- if (files == NULL)
- return -ESRCH;
-
+ mutex_lock(&proc->files_lock);
+ if (proc->files == NULL) {
+ ret = -ESRCH;
+ goto err;
+ }
if (!lock_task_sighand(proc->tsk, &irqs)) {
ret = -EMFILE;
goto err;
}
-
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- ret = __alloc_fd(files, 0, rlim_cur, flags);
+ ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
- put_files_struct(files);
+ mutex_unlock(&proc->files_lock);
return ret;
}
@@ -977,12 +973,10 @@ err:
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
- struct files_struct *files = binder_get_files_struct(proc);
-
- if (files) {
- __fd_install(files, fd, file);
- put_files_struct(files);
- }
+ mutex_lock(&proc->files_lock);
+ if (proc->files)
+ __fd_install(proc->files, fd, file);
+ mutex_unlock(&proc->files_lock);
}
/*
@@ -990,21 +984,22 @@ static void task_fd_install(
*/
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
- struct files_struct *files = binder_get_files_struct(proc);
int retval;
- if (files == NULL)
- return -ESRCH;
-
- retval = __close_fd(files, fd);
+ mutex_lock(&proc->files_lock);
+ if (proc->files == NULL) {
+ retval = -ESRCH;
+ goto err;
+ }
+ retval = __close_fd(proc->files, fd);
/* can't restart close syscall because file table entry was cleared */
if (unlikely(retval == -ERESTARTSYS ||
retval == -ERESTARTNOINTR ||
retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK))
retval = -EINTR;
- put_files_struct(files);
-
+err:
+ mutex_unlock(&proc->files_lock);
return retval;
}
@@ -2214,8 +2209,8 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
struct binder_object_header *hdr;
size_t object_size = 0;
- if (offset > buffer->data_size - sizeof(*hdr) ||
- buffer->data_size < sizeof(*hdr) ||
+ if (buffer->data_size < sizeof(*hdr) ||
+ offset > buffer->data_size - sizeof(*hdr) ||
!IS_ALIGNED(offset, sizeof(u32)))
return 0;
@@ -2355,7 +2350,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d buffer release %d, size %zd-%zd, failed at %p\n",
+ "%d buffer release %d, size %zd-%zd, failed at %pK\n",
proc->pid, buffer->debug_id,
buffer->data_size, buffer->offsets_size, failed_at);
@@ -2804,7 +2799,7 @@ static bool binder_proc_transaction(struct binder_transaction *t,
if (node->has_async_transaction) {
pending_async = true;
} else {
- node->has_async_transaction = 1;
+ node->has_async_transaction = true;
}
}
@@ -3669,7 +3664,7 @@ static int binder_thread_write(struct binder_proc *proc,
w = binder_dequeue_work_head_ilocked(
&buf_node->async_todo);
if (!w) {
- buf_node->has_async_transaction = 0;
+ buf_node->has_async_transaction = false;
} else {
binder_enqueue_work_ilocked(
w, &proc->todo);
@@ -3891,7 +3886,7 @@ static int binder_thread_write(struct binder_proc *proc,
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
- "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+ "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
proc->pid, thread->pid, (u64)cookie,
death);
if (death == NULL) {
@@ -4097,6 +4092,7 @@ retry:
binder_inner_proc_unlock(proc);
if (put_user(e->cmd, (uint32_t __user *)ptr))
return -EFAULT;
+ cmd = e->cmd;
e->cmd = BR_OK;
ptr += sizeof(uint32_t);
@@ -4865,6 +4861,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
binder_alloc_vma_close(&proc->alloc);
+ binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -4901,16 +4898,22 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
failure_string = "bad vm_flags";
goto err_bad_arg;
}
- vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+ vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
+ vma->vm_flags &= ~VM_MAYWRITE;
+
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
ret = binder_alloc_mmap_handler(&proc->alloc, vma);
-
- return ret;
+ if (ret)
+ return ret;
+ mutex_lock(&proc->files_lock);
+ proc->files = get_files_struct(current);
+ mutex_unlock(&proc->files_lock);
+ return 0;
err_bad_arg:
- pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
return ret;
}
@@ -4920,7 +4923,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
struct binder_proc *proc;
struct binder_device *binder_dev;
- binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
@@ -4930,6 +4933,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
+ mutex_init(&proc->files_lock);
INIT_LIST_HEAD(&proc->todo);
if (binder_supported_policy(current->policy)) {
proc->default_priority.sched_policy = current->policy;
@@ -4965,7 +4969,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
* anyway print all contexts that a given PID has, so this
* is not a problem.
*/
- proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+ proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
binder_debugfs_dir_entry_proc,
(void *)(unsigned long)proc->pid,
&binder_proc_fops);
@@ -5086,6 +5090,8 @@ static void binder_deferred_release(struct binder_proc *proc)
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
+ BUG_ON(proc->files);
+
mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
mutex_unlock(&binder_procs_lock);
@@ -5167,6 +5173,8 @@ static void binder_deferred_release(struct binder_proc *proc)
static void binder_deferred_func(struct work_struct *work)
{
struct binder_proc *proc;
+ struct files_struct *files;
+
int defer;
do {
@@ -5183,11 +5191,23 @@ static void binder_deferred_func(struct work_struct *work)
}
mutex_unlock(&binder_deferred_lock);
+ files = NULL;
+ if (defer & BINDER_DEFERRED_PUT_FILES) {
+ mutex_lock(&proc->files_lock);
+ files = proc->files;
+ if (files)
+ proc->files = NULL;
+ mutex_unlock(&proc->files_lock);
+ }
+
if (defer & BINDER_DEFERRED_FLUSH)
binder_deferred_flush(proc);
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
+
+ if (files)
+ put_files_struct(files);
} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
@@ -5216,7 +5236,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
@@ -5241,7 +5261,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
}
if (buffer->target_node)
seq_printf(m, " node %d", buffer->target_node->debug_id);
- seq_printf(m, " size %zd:%zd data %p\n",
+ seq_printf(m, " size %zd:%zd data %pK\n",
buffer->data_size, buffer->offsets_size,
buffer->data);
}
@@ -5776,11 +5796,13 @@ static int __init init_binder_device(const char *name)
static int __init binder_init(void)
{
int ret;
- char *device_name, *device_names;
+ char *device_name, *device_names, *device_tmp;
struct binder_device *device;
struct hlist_node *tmp;
- binder_alloc_shrinker_init();
+ ret = binder_alloc_shrinker_init();
+ if (ret)
+ return ret;
atomic_set(&binder_transaction_log.cur, ~0U);
atomic_set(&binder_transaction_log_failed.cur, ~0U);
@@ -5792,27 +5814,27 @@ static int __init binder_init(void)
if (binder_debugfs_dir_entry_root) {
debugfs_create_file("state",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_state_fops);
debugfs_create_file("stats",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_stats_fops);
debugfs_create_file("transactions",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_transactions_fops);
debugfs_create_file("transaction_log",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log,
&binder_transaction_log_fops);
debugfs_create_file("failed_transaction_log",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log_failed,
&binder_transaction_log_fops);
@@ -5829,7 +5851,8 @@ static int __init binder_init(void)
}
strcpy(device_names, binder_devices_param);
- while ((device_name = strsep(&device_names, ","))) {
+ device_tmp = device_names;
+ while ((device_name = strsep(&device_tmp, ","))) {
ret = init_binder_device(device_name);
if (ret)
goto err_init_binder_device_failed;
@@ -5843,6 +5866,9 @@ err_init_binder_device_failed:
hlist_del(&device->hlist);
kfree(device);
}
+
+ kfree(device_names);
+
err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 1c76daacfb0b..bec6c0a90429 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -219,7 +219,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->vma_vm_mm;
if (mm) {
- down_write(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
vma = alloc->vma;
}
@@ -288,7 +288,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
- up_write(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
mmput(mm);
}
return 0;
@@ -321,17 +321,18 @@ err_page_ptr_cleared:
}
err_no_vma:
if (mm) {
- up_write(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
}
-struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async)
+static struct binder_buffer *binder_alloc_new_buf_locked(
+ struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
@@ -669,7 +670,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_already_mapped;
}
- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
if (area == NULL) {
ret = -ENOMEM;
failure_string = "get_vm_area";
@@ -1008,8 +1009,14 @@ void binder_alloc_init(struct binder_alloc *alloc)
INIT_LIST_HEAD(&alloc->buffers);
}
-void binder_alloc_shrinker_init(void)
+int binder_alloc_shrinker_init(void)
{
- list_lru_init(&binder_alloc_lru);
- register_shrinker(&binder_shrinker);
+ int ret = list_lru_init(&binder_alloc_lru);
+
+ if (ret == 0) {
+ ret = register_shrinker(&binder_shrinker);
+ if (ret)
+ list_lru_destroy(&binder_alloc_lru);
+ }
+ return ret;
}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 0b145307f1fd..9ef64e563856 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -130,7 +130,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t extra_buffers_size,
int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
-void binder_alloc_shrinker_init(void);
+extern int binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
index 6254f45ca907..0e8754b6dd72 100644
--- a/drivers/cpufreq/cpufreq_times.c
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -234,16 +234,19 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
void cpufreq_task_times_init(struct task_struct *p)
{
- void *temp;
unsigned long flags;
- unsigned int max_state;
spin_lock_irqsave(&task_time_in_state_lock, flags);
p->time_in_state = NULL;
spin_unlock_irqrestore(&task_time_in_state_lock, flags);
p->max_state = 0;
+}
- max_state = READ_ONCE(next_offset);
+void cpufreq_task_times_alloc(struct task_struct *p)
+{
+ void *temp;
+ unsigned long flags;
+ unsigned int max_state = READ_ONCE(next_offset);
/* We use one array to avoid multiple allocs per task */
temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index e7b8f49e060f..72c45c356054 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -534,4 +534,24 @@ config DM_ANDROID_VERITY
of the metadata contents are verified against the key included
in the system keyring. Upon success, the underlying verity
target is setup.
+
+config DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED
+ bool "Verity will validate blocks at most once"
+ depends on DM_VERITY
+ ---help---
+ Default enables at_most_once option for dm-verity
+
+ Verify data blocks only the first time they are read from the
+ data device, rather than every time. This reduces the overhead
+ of dm-verity so that it can be used on systems that are memory
+ and/or CPU constrained. However, it provides a reduced level
+ of security because only offline tampering of the data device's
+ content will be detected, not online tampering.
+
+ Hash blocks are still verified each time they are read from the
+ hash device, since verification of hash blocks is less performance
+ critical than data blocks, and a hash block will not be verified
+ any more after all the data blocks it covers have been verified anyway.
+
+ If unsure, say N.
endif # MD
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index d96aa84cacd2..0a7a8287e58c 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1049,6 +1049,14 @@ int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+#ifdef CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED
+ if (!v->validated_blocks) {
+ r = verity_alloc_most_once(v);
+ if (r)
+ goto bad;
+ }
+#endif
+
v->hash_per_block_bits =
__fls((1 << v->hash_dev_block_bits) / v->digest_size);
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 9cdb396d9d37..81fe9e61a792 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -270,6 +270,7 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
struct dentry *lower_dentry;
struct vfsmount *lower_mnt;
struct dentry *lower_parent_dentry = NULL;
+ struct dentry *parent_dentry = NULL;
struct path lower_path;
struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
const struct cred *saved_cred = NULL;
@@ -289,11 +290,14 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
/* check disk space */
- if (!check_min_free_space(dentry, 0, 1)) {
+ parent_dentry = dget_parent(dentry);
+ if (!check_min_free_space(parent_dentry, 0, 1)) {
pr_err("sdcardfs: No minimum free space.\n");
err = -ENOSPC;
+ dput(parent_dentry);
goto out_revert;
}
+ dput(parent_dentry);
/* the lower_dentry is negative here */
sdcardfs_get_lower_path(dentry, &lower_path);
diff --git a/include/linux/cpufreq_times.h b/include/linux/cpufreq_times.h
index 3fb38750c853..356a3fad03c9 100644
--- a/include/linux/cpufreq_times.h
+++ b/include/linux/cpufreq_times.h
@@ -22,6 +22,7 @@
#ifdef CONFIG_CPU_FREQ_TIMES
void cpufreq_task_times_init(struct task_struct *p);
+void cpufreq_task_times_alloc(struct task_struct *p);
void cpufreq_task_times_exit(struct task_struct *p);
int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *p);
@@ -31,6 +32,11 @@ void cpufreq_times_record_transition(struct cpufreq_freqs *freq);
void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end);
int single_uid_time_in_state_open(struct inode *inode, struct file *file);
#else
+static inline void cpufreq_task_times_init(struct task_struct *p) {}
+static inline void cpufreq_task_times_alloc(struct task_struct *p) {}
+static inline void cpufreq_task_times_exit(struct task_struct *p) {}
+static inline void cpufreq_acct_update_power(struct task_struct *p,
+ u64 cputime) {}
static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
static inline void cpufreq_times_record_transition(
struct cpufreq_freqs *freq) {}
diff --git a/kernel/exit.c b/kernel/exit.c
index 40fe1a0af097..61921bc3fd5f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -54,7 +54,6 @@
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
-#include <linux/cpufreq_times.h>
#include "sched/tune.h"
@@ -172,9 +171,6 @@ void release_task(struct task_struct *p)
{
struct task_struct *leader;
int zap_leader;
-#ifdef CONFIG_CPU_FREQ_TIMES
- cpufreq_task_times_exit(p);
-#endif
repeat:
/* don't need to get the RCU readlock here - the process is dead and
* can't be modifying its own credentials. But shut RCU-lockdep up */
diff --git a/kernel/fork.c b/kernel/fork.c
index 70e10cb49be0..24ce22c41f21 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -77,6 +77,7 @@
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
+#include <linux/cpufreq_times.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -339,6 +340,8 @@ void put_task_stack(struct task_struct *tsk)
void free_task(struct task_struct *tsk)
{
+ cpufreq_task_times_exit(tsk);
+
#ifndef CONFIG_THREAD_INFO_IN_TASK
/*
* The task is finally done with both the stack and thread_info,
@@ -1527,6 +1530,8 @@ static __latent_entropy struct task_struct *copy_process(
if (!p)
goto fork_out;
+ cpufreq_task_times_init(p);
+
ftrace_graph_init_task(p);
rt_mutex_init_task(p);
@@ -1963,6 +1968,8 @@ long _do_fork(unsigned long clone_flags,
struct completion vfork;
struct pid *pid;
+ cpufreq_task_times_alloc(p);
+
trace_sched_process_fork(current, p);
pid = get_task_pid(p, PIDTYPE_PID);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 487661cb7e8c..d3b4d628abe3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2261,10 +2261,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
-#ifdef CONFIG_CPU_FREQ_TIMES
- cpufreq_task_times_init(p);
-#endif
-
RB_CLEAR_NODE(&p->dl.rb_node);
init_dl_task_timer(&p->dl);
__dl_clear_params(p);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3edb2bbee896..0526dc0365db 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -304,11 +304,10 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
sugov_update_commit(sg_policy, time, next_f);
}
-static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
+static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
- u64 last_freq_update_time = sg_policy->last_freq_update_time;
unsigned long util = 0, max = 1;
unsigned int j;
@@ -324,7 +323,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
* enough, don't take the CPU into account as it probably is
* idle now (and clear iowait_boost for it).
*/
- delta_ns = last_freq_update_time - j_sg_cpu->last_update;
+ delta_ns = time - j_sg_cpu->last_update;
if (delta_ns > TICK_NSEC) {
j_sg_cpu->iowait_boost = 0;
continue;
@@ -368,7 +367,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
if (flags & SCHED_CPUFREQ_DL)
next_f = sg_policy->policy->cpuinfo.max_freq;
else
- next_f = sugov_next_freq_shared(sg_cpu);
+ next_f = sugov_next_freq_shared(sg_cpu, time);
sugov_update_commit(sg_policy, time, next_f);
}
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 366e50752c56..9fc85ba6f0ff 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -164,10 +164,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
/* Account for user time used */
acct_account_cputime(p);
-#ifdef CONFIG_CPU_FREQ_TIMES
/* Account power usage for user time */
cpufreq_acct_update_power(p, cputime);
-#endif
}
/*
@@ -218,10 +216,9 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
/* Account for system time used */
acct_account_cputime(p);
-#ifdef CONFIG_CPU_FREQ_TIMES
+
/* Account power usage for system time */
cpufreq_acct_update_power(p, cputime);
-#endif
}
/*
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 3dfa5a000853..d6779099d453 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -1191,11 +1191,6 @@ static void get_dev_and_dir(const struct sk_buff *skb,
par->hooknum, __func__);
BUG();
}
- if (unlikely(!(*el_dev)->name)) {
- pr_err("qtaguid[%d]: %s(): no dev->name?!!\n",
- par->hooknum, __func__);
- BUG();
- }
if (skb->dev && *el_dev != skb->dev) {
MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs par->%s=%p %s\n",
par->hooknum, skb->dev, skb->dev->name,