author    Kevin Hilman <khilman@linaro.org>    2015-11-04 09:14:16 -0800
committer Kevin Hilman <khilman@linaro.org>    2015-11-04 09:14:16 -0800
commit    9ce0d731981a1489331e12bb7f94907438515622 (patch)
tree      dbaf88cffe4c6207b21fc39061fae46f38b6fb2c /kernel
parent    c1fa16368f309a6f636ad193a47cde9253db97a7 (diff)
parent    b12403044336e7d567f309eb443aa9acf76380af (diff)
Merge tag 'v3.18.24' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable into linux-linaro-lsk-v3.18
Linux 3.18.24

# gpg: Signature made Sat Oct 31 13:44:30 2015 PDT using RSA key ID 97772CDC
# gpg: Can't check signature: public key not found

* tag 'v3.18.24' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable: (234 commits)
  Linux 3.18.24
  tty: fix stall caused by missing memory barrier in drivers/tty/n_tty.c
  Revert "tty: fix stall caused by missing memory barrier in drivers/tty/n_tty.c"
  Linux 3.18.23
  x86: Init per-cpu shadow copy of CR4 on 32-bit CPUs too
  3w-9xxx: don't unmap bounce buffered commands
  fib_rules: Fix dump_rules() not to exit early
  vfs: Test for and handle paths that are unreachable from their mnt_root
  md: flush ->event_work before stopping array.
  x86/nmi/64: Fix a paravirt stack-clobbering bug in the NMI code
  Revert "iio: bmg160: IIO_BUFFER and IIO_TRIGGERED_BUFFER are required"
  net: Fix skb_set_peeked use-after-free bug
  mm: check if section present during memory block registering
  hpfs: update ctime and mtime on directory modification
  drivercore: Fix unregistration path of platform devices
  ARM: OMAP2+: DRA7: clockdomain: change l4per2_7xx_clkdm to SW_WKUP
  of/address: Don't loop forever in of_find_matching_node_by_address().
  auxdisplay: ks0108: fix refcount
  Doc: ABI: testing: configfs-usb-gadget-sourcesink
  Doc: ABI: testing: configfs-usb-gadget-loopback
  ...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c              28
-rw-r--r--  kernel/irq/proc.c          19
-rw-r--r--  kernel/sched/core.c        25
-rw-r--r--  kernel/sched/fair.c        25
-rw-r--r--  kernel/sched/sched.h        5
-rw-r--r--  kernel/time/timekeeping.c   2
6 files changed, 69 insertions, 35 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 9b7d746d6d62..0a4f601e35ab 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1800,13 +1800,21 @@ static int check_unshare_flags(unsigned long unshare_flags)
CLONE_NEWUSER|CLONE_NEWPID))
return -EINVAL;
/*
- * Not implemented, but pretend it works if there is nothing to
- * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
- * needs to unshare vm.
+ * Not implemented, but pretend it works if there is nothing
+ * to unshare. Note that unsharing the address space or the
+ * signal handlers also need to unshare the signal queues (aka
+ * CLONE_THREAD).
*/
if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
- /* FIXME: get_task_mm() increments ->mm_users */
- if (atomic_read(&current->mm->mm_users) > 1)
+ if (!thread_group_empty(current))
+ return -EINVAL;
+ }
+ if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
+ if (atomic_read(&current->sighand->count) > 1)
+ return -EINVAL;
+ }
+ if (unshare_flags & CLONE_VM) {
+ if (!current_is_single_threaded())
return -EINVAL;
}
@@ -1875,16 +1883,16 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
if (unshare_flags & CLONE_NEWUSER)
unshare_flags |= CLONE_THREAD | CLONE_FS;
/*
- * If unsharing a thread from a thread group, must also unshare vm.
- */
- if (unshare_flags & CLONE_THREAD)
- unshare_flags |= CLONE_VM;
- /*
* If unsharing vm, must also unshare signal handlers.
*/
if (unshare_flags & CLONE_VM)
unshare_flags |= CLONE_SIGHAND;
/*
+ * If unsharing signal handlers, must also unshare the signal queues.
+ */
+ if (unshare_flags & CLONE_SIGHAND)
+ unshare_flags |= CLONE_THREAD;
+ /*
* If unsharing namespace, must also unshare filesystem information.
*/
if (unshare_flags & CLONE_NEWNS)
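The net effect of the reordered implications above is a one-way cascade: CLONE_NEWUSER pulls in CLONE_THREAD and CLONE_FS, CLONE_VM pulls in CLONE_SIGHAND, CLONE_SIGHAND pulls in CLONE_THREAD, and CLONE_NEWNS pulls in CLONE_FS. A minimal userspace sketch of that cascade, where apply_unshare_implications() is a hypothetical helper (not a kernel function) and the CLONE_* values come from <sched.h>:

#define _GNU_SOURCE
#include <sched.h>   /* CLONE_* flag definitions */
#include <stdio.h>

/* Hypothetical helper mirroring the implication order sys_unshare()
 * applies after this patch. Evaluation order matters: the
 * CLONE_VM -> CLONE_SIGHAND step must run before the
 * CLONE_SIGHAND -> CLONE_THREAD step so the cascade completes. */
static unsigned long apply_unshare_implications(unsigned long flags)
{
	if (flags & CLONE_NEWUSER)	/* new user ns implies new thread group and fs */
		flags |= CLONE_THREAD | CLONE_FS;
	if (flags & CLONE_VM)		/* private mm implies private signal handlers */
		flags |= CLONE_SIGHAND;
	if (flags & CLONE_SIGHAND)	/* private handlers imply private signal queues */
		flags |= CLONE_THREAD;
	if (flags & CLONE_NEWNS)	/* new mount ns implies private fs info */
		flags |= CLONE_FS;
	return flags;
}

int main(void)
{
	/* CLONE_VM alone now cascades all the way to CLONE_THREAD. */
	printf("%#lx\n", apply_unshare_implications(CLONE_VM));
	return 0;
}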
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 9dc9bfd8a678..9791f93dd5f2 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -12,6 +12,7 @@
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/mutex.h>
#include "internals.h"
@@ -326,18 +327,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
+ static DEFINE_MUTEX(register_lock);
char name [MAX_NAMELEN];
- if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
+ if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
return;
+ /*
+ * irq directories are registered only when a handler is
+ * added, not when the descriptor is created, so multiple
+ * tasks might try to register at the same time.
+ */
+ mutex_lock(&register_lock);
+
+ if (desc->dir)
+ goto out_unlock;
+
memset(name, 0, MAX_NAMELEN);
sprintf(name, "%d", irq);
/* create /proc/irq/1234 */
desc->dir = proc_mkdir(name, root_irq_dir);
if (!desc->dir)
- return;
+ goto out_unlock;
#ifdef CONFIG_SMP
/* create /proc/irq/<irq>/smp_affinity */
@@ -358,6 +370,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
proc_create_data("spurious", 0444, desc->dir,
&irq_spurious_proc_fops, (void *)(long)irq);
+
+out_unlock:
+ mutex_unlock(&register_lock);
}
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
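The shape of this fix is the standard one for lazily created shared state: serialize all registrants on a mutex and redo the "already registered?" check under the lock, so the task that loses the race does not create the directory a second time. A plain pthreads analogue (create_dir() is a hypothetical stand-in for proc_mkdir(); build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;
static void *dir;	/* NULL until registered, like desc->dir */

/* Hypothetical stand-in for proc_mkdir(). */
static void *create_dir(void)
{
	return (void *)1;
}

static void register_dir(void)
{
	pthread_mutex_lock(&register_lock);
	if (!dir)	/* re-check under the lock: another task may have won */
		dir = create_dir();
	pthread_mutex_unlock(&register_lock);
}

static void *worker(void *arg)
{
	(void)arg;
	register_dir();
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("registered exactly once: %p\n", dir);
	return 0;
}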
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6810e572eda5..a882dd91722d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2256,11 +2256,11 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
* If a task dies, then it sets TASK_DEAD in tsk->state and calls
* schedule one last time. The schedule call will never return, and
* the scheduled task must drop that reference.
- * The test for TASK_DEAD must occur while the runqueue locks are
- * still held, otherwise prev could be scheduled on another cpu, die
- * there before we look at prev->state, and then the reference would
- * be dropped twice.
- * Manfred Spraul <manfred@colorfullife.com>
+ *
+ * We must observe prev->state before clearing prev->on_cpu (in
+ * finish_lock_switch), otherwise a concurrent wakeup can get prev
+ * running on another CPU and we could race with its RUNNING -> DEAD
+ * transition, resulting in a double drop.
*/
prev_state = prev->state;
vtime_task_switch(prev);
@@ -2404,13 +2404,20 @@ unsigned long nr_running(void)
/*
* Check if only the current task is running on the cpu.
+ *
+ * Caution: this function does not check that the caller has disabled
+ * preemption, thus the result might have a time-of-check-to-time-of-use
+ * race. The caller is responsible for using it correctly, for example:
+ *
+ * - from a non-preemptable section (of course)
+ *
+ * - from a thread that is bound to a single CPU
+ *
+ * - in a loop with very short iterations (e.g. a polling loop)
*/
bool single_task_running(void)
{
- if (cpu_rq(smp_processor_id())->nr_running == 1)
- return true;
- else
- return false;
+ return raw_rq()->nr_running == 1;
}
EXPORT_SYMBOL(single_task_running);
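The caution added above is about staleness, not about the read itself: the nr_running snapshot is valid only at the instant it is taken. The same time-of-check-to-time-of-use shape is easy to reproduce from userspace with sched_getcpu(), which likewise answers a "right now" question whose answer can be stale by the time it is used (illustration only, not kernel code):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int before = sched_getcpu();
	/* ... if the scheduler migrates this thread here, 'before' is stale ... */
	int after = sched_getcpu();
	if (before != after)
		printf("migrated from CPU %d to CPU %d between check and use\n",
		       before, after);
	else
		printf("still on CPU %d (this run)\n", before);
	return 0;
}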
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2246a36050f9..07a75c150eeb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4844,18 +4844,21 @@ again:
* entity, update_curr() will update its vruntime, otherwise
* forget we've ever seen it.
*/
- if (curr && curr->on_rq)
- update_curr(cfs_rq);
- else
- curr = NULL;
+ if (curr) {
+ if (curr->on_rq)
+ update_curr(cfs_rq);
+ else
+ curr = NULL;
- /*
- * This call to check_cfs_rq_runtime() will do the throttle and
- * dequeue its entity in the parent(s). Therefore the 'simple'
- * nr_running test will indeed be correct.
- */
- if (unlikely(check_cfs_rq_runtime(cfs_rq)))
- goto simple;
+ /*
+ * This call to check_cfs_rq_runtime() will do the
+ * throttle and dequeue its entity in the parent(s).
+ * Therefore the 'simple' nr_running test will indeed
+ * be correct.
+ */
+ if (unlikely(check_cfs_rq_runtime(cfs_rq)))
+ goto simple;
+ }
se = pick_next_entity(cfs_rq, curr);
cfs_rq = group_cfs_rq(se);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2df8ef067cc5..f698089e10ca 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -994,9 +994,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
* After ->on_cpu is cleared, the task can be moved to a different CPU.
* We must ensure this doesn't happen until the switch is completely
* finished.
+ *
+ * Pairs with the control dependency and rmb in try_to_wake_up().
*/
- smp_wmb();
- prev->on_cpu = 0;
+ smp_store_release(&prev->on_cpu, 0);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
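smp_store_release() folds the barrier and the store into a single release operation, so every write made on the outgoing CPU is ordered before the on_cpu clear that try_to_wake_up() polls. A C11 stdatomic analogue of that handoff, as a userspace sketch with stand-in variables rather than the scheduler's actual types:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int task_state;			/* stands in for prev->state etc. */
static atomic_int on_cpu = 1;		/* stands in for prev->on_cpu */

static void *finish_switch(void *arg)
{
	(void)arg;
	task_state = 42;	/* writes that must be visible before the clear */
	/* Release store: orders task_state before the flag clear,
	 * like smp_store_release(&prev->on_cpu, 0). */
	atomic_store_explicit(&on_cpu, 0, memory_order_release);
	return NULL;
}

static void *try_wake(void *arg)
{
	(void)arg;
	/* Acquire load: once on_cpu == 0 is observed, task_state = 42 is too. */
	while (atomic_load_explicit(&on_cpu, memory_order_acquire))
		;	/* spin until the task is off the CPU */
	printf("state after handoff: %d\n", task_state);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, try_wake, NULL);
	pthread_create(&b, NULL, finish_switch, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}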
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index ec1791fae965..a4038c57e25d 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1369,7 +1369,7 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
negative = (tick_error < 0);
/* Sort out the magnitude of the correction */
- tick_error = abs(tick_error);
+ tick_error = abs64(tick_error);
for (adj = 0; tick_error > interval; adj++)
tick_error >>= 1;
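The switch to abs64() matters because tick_error is a 64-bit quantity: the kernel's abs() of that era only handled values up to the width of long, so on 32-bit machines a 64-bit argument was truncated before the sign was stripped. The userspace analogue of the bug, using abs() (which takes an int) versus llabs() (which takes a long long):

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>

int main(void)
{
	int64_t tick_error = -0x100000001LL;	/* needs more than 32 bits */

	/* Truncating to int keeps only the low 32 bits: the value becomes -1,
	 * so abs() reports 1 instead of the real magnitude. */
	printf("abs  (truncated): %d\n", abs((int)tick_error));

	/* A 64-bit absolute value keeps the magnitude intact. */
	printf("llabs(64-bit)   : %" PRId64 "\n", (int64_t)llabs(tick_error));
	return 0;
}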