author	Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-11-17 14:23:01 -0500
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-11-17 14:23:01 -0500
commit	6dec543df2571b64c3a4acc0d4793c9a6c903b81 (patch)
tree	283975841df06642c8fc8a9014db9e74cd2a07f9 /kernel
parent	4f61be1d72be7c63c9a6f86f1bb85d36490ba8b2 (diff)
parent	f82786d7a94f06a35ab273002cedc1385bae8e9f (diff)
Merge tag 'v4.9.55' into v4.9-rt
This is the 4.9.55 stable release
Diffstat (limited to 'kernel')
 kernel/bpf/verifier.c  |  3
 kernel/cpuset.c        | 16
 kernel/events/core.c   |  3
 kernel/power/process.c |  5
 kernel/sched/core.c    |  7
 kernel/trace/ftrace.c  | 14
 6 files changed, 26 insertions(+), 22 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 779c871c5dcd..372454aa7f37 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1720,7 +1720,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 			}
 		} else {
 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
-			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
+			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
+			    BPF_CLASS(insn->code) == BPF_ALU64) {
 				verbose("BPF_END uses reserved fields\n");
 				return -EINVAL;
 			}
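
The verifier hunk above tightens validation of BPF_END: the byte-swap
operation is only defined for the 32-bit BPF_ALU class, so an encoding under
BPF_ALU64 is now rejected as a reserved instruction. A minimal sketch of an
instruction the patched check refuses; the constants come from the kernel
UAPI headers, and the field values are illustrative:

	#include <linux/bpf.h>

	/* BPF_ALU64 | BPF_END has no defined semantics; the patched
	 * verifier now returns -EINVAL for this encoding. */
	struct bpf_insn rejected = {
		.code    = BPF_ALU64 | BPF_END | BPF_TO_LE,
		.dst_reg = BPF_REG_1,
		.src_reg = BPF_REG_0,	/* otherwise-legal field values... */
		.off     = 0,
		.imm     = 16,		/* ...that BPF_ALU would accept */
	};
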
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 24817f8f67d5..1dd63833ecdc 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2276,6 +2276,13 @@ retry:
 	mutex_unlock(&cpuset_mutex);
 }
 
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+	force_rebuild = true;
+}
+
 /**
  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
@@ -2350,8 +2357,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	}
 
 	/* rebuild sched domains if cpus_allowed has changed */
-	if (cpus_updated)
+	if (cpus_updated || force_rebuild) {
+		force_rebuild = false;
 		rebuild_sched_domains();
+	}
 }
 
 void cpuset_update_active_cpus(bool cpu_online)
@@ -2370,6 +2379,11 @@ void cpuset_update_active_cpus(bool cpu_online)
 	schedule_work(&cpuset_hotplug_work);
 }
 
+void cpuset_wait_for_hotplug(void)
+{
+	flush_work(&cpuset_hotplug_work);
+}
+
 /*
  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
  * Call this routine anytime after node_states[N_MEMORY] changes.
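
The two helpers added above give the suspend/resume path a handle on the
asynchronous hotplug worker: cpuset_force_rebuild() latches a one-shot flag so
the next run of cpuset_hotplug_workfn() rebuilds the sched domains even when
top_cpuset shows no cpumask delta, and cpuset_wait_for_hotplug() blocks until
any queued work item has finished. A condensed sketch of how the flag changes
the worker; detect_cpus_updated() is a hypothetical stand-in for the real
delta check:

	static void cpuset_hotplug_workfn(struct work_struct *work)
	{
		bool cpus_updated = detect_cpus_updated();	/* hypothetical */

		/* the one-shot flag forces a rebuild with no visible delta */
		if (cpus_updated || force_rebuild) {
			force_rebuild = false;
			rebuild_sched_domains();
		}
	}
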
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c87f78e25de5..554aebf7e88b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7872,6 +7872,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 		}
 	}
 	event->tp_event->prog = prog;
+	event->tp_event->bpf_prog_owner = event;
 
 	return 0;
 }
@@ -7886,7 +7887,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
 		return;
 
 	prog = event->tp_event->prog;
-	if (prog) {
+	if (prog && event->tp_event->bpf_prog_owner == event) {
 		event->tp_event->prog = NULL;
 		bpf_prog_put(prog);
 	}
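
The new bpf_prog_owner field fixes a lifetime bug: several perf events can
share one tp_event, but only the event that attached the BPF program took the
reference, so closing a sibling event must not drop it. A self-contained model
of the ownership rule (illustrative C, not kernel code):

	#include <assert.h>
	#include <stddef.h>

	struct tp { void *prog; void *owner; };
	struct event { struct tp *tp; };

	static void set_prog(struct event *e, void *prog)
	{
		e->tp->prog  = prog;
		e->tp->owner = e;		/* remember who attached it */
	}

	static void free_prog(struct event *e)
	{
		if (e->tp->prog && e->tp->owner == e)
			e->tp->prog = NULL;	/* only the owner detaches */
	}

	int main(void)
	{
		struct tp tp = { NULL, NULL };
		struct event a = { &tp }, b = { &tp };
		int prog;

		set_prog(&a, &prog);
		free_prog(&b);			/* sibling close: no-op now */
		assert(tp.prog == &prog);
		free_prog(&a);			/* owner close: detaches */
		assert(tp.prog == NULL);
		return 0;
	}
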
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 2fba066e125f..8ea24ded1dab 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -18,8 +18,9 @@
 #include <linux/workqueue.h>
 #include <linux/kmod.h>
 #include <trace/events/power.h>
+#include <linux/cpuset.h>
 
-/* 
+/*
  * Timeout for stopping processes
  */
 unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
@@ -200,6 +201,8 @@ void thaw_processes(void)
 	__usermodehelper_set_disable_depth(UMH_FREEZING);
 	thaw_workqueues();
 
+	cpuset_wait_for_hotplug();
+
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, p) {
 		/* No other threads should have PF_SUSPEND_TASK set */
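
The placement is deliberate: the hotplug work queued during resume restores
each task's allowed CPUs, so it must be flushed before frozen user tasks are
woken, or they could briefly run on the narrowed cpumask left over from
suspend. A condensed view of the resulting ordering, abridged from the
patched thaw_processes():

	thaw_workqueues();		/* workers may run again */
	cpuset_wait_for_hotplug();	/* cpumasks restored first... */
	read_lock(&tasklist_lock);	/* ...then wake the frozen tasks */
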
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0486c2a8a8d5..b6bc4fd2e3d8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7637,16 +7637,15 @@ static void cpuset_cpu_active(void)
 		 * operation in the resume sequence, just build a single sched
 		 * domain, ignoring cpusets.
 		 */
-		num_cpus_frozen--;
-		if (likely(num_cpus_frozen)) {
-			partition_sched_domains(1, NULL, NULL);
+		partition_sched_domains(1, NULL, NULL);
+		if (--num_cpus_frozen)
 			return;
-		}
 		/*
 		 * This is the last CPU online operation. So fall through and
 		 * restore the original sched domains by considering the
 		 * cpuset configurations.
 		 */
+		cpuset_force_rebuild();
 	}
 	cpuset_update_active_cpus(true);
 }
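
Two behavioural changes hide in this hunk: the single flat sched domain is now
built on every CPU-up during resume (including the last one), and the final
CPU-up calls cpuset_force_rebuild() so the deferred cpuset worker restores the
real domains even though it will observe no cpumask change. A condensed sketch
of the control flow after the patch:

	if (cpuhp_tasks_frozen) {
		partition_sched_domains(1, NULL, NULL);	/* flat domain while resuming */
		if (--num_cpus_frozen)
			return;			/* more CPUs still to come up */
		cpuset_force_rebuild();		/* last one: force the rebuild */
	}
	cpuset_update_active_cpus(true);	/* queues cpuset_hotplug_work */
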
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 53ed8ae5de1c..5b8d7189e147 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4381,9 +4381,6 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
 
-static unsigned long save_global_trampoline;
-static unsigned long save_global_flags;
-
 static int __init set_graph_function(char *str)
 {
 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
@@ -5981,17 +5978,6 @@ void unregister_ftrace_graph(void)
 	unregister_pm_notifier(&ftrace_suspend_notifier);
 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-	/*
-	 * Function graph does not allocate the trampoline, but
-	 * other global_ops do. We need to reset the ALLOC_TRAMP flag
-	 * if one was used.
-	 */
-	global_ops.trampoline = save_global_trampoline;
-	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
-		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
-#endif
-
 out:
 	mutex_unlock(&ftrace_lock);
 }
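
The deleted block "restored" global_ops.trampoline from save_global_trampoline,
but nothing ever wrote to that variable, so unregister_ftrace_graph() zeroed a
live pointer and the allocated trampoline leaked, which kmemleak reports. A
self-contained model of the bug (illustrative C, not kernel code):

	#include <stdlib.h>

	static unsigned long saved;	/* never written, like save_global_trampoline */
	static void *trampoline;

	static void unregister_buggy(void)
	{
		trampoline = (void *)saved;	/* saved == 0: old buffer leaked */
	}

	int main(void)
	{
		trampoline = malloc(64);	/* stands in for the trampoline */
		unregister_buggy();
		/* trampoline is now NULL; the allocation is unreachable */
		return 0;
	}
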