Diffstat (limited to 'kernel')
 kernel/audit.c             |  2 +-
 kernel/capability.c        |  4 ++++
 kernel/smp.c               |  2 +-
 kernel/trace/ring_buffer.c | 31 ++++++++++++++-----------------
 4 files changed, 20 insertions(+), 19 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 0c9dc860cc15..2c0ecd1753de 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1628,7 +1628,7 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
audit_log_format(ab, " %s=", prefix);
CAP_FOR_EACH_U32(i) {
audit_log_format(ab, "%08x",
- cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
+ cap->cap[CAP_LAST_U32 - i]);
}
}
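
For context (not part of the diff): CAP_LAST_U32 is defined as _KERNEL_CAPABILITY_U32S - 1, so this hunk only swaps in the named constant; the loop still dumps the capability words most-significant first. A minimal userspace sketch of that ordering, assuming the two-u32 (64-bit) capability layout; the sample bit values and the print_caps() helper are made up for illustration:

#include <stdio.h>
#include <inttypes.h>

#define CAP_WORDS	2		/* assumes the two-u32 capability layout */
#define CAP_LAST_U32	(CAP_WORDS - 1)

/* Print the highest word first, matching the "%08x" dump in audit_log_cap(). */
static void print_caps(const uint32_t *cap)
{
	int i;

	for (i = 0; i < CAP_WORDS; i++)
		printf("%08" PRIx32, cap[CAP_LAST_U32 - i]);
	printf("\n");
}

int main(void)
{
	uint32_t cap[CAP_WORDS] = { 0x000000ff, 0x00000001 };	/* made-up bits */

	print_caps(cap);	/* prints "00000001000000ff" */
	return 0;
}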
diff --git a/kernel/capability.c b/kernel/capability.c
index 1191a44786df..00adb2193d01 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -268,6 +268,10 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
i++;
}
+ effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+ permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+ inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
+
new = prepare_creds();
if (!new)
return -ENOMEM;
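
For context (not part of the diff): capset() copies the per-word capability values in from userspace, and only the low bits of the last u32 correspond to defined capabilities; clamping with CAP_LAST_U32_VALID_MASK keeps stray high bits out of the new credentials. A minimal userspace sketch of the same clamp; the CAP_LAST_CAP value and the mask derivation here are illustrative stand-ins, not the kernel's exact definitions:

#include <stdio.h>
#include <inttypes.h>

#define CAP_WORDS	2
#define CAP_LAST_U32	(CAP_WORDS - 1)
#define CAP_LAST_CAP	36	/* illustrative; tracks the highest defined capability */

/* All bits at or below CAP_LAST_CAP within the final 32-bit word. */
#define CAP_LAST_U32_VALID_MASK	((1u << (CAP_LAST_CAP % 32 + 1)) - 1)

int main(void)
{
	/* Pretend userspace handed us garbage in the undefined high bits. */
	uint32_t effective[CAP_WORDS] = { 0xffffffff, 0xffffffff };

	effective[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;

	printf("last word after clamp: %08" PRIx32 "\n", effective[CAP_LAST_U32]);
	return 0;
}

With the illustrative CAP_LAST_CAP of 36 the mask works out to 0x0000001f, so the clamp above prints 0000001f.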
diff --git a/kernel/smp.c b/kernel/smp.c
index ffee35bef179..ff87d4479558 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -617,7 +617,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
if (cond_func(cpu, info)) {
ret = smp_call_function_single(cpu, func,
info, wait);
- WARN_ON_ONCE(!ret);
+ WARN_ON_ONCE(ret);
}
preempt_enable();
}
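
For context (not part of the diff): smp_call_function_single() follows the usual kernel convention of returning 0 on success and a negative errno on failure, so the old WARN_ON_ONCE(!ret) complained on every successful cross-call; the fix warns only when the call actually fails. A minimal userspace sketch of the corrected check; fake_call_on() and the warn_on() macro are invented here, and the real WARN_ON_ONCE additionally limits itself to a single report:

#include <stdio.h>
#include <errno.h>

#define warn_on(cond) \
	do { if (cond) fprintf(stderr, "warning: %s\n", #cond); } while (0)

/* Pretend CPU 3 cannot be reached; every other call "succeeds" with 0. */
static int fake_call_on(int cpu)
{
	return (cpu == 3) ? -ENXIO : 0;
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		int ret = fake_call_on(cpu);

		warn_on(ret);	/* fires only for the failing CPU */
	}
	return 0;
}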
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0954450df7dc..a53f1bbc546b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1981,7 +1981,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
/**
* rb_update_event - update event type and data
- * @event: the even to update
+ * @event: the event to update
* @type: the type of event
* @length: the size of the event field in the ring buffer
*
@@ -3354,21 +3354,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
/* Iterator usage is expected to have record disabled */
- if (list_empty(&cpu_buffer->reader_page->list)) {
- iter->head_page = rb_set_head_page(cpu_buffer);
- if (unlikely(!iter->head_page))
- return;
- iter->head = iter->head_page->read;
- } else {
- iter->head_page = cpu_buffer->reader_page;
- iter->head = cpu_buffer->reader_page->read;
- }
+ iter->head_page = cpu_buffer->reader_page;
+ iter->head = cpu_buffer->reader_page->read;
+
+ iter->cache_reader_page = iter->head_page;
+ iter->cache_read = iter->head;
+
if (iter->head)
iter->read_stamp = cpu_buffer->read_stamp;
else
iter->read_stamp = iter->head_page->page->time_stamp;
- iter->cache_reader_page = cpu_buffer->reader_page;
- iter->cache_read = cpu_buffer->read;
}
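
For context (not part of the diff): the simplified reset always starts the iterator on the reader page and snapshots that starting page and offset into the cache fields, so a later peek can notice that a consuming reader has moved on and reset the iterator again. A toy model of the state captured here; the struct and field names below are illustrative, not the ring buffer's real types:

#include <stdio.h>

struct toy_page {
	unsigned long read;		/* offset already consumed on this page */
	unsigned long long time_stamp;	/* stamp of the first event on the page */
};

struct toy_buffer {
	struct toy_page *reader_page;
	unsigned long long read_stamp;	/* stamp of the last consumed event */
};

struct toy_iter {
	struct toy_page *head_page;
	unsigned long head;
	/* snapshot used later to detect that the reader moved on */
	struct toy_page *cache_reader_page;
	unsigned long cache_read;
	unsigned long long read_stamp;
};

/* Mirrors the reshaped rb_iter_reset(): always begin at the reader page. */
static void toy_iter_reset(struct toy_iter *it, struct toy_buffer *buf)
{
	it->head_page = buf->reader_page;
	it->head = buf->reader_page->read;

	it->cache_reader_page = it->head_page;
	it->cache_read = it->head;

	it->read_stamp = it->head ? buf->read_stamp
				  : it->head_page->time_stamp;
}

int main(void)
{
	struct toy_page page = { .read = 0, .time_stamp = 1000 };
	struct toy_buffer buf = { .reader_page = &page, .read_stamp = 900 };
	struct toy_iter it;

	toy_iter_reset(&it, &buf);
	printf("start offset %lu, stamp %llu\n", it.head, it.read_stamp);
	return 0;
}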
/**
@@ -3761,12 +3756,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
return NULL;
/*
- * We repeat when a time extend is encountered.
- * Since the time extend is always attached to a data event,
- * we should never loop more than once.
- * (We never hit the following condition more than twice).
+ * We repeat when a time extend is encountered or we hit
+ * the end of the page. Since the time extend is always attached
+ * to a data event, we should never loop more than three times.
+ * Once for going to next page, once on time extend, and
+ * finally once to get the event.
+ * (We never hit the following condition more than thrice).
*/
- if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
+ if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
return NULL;
if (rb_per_cpu_empty(cpu_buffer))
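
For context (not part of the diff): the new bound of three matches the comment above: a single peek may first step to the next page, then absorb a time-extend record, and only then land on the data event. A minimal sketch of that bounded retry pattern; the record kinds, script[] and next_record() helper are invented for illustration:

#include <stdio.h>

enum rec_kind { REC_END_OF_PAGE, REC_TIME_EXTEND, REC_DATA };

#define warn_on(cond) \
	do { if (cond) fprintf(stderr, "warning: %s\n", #cond); } while (0)

/* Worst case: advance to the next page, hit a time extend, then the data. */
static enum rec_kind script[] = { REC_END_OF_PAGE, REC_TIME_EXTEND, REC_DATA };
static int pos;

static enum rec_kind next_record(void)
{
	return script[pos++];
}

int main(void)
{
	int nr_loops = 0;

	for (;;) {
		/* Mirrors RB_WARN_ON(cpu_buffer, ++nr_loops > 3). */
		warn_on(++nr_loops > 3);
		if (nr_loops > 3)
			break;

		if (next_record() == REC_DATA) {
			printf("got data after %d pass(es)\n", nr_loops);
			break;
		}
		/* REC_END_OF_PAGE and REC_TIME_EXTEND just retry. */
	}
	return 0;
}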