Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug          2
-rw-r--r--  lib/Makefile               1
-rw-r--r--  lib/decompress_inflate.c   2
-rw-r--r--  lib/genalloc.c            19
-rw-r--r--  lib/random32.c            14
-rw-r--r--  lib/scatterlist.c          3
-rw-r--r--  lib/vsprintf.c            33
7 files changed, 54 insertions(+), 20 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 566cf2bc08ea..74fdc5cf4adc 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1272,7 +1272,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
help
Provide stacktrace filter for fault-injection capabilities
diff --git a/lib/Makefile b/lib/Makefile
index c55a037a354e..9efe480b975e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -45,6 +45,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
+GCOV_PROFILE_hweight.o := n
CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 19ff89e34eec..d619b28c456f 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -48,7 +48,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
out_len = 0x8000; /* 32 K */
out_buf = malloc(out_len);
} else {
- out_len = 0x7fffffff; /* no limit */
+ out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
}
if (!out_buf) {
error("Out of memory while allocating output buffer");
diff --git a/lib/genalloc.c b/lib/genalloc.c
index b35cfa9bc3d4..2a39bf62d8c1 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -37,6 +37,11 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
+static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
+{
+ return chunk->end_addr - chunk->start_addr + 1;
+}
+
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
unsigned long val, nval;
@@ -188,7 +193,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
chunk->phys_addr = phys;
chunk->start_addr = virt;
- chunk->end_addr = virt + size;
+ chunk->end_addr = virt + size - 1;
atomic_set(&chunk->avail, size);
spin_lock(&pool->lock);
@@ -213,7 +218,7 @@ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
- if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+ if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
paddr = chunk->phys_addr + (addr - chunk->start_addr);
break;
}
@@ -242,7 +247,7 @@ void gen_pool_destroy(struct gen_pool *pool)
chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
list_del(&chunk->next_chunk);
- end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+ end_bit = chunk_size(chunk) >> order;
bit = find_next_bit(chunk->bits, end_bit, 0);
BUG_ON(bit < end_bit);
@@ -283,7 +288,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
if (size > atomic_read(&chunk->avail))
continue;
- end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+ end_bit = chunk_size(chunk) >> order;
retry:
start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
pool->data);
@@ -330,8 +335,8 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
nbits = (size + (1UL << order) - 1) >> order;
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
- if (addr >= chunk->start_addr && addr < chunk->end_addr) {
- BUG_ON(addr + size > chunk->end_addr);
+ if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
+ BUG_ON(addr + size - 1 > chunk->end_addr);
start_bit = (addr - chunk->start_addr) >> order;
remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
BUG_ON(remain);
@@ -400,7 +405,7 @@ size_t gen_pool_size(struct gen_pool *pool)
rcu_read_lock();
list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
- size += chunk->end_addr - chunk->start_addr;
+ size += chunk_size(chunk);
rcu_read_unlock();
return size;
}
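
Note on the end_addr change: with an exclusive end (virt + size), a chunk that reaches the very top of the address space computes end_addr == 0, so the range checks in gen_pool_virt_to_phys() and gen_pool_free() can never match; storing the last valid byte (virt + size - 1) and comparing with <= avoids the wrap. A standalone sketch of the failure, 32-bit for brevity (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t start = 0xFFFF0000u;               /* chunk occupying the top 64 KiB */
        uint32_t size  = 0x00010000u;

        uint32_t end_excl = start + size;           /* old scheme: wraps to 0 */
        uint32_t end_incl = start + size - 1;       /* new scheme: 0xFFFFFFFF */

        uint32_t addr = 0xFFFF8000u;                /* an address inside the chunk */

        printf("exclusive match: %d\n", addr >= start && addr <  end_excl);  /* 0: lookup fails */
        printf("inclusive match: %d\n", addr >= start && addr <= end_incl);  /* 1: lookup works */
        return 0;
    }
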
diff --git a/lib/random32.c b/lib/random32.c
index 52280d5526be..01e8890d1089 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -141,7 +141,7 @@ void prandom_seed(u32 entropy)
*/
for_each_possible_cpu (i) {
struct rnd_state *state = &per_cpu(net_rand_state, i);
- state->s1 = __seed(state->s1 ^ entropy, 1);
+ state->s1 = __seed(state->s1 ^ entropy, 2);
}
}
EXPORT_SYMBOL(prandom_seed);
@@ -158,9 +158,9 @@ static int __init prandom_init(void)
struct rnd_state *state = &per_cpu(net_rand_state,i);
#define LCG(x) ((x) * 69069) /* super-duper LCG */
- state->s1 = __seed(LCG(i + jiffies), 1);
- state->s2 = __seed(LCG(state->s1), 7);
- state->s3 = __seed(LCG(state->s2), 15);
+ state->s1 = __seed(LCG(i + jiffies), 2);
+ state->s2 = __seed(LCG(state->s1), 8);
+ state->s3 = __seed(LCG(state->s2), 16);
/* "warm it up" */
prandom_u32_state(state);
@@ -187,9 +187,9 @@ static int __init prandom_reseed(void)
u32 seeds[3];
get_random_bytes(&seeds, sizeof(seeds));
- state->s1 = __seed(seeds[0], 1);
- state->s2 = __seed(seeds[1], 7);
- state->s3 = __seed(seeds[2], 15);
+ state->s1 = __seed(seeds[0], 2);
+ state->s2 = __seed(seeds[1], 8);
+ state->s3 = __seed(seeds[2], 16);
/* mix it in */
prandom_u32_state(state);
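
Note on the new thresholds: the taus88 generator behind prandom needs s1 > 1, s2 > 7 and s3 > 15, and the old arguments (1, 7, 15) let exactly those boundary values through. Assuming __seed(x, m) returns x + m when x < m and x otherwise (a paraphrase of the helper, not a quote), bumping the thresholds to 2, 8 and 16 guarantees valid state words. A standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed behaviour of the kernel's __seed() helper. */
    static uint32_t clamp_seed(uint32_t x, uint32_t m)
    {
        return (x < m) ? x + m : x;
    }

    int main(void)
    {
        /* taus88 needs s1 >= 2, s2 >= 8, s3 >= 16; the old thresholds let
         * the invalid boundary values 1, 7 and 15 slip through unchanged. */
        printf("old: %u %u %u\n", clamp_seed(1, 1), clamp_seed(7, 7), clamp_seed(15, 15));
        printf("new: %u %u %u\n", clamp_seed(1, 2), clamp_seed(7, 8), clamp_seed(15, 16));
        return 0;
    }
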
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a1cf8cae60e7..3e7df38067ae 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -529,7 +529,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
miter->__offset += miter->consumed;
miter->__remaining -= miter->consumed;
- if (miter->__flags & SG_MITER_TO_SG)
+ if ((miter->__flags & SG_MITER_TO_SG) &&
+ !PageSlab(miter->page))
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index e149c6416384..620fae4c11f6 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -26,6 +26,7 @@
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/ioport.h>
+#include <linux/cred.h>
#include <net/addrconf.h>
#include <asm/page.h> /* for PAGE_SIZE */
@@ -1118,11 +1119,37 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
spec.field_width = default_width;
return string(buf, end, "pK-error", spec);
}
- if (!((kptr_restrict == 0) ||
- (kptr_restrict == 1 &&
- has_capability_noaudit(current, CAP_SYSLOG))))
+
+ switch (kptr_restrict) {
+ case 0:
+ /* Always print %pK values */
+ break;
+ case 1: {
+ /*
+ * Only print the real pointer value if the current
+ * process has CAP_SYSLOG and is running with the
+ * same credentials it started with. This is because
+ * access to files is checked at open() time, but %pK
+ * checks permission at read() time. We don't want to
+ * leak pointer values if a binary opens a file using
+ * %pK and then elevates privileges before reading it.
+ */
+ const struct cred *cred = current_cred();
+
+ if (!has_capability_noaudit(current, CAP_SYSLOG) ||
+ !uid_eq(cred->euid, cred->uid) ||
+ !gid_eq(cred->egid, cred->gid))
+ ptr = NULL;
+ break;
+ }
+ case 2:
+ default:
+ /* Always print 0's for %pK */
ptr = NULL;
+ break;
+ }
break;
+
case 'N':
switch (fmt[1]) {
case 'F':
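
Note on the kptr_restrict levels handled above: level 0 always prints the real value, level 1 prints it only when the reader holds CAP_SYSLOG and is still running with the credentials it started with, and level 2 (or anything else) always censors. A standalone sketch of that policy table, with simplified stand-in types rather than the kernel's cred API (names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct creds { unsigned uid, euid, gid, egid; bool has_cap_syslog; };

    /* Mirrors the switch above: true means %pK prints the real pointer. */
    static bool pk_shows_pointer(int kptr_restrict, const struct creds *c)
    {
        switch (kptr_restrict) {
        case 0:                         /* always print %pK values */
            return true;
        case 1:                         /* CAP_SYSLOG with unchanged credentials */
            return c->has_cap_syslog &&
                   c->euid == c->uid && c->egid == c->gid;
        case 2:
        default:                        /* always print 0's for %pK */
            return false;
        }
    }

    int main(void)
    {
        /* A setuid root helper: euid differs from uid, so level 1 censors
         * the pointer even though the task holds CAP_SYSLOG. */
        struct creds helper = { .uid = 1000, .euid = 0, .gid = 1000, .egid = 0,
                                .has_cap_syslog = true };
        printf("level 1: %s\n", pk_shows_pointer(1, &helper) ? "real" : "censored");
        return 0;
    }
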