Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug           | 28
-rw-r--r--  lib/debug_locks.c           |  1
-rw-r--r--  lib/dma-debug.c             |  2
-rw-r--r--  lib/hweight.c               |  7
-rw-r--r--  lib/idr.c                   | 12
-rw-r--r--  lib/lmb.c                   | 13
-rw-r--r--  lib/radix-tree.c            | 24
-rw-r--r--  lib/string.c                | 27
-rw-r--r--  lib/zlib_inflate/inffast.c  | 32
9 files changed, 121 insertions(+), 25 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 25c3ed594c5..5e3407d997b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -355,7 +355,7 @@ config SLUB_STATS
config DEBUG_KMEMLEAK
bool "Kernel memory leak detector"
depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
- (X86 || ARM || PPC || S390)
+ (X86 || ARM || PPC || S390 || SUPERH)
select DEBUG_FS if SYSFS
select STACKTRACE if STACKTRACE_SUPPORT
@@ -499,6 +499,18 @@ config PROVE_LOCKING
For more details, see Documentation/lockdep-design.txt.
+config PROVE_RCU
+ bool "RCU debugging: prove RCU correctness"
+ depends on PROVE_LOCKING
+ default n
+ help
+ This feature enables lockdep extensions that check for correct
+ use of RCU APIs. This is currently under development. Say Y
+ if you want to debug RCU usage or help work on the PROVE_RCU
+ feature.
+
+ Say N if you are unsure.
+
config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -765,10 +777,22 @@ config RCU_CPU_STALL_DETECTOR
CPUs are delaying the current grace period, but only when
the grace period extends for excessive time periods.
- Say Y if you want RCU to perform such checks.
+ Say N if you want to disable such checks.
+
+ Say Y if you are unsure.
+
+config RCU_CPU_STALL_VERBOSE
+ bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
+ depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
+ default n
+ help
+ This option causes RCU to printk detailed per-task information
+ for any tasks that are stalling the current RCU grace period.
Say N if you are unsure.
+ Say Y if you want to enable such checks.
+
config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
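
[Editor's note: a minimal sketch, not part of this commit, of the kind of usage PROVE_RCU is meant to police. With PROVE_RCU enabled, lockdep complains if rcu_dereference() is used outside an RCU read-side critical section (or another provably safe context); that is why the idr and radix-tree hunks below switch internal accessors to rcu_dereference_raw(), which suppresses the check. The struct and pointer names here are hypothetical.]

#include <linux/rcupdate.h>

struct foo {
        int val;
};
static struct foo *gp;                  /* hypothetical RCU-protected pointer */

static int read_foo_val(void)
{
        struct foo *p;
        int val = -1;

        rcu_read_lock();                /* enter read-side critical section */
        p = rcu_dereference(gp);        /* PROVE_RCU: checked by lockdep */
        if (p)
                val = p->val;
        rcu_read_unlock();
        return val;
}
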
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index bc3b11731b9..5bf0020b924 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -23,6 +23,7 @@
* shut up after that.
*/
int debug_locks = 1;
+EXPORT_SYMBOL_GPL(debug_locks);
/*
* The locking-testsuite uses <debug_locks_silent> to get a
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 7d2f0b33e5a..ba8b67039d1 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -587,7 +587,7 @@ out_unlock:
return count;
}
-const struct file_operations filter_fops = {
+static const struct file_operations filter_fops = {
.read = filter_read,
.write = filter_write,
};
diff --git a/lib/hweight.c b/lib/hweight.c
index 389424ecb12..63ee4eb1228 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -11,11 +11,18 @@
unsigned int hweight32(unsigned int w)
{
+#ifdef ARCH_HAS_FAST_MULTIPLIER
+ w -= (w >> 1) & 0x55555555;
+ w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
+ w = (w + (w >> 4)) & 0x0f0f0f0f;
+ return (w * 0x01010101) >> 24;
+#else
unsigned int res = w - ((w >> 1) & 0x55555555);
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
res = (res + (res >> 4)) & 0x0F0F0F0F;
res = res + (res >> 8);
return (res + (res >> 16)) & 0x000000FF;
+#endif
}
EXPORT_SYMBOL(hweight32);
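
[Editor's note: for context, not part of the commit, a standalone sketch of the ARCH_HAS_FAST_MULTIPLIER path added above. After the first three steps each byte of w holds the population count of the corresponding input byte (0..8); multiplying by 0x01010101 sums those four byte counts into the most significant byte, which the final shift extracts. This only pays off where the hardware multiplier is fast, hence the #ifdef.]

/* Multiplier-based popcount, as in the fast path above; assumes 32-bit unsigned int. */
static unsigned int popcount32_mul(unsigned int w)
{
        w -= (w >> 1) & 0x55555555;                      /* 2-bit sums */
        w = (w & 0x33333333) + ((w >> 2) & 0x33333333);  /* 4-bit sums */
        w = (w + (w >> 4)) & 0x0f0f0f0f;                 /* per-byte sums */
        return (w * 0x01010101) >> 24;                   /* add the four bytes */
}
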
diff --git a/lib/idr.c b/lib/idr.c
index 1cac726c44b..2eb1dca0368 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
/* if already at the top layer, we need to grow */
- if (!(p = pa[l])) {
+ if (id >= 1 << (idp->layers * IDR_BITS)) {
*starting_id = id;
return IDR_NEED_TO_GROW;
}
+ p = pa[l];
+ BUG_ON(!p);
/* If we need to go up one layer, continue the
* loop; otherwise, restart from the top.
@@ -502,7 +504,7 @@ void *idr_find(struct idr *idp, int id)
int n;
struct idr_layer *p;
- p = rcu_dereference(idp->top);
+ p = rcu_dereference_raw(idp->top);
if (!p)
return NULL;
n = (p->layer+1) * IDR_BITS;
@@ -517,7 +519,7 @@ void *idr_find(struct idr *idp, int id)
while (n > 0 && p) {
n -= IDR_BITS;
BUG_ON(n != p->layer*IDR_BITS);
- p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+ p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
}
return((void *)p);
}
@@ -550,7 +552,7 @@ int idr_for_each(struct idr *idp,
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
- p = rcu_dereference(idp->top);
+ p = rcu_dereference_raw(idp->top);
max = 1 << n;
id = 0;
@@ -558,7 +560,7 @@ int idr_for_each(struct idr *idp,
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
- p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+ p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
}
if (p) {
diff --git a/lib/lmb.c b/lib/lmb.c
index 9cee17142b2..b1fc5260652 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -205,9 +205,8 @@ long lmb_add(u64 base, u64 size)
}
-long lmb_remove(u64 base, u64 size)
+static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
{
- struct lmb_region *rgn = &(lmb.memory);
u64 rgnbegin, rgnend;
u64 end = base + size;
int i;
@@ -254,6 +253,16 @@ long lmb_remove(u64 base, u64 size)
return lmb_add_region(rgn, end, rgnend - end);
}
+long lmb_remove(u64 base, u64 size)
+{
+ return __lmb_remove(&lmb.memory, base, size);
+}
+
+long __init lmb_free(u64 base, u64 size)
+{
+ return __lmb_remove(&lmb.reserved, base, size);
+}
+
long __init lmb_reserve(u64 base, u64 size)
{
struct lmb_region *_rgn = &lmb.reserved;
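
[Editor's note: a hypothetical early-boot usage sketch, with invented names, for the new lmb_free(): it undoes an earlier lmb_reserve() by removing the range from lmb.reserved, so a temporary staging area can be handed back once it is no longer needed.]

#include <linux/lmb.h>

static void __init use_scratch_region(u64 base, u64 size)
{
        lmb_reserve(base, size);        /* take the range out of circulation */
        /* ... use the region during early boot ... */
        lmb_free(base, size);           /* drop it from lmb.reserved again */
}
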
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 92cdd9936e3..6b9670d6bbf 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -364,7 +364,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
unsigned int height, shift;
struct radix_tree_node *node, **slot;
- node = rcu_dereference(root->rnode);
+ node = rcu_dereference_raw(root->rnode);
if (node == NULL)
return NULL;
@@ -384,7 +384,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
do {
slot = (struct radix_tree_node **)
(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
- node = rcu_dereference(*slot);
+ node = rcu_dereference_raw(*slot);
if (node == NULL)
return NULL;
@@ -568,7 +568,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
if (!root_tag_get(root, tag))
return 0;
- node = rcu_dereference(root->rnode);
+ node = rcu_dereference_raw(root->rnode);
if (node == NULL)
return 0;
@@ -602,7 +602,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
BUG_ON(ret && saw_unset_tag);
return !!ret;
}
- node = rcu_dereference(node->slots[offset]);
+ node = rcu_dereference_raw(node->slots[offset]);
shift -= RADIX_TREE_MAP_SHIFT;
height--;
}
@@ -711,7 +711,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
}
shift -= RADIX_TREE_MAP_SHIFT;
- slot = rcu_dereference(slot->slots[i]);
+ slot = rcu_dereference_raw(slot->slots[i]);
if (slot == NULL)
goto out;
}
@@ -758,7 +758,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned long cur_index = first_index;
unsigned int ret;
- node = rcu_dereference(root->rnode);
+ node = rcu_dereference_raw(root->rnode);
if (!node)
return 0;
@@ -787,7 +787,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
slot = *(((void ***)results)[ret + i]);
if (!slot)
continue;
- results[ret + nr_found] = rcu_dereference(slot);
+ results[ret + nr_found] = rcu_dereference_raw(slot);
nr_found++;
}
ret += nr_found;
@@ -826,7 +826,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
unsigned long cur_index = first_index;
unsigned int ret;
- node = rcu_dereference(root->rnode);
+ node = rcu_dereference_raw(root->rnode);
if (!node)
return 0;
@@ -915,7 +915,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
}
}
shift -= RADIX_TREE_MAP_SHIFT;
- slot = rcu_dereference(slot->slots[i]);
+ slot = rcu_dereference_raw(slot->slots[i]);
if (slot == NULL)
break;
}
@@ -951,7 +951,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
if (!root_tag_get(root, tag))
return 0;
- node = rcu_dereference(root->rnode);
+ node = rcu_dereference_raw(root->rnode);
if (!node)
return 0;
@@ -980,7 +980,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
slot = *(((void ***)results)[ret + i]);
if (!slot)
continue;
- results[ret + nr_found] = rcu_dereference(slot);
+ results[ret + nr_found] = rcu_dereference_raw(slot);
nr_found++;
}
ret += nr_found;
@@ -1020,7 +1020,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
if (!root_tag_get(root, tag))
return 0;
- node = rcu_dereference(root->rnode);
+ node = rcu_dereference_raw(root->rnode);
if (!node)
return 0;
diff --git a/lib/string.c b/lib/string.c
index 9f75b4ec50b..a1cdcfcc42d 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -667,7 +667,7 @@ EXPORT_SYMBOL(memscan);
*/
char *strstr(const char *s1, const char *s2)
{
- int l1, l2;
+ size_t l1, l2;
l2 = strlen(s2);
if (!l2)
@@ -684,6 +684,31 @@ char *strstr(const char *s1, const char *s2)
EXPORT_SYMBOL(strstr);
#endif
+#ifndef __HAVE_ARCH_STRNSTR
+/**
+ * strnstr - Find the first substring in a length-limited string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ * @len: the maximum number of characters to search
+ */
+char *strnstr(const char *s1, const char *s2, size_t len)
+{
+ size_t l1 = len, l2;
+
+ l2 = strlen(s2);
+ if (!l2)
+ return (char *)s1;
+ while (l1 >= l2) {
+ l1--;
+ if (!memcmp(s1, s2, l2))
+ return (char *)s1;
+ s1++;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(strnstr);
+#endif
+
#ifndef __HAVE_ARCH_MEMCHR
/**
* memchr - Find a character in an area of memory.
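
[Editor's note: a hedged usage sketch, buffer contents invented, for the new strnstr(): a match is reported only if it fits entirely within the first len characters of the haystack, unlike strstr(), which scans to the terminating NUL.]

#include <linux/string.h>

static int strnstr_example(void)
{
        const char *buf = "netconsole=@/,@10.0.0.1/";
        char *hit  = strnstr(buf, "netconsole", 10); /* == buf: fits in 10 chars */
        char *miss = strnstr(buf, "10.0.0.1", 10);   /* NULL: lies past the limit */

        return hit == buf && miss == NULL;
}
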
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 05e1559fa15..215447c5526 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -4,12 +4,25 @@
*/
#include <linux/zutil.h>
-#include <asm/unaligned.h>
-#include <asm/byteorder.h>
#include "inftrees.h"
#include "inflate.h"
#include "inffast.h"
+/* Only do the unaligned "Faster" variant when
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set
+ *
+ * On powerpc, it won't be as we don't include autoconf.h
+ * automatically for the boot wrapper, which is intended as
+ * we run in an environment where we may not be able to deal
+ * with (even rare) alignment faults. In addition, we do not
+ * define __KERNEL__ for arch/powerpc/boot unlike x86
+ */
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#endif
+
#ifndef ASMINF
/* Allow machine dependent optimization for post-increment or pre-increment.
@@ -243,6 +256,7 @@ void inflate_fast(z_streamp strm, unsigned start)
}
}
else {
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
unsigned short *sout;
unsigned long loops;
@@ -284,6 +298,20 @@ void inflate_fast(z_streamp strm, unsigned start)
}
if (len & 1)
PUP(out) = PUP(from);
+#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+ from = out - dist; /* copy direct from output */
+ do { /* minimum length is three */
+ PUP(out) = PUP(from);
+ PUP(out) = PUP(from);
+ PUP(out) = PUP(from);
+ len -= 3;
+ } while (len > 2);
+ if (len) {
+ PUP(out) = PUP(from);
+ if (len > 1)
+ PUP(out) = PUP(from);
+ }
+#endif /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
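
[Editor's note: a standalone sketch, not from the commit, of why the non-unaligned path added above copies a byte at a time. For an LZ77 back-reference the source is earlier output, and when the distance is smaller than the length the source and destination overlap, so each copied byte may itself be re-read; a plain memcpy() would not reproduce that behaviour.]

#include <stddef.h>

/* Copy a back-reference of 'len' bytes from 'dist' bytes behind 'out'.
 * Byte-at-a-time on purpose: with dist < len the ranges overlap and the
 * copy must see bytes it has just written (e.g. dist == 1 repeats a
 * single byte 'len' times), which is what the PUP()-based loop does.
 */
static void lz77_copy(unsigned char *out, size_t dist, size_t len)
{
        unsigned char *from = out - dist;

        while (len--)
                *out++ = *from++;
}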