Diffstat (limited to 'include/linux/slab_def.h')
-rw-r--r--  include/linux/slab_def.h  66
1 file changed, 43 insertions(+), 23 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 850d057500de..83203ae9390b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,7 +14,34 @@
#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
-#include <linux/kmemtrace.h>
+
+#include <trace/events/kmem.h>
+
+/*
+ * Enforce a minimum alignment for the kmalloc caches.
+ * Usually, the kmalloc caches are cache_line_size() aligned, except when
+ * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * ARCH_KMALLOC_MINALIGN allows that.
+ * Note that increasing this value may disable some debug features.
+ */
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+/*
+ * Enforce a minimum alignment for all caches.
+ * Intended for archs that get misalignment faults even for BYTES_PER_WORD
+ * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
+ * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
+ * some debug features.
+ */
+#define ARCH_SLAB_MINALIGN 0
+#endif

/*
* struct kmem_cache
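
The block added above guarantees a minimum alignment for every kmalloc()
allocation: ARCH_DMA_MINALIGN where the architecture defines it (machines
that DMA straight into kmalloc'd buffers), __alignof__(unsigned long long)
everywhere else. A minimal sketch of the kind of driver code that relies on
the guarantee; the function, device, and error handling below are
hypothetical, not part of this patch:

	#include <linux/slab.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical rx-buffer setup: kmalloc() memory is aligned to at
	 * least ARCH_KMALLOC_MINALIGN, so on a cache-incoherent machine the
	 * buffer cannot share a cache line with unrelated data while the
	 * device is writing into it. */
	static void *rx_buf_alloc(struct device *dev, size_t len,
				  dma_addr_t *handle)
	{
		void *buf = kmalloc(len, GFP_KERNEL);

		if (!buf)
			return NULL;
		*handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *handle)) {
			kfree(buf);
			return NULL;
		}
		return buf;
	}
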
@@ -110,12 +137,13 @@ extern struct cache_sizes malloc_sizes[];
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

-#ifdef CONFIG_KMEMTRACE
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(size_t size,
+ struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
{
return kmem_cache_alloc(cachep, flags);
}
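
This hunk is why the requested size is now threaded through the call: the
tracepoint moves out of the inline fast path into mm/slab.c, where
trace_kmalloc() can report both the size the caller asked for and the
rounded-up size of the backing cache. mm/slab.c is not part of this diff, so
the definition below is only an assumed sketch of the out-of-line half (the
real body calls slab internals directly rather than going back through
kmem_cache_alloc()):

	#ifdef CONFIG_TRACING
	void *kmem_cache_alloc_trace(size_t size,
				     struct kmem_cache *cachep, gfp_t flags)
	{
		void *ret = kmem_cache_alloc(cachep, flags);

		/* Report requested bytes alongside actually-allocated bytes. */
		trace_kmalloc(_RET_IP_, ret,
			      size, slab_buffer_size(cachep), flags);
		return ret;
	}
	EXPORT_SYMBOL(kmem_cache_alloc_trace);
	#endif

The kmem_cache_alloc_node_trace() prototype changed further down follows the
same pattern, with trace_kmalloc_node() additionally recording the node.
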
@@ -152,10 +180,7 @@ found:
#endif
cachep = malloc_sizes[i].cs_cachep;

- ret = kmem_cache_alloc_notrace(cachep, flags);
-
- trace_kmalloc(_THIS_IP_, ret,
- size, slab_buffer_size(cachep), flags);
+ ret = kmem_cache_alloc_trace(size, cachep, flags);

return ret;
}
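
The call-site change above means the inline kmalloc() no longer expands a
tracepoint into every caller; for a compile-time-constant size the whole
cache lookup folds down to a single out-of-line call. A rough illustration
(which cache serves a given size depends on the arch's kmalloc_sizes.h
table, so the 128-byte figure is an assumption):

	static void *example(void)
	{
		/* Constant size: the CACHE(x) walk (elided from this hunk)
		 * picks the index i at compile time, so this becomes, in
		 * effect,
		 *	kmem_cache_alloc_trace(100, malloc_sizes[i].cs_cachep,
		 *			       GFP_KERNEL);
		 * an allocation from the smallest fitting cache (128 bytes
		 * here) that still reports the requested 100 bytes. */
		return kmalloc(100, GFP_KERNEL);
	}
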
@@ -166,15 +191,17 @@ found:
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

-#ifdef CONFIG_KMEMTRACE
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid);
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_node_trace(size_t size,
+ struct kmem_cache *cachep,
+ gfp_t flags,
+ int nodeid);
#else
static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid)
+kmem_cache_alloc_node_trace(size_t size,
+ struct kmem_cache *cachep,
+ gfp_t flags,
+ int nodeid)
{
return kmem_cache_alloc_node(cachep, flags, nodeid);
}
@@ -183,7 +210,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *cachep;
- void *ret;

if (__builtin_constant_p(size)) {
int i = 0;
@@ -207,13 +233,7 @@ found:
#endif
cachep = malloc_sizes[i].cs_cachep;

- ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
- trace_kmalloc_node(_THIS_IP_, ret,
- size, slab_buffer_size(cachep),
- flags, node);
-
- return ret;
+ return kmem_cache_alloc_node_trace(size, cachep, flags, node);
}
return __kmalloc_node(size, flags, node);
}
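
Callers are unaffected by any of this; only the point where the event fires
moves, which also shrinks the code expanded at each inline call site. A
hypothetical NUMA-aware usage, for illustration only:

	#include <linux/slab.h>

	/* Hypothetical: place a scratch buffer on the node that will use it.
	 * With this patch the kmalloc_node tracepoint fires once, inside
	 * kmem_cache_alloc_node_trace(), instead of being inlined here. */
	static void *scratch_alloc(size_t len, int node)
	{
		return kmalloc_node(len, GFP_KERNEL, node);
	}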