| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-24 07:33:43 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-24 07:33:43 -0700 |
| commit | cedfb2db7b2d6b2c780999536aa1e2650fadee36 | |
| tree | d36479ce5997bd6b0d66764620d9139eda263c61 /include | |
| parent | 85f9642e3199271614210b8feebe18b7652894b6 | |
| parent | bb4f6b0cd7524ad7d56709723eaf8a7bf5a87b57 | |
Merge branch 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
slub: Use alloc_pages_exact_node() for page allocation
slub: __kmalloc_node_track_caller should trace kmalloc_large_node case
slub: Potential stack overflow
crypto: Use ARCH_KMALLOC_MINALIGN for CRYPTO_MINALIGN now that it's exposed
mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slub_def.h>
mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slob_def.h>
mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slab_def.h>
slab: Fix missing DEBUG_SLAB last user
slab: add memory hotplug support
slab: Fix continuation lines
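
The "Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN" commits expose the fallback definitions in the allocator headers, so code such as crypto.h can rely on ARCH_KMALLOC_MINALIGN always being defined while still letting an architecture override it. The sketch below is illustrative only and not part of this merge: it mimics the `#ifndef` fallback pattern the series adds to slab_def.h/slob_def.h/slub_def.h, with a made-up architecture override (the value 64 is a placeholder, not taken from any real architecture).

```c
#include <stdio.h>

/*
 * Standalone sketch, not kernel code: mimic the fallback pattern added by
 * this series. A hypothetical <asm/cache.h> that needs DMA-safe kmalloc()
 * buffers might pre-define ARCH_KMALLOC_MINALIGN; the value is a placeholder.
 */
#define ARCH_KMALLOC_MINALIGN	64

/* Fallbacks: only used when the architecture has not set its own value. */
#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN	__alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN	0
#endif

int main(void)
{
	/* The arch override wins; the default fills in ARCH_SLAB_MINALIGN. */
	printf("ARCH_KMALLOC_MINALIGN = %d\n", ARCH_KMALLOC_MINALIGN);
	printf("ARCH_SLAB_MINALIGN    = %d\n", ARCH_SLAB_MINALIGN);
	return 0;
}
```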
Diffstat (limited to 'include')

| mode | path | changed |
|---|---|---|
| -rw-r--r-- | include/linux/crypto.h | 6 |
| -rw-r--r-- | include/linux/slab_def.h | 24 |
| -rw-r--r-- | include/linux/slob_def.h | 8 |
| -rw-r--r-- | include/linux/slub_def.h | 8 |

4 files changed, 40 insertions(+), 6 deletions(-)
```diff
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 24d2e30f1b4..a6a7a1c83f5 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -99,13 +99,7 @@
  * as arm where pointers are 32-bit aligned but there are data types such as
  * u64 which require 64-bit alignment.
  */
-#if defined(ARCH_KMALLOC_MINALIGN)
 #define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
-#elif defined(ARCH_SLAB_MINALIGN)
-#define CRYPTO_MINALIGN ARCH_SLAB_MINALIGN
-#else
-#define CRYPTO_MINALIGN __alignof__(unsigned long long)
-#endif
 
 #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
 
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index ca6b2b31799..1812dac8c49 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -16,6 +16,30 @@
 #include <linux/compiler.h>
 #include <linux/kmemtrace.h>
 
+#ifndef ARCH_KMALLOC_MINALIGN
+/*
+ * Enforce a minimum alignment for the kmalloc caches.
+ * Usually, the kmalloc caches are cache_line_size() aligned, except when
+ * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * ARCH_KMALLOC_MINALIGN allows that.
+ * Note that increasing this value may disable some debug features.
+ */
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+/*
+ * Enforce a minimum alignment for all caches.
+ * Intended for archs that get misalignment faults even for BYTES_PER_WORD
+ * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
+ * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
+ * some debug features.
+ */
+#define ARCH_SLAB_MINALIGN 0
+#endif
+
 /*
  * struct kmem_cache
  *
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 0ec00b39d00..62667f72c2e 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -1,6 +1,14 @@
 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
+#endif
+
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0249d4175ba..55695c8d2f8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -116,6 +116,14 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
 /*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
```
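
For reference, a standalone sketch (not kernel code) of how the simplified crypto.h logic behaves now that ARCH_KMALLOC_MINALIGN is always provided by the slab headers: CRYPTO_MINALIGN resolves to it unconditionally, and CRYPTO_MINALIGN_ATTR carries that alignment onto a structure. `struct my_cipher_ctx` is a made-up example type, not a kernel structure.

```c
#include <stdio.h>

/* Standalone sketch, not kernel code: mirrors the simplified crypto.h logic
 * after this merge, where ARCH_KMALLOC_MINALIGN is always available. */
#define ARCH_KMALLOC_MINALIGN	__alignof__(unsigned long long)

#define CRYPTO_MINALIGN		ARCH_KMALLOC_MINALIGN
#define CRYPTO_MINALIGN_ATTR	__attribute__((__aligned__(CRYPTO_MINALIGN)))

/* Made-up context type showing the alignment attribute in use. */
struct my_cipher_ctx {
	unsigned char key[16];
} CRYPTO_MINALIGN_ATTR;

int main(void)
{
	printf("CRYPTO_MINALIGN = %zu\n", (size_t)CRYPTO_MINALIGN);
	printf("alignof(struct my_cipher_ctx) = %zu\n",
	       (size_t)__alignof__(struct my_cipher_ctx));
	return 0;
}
```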
