From e1168c2cc4a5a5e495a53e067a6be6b4f29abfe0 Mon Sep 17 00:00:00 2001
From: Randy Dunlap
Date: Fri, 22 Nov 2013 18:14:38 -0800
Subject: slab.h: remove duplicate kmalloc declaration and fix kernel-doc warnings

Fix kernel-doc warning for duplicate definition of 'kmalloc':

  Documentation/DocBook/kernel-api.xml:9483: element refentry: validity error : ID API-kmalloc already defined

Also combine the kernel-doc info from the 2 kmalloc definitions into one
block and remove the "see kcalloc" comment since kmalloc now contains
the @flags info.

Signed-off-by: Randy Dunlap
Acked-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 include/linux/slab.h | 102 +++++++++++++++++++++++----------------------------
 1 file changed, 46 insertions(+), 56 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index c2bba248fa63..1e2f4fe12773 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -388,10 +388,55 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 /**
  * kmalloc - allocate memory
  * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kcalloc).
+ * @flags: the type of memory to allocate.
  *
  * kmalloc is the normal method of allocating memory
  * for objects smaller than page size in the kernel.
+ *
+ * The @flags argument may be one of:
+ *
+ * %GFP_USER - Allocate memory on behalf of user. May sleep.
+ *
+ * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
+ *
+ * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
+ *   For example, use this inside interrupt handlers.
+ *
+ * %GFP_HIGHUSER - Allocate pages from high memory.
+ *
+ * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
+ *
+ * %GFP_NOFS - Do not make any fs calls while trying to get memory.
+ *
+ * %GFP_NOWAIT - Allocation will not sleep.
+ *
+ * %GFP_THISNODE - Allocate node-local memory only.
+ *
+ * %GFP_DMA - Allocation suitable for DMA.
+ *   Should only be used for kmalloc() caches. Otherwise, use a
+ *   slab created with SLAB_DMA.
+ *
+ * Also it is possible to set different flags by OR'ing
+ * in one or more of the following additional @flags:
+ *
+ * %__GFP_COLD - Request cache-cold pages instead of
+ *   trying to return cache-warm pages.
+ *
+ * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
+ *
+ * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
+ *   (think twice before using).
+ *
+ * %__GFP_NORETRY - If memory is not immediately available,
+ *   then give up at once.
+ *
+ * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
+ *
+ * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
+ *
+ * There are other flags available as well, but these are not intended
+ * for general use, and so are not documented here. For a full list of
+ * potential flags, always refer to linux/gfp.h.
  */
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
@@ -501,61 +546,6 @@ struct seq_file;
 int cache_show(struct kmem_cache *s, struct seq_file *m);
 void print_slabinfo_header(struct seq_file *m);
 
-/**
- * kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * The @flags argument may be one of:
- *
- * %GFP_USER - Allocate memory on behalf of user. May sleep.
- *
- * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
- *
- * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
- *   For example, use this inside interrupt handlers.
- *
- * %GFP_HIGHUSER - Allocate pages from high memory.
- *
- * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
- *
- * %GFP_NOFS - Do not make any fs calls while trying to get memory.
- *
- * %GFP_NOWAIT - Allocation will not sleep.
- *
- * %GFP_THISNODE - Allocate node-local memory only.
- *
- * %GFP_DMA - Allocation suitable for DMA.
- *   Should only be used for kmalloc() caches. Otherwise, use a
- *   slab created with SLAB_DMA.
- *
- * Also it is possible to set different flags by OR'ing
- * in one or more of the following additional @flags:
- *
- * %__GFP_COLD - Request cache-cold pages instead of
- *   trying to return cache-warm pages.
- *
- * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
- *
- * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
- *   (think twice before using).
- *
- * %__GFP_NORETRY - If memory is not immediately available,
- *   then give up at once.
- *
- * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
- *
- * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
- *
- * There are other flags available as well, but these are not intended
- * for general use, and so are not documented here. For a full list of
- * potential flags, always refer to linux/gfp.h.
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- */
-static __always_inline void *kmalloc(size_t size, gfp_t flags);
-
 /**
  * kmalloc_array - allocate memory for an array.
  * @n: number of elements.
-- cgit v1.2.3


From 8afb1474db4701d1ab80cd8251137a3260e6913e Mon Sep 17 00:00:00 2001
From: Li Zefan
Date: Tue, 10 Sep 2013 11:43:37 +0800
Subject: slub: Fix calculation of cpu slabs

  /sys/kernel/slab/:t-0000048 # cat cpu_slabs
  231 N0=16 N1=215
  /sys/kernel/slab/:t-0000048 # cat slabs
  145 N0=36 N1=109

See, the number of slabs is smaller than that of cpu slabs.

The bug was introduced by commit 49e2258586b423684f03c278149ab46d8f8b6700
("slub: per cpu cache for partial pages").

We should use page->pages instead of page->pobjects when calculating
the number of cpu partial slabs. This also fixes the mapping of slabs
and nodes.

As there's no variable storing the number of total/active objects in
cpu partial slabs, and we don't have user interfaces requiring those
statistics, I just add WARN_ON for those cases.

Cc: # 3.2+
Acked-by: Christoph Lameter
Reviewed-by: Wanpeng Li
Signed-off-by: Li Zefan
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/mm/slub.c b/mm/slub.c
index 545a170ebf9f..89490d9d91e0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4299,7 +4299,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			page = ACCESS_ONCE(c->partial);
 
 			if (page) {
-				x = page->pobjects;
+				node = page_to_nid(page);
+				if (flags & SO_TOTAL)
+					WARN_ON_ONCE(1);
+				else if (flags & SO_OBJECTS)
+					WARN_ON_ONCE(1);
+				else
+					x = page->pages;
 				total += x;
 				nodes[node] += x;
 			}
-- cgit v1.2.3


From c65c1877bd6826ce0d9713d76e30a7bed8e49f38 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 10 Jan 2014 13:23:49 +0100
Subject: slub: use lockdep_assert_held

Instead of using comments in an attempt at getting the locking right,
use proper assertions that actively warn you if you got it wrong.

Also add extra braces in a few sites to comply with coding-style.
Signed-off-by: Peter Zijlstra
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 89490d9d91e0..367b224f2aa5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -985,23 +985,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
 	list_add(&page->lru, &n->full);
 }
 
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
@@ -1250,7 +1249,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
@@ -1504,12 +1504,12 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
 static inline void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
@@ -1517,12 +1517,11 @@ static inline void add_partial(struct kmem_cache_node *n,
 		list_add(&page->lru, &n->partial);
 }
 
-/*
- * list_lock must be held.
- */
 static inline void remove_partial(struct kmem_cache_node *n,
					struct page *page)
 {
+	lockdep_assert_held(&n->list_lock);
+
 	list_del(&page->lru);
 	n->nr_partial--;
 }
@@ -1532,8 +1531,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
		struct kmem_cache_node *n, struct page *page,
@@ -1543,6 +1540,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	unsigned long counters;
 	struct page new;
 
+	lockdep_assert_held(&n->list_lock);
+
 	/*
	 * Zap the freelist and set the frozen bit.
	 * The old freelist is the list of objects for the
@@ -1887,7 +1886,7 @@ redo:
 
	else if (l == M_FULL)
 
-			remove_full(s, page);
+			remove_full(s, n, page);
 
	if (m == M_PARTIAL) {
 
@@ -2541,7 +2540,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
		new.inuse--;
		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (kmem_cache_has_cpu_partial(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior) {
 
				/*
				 * Slab was on no list before and will be
@@ -2551,7 +2550,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
				 */
				new.frozen = 1;
 
-			else { /* Needs to be taken off a list */
+			} else { /* Needs to be taken off a list */
 
				n = get_node(s, page_to_nid(page));
				/*
@@ -2600,7 +2599,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
	 */
	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
		if (kmem_cache_debug(s))
-			remove_full(s, page);
+			remove_full(s, n, page);
		add_partial(n, page, DEACTIVATE_TO_TAIL);
		stat(s, FREE_ADD_PARTIAL);
	}
@@ -2614,9 +2613,10 @@ slab_empty:
		 */
		remove_partial(n, page);
		stat(s, FREE_REMOVE_PARTIAL);
-	} else
+	} else {
		/* Slab must be on the full list */
-		remove_full(s, page);
+		remove_full(s, n, page);
+	}
 
	spin_unlock_irqrestore(&n->list_lock, flags);
	stat(s, FREE_SLAB);
-- cgit v1.2.3


From 26e4f2057516f1c457e0e95346a00303f983ad53 Mon Sep 17 00:00:00 2001
From: Tetsuo Handa
Date: Sat, 4 Jan 2014 16:32:31 +0900
Subject: slub: Fix possible format string bug.

The "name" is determined at runtime and is parsed as a format string.

Acked-by: David Rientjes
Signed-off-by: Tetsuo Handa
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/slub.c b/mm/slub.c
index 367b224f2aa5..a99e9e67c60e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5169,7 +5169,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
	}
 
	s->kobj.kset = slab_kset;
-	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
-- cgit v1.2.3


From 433a91ff5fa19e3eb70b12f7056f234aebd09ac2 Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Tue, 28 Jan 2014 14:24:50 -0800
Subject: mm: sl[uo]b: fix misleading comments

On x86, SLUB creates and handles <=8192-byte allocations internally.
It passes larger ones up to the allocator. Saying "up to order 2" is,
at best, ambiguous. Is that order-1? Or (order-2 bytes)? Make it more
clear.

SLOB commits a similar sin. It *handles* page-size requests, but the
comment says that it passes up "all page size and larger requests".

SLOB also swaps around the order of the very-similarly-named
KMALLOC_SHIFT_HIGH and KMALLOC_SHIFT_MAX #defines. Make it consistent
with the order of the other two allocators.

Cc: Matt Mackall
Cc: Andrew Morton
Acked-by: Christoph Lameter
Acked-by: David Rientjes
Signed-off-by: Dave Hansen
Signed-off-by: Pekka Enberg
---
 include/linux/slab.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1e2f4fe12773..f76e956b4011 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -205,8 +205,8 @@ struct kmem_cache {
 
 #ifdef CONFIG_SLUB
 /*
- * SLUB allocates up to order 2 pages directly and otherwise
- * passes the request to the page allocator.
+ * SLUB directly allocates requests fitting in to an order-1 page
+ * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
 #define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
@@ -217,12 +217,12 @@ struct kmem_cache {
 
 #ifdef CONFIG_SLOB
 /*
- * SLOB passes all page size and larger requests to the page allocator.
+ * SLOB passes all requests larger than one page to the page allocator.
  * No kmalloc array is necessary since objects of different sizes can
  * be allocated from the same page.
  */
-#define KMALLOC_SHIFT_MAX	30
 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
+#define KMALLOC_SHIFT_MAX	30
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
-- cgit v1.2.3


From 67b6c900dc6dce65478d6fe37b60cd1e65bb80c2 Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Fri, 24 Jan 2014 07:20:23 -0800
Subject: mm: slub: work around unneeded lockdep warning

The slub code does some setup during early boot in
early_kmem_cache_node_alloc() with some local data. There is no
possible way that another CPU can see this data, so the slub code
doesn't unnecessarily lock it. However, some new lockdep asserts
check to make sure that add_partial() _always_ has the list_lock
held.

Just add the locking, even though it is technically unnecessary.

Cc: Peter Zijlstra
Cc: Russell King
Acked-by: David Rientjes
Signed-off-by: Dave Hansen
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/mm/slub.c b/mm/slub.c
index a99e9e67c60e..432bddf484bb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2890,7 +2890,13 @@ static void early_kmem_cache_node_alloc(int node)
	init_kmem_cache_node(n);
	inc_slabs_node(kmem_cache_node, node, page->objects);
 
+	/*
+	 * the lock is for lockdep's sake, not for any actual
+	 * race protection
+	 */
+	spin_lock(&n->list_lock);
	add_partial(n, page, DEACTIVATE_TO_HEAD);
+	spin_unlock(&n->list_lock);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
-- cgit v1.2.3


From cb8ee1a3d429f8898972c869dd4792afb04e961a Mon Sep 17 00:00:00 2001
From: Masanari Iida
Date: Tue, 28 Jan 2014 02:57:08 +0900
Subject: mm: Fix warning on make htmldocs caused by slab.c

This patch fixes the following errors when running make htmldocs:

  Warning(/mm/slab.c:1956): No description found for parameter 'page'
  Warning(/mm/slab.c:1956): Excess function parameter 'slabp' description in 'slab_destroy'

The incorrect function parameter "slabp" was documented instead of "page".

Acked-by: Christoph Lameter
Signed-off-by: Masanari Iida
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/slab.c b/mm/slab.c
index eb043bf05f4c..b264214c77ea 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1946,7 +1946,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
+ * @page: page pointer being destroyed
  *
  * Destroy all the objs in a slab, and release the mem back to the system.
  * Before calling the slab must have been unlinked from the cache. The
-- cgit v1.2.3
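
The "slub: Fix possible format string bug." change above illustrates a general
rule: never pass a runtime-controlled string as the format argument of a
printf-style function, because any conversion specifiers embedded in it are
honoured and consume varargs that were never supplied. Below is a minimal
user-space sketch of the hazard and of the fixed pattern; the snprintf() usage
and the get_cache_name() helper are illustrative assumptions, not code taken
from the patches above.

#include <stdio.h>

/* Stand-in for a name that is only known at runtime (for example a cache
 * name derived from user-visible input) and may contain '%' characters. */
static const char *get_cache_name(void)
{
	return ":t-%s-0000048";		/* note the embedded "%s" */
}

int main(void)
{
	char buf[64];
	const char *name = get_cache_name();

	/*
	 * Buggy pattern (kept commented out): the runtime string is parsed
	 * as a format string, so the embedded "%s" makes snprintf() read a
	 * vararg that does not exist -- undefined behaviour.
	 *
	 * snprintf(buf, sizeof(buf), name);
	 */

	/* Fixed pattern, mirroring the kobject_init_and_add() change:
	 * pass a constant format and supply the runtime string as data. */
	snprintf(buf, sizeof(buf), "%s", name);
	printf("%s\n", buf);
	return 0;
}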