author     David S. Miller <davem@davemloft.net>   2019-06-09 19:40:10 -0700
committer  David S. Miller <davem@davemloft.net>   2019-06-09 19:40:10 -0700
commit     e8a492cd539457b8f616f6db9b62b50cd5b9a980 (patch)
tree       ddf59935f7a1bc272d05ec206fa326329722fa1e /net
parent     9e49fe4d6e17a38e8168effee181f001d2d22a5a (diff)
parent     6dcdd884e2a4bb57b0ed3654ff28974ae17d2a08 (diff)
Merge branch 'Avoid-local_irq_save-and-use-napi_alloc_frag-where-possible'
Sebastian Andrzej Siewior says:
====================
Avoid local_irq_save() and use napi_alloc_frag() where possible
The first two patches remove the local_irq_save() around
`netdev_alloc_cache', which does not work on -RT. Besides helping -RT,
this would benefit users of these functions, since they avoid disabling
interrupts and save a few cycles.
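Condensed from the skbuff.c hunks below (names and calls are taken from
the patched code; error handling and the skb-build path are omitted),
the replacement pattern is roughly:

	/* Old: safe in any context, but disables interrupts. */
	local_irq_save(flags);
	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = page_frag_alloc(nc, fragsz, gfp_mask);
	local_irq_restore(flags);

	/* New: only hard-IRQ / IRQs-off callers keep using
	 * netdev_alloc_cache; everyone else shares the NAPI cache under
	 * local_bh_disable(), which is cheaper and works on -RT.
	 */
	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, fragsz, gfp_mask);
	} else {
		local_bh_disable();
		data = page_frag_alloc(this_cpu_ptr(&napi_alloc_cache.page),
				       fragsz, gfp_mask);
		local_bh_enable();
	}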
The remaining patches are from a time when I tried to remove
`netdev_alloc_cache' entirely, but then noticed that we still have
non-NAPI drivers using netdev_alloc_skb() and dropped that idea. Using
napi_alloc_frag() instead of netdev_alloc_frag() skips the
local_bh_disable() around the allocation, which is not required in NAPI
context, as the sketch below shows.
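As a hedged illustration (my_poll() and MY_RX_FRAG_SIZE are made-up
names, not from this series): a driver's NAPI ->poll() callback already
runs in softirq context with bottom halves disabled, so it can call
napi_alloc_frag() directly and avoid the extra bh toggle:

	static int my_poll(struct napi_struct *napi, int budget)
	{
		/* Bottom halves are already off here, so napi_alloc_frag()
		 * uses the per-CPU napi_alloc_cache directly, without the
		 * local_bh_disable()/local_bh_enable() pair that
		 * netdev_alloc_frag() performs.
		 */
		void *frag = napi_alloc_frag(MY_RX_FRAG_SIZE);

		if (!frag)
			return 0;	/* out of memory; retry on the next poll */

		/* ... build an skb around the fragment and pass it up ... */
		return 0;
	}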
v1…v2:
- 1/7 + 2/7 now use "(in_irq() || irqs_disabled())" instead of just
  "irqs_disabled()", to align with __dev_kfree_skb_any() (quoted after
  this list). Pointed out by Eric Dumazet.
- 6/7 has one typo fewer. Pointed out by Sergei Shtylyov.
- 3/7 + 4/7 added acks from Ioana Radulescu.
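For reference, this is the context check in __dev_kfree_skb_any()
(net/core/dev.c) that 1/7 and 2/7 now mirror; quoted from memory of the
kernel around this release, so treat it as illustrative rather than
authoritative:

	void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
	{
		if (in_irq() || irqs_disabled())
			/* Can't free directly in this context; defer to softirq. */
			__dev_kfree_skb_irq(skb, reason);
		else
			dev_kfree_skb(skb);
	}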
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/core/hwbm.c   | 15
-rw-r--r--  net/core/skbuff.c | 68
2 files changed, 41 insertions(+), 42 deletions(-)
diff --git a/net/core/hwbm.c b/net/core/hwbm.c
index fd822ca5a245..ac1a66df9adc 100644
--- a/net/core/hwbm.c
+++ b/net/core/hwbm.c
@@ -43,34 +43,33 @@ int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
 }
 EXPORT_SYMBOL_GPL(hwbm_pool_refill);
 
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
+int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
 {
 	int err, i;
-	unsigned long flags;
 
-	spin_lock_irqsave(&bm_pool->lock, flags);
+	mutex_lock(&bm_pool->buf_lock);
 	if (bm_pool->buf_num == bm_pool->size) {
 		pr_warn("pool already filled\n");
-		spin_unlock_irqrestore(&bm_pool->lock, flags);
+		mutex_unlock(&bm_pool->buf_lock);
 		return bm_pool->buf_num;
 	}
 
 	if (buf_num + bm_pool->buf_num > bm_pool->size) {
 		pr_warn("cannot allocate %d buffers for pool\n",
 			buf_num);
-		spin_unlock_irqrestore(&bm_pool->lock, flags);
+		mutex_unlock(&bm_pool->buf_lock);
 		return 0;
 	}
 
 	if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
 		pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
 			buf_num, bm_pool->buf_num);
-		spin_unlock_irqrestore(&bm_pool->lock, flags);
+		mutex_unlock(&bm_pool->buf_lock);
 		return 0;
 	}
 
 	for (i = 0; i < buf_num; i++) {
-		err = hwbm_pool_refill(bm_pool, gfp);
+		err = hwbm_pool_refill(bm_pool, GFP_KERNEL);
 		if (err < 0)
 			break;
 	}
@@ -79,7 +78,7 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
 	bm_pool->buf_num += i;
 	pr_debug("hwpm pool: %d of %d buffers added\n", i, buf_num);
 
-	spin_unlock_irqrestore(&bm_pool->lock, flags);
+	mutex_unlock(&bm_pool->buf_lock);
 
 	return i;
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 23c9bf8fc322..bab9484f1631 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -366,19 +366,21 @@ struct napi_alloc_cache {
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 
-static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-	struct page_frag_cache *nc;
-	unsigned long flags;
-	void *data;
+	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-	local_irq_save(flags);
-	nc = this_cpu_ptr(&netdev_alloc_cache);
-	data = page_frag_alloc(nc, fragsz, gfp_mask);
-	local_irq_restore(flags);
-	return data;
+	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
 }
 
+void *napi_alloc_frag(unsigned int fragsz)
+{
+	fragsz = SKB_DATA_ALIGN(fragsz);
+
+	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(napi_alloc_frag);
+
 /**
  * netdev_alloc_frag - allocate a page fragment
  * @fragsz: fragment size
@@ -388,26 +390,21 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  */
 void *netdev_alloc_frag(unsigned int fragsz)
 {
-	fragsz = SKB_DATA_ALIGN(fragsz);
-
-	return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
-}
-EXPORT_SYMBOL(netdev_alloc_frag);
-
-static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
-{
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
-
-	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
-}
+	struct page_frag_cache *nc;
+	void *data;
 
-void *napi_alloc_frag(unsigned int fragsz)
-{
 	fragsz = SKB_DATA_ALIGN(fragsz);
-
-	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
+	if (in_irq() || irqs_disabled()) {
+		nc = this_cpu_ptr(&netdev_alloc_cache);
+		data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
+	} else {
+		local_bh_disable();
+		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
+		local_bh_enable();
+	}
+	return data;
 }
-EXPORT_SYMBOL(napi_alloc_frag);
+EXPORT_SYMBOL(netdev_alloc_frag);
 
 /**
  * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -426,7 +423,6 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 				   gfp_t gfp_mask)
 {
 	struct page_frag_cache *nc;
-	unsigned long flags;
 	struct sk_buff *skb;
 	bool pfmemalloc;
 	void *data;
@@ -447,13 +443,17 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
-	local_irq_save(flags);
-
-	nc = this_cpu_ptr(&netdev_alloc_cache);
-	data = page_frag_alloc(nc, len, gfp_mask);
-	pfmemalloc = nc->pfmemalloc;
-
-	local_irq_restore(flags);
+	if (in_irq() || irqs_disabled()) {
+		nc = this_cpu_ptr(&netdev_alloc_cache);
+		data = page_frag_alloc(nc, len, gfp_mask);
+		pfmemalloc = nc->pfmemalloc;
+	} else {
+		local_bh_disable();
+		nc = this_cpu_ptr(&napi_alloc_cache.page);
+		data = page_frag_alloc(nc, len, gfp_mask);
+		pfmemalloc = nc->pfmemalloc;
+		local_bh_enable();
+	}
 
 	if (unlikely(!data))
 		return NULL;