author    Dave Airlie <airlied@redhat.com>  2016-09-28 12:08:49 +1000
committer Dave Airlie <airlied@redhat.com>  2016-09-28 12:08:49 +1000
commit    ca09fb9f60b5f3ab2d57e761aaeea89a5147d784 (patch)
tree      908e42ecf32d2601f4c5c340c6c4626841baa661 /lib/rhashtable.c
parent    9f4ef05bcdcfdf911b056b471dd3c6a4f331b644 (diff)
parent    08895a8b6b06ed2323cd97a36ee40a116b3db8ed (diff)
Merge tag 'v4.8-rc8' into drm-next
Linux 4.8-rc8

There was a lot of fallout in the imx/amdgpu/i915 drivers, so backmerge
it now to avoid troubles.

* tag 'v4.8-rc8': (1442 commits)
  Linux 4.8-rc8
  fault_in_multipages_readable() throws set-but-unused error
  mm: check VMA flags to avoid invalid PROT_NONE NUMA balancing
  radix tree: fix sibling entry handling in radix_tree_descend()
  radix tree test suite: Test radix_tree_replace_slot() for multiorder entries
  fix memory leaks in tracing_buffers_splice_read()
  tracing: Move mutex to protect against resetting of seq data
  MIPS: Fix delay slot emulation count in debugfs
  MIPS: SMP: Fix possibility of deadlock when bringing CPUs online
  mm: delete unnecessary and unsafe init_tlb_ubc()
  huge tmpfs: fix Committed_AS leak
  shmem: fix tmpfs to handle the huge= option properly
  blk-mq: skip unmapped queues in blk_mq_alloc_request_hctx
  MIPS: Fix pre-r6 emulation FPU initialisation
  arm64: kgdb: handle read-only text / modules
  arm64: Call numa_store_cpu_info() earlier.
  locking/hung_task: Fix typo in CONFIG_DETECT_HUNG_TASK help text
  nvme-rdma: only clear queue flags after successful connect
  i2c: qup: skip qup_i2c_suspend if the device is already runtime suspended
  perf/core: Limit matching exclusive events to one PMU
  ...
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c   20
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 5d845ffd7982..56054e541a0f 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -30,7 +30,7 @@
 #define HASH_DEFAULT_SIZE	64UL
 #define HASH_MIN_SIZE		4U
-#define BUCKET_LOCKS_PER_CPU	128UL
+#define BUCKET_LOCKS_PER_CPU	32UL
 static u32 head_hashfn(struct rhashtable *ht,
 		       const struct bucket_table *tbl,
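With BUCKET_LOCKS_PER_CPU cut from 128 to 32 (and the per-CPU cap raised from 32 to 64 in the next hunk), the worst-case lock-array request drops from 32 * 128 = 4096 to 64 * 32 = 2048 spinlocks before the 0.5-locks-per-bucket cap applies. A minimal userspace sketch of that sizing logic; the CPU and bucket counts are hypothetical, and roundup_p2() is a local stand-in for the kernel's roundup_pow_of_two():

#include <stdio.h>

/* Local stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int roundup_p2(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int nr_pcpus = 16;	/* hypothetical CPU count */
	unsigned int locks_mul = 32;	/* BUCKET_LOCKS_PER_CPU after this patch */
	unsigned int nbuckets = 1024;	/* hypothetical table size */
	unsigned int size;

	if (nr_pcpus > 64)		/* cap applied in alloc_bucket_locks() */
		nr_pcpus = 64;
	size = roundup_p2(nr_pcpus * locks_mul);
	if (size > nbuckets >> 1)	/* never more than 0.5 locks per bucket */
		size = nbuckets >> 1;
	printf("%u bucket locks for %u buckets\n", size, nbuckets);
	return 0;
}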
@@ -70,21 +70,25 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
 	unsigned int nr_pcpus = num_possible_cpus();
 #endif
-	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
+	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
 	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
 	/* Never allocate more than 0.5 locks per bucket */
 	size = min_t(unsigned int, size, tbl->size >> 1);
 	if (sizeof(spinlock_t) != 0) {
+		tbl->locks = NULL;
 #ifdef CONFIG_NUMA
 		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
 		    gfp == GFP_KERNEL)
 			tbl->locks = vmalloc(size * sizeof(spinlock_t));
-		else
 #endif
-		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-					   gfp);
+		if (gfp != GFP_KERNEL)
+			gfp |= __GFP_NOWARN | __GFP_NORETRY;
+
+		if (!tbl->locks)
+			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
+						   gfp);
 		if (!tbl->locks)
 			return -ENOMEM;
 		for (i = 0; i < size; i++)
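Condensed, the allocation path the hunk above arrives at looks roughly like the sketch below (not the exact kernel function; alloc_lock_array() is a hypothetical name): prefer vmalloc() for large process-context requests, make atomic kmalloc attempts fail fast and quietly via __GFP_NOWARN | __GFP_NORETRY, and fall back to kmalloc_array() only when vmalloc() did not already succeed:

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

/* Hypothetical condensation of the logic in alloc_bucket_locks(). */
static spinlock_t *alloc_lock_array(unsigned int size, gfp_t gfp)
{
	spinlock_t *locks = NULL;

	/* Large process-context requests go to vmalloc() so we never
	 * need a high-order contiguous page allocation. */
	if (size * sizeof(spinlock_t) > PAGE_SIZE && gfp == GFP_KERNEL)
		locks = vmalloc(size * sizeof(spinlock_t));

	/* Atomic callers: don't warn or retry, just fail fast so the
	 * caller can fall back gracefully. */
	if (gfp != GFP_KERNEL)
		gfp |= __GFP_NOWARN | __GFP_NORETRY;

	if (!locks)
		locks = kmalloc_array(size, sizeof(spinlock_t), gfp);

	return locks;	/* caller checks for NULL and initializes each lock */
}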
@@ -321,12 +325,14 @@ static int rhashtable_expand(struct rhashtable *ht)
 static int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
-	unsigned int size;
+	unsigned int nelems = atomic_read(&ht->nelems);
+	unsigned int size = 0;
 	int err;
 	ASSERT_RHT_MUTEX(ht);
-	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+	if (nelems)
+		size = roundup_pow_of_two(nelems * 3 / 2);
 	if (size < ht->p.min_size)
 		size = ht->p.min_size;
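The new nelems check matters because the kernel's roundup_pow_of_two() is undefined for 0: for a non-constant argument it expands to 1UL << fls_long(n - 1), so n == 0 wraps to ULONG_MAX and the shift count becomes BITS_PER_LONG. A userspace sketch of that failure mode, assuming a 64-bit long, with fls_long_() as a hypothetical stand-in for the kernel helper:

#include <stdio.h>

/* Stand-in for the kernel's fls_long(): 1-based position of the last
 * set bit, 0 for an all-zero word (assumes 64-bit long). */
static int fls_long_(unsigned long x)
{
	return x ? 64 - __builtin_clzl(x) : 0;
}

/* Mirrors __roundup_pow_of_two() from include/linux/log2.h. For
 * n == 0, n - 1 wraps to ULONG_MAX and the shift count becomes 64 --
 * undefined behaviour, which is what the nelems guard above avoids. */
static unsigned long roundup_p2(unsigned long n)
{
	return 1UL << fls_long_(n - 1);
}

int main(void)
{
	for (unsigned long n = 1; n <= 6; n++)	/* n == 0 would be UB */
		printf("roundup_pow_of_two(%lu) = %lu\n", n, roundup_p2(n));
	return 0;
}

With the guard in place, an empty table skips the computation entirely and size falls through to ht->p.min_size on the following lines.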