author     Dennis Zhou <dennis@kernel.org>    2021-04-18 22:44:16 +0000
committer  Dennis Zhou <dennis@kernel.org>    2021-04-21 18:17:40 +0000
commit     1c29a3ceaf5f02919e0a89119a70382581453dbb (patch)
tree       f0b01139bf50b0f8535e0563424ba5d46ffe562d /mm
parent     8ea2e1e35d1eb4c76290ff5d565a1bfd6c24f117 (diff)
percpu: use pcpu_free_slot instead of pcpu_nr_slots - 1

This prepares for adding a to_depopulate list and sidelined list after
the free slot in the set of lists in pcpu_slot.

Signed-off-by: Dennis Zhou <dennis@kernel.org>
Acked-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
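For context, percpu chunks are bucketed into lists indexed by their largest
contiguous free size, and a completely free chunk (free_bytes == pcpu_unit_size)
lives in a dedicated last slot, which this patch names pcpu_free_slot so that
further lists can later be appended after it. Below is a minimal user-space
sketch of the indexing, assuming PCPU_SLOT_BASE_SHIFT and the slot math behave
as in mm/percpu.c, with fls() approximated via __builtin_clz(); the 64 KiB unit
size is a made-up example, not a value taken from this patch.

#include <stdio.h>

/* Assumed to match mm/percpu.c at the time of this patch. */
#define PCPU_SLOT_BASE_SHIFT 5

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_sketch(int x)
{
	return x ? 32 - __builtin_clz((unsigned int)x) : 0;
}

/* Mirrors __pcpu_size_to_slot(): size in bytes -> list index. */
static int size_to_slot(int size)
{
	int slot = fls_sketch(size) - PCPU_SLOT_BASE_SHIFT + 2;

	return slot > 1 ? slot : 1;	/* mirrors max(..., 1) */
}

int main(void)
{
	int pcpu_unit_size = 64 << 10;	/* hypothetical 64 KiB unit */

	/* Before this patch: the free list was simply "the last slot". */
	int nr_slots_old = size_to_slot(pcpu_unit_size) + 2;
	int free_slot_old = nr_slots_old - 1;

	/*
	 * After this patch: the free list has an explicit index and
	 * pcpu_nr_slots is derived from it, so new lists can be added
	 * after the free slot without touching the callers.
	 */
	int pcpu_free_slot = size_to_slot(pcpu_unit_size) + 1;
	int nr_slots_new = pcpu_free_slot + 1;

	printf("old: nr_slots=%d free_slot=%d\n", nr_slots_old, free_slot_old);
	printf("new: nr_slots=%d free_slot=%d\n", nr_slots_new, pcpu_free_slot);
	return 0;
}

Running the sketch prints the same slot count and free-slot index before and
after the change, i.e. this patch is purely preparatory.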
Diffstat (limited to 'mm')
-rw-r--r--  mm/percpu.c  14
1 file changed, 8 insertions, 6 deletions
diff --git a/mm/percpu.c b/mm/percpu.c
index 5edc7bd88133..d462222f4adc 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -135,6 +135,7 @@ static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
+int pcpu_free_slot __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;
/* cpus with the lowest and highest unit addresses */
@@ -237,7 +238,7 @@ static int __pcpu_size_to_slot(int size)
static int pcpu_size_to_slot(int size)
{
if (size == pcpu_unit_size)
- return pcpu_nr_slots - 1;
+ return pcpu_free_slot;
return __pcpu_size_to_slot(size);
}
@@ -1806,7 +1807,7 @@ restart:
goto fail;
}
- if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
+ if (list_empty(&pcpu_slot[pcpu_free_slot])) {
chunk = pcpu_create_chunk(type, pcpu_gfp);
if (!chunk) {
err = "failed to allocate new chunk";
@@ -1958,7 +1959,7 @@ static void pcpu_balance_free(enum pcpu_chunk_type type)
{
LIST_HEAD(to_free);
struct list_head *pcpu_slot = pcpu_chunk_list(type);
- struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
+ struct list_head *free_head = &pcpu_slot[pcpu_free_slot];
struct pcpu_chunk *chunk, *next;
/*
@@ -2033,7 +2034,7 @@ retry_pop:
0, PCPU_EMPTY_POP_PAGES_HIGH);
}
- for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
+ for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
unsigned int nr_unpop = 0, rs, re;
if (!nr_to_pop)
@@ -2140,7 +2141,7 @@ void free_percpu(void __percpu *ptr)
if (chunk->free_bytes == pcpu_unit_size) {
struct pcpu_chunk *pos;
- list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
+ list_for_each_entry(pos, &pcpu_slot[pcpu_free_slot], list)
if (pos != chunk) {
need_balance = true;
break;
@@ -2562,7 +2563,8 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
* Allocate chunk slots. The additional last slot is for
* empty chunks.
*/
- pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
+ pcpu_free_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
+ pcpu_nr_slots = pcpu_free_slot + 1;
pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
sizeof(pcpu_chunk_lists[0]) *
PCPU_NR_CHUNK_TYPES,