author     Vineet Gupta <vgupta@kernel.org>    2020-10-13 19:44:07 -0700
committer  Vineet Gupta <vgupta@kernel.org>    2021-08-26 13:43:19 -0700
commit     56809a28d45fcad94b28cfd614600568c0d46545 (patch)
tree       46fc37f97267e686c624ff230bd00a5b00fa1fae /arch/arc
parent     8747ff704ac886f6ef992b1b7eadcf77d151fd3a (diff)
ARC: mm: vmalloc sync from kernel to user table to update PMD ...
... not PGD
vmalloc() sets up the kernel page table (starting from @swapper_pg_dir).
But when a vmalloc area is accessed in the context of a user task, say when
opening a terminal in n_tty_open(), the user page tables need to be synced
from the kernel page tables so that the TLB entry is created in the "user context".
The old code was doing this incorrectly: it was updating the user pgd
entry (the first level itself) to point to the kernel pud table (2nd level),
effectively replacing the entire user space translation with the kernel one.
The correct way to do this is to ONLY update a user space pgd/pud/pmd entry
if it is not populated already. This ensures that only the missing leaf
pmd entry gets updated to point to the relevant kernel pte table.
From a code change POV, we are changing the pattern:
    p4d = p4d_offset(pgd, address);
    p4d_k = p4d_offset(pgd_k, address);
    if (!p4d_present(*p4d_k))
        goto bad_area;
    set_p4d(p4d, *p4d_k);
with
    p4d = p4d_offset(pgd, address);
    p4d_k = p4d_offset(pgd_k, address);
    if (p4d_none(*p4d_k))
        goto bad_area;
    if (!p4d_present(*p4d))
        set_p4d(p4d, *p4d_k);
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
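Applied at every level, the synced walk in handle_kernel_vaddr_fault() ends up reading roughly like the sketch below. This is only a readable reassembly of the diff further down, not a quote of the file: the local declarations and the bad_area error path sit outside the hunk and are assumed here.

    noinline static int handle_kernel_vaddr_fault(unsigned long address)
    {
        /* assumed declarations, not visible in the hunk below */
        pgd_t *pgd, *pgd_k;
        p4d_t *p4d, *p4d_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        /* user table of the faulting task vs. kernel "reference" table */
        pgd = pgd_offset(current->active_mm, address);
        pgd_k = pgd_offset_k(address);

        if (pgd_none(*pgd_k))           /* kernel table has no entry: genuine fault */
            goto bad_area;
        if (!pgd_present(*pgd))         /* copy only if the user entry is still empty */
            set_pgd(pgd, *pgd_k);

        p4d = p4d_offset(pgd, address);
        p4d_k = p4d_offset(pgd_k, address);
        if (p4d_none(*p4d_k))
            goto bad_area;
        if (!p4d_present(*p4d))
            set_p4d(p4d, *p4d_k);

        pud = pud_offset(p4d, address);
        pud_k = pud_offset(p4d_k, address);
        if (pud_none(*pud_k))
            goto bad_area;
        if (!pud_present(*pud))
            set_pud(pud, *pud_k);

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (pmd_none(*pmd_k))
            goto bad_area;
        if (!pmd_present(*pmd))
            set_pmd(pmd, *pmd_k);

        /* XXX: create the TLB entry here */
        return 0;

    bad_area:
        return 1;   /* assumed: non-zero makes the caller treat it as a real fault */
    }

Note that the kernel-side entries (*pgd_k, *p4d_k, ...) are only ever read, while the user-side entries are written only when still empty, so an already populated user translation is never clobbered.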
Diffstat (limited to 'arch/arc')
-rw-r--r--    arch/arc/mm/fault.c    24
1 file changed, 12 insertions, 12 deletions
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index f8994164fa36..5787c261c9a4 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -36,31 +36,31 @@ noinline static int handle_kernel_vaddr_fault(unsigned long address)
         pgd = pgd_offset(current->active_mm, address);
         pgd_k = pgd_offset_k(address);
 
-        if (!pgd_present(*pgd_k))
+        if (pgd_none (*pgd_k))
                 goto bad_area;
-
-        set_pgd(pgd, *pgd_k);
+        if (!pgd_present(*pgd))
+                set_pgd(pgd, *pgd_k);
 
         p4d = p4d_offset(pgd, address);
         p4d_k = p4d_offset(pgd_k, address);
-        if (!p4d_present(*p4d_k))
+        if (p4d_none(*p4d_k))
                 goto bad_area;
-
-        set_p4d(p4d, *p4d_k);
+        if (!p4d_present(*p4d))
+                set_p4d(p4d, *p4d_k);
 
         pud = pud_offset(p4d, address);
         pud_k = pud_offset(p4d_k, address);
-        if (!pud_present(*pud_k))
+        if (pud_none(*pud_k))
                 goto bad_area;
-
-        set_pud(pud, *pud_k);
+        if (!pud_present(*pud))
+                set_pud(pud, *pud_k);
 
         pmd = pmd_offset(pud, address);
         pmd_k = pmd_offset(pud_k, address);
-        if (!pmd_present(*pmd_k))
+        if (pmd_none(*pmd_k))
                 goto bad_area;
-
-        set_pmd(pmd, *pmd_k);
+        if (!pmd_present(*pmd))
+                set_pmd(pmd, *pmd_k);
 
         /* XXX: create the TLB entry here */
         return 0;
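For context, this helper is reached from the ARC page fault handler when a kernel (vmalloc) address faults while a user task's page tables are active. The call-site sketch below is an assumption based on the general shape of arch/arc/mm/fault.c (do_page_fault(), VMALLOC_START, user_mode()); it is illustrative and not part of this patch:

    /* sketch of the assumed call site in do_page_fault(), not part of this patch */
    void do_page_fault(unsigned long address, struct pt_regs *regs)
    {
        /*
         * Kernel-mode faults on vmalloc space are fixed up by copying
         * entries from the kernel "reference" table; no locks and no
         * mm access, so this is safe even from interrupt context.
         */
        if (address >= VMALLOC_START && !user_mode(regs)) {
            if (handle_kernel_vaddr_fault(address))
                goto no_context;        /* kernel fault path, elided here */
            return;
        }

        /* ... normal user-address fault handling, elided ... */

    no_context:
        /* ... exception fixup / die() path, elided ... */
        return;
    }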