Diffstat (limited to 'arch/x86/mm/fault.c')
 arch/x86/mm/fault.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 5ce1ed02f7e8..dc8023060456 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -292,7 +292,7 @@ void vmalloc_sync_all(void)
 		return;
 
 	for (address = VMALLOC_START & PMD_MASK;
-	     address >= TASK_SIZE && address < FIXADDR_TOP;
+	     address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
 	     address += PMD_SIZE) {
 		struct page *page;
 
@@ -439,7 +439,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	 * happen within a race in page table update. In the later
 	 * case just flush:
 	 */
-	pgd = pgd_offset(current->active_mm, address);
+	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address);
 	pgd_ref = pgd_offset_k(address);
 	if (pgd_none(*pgd_ref))
 		return -1;
@@ -737,7 +737,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 		 * In this case we need to make sure we're not recursively
 		 * faulting through the emulate_vsyscall() logic.
 		 */
-		if (current_thread_info()->sig_on_uaccess_error && signal) {
+		if (current->thread.sig_on_uaccess_err && signal) {
 			tsk->thread.trap_nr = X86_TRAP_PF;
 			tsk->thread.error_code = error_code | PF_USER;
 			tsk->thread.cr2 = address;
@@ -854,8 +854,13 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 				return;
 		}
 #endif
-		/* Kernel addresses are always protection faults: */
-		if (address >= TASK_SIZE)
+
+		/*
+		 * To avoid leaking information about the kernel page table
+		 * layout, pretend that user-mode accesses to kernel addresses
+		 * are always protection faults.
+		 */
+		if (address >= TASK_SIZE_MAX)
 			error_code |= PF_PROT;
 
 		if (likely(show_unhandled_signals))
@@ -1348,7 +1353,7 @@ good_area:
 	 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	major |= fault & VM_FAULT_MAJOR;
 
 	/*
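
Note on the vmalloc_fault() hunk above: the page-table root of the faulting CPU is now read directly from CR3 rather than derived from current->active_mm, which may not match the page tables the CPU is actually using at the time of the fault. The sketch below only illustrates that lookup; it is not the kernel's exact code. The helper name sync_vmalloc_pgd() is made up for this note, while read_cr3(), __va(), pgd_index(), pgd_offset_k(), pgd_none() and set_pgd() are the x86-64 helpers already visible in the diff.

	/*
	 * Illustrative sketch: find the current PGD entry covering a vmalloc
	 * address via CR3 and, if it is empty, copy it from the reference
	 * kernel page table (init_mm).
	 */
	static int sync_vmalloc_pgd(unsigned long address)
	{
		/* CR3 holds the physical address of the active page-table root. */
		pgd_t *pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address);
		pgd_t *pgd_ref = pgd_offset_k(address);	/* same entry in init_mm */

		if (pgd_none(*pgd_ref))	/* not mapped in the reference table either */
			return -1;

		if (pgd_none(*pgd))	/* top-level entry missing here: copy it over */
			set_pgd(pgd, *pgd_ref);

		return 0;
	}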