-rw-r--r--  arch/arm64/kvm/mmu.c                            14
-rw-r--r--  arch/mips/mm/fault.c                            10
-rw-r--r--  arch/riscv/mm/pageattr.c                         4
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h                   8
-rw-r--r--  drivers/staging/media/atomisp/pci/hmm/hmm_bo.c   4
-rw-r--r--  drivers/vfio/pci/vfio_pci.c                      8
-rw-r--r--  fs/proc/base.c                                   6
-rw-r--r--  lib/test_hmm.c                                  14
8 files changed, 34 insertions(+), 34 deletions(-)
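
This patch is part of the mmap locking API conversion: each open-coded down_read()/up_read() (and the trylock/killable variants) on mmap_sem is replaced by the corresponding mmap_read_lock()/mmap_read_unlock() wrapper. The wrappers introduced earlier in the series are thin inline functions over the same rwsem, so the conversion is mechanical and does not change locking behaviour. A minimal sketch of the read-side wrappers, assuming the underlying field is still named mmap_sem at this point in the series:

/*
 * Sketch of the read-side mmap locking API (cf. include/linux/mmap_lock.h).
 * Each wrapper forwards one-to-one to the rwsem primitive it replaces.
 */
static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
}

static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
	return down_read_killable(&mm->mmap_sem);
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_sem) != 0;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);
}

Because each wrapper maps one-to-one onto the call it replaces, every hunk below is a pure substitution.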
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 290154e32c0b..8c0035cab6b6 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1084,7 +1084,7 @@ void stage2_unmap_vm(struct kvm *kvm)
int idx;
idx = srcu_read_lock(&kvm->srcu);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@@ -1092,7 +1092,7 @@ void stage2_unmap_vm(struct kvm *kvm)
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -1848,11 +1848,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
/* Let's check if we will get back a huge page backed by hugetlbfs */
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma_intersection(current->mm, hva, hva + 1);
if (unlikely(!vma)) {
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EFAULT;
}
@@ -1879,7 +1879,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (vma_pagesize == PMD_SIZE ||
(vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
/* We need minimum second+third level pages */
ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
@@ -2456,7 +2456,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
(kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@@ -2515,7 +2515,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
out:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return ret;
}
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index f8d62cd83b36..9ef2dd39111e 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -97,7 +97,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -190,7 +190,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -198,7 +198,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -250,14 +250,14 @@ out_of_memory:
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 51165f73ce7d..ec2c70f84994 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -117,10 +117,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
if (!numpages)
return 0;
- down_read(&init_mm.mmap_sem);
+ mmap_read_lock(&init_mm);
ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
&masks);
- up_read(&init_mm.mmap_sem);
+ mmap_read_unlock(&init_mm);
flush_tlb_kernel_range(start, end);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 38c576495048..a6d484ea110b 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -165,22 +165,22 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
unsigned long pfn;
unsigned long paddr;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EFAULT;
}
pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
paddr = pfn << PAGE_SHIFT;
table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
if (!table) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EFAULT;
}
ret = CMPXCHG(&table[index], orig_pte, new_pte);
memunmap(table);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
}
return (ret != orig_pte);
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
index 492b76c29490..c9e7df0ea5a6 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
@@ -982,9 +982,9 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
}
mutex_unlock(&bo->mutex);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, (unsigned long)userptr);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (!vma) {
dev_err(atomisp_dev, "find_vma failed\n");
kfree(bo->page_obj);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 3fc198f3eeb5..b5f6ef2d12f6 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1422,17 +1422,17 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
mutex_unlock(&vdev->vma_lock);
if (try) {
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
mmput(mm);
return 0;
}
} else {
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
}
if (mmget_still_valid(mm)) {
if (try) {
if (!mutex_trylock(&vdev->vma_lock)) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
return 0;
}
@@ -1454,7 +1454,7 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
}
mutex_unlock(&vdev->vma_lock);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index f9c88e4bd837..4f0d6f40b8f1 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2322,7 +2322,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
if (!mm)
goto out_put_task;
- ret = down_read_killable(&mm->mmap_sem);
+ ret = mmap_read_lock_killable(mm);
if (ret) {
mmput(mm);
goto out_put_task;
@@ -2349,7 +2349,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
p = genradix_ptr_alloc(&fa, nr_files++, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
goto out_put_task;
}
@@ -2358,7 +2358,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
p->end = vma->vm_end;
p->mode = vma->vm_file->f_mode;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
for (i = 0; i < nr_files; i++) {
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 5c1858e325ba..28528285942c 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -245,9 +245,9 @@ static int dmirror_range_fault(struct dmirror *dmirror,
}
range->notifier_seq = mmu_interval_read_begin(range->notifier);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = hmm_range_fault(range);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (ret) {
if (ret == -EBUSY)
continue;
@@ -686,7 +686,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
if (!mmget_not_zero(mm))
return -EINVAL;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (addr = start; addr < end; addr = next) {
vma = find_vma(mm, addr);
if (!vma || addr < vma->vm_start ||
@@ -713,7 +713,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
dmirror_migrate_finalize_and_map(&args, dmirror);
migrate_vma_finalize(&args);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
/* Return the migrated data for verification. */
@@ -733,7 +733,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
return ret;
out:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
return ret;
}
@@ -825,9 +825,9 @@ static int dmirror_range_snapshot(struct dmirror *dmirror,
range->notifier_seq = mmu_interval_read_begin(range->notifier);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = hmm_range_fault(range);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (ret) {
if (ret == -EBUSY)
continue;
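
At the call sites the substitution preserves each variant's return convention: mmap_read_lock_killable() still returns 0 on success or -EINTR (fs/proc/base.c above), and mmap_read_trylock() still returns nonzero on success (drivers/vfio/pci/vfio_pci.c above). A short illustrative caller, not taken from this patch (walk_user_vmas() is a hypothetical helper; the mm->mmap/vm_next VMA linkage is as it existed before the maple-tree conversion):

/* Hypothetical caller -- illustrates the converted locking pattern. */
static int walk_user_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int ret;

	ret = mmap_read_lock_killable(mm);	/* was: down_read_killable(&mm->mmap_sem) */
	if (ret)
		return ret;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		/* read-only walk under the mmap read lock */
	}

	mmap_read_unlock(mm);			/* was: up_read(&mm->mmap_sem) */
	return 0;
}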