Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  65
1 file changed, 54 insertions(+), 11 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index c194ec913c68..ad509b3a89ee 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -929,12 +929,12 @@ int folio_referenced(struct folio *folio, int is_locked,
return pra.referenced;
}
-static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
- unsigned long address, void *arg)
+static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
{
- DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
+ int cleaned = 0;
+ struct vm_area_struct *vma = pvmw->vma;
struct mmu_notifier_range range;
- int *cleaned = arg;
+ unsigned long address = pvmw->address;
/*
* We have to assume the worst case, i.e. pmd, for invalidation. Note that
@@ -942,16 +942,16 @@ static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
*/
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
0, vma, vma->vm_mm, address,
- vma_address_end(&pvmw));
+ vma_address_end(pvmw));
mmu_notifier_invalidate_range_start(&range);
- while (page_vma_mapped_walk(&pvmw)) {
+ while (page_vma_mapped_walk(pvmw)) {
int ret = 0;
- address = pvmw.address;
- if (pvmw.pte) {
+ address = pvmw->address;
+ if (pvmw->pte) {
pte_t entry;
- pte_t *pte = pvmw.pte;
+ pte_t *pte = pvmw->pte;
if (!pte_dirty(*pte) && !pte_write(*pte))
continue;
@@ -964,7 +964,7 @@ static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
ret = 1;
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- pmd_t *pmd = pvmw.pmd;
+ pmd_t *pmd = pvmw->pmd;
pmd_t entry;
if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
@@ -991,11 +991,22 @@ static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
* See Documentation/vm/mmu_notifier.rst
*/
if (ret)
- (*cleaned)++;
+ cleaned++;
}
mmu_notifier_invalidate_range_end(&range);
+ return cleaned;
+}
+
+static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
+ unsigned long address, void *arg)
+{
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
+ int *cleaned = arg;
+
+ *cleaned += page_vma_mkclean_one(&pvmw);
+
return true;
}
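
For context, page_mkclean_one() stays the rmap_one callback used by folio_mkclean(); after this change it is a thin wrapper around page_vma_mkclean_one(). The following is a rough, illustrative sketch of how that caller drives the walk, closely following the existing folio_mkclean() in mm/rmap.c rather than code added by this patch:

	int folio_mkclean(struct folio *folio)
	{
		int cleaned = 0;
		struct address_space *mapping;
		struct rmap_walk_control rwc = {
			.arg = (void *)&cleaned,
			.rmap_one = page_mkclean_one,
			.invalid_vma = invalid_mkclean_vma,
		};

		BUG_ON(!folio_test_locked(folio));

		if (!folio_mapped(folio))
			return 0;

		mapping = folio_mapping(folio);
		if (!mapping)
			return 0;

		/* Visit every VMA mapping this folio; each call bumps cleaned. */
		rmap_walk(folio, &rwc);

		return cleaned;
	}
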
@@ -1033,6 +1044,38 @@ int folio_mkclean(struct folio *folio)
EXPORT_SYMBOL_GPL(folio_mkclean);
/**
+ * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped in the range
+ * [@pfn, @pfn + @nr_pages) at the specified offset (@pgoff)
+ * within the @vma of shared mappings. And since clean PTEs
+ * should also be read-only, write-protects them too.
+ * @pfn: start pfn.
+ * @nr_pages: number of physically contiguous pages starting with @pfn.
+ * @pgoff: page offset that @pfn is mapped with.
+ * @vma: vma that @pfn is mapped within.
+ *
+ * Returns the number of cleaned PTEs (including PMDs).
+ */
+int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
+ struct vm_area_struct *vma)
+{
+ struct page_vma_mapped_walk pvmw = {
+ .pfn = pfn,
+ .nr_pages = nr_pages,
+ .pgoff = pgoff,
+ .vma = vma,
+ .flags = PVMW_SYNC,
+ };
+
+ if (invalid_mkclean_vma(vma, NULL))
+ return 0;
+
+ pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
+ VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
+
+ return page_vma_mkclean_one(&pvmw);
+}
+
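
A hypothetical caller (not part of this patch) could use pfn_mkclean_range() to clean and write-protect a physically contiguous range that is mapped into shared file mappings, walking the mapping's i_mmap interval tree. example_range_mkclean() and its parameters below are illustrative only:

	static int example_range_mkclean(struct address_space *mapping,
					 unsigned long pfn, unsigned long nr_pages,
					 pgoff_t pgoff)
	{
		struct vm_area_struct *vma;
		int cleaned = 0;

		i_mmap_lock_read(mapping);
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
					  pgoff + nr_pages - 1) {
			/*
			 * pfn_mkclean_range() rejects unsuitable VMAs via
			 * invalid_mkclean_vma() itself, so every VMA found in
			 * the interval tree can be passed straight through.
			 */
			cleaned += pfn_mkclean_range(pfn, nr_pages, pgoff, vma);
		}
		i_mmap_unlock_read(mapping);

		return cleaned;
	}

Keeping the invalid_mkclean_vma() check inside pfn_mkclean_range() is what lets such a caller iterate the interval tree without filtering VMAs on its own.
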
+/**
* page_move_anon_rmap - move a page to our anon_vma
* @page: the page to move to our anon_vma
* @vma: the vma the page belongs to