// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the
 * semantics expectations that are being validated here. All future
 * changes in here or the documentation need to be in sync.
 */
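/*
 * The test is wired up as a late_initcall() (see debug_vm_pgtable() at
 * the bottom of this file), runs once during boot and reports semantic
 * violations through WARN_ON(). Per-test progress is emitted with
 * pr_debug(), so it is typically only visible with dynamic debug enabled.
 */
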
#define RANDOM_NZVALUE	GENMASK(7, 0)

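/*
 * pud_pfn/pmd_pfn/pte_pfn refer to pages really allocated in init_args()
 * (possibly all aliasing one huge page); ULONG_MAX marks a level whose
 * allocation failed, which makes the tests needing that page skip
 * themselves. The fixed_*_pfn fields describe an existing physical range
 * discovered by init_fixed_pfns() that is never touched, with
 * fixed_alignment recording the alignment of that range.
 */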
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_alignment;
	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma)));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding TLB flushes.
	 * This requires that set_pte_at() not be used to update an
	 * existing pte entry. Clear the pte before doing set_pte_at().
	 *
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
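	/*
	 * A really allocated page is needed here; pte_pfn == ULONG_MAX
	 * means init_args() could not provide one, so skip the test.
	 */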
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	if (WARN_ON(!args->ptep))
		return;

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte, args->vma);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma)));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pmd_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);
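	/*
	 * The pgtable deposited above is withdrawn again at the end of this
	 * function; the deposit/withdraw pair mirrors what THP code does
	 * around the lifetime of a huge pmd.
	 */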

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd, args->vma);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pte entries */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pud_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	/*
	 * Some architectures have debug checks to make sure
	 * huge pud mappings are only found with devmap entries.
	 * For now test only with devmap entries.
	 */
	pud = pud_mkdevmap(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkdevmap(pud);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = pudp_get(args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkdevmap(pud);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot) ||
	    args->fixed_alignment < PMD_SIZE)
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot) ||
	    args->fixed_alignment < PUD_SIZE)
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = pudp_get(args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	WARN_ON(pud_none(pud));
	pud_clear(args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = p4dp_get(args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	WARN_ON(p4d_none(p4d));
	p4d_clear(args->p4dp);
	p4d = p4dp_get(args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = p4dp_get(args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = pgdp_get(args->pgdp);

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	WARN_ON(pgd_none(pgd));
	pgd_clear(args->pgdp);
	pgd = pgdp_get(args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = pgdp_get(args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PTE clear\n");
	if (WARN_ON(!args->ptep))
		return;

	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	WARN_ON(pte_none(pte));
	flush_dcache_page(page);
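	/*
	 * barrier() presumably keeps the compiler from reordering the pte
	 * store above past the ptep_clear() below.
	 */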
	barrier();
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = pmdp_get(args->pmdp);

	pr_debug("Validating PMD clear\n");
	WARN_ON(pmd_none(pmd));
	pmd_clear(args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
	unsigned long max_swap_offset;
	swp_entry_t entry, entry2;
	pte_t pte;

	pr_debug("Validating PTE swap exclusive\n");

	/* See generic_max_swapfile_size(): probe the maximum offset */
	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));
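	/*
	 * The probe above works because converting an all-ones offset to a
	 * pte and back truncates it to the offset bits the pte can actually
	 * hold, i.e. the architecture's maximum representable swap offset.
	 */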

	/* Create a swp entry with all possible bits set */
	entry = swp_entry((1 << MAX_SWAPFILES_SHIFT) - 1, max_swap_offset);

	pte = swp_entry_to_pte(entry);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	WARN_ON(pte_swp_soft_dirty(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
}

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be
	 * really problematic. Let's use the allocated page explicitly for
	 * this purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_[readable|writable]_migration_entry() expects the given page
	 * to be locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);
	pte = arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS);

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	WARN_ON(!pte_huge(pte));
#endif
	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non transhuge pmds (i.e. there
	 * are no THP allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

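	/*
	 * Presumably, an architecture providing its own pmdp_invalidate()
	 * may not preserve these properties for an invalidated pmd, so the
	 * checks below are restricted to the generic implementation.
	 */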
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_leaf(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable back
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
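	/*
	 * pud_pfn, pmd_pfn and pte_pfn may all refer to the same allocation
	 * (see init_args()), so free only the largest level that was
	 * allocated and reset every aliasing pfn along with it.
	 */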
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_page(page);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}

static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

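	/*
	 * Orders above MAX_PAGE_ORDER are beyond what the buddy allocator
	 * can serve, so try the contiguous allocator for those and fall
	 * back to alloc_pages() otherwise.
	 */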
#ifdef CONFIG_CONTIG_ALLOC
	if (order > MAX_PAGE_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order <= MAX_PAGE_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}

/*
 * Check if a physical memory range described by <pstart, pend> contains
 * an area that is of size psize, and aligned to psize.
 *
 * Don't use address 0, an all-zeroes physical address might mask bugs, and
 * it's not used on x86.
 */
static void __init phys_align_check(phys_addr_t pstart,
				    phys_addr_t pend, unsigned long psize,
				    phys_addr_t *physp, unsigned long *alignp)
{
	phys_addr_t aligned_start, aligned_end;

	if (pstart == 0)
		pstart = PAGE_SIZE;

	aligned_start = ALIGN(pstart, psize);
	aligned_end = aligned_start + psize;

	if (aligned_end > aligned_start && aligned_end <= pend) {
		*alignp = psize;
		*physp = aligned_start;
	}
}

static void __init init_fixed_pfns(struct pgtable_debug_args *args)
{
	u64 idx;
	phys_addr_t phys, pstart, pend;

	/*
	 * Initialize the fixed pfns. To do this, try to find a
	 * valid physical range, preferably aligned to PUD_SIZE,
	 * but settling for aligned to PMD_SIZE as a fallback. If
	 * neither of those is found, use the physical address of
	 * the start_kernel symbol.
	 *
	 * The memory doesn't need to be allocated, it just needs to exist
	 * as usable memory. It won't be touched.
	 *
	 * The alignment is recorded, and can be checked to see if we
	 * can run the tests that require an actual valid physical
	 * address range on some architectures ({pmd,pud}_huge_tests
	 * on x86).
	 */

	phys = __pa_symbol(&start_kernel);
	args->fixed_alignment = PAGE_SIZE;

	for_each_mem_range(idx, &pstart, &pend) {
		/* First check for a PUD-aligned area */
		phys_align_check(pstart, pend, PUD_SIZE, &phys,
				 &args->fixed_alignment);

		/* If a PUD-aligned area is found, we're done */
		if (args->fixed_alignment == PUD_SIZE)
			break;

		/*
		 * If no PMD-aligned area has been found yet, check for one,
		 * but continue the loop to look for a PUD-aligned area.
		 */
		if (args->fixed_alignment < PMD_SIZE)
			phys_align_check(pstart, pend, PMD_SIZE, &phys,
					 &args->fixed_alignment);
	}

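	/*
	 * Record the pfn of the naturally aligned block that contains
	 * 'phys' at each page table level; masking with the level's mask
	 * picks the first pfn of that block.
	 */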
	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
}

static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED | VM_NONE)
	 * will help create page table entries with PROT_NONE permission as
	 * required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr = get_random_vaddr();
	args->page_prot = vm_get_page_prot(VM_ACCESS_FLAGS);
	args->page_prot_none = vm_get_page_prot(VM_NONE);
	args->is_contiguous_page = false;
	args->pud_pfn = ULONG_MAX;
	args->pmd_pfn = ULONG_MAX;
	args->pte_pfn = ULONG_MAX;
	args->fixed_pgd_pfn = ULONG_MAX;
	args->fixed_p4d_pfn = ULONG_MAX;
	args->fixed_pud_pfn = ULONG_MAX;
	args->fixed_pmd_pfn = ULONG_MAX;
	args->fixed_pte_pfn = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(pmdp_get(args->pmdp));
	WARN_ON(!args->start_ptep);

	init_fixed_pfns(args);

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_page(GFP_KERNEL);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	/*
	 * Iterate over each possible vm_flags combination to make sure
	 * that all the basic page table transformation validations just
	 * hold true irrespective of the starting protection value for a
	 * given page table entry.
	 *
	 * Protection based vm_flags combinations are always linear
	 * and increasing, i.e. starting from VM_NONE and going up to
	 * (VM_SHARED | READ | WRITE | EXEC).
	 */
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)
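	/*
	 * VM_READ is 0x1, VM_WRITE 0x2, VM_EXEC 0x4 and VM_SHARED 0x8, so
	 * this loop walks all 16 combinations of those four flags.
	 */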

	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic which do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out from
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_devmap_tests(&args);
	pmd_devmap_tests(&args);
	pud_devmap_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_exclusive_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold
	 * the proper page table lock.
	 */

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	if (args.ptep)
		pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(args.mm, args.pudp);
	pud_clear_tests(&args);
	pud_advanced_tests(&args);
	pud_huge_tests(&args);
	pud_populate_tests(&args);
	spin_unlock(ptl);

	spin_lock(&(args.mm->page_table_lock));
	p4d_clear_tests(&args);
	pgd_clear_tests(&args);
	p4d_populate_tests(&args);
	pgd_populate_tests(&args);
	spin_unlock(&(args.mm->page_table_lock));

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);