// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
 * Copyright (C) 2024 Mike Rapoport IBM.
 */

#define pr_fmt(fmt) "execmem: " fmt

#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/execmem.h>
#include <linux/maple_tree.h>
#include <linux/set_memory.h>
#include <linux/moduleloader.h>
#include <linux/text-patching.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct execmem_info *execmem_info __ro_after_init;
static struct execmem_info default_execmem_info __ro_after_init;

#ifdef CONFIG_MMU
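/*
 * Allocate @size bytes within [@range->start, @range->end), falling back to
 * [@range->fallback_start, @range->fallback_end) when the first attempt
 * fails. KASAN module shadow is allocated when the range requests it.
 */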
static void *execmem_vmalloc(struct execmem_range *range, size_t size,
			     pgprot_t pgprot, unsigned long vm_flags)
{
	bool kasan = range->flags & EXECMEM_KASAN_SHADOW;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_NOWARN;
	unsigned int align = range->alignment;
	unsigned long start = range->start;
	unsigned long end = range->end;
	void *p;

	if (kasan)
		vm_flags |= VM_DEFER_KMEMLEAK;

	if (vm_flags & VM_ALLOW_HUGE_VMAP)
		align = PMD_SIZE;

	p = __vmalloc_node_range(size, align, start, end, gfp_flags,
				 pgprot, vm_flags, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (!p && range->fallback_start) {
		start = range->fallback_start;
		end = range->fallback_end;
		p = __vmalloc_node_range(size, align, start, end, gfp_flags,
					 pgprot, vm_flags, NUMA_NO_NODE,
					 __builtin_return_address(0));
	}

	if (!p) {
		pr_warn_ratelimited("unable to allocate memory\n");
		return NULL;
	}

	if (kasan && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

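/*
 * Reserve a virtually contiguous area in the EXECMEM_MODULE_DATA range
 * without populating it, trying the fallback range if the first attempt
 * fails.
 */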
struct vm_struct *execmem_vmap(size_t size)
{
	struct execmem_range *range = &execmem_info->ranges[EXECMEM_MODULE_DATA];
	struct vm_struct *area;

	area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
				  range->start, range->end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
	if (!area && range->fallback_start)
		area = __get_vm_area_node(size, range->alignment, PAGE_SHIFT, VM_ALLOC,
					  range->fallback_start, range->fallback_end,
					  NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0));

	return area;
}
#else
static void *execmem_vmalloc(struct execmem_range *range, size_t size,
			     pgprot_t pgprot, unsigned long vm_flags)
{
	return vmalloc(size);
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
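/*
 * The ROX cache tracks read-only-executable memory in two maple trees
 * protected by @mutex: @free_areas holds ranges available for allocation,
 * @busy_areas holds ranges currently handed out to callers.
 */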
struct execmem_cache {
	struct mutex mutex;
	struct maple_tree busy_areas;
	struct maple_tree free_areas;
};

static struct execmem_cache execmem_cache = {
	.mutex = __MUTEX_INITIALIZER(execmem_cache.mutex),
	.busy_areas = MTREE_INIT_EXT(busy_areas, MT_FLAGS_LOCK_EXTERN,
				     execmem_cache.mutex),
	.free_areas = MTREE_INIT_EXT(free_areas, MT_FLAGS_LOCK_EXTERN,
				     execmem_cache.mutex),
};

static inline unsigned long mas_range_len(struct ma_state *mas)
{
	return mas->last - mas->index + 1;
}

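/*
 * Mark the direct map entries for all pages of @vm as @valid or invalid.
 * On failure, roll back the pages that were already updated.
 */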
static int execmem_set_direct_map_valid(struct vm_struct *vm, bool valid)
{
	unsigned int nr = (1 << get_vm_area_page_order(vm));
	unsigned int updated = 0;
	int err = 0;

	for (int i = 0; i < vm->nr_pages; i += nr) {
		err = set_direct_map_valid_noflush(vm->pages[i], nr, valid);
		if (err)
			goto err_restore;
		updated += nr;
	}

	return 0;

err_restore:
	for (int i = 0; i < updated; i += nr)
		set_direct_map_valid_noflush(vm->pages[i], nr, !valid);

	return err;
}

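/*
 * Return fully unused, PMD-aligned free areas to vmalloc: restore their
 * direct map entries, drop them from @free_areas and vfree() them.
 */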
static void execmem_cache_clean(struct work_struct *work)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct mutex *mutex = &execmem_cache.mutex;
	MA_STATE(mas, free_areas, 0, ULONG_MAX);
	void *area;

	mutex_lock(mutex);
	mas_for_each(&mas, area, ULONG_MAX) {
		size_t size = mas_range_len(&mas);

		if (IS_ALIGNED(size, PMD_SIZE) &&
		    IS_ALIGNED(mas.index, PMD_SIZE)) {
			struct vm_struct *vm = find_vm_area(area);

			execmem_set_direct_map_valid(vm, true);
			mas_store_gfp(&mas, NULL, GFP_KERNEL);
			vfree(area);
		}
	}
	mutex_unlock(mutex);
}

static DECLARE_WORK(execmem_cache_clean_work, execmem_cache_clean);

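/*
 * Add [@ptr, @ptr + @size) to @free_areas, merging it with adjacent free
 * areas on either side when they exist.
 */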
static int execmem_cache_add(void *ptr, size_t size)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct mutex *mutex = &execmem_cache.mutex;
	unsigned long addr = (unsigned long)ptr;
	MA_STATE(mas, free_areas, addr - 1, addr + 1);
	unsigned long lower, upper;
	void *area = NULL;
	int err;

	lower = addr;
	upper = addr + size - 1;

	mutex_lock(mutex);
	area = mas_walk(&mas);
	if (area && mas.last == addr - 1)
		lower = mas.index;

	area = mas_next(&mas, ULONG_MAX);
	if (area && mas.index == addr + size)
		upper = mas.last;

	mas_set_range(&mas, lower, upper);
	err = mas_store_gfp(&mas, (void *)lower, GFP_KERNEL);
	mutex_unlock(mutex);
	if (err)
		return err;

	return 0;
}

static bool within_range(struct execmem_range *range, struct ma_state *mas,
			 size_t size)
{
	unsigned long addr = mas->index;

	if (addr >= range->start && addr + size < range->end)
		return true;

	if (range->fallback_start &&
	    addr >= range->fallback_start && addr + size < range->fallback_end)
		return true;

	return false;
}

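/*
 * Find the first free area that is large enough and lies within @range,
 * move [addr, addr + size) to @busy_areas and re-insert whatever remains
 * of the free area into @free_areas.
 */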
static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
	MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
	struct mutex *mutex = &execmem_cache.mutex;
	unsigned long addr, last, area_size = 0;
	void *area, *ptr = NULL;
	int err;

	mutex_lock(mutex);
	mas_for_each(&mas_free, area, ULONG_MAX) {
		area_size = mas_range_len(&mas_free);

		if (area_size >= size && within_range(range, &mas_free, size))
			break;
	}

	if (area_size < size)
		goto out_unlock;

	addr = mas_free.index;
	last = mas_free.last;

	/* insert allocated size to busy_areas at range [addr, addr + size) */
	mas_set_range(&mas_busy, addr, addr + size - 1);
	err = mas_store_gfp(&mas_busy, (void *)addr, GFP_KERNEL);
	if (err)
		goto out_unlock;

	mas_store_gfp(&mas_free, NULL, GFP_KERNEL);
	if (area_size > size) {
		void *ptr = (void *)(addr + size);

		/*
		 * re-insert remaining free size to free_areas at range
		 * [addr + size, last]
		 */
		mas_set_range(&mas_free, addr + size, last);
		err = mas_store_gfp(&mas_free, ptr, GFP_KERNEL);
		if (err) {
			mas_store_gfp(&mas_busy, NULL, GFP_KERNEL);
			goto out_unlock;
		}
	}
	ptr = (void *)addr;

out_unlock:
	mutex_unlock(mutex);
	return ptr;
}

static bool execmem_cache_rox = false;

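/*
 * Switch all areas currently in the cache, free and busy alike, to
 * read-only, and make later cache populations map their memory ROX from
 * the start (see execmem_cache_rox in execmem_cache_populate()).
 */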
void execmem_cache_make_ro(void)
{
	struct maple_tree *free_areas = &execmem_cache.free_areas;
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
	MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
	struct mutex *mutex = &execmem_cache.mutex;
	void *area;

	execmem_cache_rox = true;

	mutex_lock(mutex);

	mas_for_each(&mas_free, area, ULONG_MAX) {
		unsigned long pages = mas_range_len(&mas_free) >> PAGE_SHIFT;
		set_memory_ro(mas_free.index, pages);
	}

	mas_for_each(&mas_busy, area, ULONG_MAX) {
		unsigned long pages = mas_range_len(&mas_busy) >> PAGE_SHIFT;
		set_memory_ro(mas_busy.index, pages);
	}

	mutex_unlock(mutex);
}

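/*
 * Grow the cache by a PMD-aligned allocation: map it, fill it with
 * trapping instructions, make it executable (ROX once the cache has been
 * made read-only) and hand it to the free areas tree.
 */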
static int execmem_cache_populate(struct execmem_range *range, size_t size)
{
	unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
	struct vm_struct *vm;
	size_t alloc_size;
	int err = -ENOMEM;
	void *p;

	alloc_size = round_up(size, PMD_SIZE);
	p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);
	if (!p)
		return err;

	vm = find_vm_area(p);
	if (!vm)
		goto err_free_mem;

	/* fill memory with instructions that will trap */
	execmem_fill_trapping_insns(p, alloc_size, /* writable = */ true);

	if (execmem_cache_rox) {
		err = set_memory_rox((unsigned long)p, vm->nr_pages);
		if (err)
			goto err_free_mem;
	} else {
		err = set_memory_x((unsigned long)p, vm->nr_pages);
		if (err)
			goto err_free_mem;
	}

	err = execmem_cache_add(p, alloc_size);
	if (err)
		goto err_reset_direct_map;

	return 0;

err_reset_direct_map:
	execmem_set_direct_map_valid(vm, true);
err_free_mem:
	vfree(p);
	return err;
}

static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	void *p;
	int err;

	p = __execmem_cache_alloc(range, size);
	if (p)
		return p;

	err = execmem_cache_populate(range, size);
	if (err)
		return NULL;

	return __execmem_cache_alloc(range, size);
}

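/*
 * If @ptr belongs to the cache, poison it with trapping instructions,
 * return it to the free areas and schedule the cleanup work. Returns
 * false when @ptr is not a cached allocation and the caller must free it.
 */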
static bool execmem_cache_free(void *ptr)
{
	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
	struct mutex *mutex = &execmem_cache.mutex;
	unsigned long addr = (unsigned long)ptr;
	MA_STATE(mas, busy_areas, addr, addr);
	size_t size;
	void *area;

	mutex_lock(mutex);
	area = mas_walk(&mas);
	if (!area) {
		mutex_unlock(mutex);
		return false;
	}
	size = mas_range_len(&mas);

	mas_store_gfp(&mas, NULL, GFP_KERNEL);
	mutex_unlock(mutex);

	execmem_fill_trapping_insns(ptr, size, /* writable = */ false);

	execmem_cache_add(ptr, size);

	schedule_work(&execmem_cache_clean_work);

	return true;
}

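/*
 * Temporarily drop execute and allow writes on [@ptr, @ptr + @size) so the
 * caller can modify cached ROX memory in place; execmem_restore_rox()
 * switches it back to read-only-executable.
 */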
int execmem_make_temp_rw(void *ptr, size_t size)
{
	unsigned int nr = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)ptr;
	int ret;

	ret = set_memory_nx(addr, nr);
	if (ret)
		return ret;

	return set_memory_rw(addr, nr);
}

int execmem_restore_rox(void *ptr, size_t size)
{
	unsigned int nr = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)ptr;

	return set_memory_rox(addr, nr);
}

#else /* CONFIG_ARCH_HAS_EXECMEM_ROX */
static void *execmem_cache_alloc(struct execmem_range *range, size_t size)
{
	return NULL;
}

static bool execmem_cache_free(void *ptr)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_EXECMEM_ROX */

void *execmem_alloc(enum execmem_type type, size_t size)
{
	struct execmem_range *range = &execmem_info->ranges[type];
	bool use_cache = range->flags & EXECMEM_ROX_CACHE;
	unsigned long vm_flags = VM_FLUSH_RESET_PERMS;
	pgprot_t pgprot = range->pgprot;
	void *p;

	size = PAGE_ALIGN(size);

	if (use_cache)
		p = execmem_cache_alloc(range, size);
	else
		p = execmem_vmalloc(range, size, pgprot, vm_flags);

	return kasan_reset_tag(p);
}

void execmem_free(void *ptr)
{
	/*
	 * This memory may be RO, and freeing RO memory in an interrupt is not
	 * supported by vmalloc.
	 */
	WARN_ON(in_interrupt());

	if (!execmem_cache_free(ptr))
		vfree(ptr);
}

void *execmem_update_copy(void *dst, const void *src, size_t size)
{
	return text_poke_copy(dst, src, size);
}

bool execmem_is_rox(enum execmem_type type)
{
	return !!(execmem_info->ranges[type].flags & EXECMEM_ROX_CACHE);
}

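/*
 * Reject an execmem_info whose default range is unusable, and strip the
 * EXECMEM_ROX_CACHE flag when the architecture does not support the cache.
 */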
static bool execmem_validate(struct execmem_info *info)
{
	struct execmem_range *r = &info->ranges[EXECMEM_DEFAULT];

	if (!r->alignment || !r->start || !r->end || !pgprot_val(r->pgprot)) {
		pr_crit("Invalid parameters for execmem allocator, module loading will fail");
		return false;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX)) {
		for (int i = EXECMEM_DEFAULT; i < EXECMEM_TYPE_MAX; i++) {
			r = &info->ranges[i];

			if (r->flags & EXECMEM_ROX_CACHE) {
				pr_warn_once("ROX cache is not supported\n");
				r->flags &= ~EXECMEM_ROX_CACHE;
			}
		}
	}

	return true;
}

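/*
 * Ranges the architecture left unset inherit their parameters from the
 * EXECMEM_DEFAULT range; EXECMEM_MODULE_DATA additionally gets a
 * non-executable PAGE_KERNEL protection.
 */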
static void execmem_init_missing(struct execmem_info *info)
{
	struct execmem_range *default_range = &info->ranges[EXECMEM_DEFAULT];

	for (int i = EXECMEM_DEFAULT + 1; i < EXECMEM_TYPE_MAX; i++) {
		struct execmem_range *r = &info->ranges[i];

		if (!r->start) {
			if (i == EXECMEM_MODULE_DATA)
				r->pgprot = PAGE_KERNEL;
			else
				r->pgprot = default_range->pgprot;
			r->alignment = default_range->alignment;
			r->start = default_range->start;
			r->end = default_range->end;
			r->flags = default_range->flags;
			r->fallback_start = default_range->fallback_start;
			r->fallback_end = default_range->fallback_end;
		}
	}
}

struct execmem_info * __weak execmem_arch_setup(void)
{
	return NULL;
}

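/*
 * Use the architecture-provided execmem_info when there is one, otherwise
 * fall back to a single executable range covering the whole vmalloc area.
 */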
static void __init __execmem_init(void)
{
	struct execmem_info *info = execmem_arch_setup();

	if (!info) {
		info = execmem_info = &default_execmem_info;
		info->ranges[EXECMEM_DEFAULT].start = VMALLOC_START;
		info->ranges[EXECMEM_DEFAULT].end = VMALLOC_END;
		info->ranges[EXECMEM_DEFAULT].pgprot = PAGE_KERNEL_EXEC;
		info->ranges[EXECMEM_DEFAULT].alignment = 1;
	}

	if (!execmem_validate(info))
		return;

	execmem_init_missing(info);

	execmem_info = info;
}

#ifdef CONFIG_ARCH_WANTS_EXECMEM_LATE
static int __init execmem_late_init(void)
{
	__execmem_init();
	return 0;
}
core_initcall(execmem_late_init);
#else
void __init execmem_init(void)
{
	__execmem_init();
}
#endif