/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CLEANUP_H
#define _LINUX_CLEANUP_H

#include <linux/compiler.h>

/**
 * DOC: scope-based cleanup helpers
 *
 * The "goto error" pattern is notorious for introducing subtle resource
 * leaks. It is tedious and error prone to add new resource acquisition
 * constraints into code paths that already have several unwind
 * conditions. The "cleanup" helpers enable the compiler to help with
 * this tedium and can aid in maintaining LIFO (last in first out)
 * unwind ordering to avoid unintentional leaks.
 *
 * As drivers make up the majority of the kernel code base, here is an
 * example of using these helpers to clean up PCI drivers. The targets
 * of the cleanups are occasions where a goto is used to unwind a device
 * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
 * before returning.
 *
 * The DEFINE_FREE() macro can arrange for PCI device references to be
 * dropped when the associated variable goes out of scope::
 *
 *	DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
 *	...
 *	struct pci_dev *dev __free(pci_dev_put) =
 *		pci_get_slot(parent, PCI_DEVFN(0, 0));
 *
 * The above will automatically call pci_dev_put() if @dev is non-NULL
 * when @dev goes out of scope (automatic variable scope). If a function
 * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
 * freeing it) on success, it can do::
 *
 *	return no_free_ptr(dev);
 *
 * ...or::
 *
 *	return_ptr(dev);
 *
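 * Putting those pieces together, a lookup helper that drops its
 * reference on failure but transfers it to the caller on success could
 * look like this (a sketch; validate_dev() is a hypothetical check)::
 *
 *	struct pci_dev *find_dev(struct pci_bus *parent)
 *	{
 *		struct pci_dev *dev __free(pci_dev_put) =
 *			pci_get_slot(parent, PCI_DEVFN(0, 0));
 *
 *		if (!dev || !validate_dev(dev))
 *			return NULL; // pci_dev_put() runs if @dev is non-NULL
 *		return_ptr(dev); // reference transferred to the caller
 *	}
 *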
 * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
 * dropped when the scope where guard() is invoked ends::
 *
 *	DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
 *	...
 *	guard(pci_dev)(dev);
 *
 * The lifetime of the lock obtained by the guard() helper follows the
 * scope of automatic variable declaration. Take the following example::
 *
 *	func(...)
 *	{
 *		if (...) {
 *			...
 *			guard(pci_dev)(dev); // pci_dev_lock() invoked here
 *			...
 *		} // <- implied pci_dev_unlock() triggered here
 *	}
 *
 * Observe that the lock is held for the remainder of the "if ()" block,
 * not the remainder of "func()".
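 *
 * When the critical section should be narrower than the remaining
 * scope, the scoped_guard() helper (defined later in this header) binds
 * the lock to an explicit compound statement instead::
 *
 *	scoped_guard (pci_dev, dev) {
 *		... // pci_dev_lock() held only for this block
 *	}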
 *
 * Now, when a function uses both __free() and guard(), or multiple
 * instances of __free(), the LIFO order of variable definition matters.
 * GCC documentation says:
 *
 * "When multiple variables in the same scope have cleanup attributes,
 * at exit from the scope their associated cleanup functions are run in
 * reverse order of definition (last defined, first cleanup)."
 *
 * When the unwind order matters, it requires that variables be defined
 * mid-function scope rather than at the top of the function. Take the
 * following example and notice the bug highlighted by "!!"::
 *
 *	LIST_HEAD(list);
 *	DEFINE_MUTEX(lock);
 *
 *	struct object {
 *		struct list_head node;
 *	};
 *
 *	static struct object *alloc_add(void)
 *	{
 *		struct object *obj;
 *
 *		lockdep_assert_held(&lock);
 *		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *		if (obj) {
 *			INIT_LIST_HEAD(&obj->node);
 *			list_add(&obj->node, &list);
 *		}
 *		return obj;
 *	}
 *
 *	static void remove_free(struct object *obj)
 *	{
 *		lockdep_assert_held(&lock);
 *		list_del(&obj->node);
 *		kfree(obj);
 *	}
 *
 *	DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
 *	static int init(void)
 *	{
 *		struct object *obj __free(remove_free) = NULL;
 *		int err;
 *
 *		guard(mutex)(&lock);
 *		obj = alloc_add();
 *
 *		if (!obj)
 *			return -ENOMEM;
 *
 *		err = other_init(obj);
 *		if (err)
 *			return err; // remove_free() called without the lock!!
 *
 *		retain_and_null_ptr(obj);
 *		return 0;
 *	}
 *
 * That bug is fixed by changing init() to call guard() and define +
 * initialize @obj in this order::
 *
 *	guard(mutex)(&lock);
 *	struct object *obj __free(remove_free) = alloc_add();
 *
 * Given that the "__free(...) = NULL" pattern for variables defined at
 * the top of the function poses this potential interdependency problem,
 * the recommendation is to always define and assign variables in one
 * statement and not group variable definitions at the top of the
 * function when __free() is used.
 *
 * Lastly, given that the benefit of cleanup helpers is removal of
 * "goto", and that the "goto" statement can jump between scopes, the
 * expectation is that usage of "goto" and cleanup helpers is never
 * mixed in the same function. I.e., for a given routine, convert all
 * resources that need a "goto" cleanup to scope-based cleanup, or
 * convert none of them.
 */

/*
 * DEFINE_FREE(name, type, free):
 *	simple helper macro that defines the required wrapper for a __free()
 *	based cleanup function. @free is an expression using '_T' to access the
 *	variable. @free should typically include a NULL test before calling a
 *	function, see the example below.
 *
 * __free(name):
 *	variable attribute to add a scope-based cleanup to the variable.
 *
 * no_free_ptr(var):
 *	like a non-atomic xchg(var, NULL), such that the cleanup function will
 *	be inhibited -- provided it sanely deals with a NULL value.
 *
 *	NOTE: this has __must_check semantics so that it is harder to accidentally
 *	leak the resource.
 *
 * return_ptr(p):
 *	returns p while inhibiting the __free().
 *
 * Ex.
 *
 * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
 *
 * void *alloc_obj(...)
 * {
 *	struct obj *p __free(kfree) = kmalloc(...);
 *	if (!p)
 *		return NULL;
 *
 *	if (!init_obj(p))
 *		return NULL;
 *
 *	return_ptr(p);
 * }
 *
 * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though
 * kfree() is fine to be called with a NULL value. This is on purpose. This way
 * the compiler sees the end of our alloc_obj() function as:
 *
 *	tmp = p;
 *	p = NULL;
 *	if (p)
 *		kfree(p);
 *	return tmp;
 *
 * And through the magic of value-propagation and dead-code-elimination, it
 * eliminates the actual cleanup call and compiles into:
 *
 *	return p;
 *
 * Without the NULL test it turns into a mess and the compiler can't help us.
 */

#define DEFINE_FREE(_name, _type, _free) \
	static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }

#define __free(_name)	__cleanup(__free_##_name)

#define __get_and_null(p, nullvalue)   \
	({                                  \
		__auto_type __ptr = &(p);   \
		__auto_type __val = *__ptr; \
		*__ptr = nullvalue;         \
		__val;                      \
	})

static inline __must_check
const volatile void * __must_check_fn(const volatile void *val)
{ return val; }

#define no_free_ptr(p) \
	((typeof(p)) __must_check_fn((__force const volatile void *)__get_and_null(p, NULL)))

#define return_ptr(p)	return no_free_ptr(p)

/*
 * Only for situations where an allocation is handed in to another function
 * and consumed by that function on success.
 *
 *	struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	setup(f);
 *	if (some_condition)
 *		return -EINVAL;
 *	....
 *	ret = bar(f);
 *	if (!ret)
 *		retain_and_null_ptr(f);
 *	return ret;
 *
 * After retain_and_null_ptr(f) the variable f is NULL and cannot be
 * dereferenced anymore.
 */
#define retain_and_null_ptr(p)		((void)__get_and_null(p, NULL))

/*
 * DEFINE_CLASS(name, type, exit, init, init_args...):
 *	helper to define the destructor and constructor for a type.
 *	@exit is an expression using '_T' -- similar to FREE above.
 *	@init is an expression in @init_args resulting in @type
 *
 * EXTEND_CLASS(name, ext, init, init_args...):
 *	extends class @name to @name@ext with the new constructor
 *
 * CLASS(name, var)(args...):
 *	declare the variable @var as an instance of the named class
 *
 * Ex.
 *
 * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
 *
 *	CLASS(fdget, f)(fd);
 *	if (fd_empty(f))
 *		return -EBADF;
 *
 *	// use 'f' without concern
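 *
 * EXTEND_CLASS() grafts an alternative constructor onto an existing
 * class while reusing its type and destructor. As a sketch, a "raw"
 * variant of the fdget class above (assuming the kernel's fdget_raw()
 * helper) could be added as::
 *
 *	EXTEND_CLASS(fdget, _raw, fdget_raw(fd), int fd)
 *
 *	CLASS(fdget_raw, f)(fd);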
 */

#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...)		\
typedef _type class_##_name##_t;					\
static inline void class_##_name##_destructor(_type *p)		\
{ _type _T = *p; _exit; }						\
static inline _type class_##_name##_constructor(_init_args)		\
{ _type t = _init; return t; }

#define EXTEND_CLASS(_name, ext, _init, _init_args...)			\
typedef class_##_name##_t class_##_name##ext##_t;			\
static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
{ class_##_name##_destructor(p); }					\
static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
{ class_##_name##_t t = _init; return t; }

#define CLASS(_name, var)						\
	class_##_name##_t var __cleanup(class_##_name##_destructor) =	\
		class_##_name##_constructor


/*
 * DEFINE_GUARD(name, type, lock, unlock):
 *	trivial wrapper around DEFINE_CLASS() above specifically
 *	for locks.
 *
 * DEFINE_GUARD_COND(name, ext, condlock)
 *	wrapper around EXTEND_CLASS above to add conditional lock
 *	variants to a base class, eg. mutex_trylock() or
 *	mutex_lock_interruptible().
 *
 * guard(name):
 *	an anonymous instance of the (guard) class, not recommended for
 *	conditional locks.
 *
 * scoped_guard (name, args...) { }:
 *	similar to CLASS(name, scope)(args), except the variable (with the
 *	explicit name 'scope') is declared in a for-loop such that its scope is
 *	bound to the next (compound) statement.
 *
 *	for conditional locks the loop body is skipped when the lock is not
 *	acquired.
 *
 * scoped_cond_guard (name, fail, args...) { }:
 *	similar to scoped_guard(), except it executes the @fail statement
 *	when the lock acquisition fails.
 *
 *	Only for conditional locks.
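 *
 * As a sketch, assuming the stock mutex guards from linux/mutex.h
 * (DEFINE_GUARD(mutex, ...) plus a DEFINE_GUARD_COND(mutex, _try, ...)
 * trylock variant)::
 *
 *	scoped_guard (mutex, &lock) {
 *		... // lock is held for this block only
 *	}
 *
 *	scoped_cond_guard (mutex_try, return -EBUSY, &lock) {
 *		... // only runs if mutex_trylock() succeeded
 *	}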
 */

#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond)	\
static __maybe_unused const bool class_##_name##_is_conditional = _is_cond

#define __DEFINE_GUARD_LOCK_PTR(_name, _exp) \
	static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
	{ return (void *)(__force unsigned long)*(_exp); }

#define DEFINE_CLASS_IS_GUARD(_name) \
	__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
	__DEFINE_GUARD_LOCK_PTR(_name, _T)

#define DEFINE_CLASS_IS_COND_GUARD(_name) \
	__DEFINE_CLASS_IS_CONDITIONAL(_name, true); \
	__DEFINE_GUARD_LOCK_PTR(_name, _T)

#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
	DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
	DEFINE_CLASS_IS_GUARD(_name)

#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
	EXTEND_CLASS(_name, _ext, \
		     ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
		     class_##_name##_t _T) \
	static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
	{ return class_##_name##_lock_ptr(_T); }

#define guard(_name) \
	CLASS(_name, __UNIQUE_ID(guard))

#define __guard_ptr(_name) class_##_name##_lock_ptr
#define __is_cond_ptr(_name) class_##_name##_is_conditional

/*
 * Helper macro for scoped_guard().
 *
 * Note that the "!__is_cond_ptr(_name)" part of the condition lets the
 * compiler be sure that, for unconditional locks, the body of the loop
 * (caller-provided code glued to the else clause) cannot be skipped. It
 * is needed because the other part -- "__guard_ptr(_name)(&scope)" -- is
 * too hard to deduce (even if it could be proven true for unconditional
 * locks).
 */
#define __scoped_guard(_name, _label, args...)				\
	for (CLASS(_name, scope)(args);					\
	     __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name);	\
	     ({ goto _label; }))					\
		if (0) {						\
_label:									\
			break;						\
		} else

#define scoped_guard(_name, args...)	\
	__scoped_guard(_name, __UNIQUE_ID(label), args)

#define __scoped_cond_guard(_name, _fail, _label, args...)		\
	for (CLASS(_name, scope)(args); true; ({ goto _label; }))	\
		if (!__guard_ptr(_name)(&scope)) {			\
			BUILD_BUG_ON(!__is_cond_ptr(_name));		\
			_fail;						\
_label:									\
			break;						\
		} else

#define scoped_cond_guard(_name, _fail, args...)	\
	__scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)

/*
 * Additional helper macros for generating lock guards with types, either for
 * locks that don't have a native type (eg. RCU, preempt) or those that need a
 * 'fat' pointer (eg. spin_lock_irqsave).
 *
 * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
 * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
 * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
 *
 * will result in the following type:
 *
 *	typedef struct {
 *		type *lock;		// 'type := void' for the _0 variant
 *		__VA_ARGS__;
 *	} class_##name##_t;
 *
 * As above, both _lock and _unlock are statements, except this time '_T' will
 * be a pointer to the above struct.
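 *
 * As a sketch, the irqsave spinlock guard from linux/spinlock.h uses
 * the trailing __VA_ARGS__ slot to carry the saved interrupt flags,
 * while a typeless lock such as RCU (see linux/rcupdate.h) uses the _0
 * variant (simplified here)::
 *
 *	DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
 *			    spin_lock_irqsave(_T->lock, _T->flags),
 *			    spin_unlock_irqrestore(_T->lock, _T->flags),
 *			    unsigned long flags)
 *
 *	DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())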
 */

#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...)		\
typedef struct {							\
	_type *lock;							\
	__VA_ARGS__;							\
} class_##_name##_t;							\
									\
static inline void class_##_name##_destructor(class_##_name##_t *_T)	\
{									\
	if (_T->lock) { _unlock; }					\
}									\
									\
__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)

#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock)			\
static inline class_##_name##_t class_##_name##_constructor(_type *l)	\
{									\
	class_##_name##_t _t = { .lock = l }, *_T = &_t;		\
	_lock;								\
	return _t;							\
}

#define __DEFINE_LOCK_GUARD_0(_name, _lock)				\
static inline class_##_name##_t class_##_name##_constructor(void)	\
{									\
	class_##_name##_t _t = { .lock = (void*)1 },			\
			  *_T __maybe_unused = &_t;			\
	_lock;								\
	return _t;							\
}

#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...)		\
__DEFINE_CLASS_IS_CONDITIONAL(_name, false);				\
__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__)		\
__DEFINE_LOCK_GUARD_1(_name, _type, _lock)

#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...)			\
__DEFINE_CLASS_IS_CONDITIONAL(_name, false);				\
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__)		\
__DEFINE_LOCK_GUARD_0(_name, _lock)

#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock)		\
	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true);		\
	EXTEND_CLASS(_name, _ext,					\
		     ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
		        if (_T->lock && !(_condlock)) _T->lock = NULL;	\
			_t; }),						\
		     typeof_member(class_##_name##_t, lock) l)		\
	static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
	{ return class_##_name##_lock_ptr(_T); }


#endif /* _LINUX_CLEANUP_H */