// SPDX-License-Identifier: GPL-2.0
/*
 * hrtimers - High-resolution kernel timers
 *
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 * data type definitions, declarations, prototypes
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 */
#ifndef _LINUX_HRTIMER_H
#define _LINUX_HRTIMER_H

#include <linux/hrtimer_defs.h>
#include <linux/hrtimer_types.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu-defs.h>
#include <linux/rbtree.h>
#include <linux/timer.h>

/*
 * Mode arguments of xxx_hrtimer functions:
 *
 * HRTIMER_MODE_ABS	- Time value is absolute
 * HRTIMER_MODE_REL	- Time value is relative to now
 * HRTIMER_MODE_PINNED	- Timer is bound to CPU (is only considered
 *			  when starting the timer)
 * HRTIMER_MODE_SOFT	- Timer callback function will be executed in
 *			  soft irq context
 * HRTIMER_MODE_HARD	- Timer callback function will be executed in
 *			  hard irq context even on PREEMPT_RT.
 */
enum hrtimer_mode {
	HRTIMER_MODE_ABS	= 0x00,
	HRTIMER_MODE_REL	= 0x01,
	HRTIMER_MODE_PINNED	= 0x02,
	HRTIMER_MODE_SOFT	= 0x04,
	HRTIMER_MODE_HARD	= 0x08,

	HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
	HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,

	HRTIMER_MODE_ABS_SOFT	= HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
	HRTIMER_MODE_REL_SOFT	= HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,

	HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
	HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,

	HRTIMER_MODE_ABS_HARD	= HRTIMER_MODE_ABS | HRTIMER_MODE_HARD,
	HRTIMER_MODE_REL_HARD	= HRTIMER_MODE_REL | HRTIMER_MODE_HARD,

	HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD,
	HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
};
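
/*
 * Illustrative sketch (not part of this header): the mode bits above are
 * meant to be combined when starting a timer. A hypothetical caller that
 * wants a relative, CPU-pinned timer would pass:
 *
 *	hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL_PINNED);
 *
 * which is HRTIMER_MODE_REL | HRTIMER_MODE_PINNED; my_timer is a made-up
 * name for the example.
 */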

/*
 * Values to track state of the timer
 *
 * Possible states:
 *
 * 0x00		inactive
 * 0x01		enqueued into rbtree
 *
 * The callback state is not part of the timer->state because clearing it
 * would mean touching the timer after the callback returns, which would
 * make it impossible to free the timer from the callback function.
 *
 * Therefore we track the callback state in:
 *
 *	timer->base->cpu_base->running == timer
 *
 * On SMP it is possible to have a "callback function running and enqueued"
 * status. It happens for example when a posix timer expired and the callback
 * queued a signal. Between dropping the lock which protects the posix timer
 * and reacquiring the base lock of the hrtimer, another CPU can deliver the
 * signal and rearm the timer.
 *
 * All state transitions are protected by cpu_base->lock.
 */
#define HRTIMER_STATE_INACTIVE	0x00
#define HRTIMER_STATE_ENQUEUED	0x01

/**
 * struct hrtimer_sleeper - simple sleeper structure
 * @timer:	embedded timer structure
 * @task:	task to wake up
 *
 * task is set to NULL when the timer expires.
 */
struct hrtimer_sleeper {
	struct hrtimer timer;
	struct task_struct *task;
};
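
/*
 * Illustrative sketch of the typical sleeper pattern, as used by
 * nanosleep-style code paths (simplified, names hypothetical; the setup
 * and start helpers are declared further down in this header):
 *
 *	struct hrtimer_sleeper t;
 *
 *	hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *	hrtimer_set_expires(&t.timer, deadline);
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);
 *	if (t.task)
 *		schedule();
 *	hrtimer_cancel(&t.timer);
 *	destroy_hrtimer_on_stack(&t.timer);
 *
 * The expiry callback wakes the task and sets t.task to NULL, so a NULL
 * t.task after wakeup means the timeout has fired.
 */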

static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
	timer->node.expires = time;
	timer->_softexpires = time;
}

static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
{
	timer->_softexpires = time;
	timer->node.expires = ktime_add_safe(time, delta);
}

static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
{
	timer->_softexpires = time;
	timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
}

static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
	timer->node.expires = tv64;
	timer->_softexpires = tv64;
}

static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
	timer->node.expires = ktime_add_safe(timer->node.expires, time);
	timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
}

static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
{
	timer->node.expires = ktime_add_ns(timer->node.expires, ns);
	timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
}
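
/*
 * Illustrative note on the two expiry values set above: after a
 * hypothetical call such as
 *
 *	hrtimer_set_expires_range_ns(timer, t, 100 * NSEC_PER_USEC);
 *
 * _softexpires (t) is the earliest acceptable expiry and node.expires
 * (t + 100us) the hard deadline; the core may expire the timer anywhere
 * in that window, which allows batching of nearby timers.
 */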

static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
{
	return timer->node.expires;
}

static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
{
	return timer->_softexpires;
}

static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
	return timer->node.expires;
}

static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
	return timer->_softexpires;
}

static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
	return ktime_to_ns(timer->node.expires);
}

static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
	return ktime_sub(timer->node.expires, timer->base->get_time());
}

static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
	return timer->base->get_time();
}

static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
	return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
		timer->base->cpu_base->hres_active : 0;
}

#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;

extern void hrtimer_interrupt(struct clock_event_device *dev);

extern unsigned int hrtimer_resolution;

#else

#define hrtimer_resolution	(unsigned int)LOW_RES_NSEC

#endif

static inline ktime_t
__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
{
	ktime_t rem = ktime_sub(timer->node.expires, now);

	/*
	 * Adjust relative timers for the extra we added in
	 * hrtimer_start_range_ns() to prevent short timeouts.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
		rem -= hrtimer_resolution;
	return rem;
}

static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
	return __hrtimer_expires_remaining_adjusted(timer,
						    timer->base->get_time());
}

#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
extern void timerfd_resume(void);
#else
static inline void timerfd_clock_was_set(void) { }
static inline void timerfd_resume(void) { }
#endif

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);

#ifdef CONFIG_PREEMPT_RT
void hrtimer_cancel_wait_running(const struct hrtimer *timer);
#else
static inline void hrtimer_cancel_wait_running(struct hrtimer *timer)
{
	cpu_relax();
}
#endif

static inline enum hrtimer_restart hrtimer_dummy_timeout(struct hrtimer *unused)
{
	return HRTIMER_NORESTART;
}

/* Exported timer functions: */

/* Initialize timers: */
extern void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t clock_id, enum hrtimer_mode mode);
extern void hrtimer_setup_on_stack(struct hrtimer *timer,
				   enum hrtimer_restart (*function)(struct hrtimer *),
				   clockid_t clock_id, enum hrtimer_mode mode);
extern void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl, clockid_t clock_id,
					   enum hrtimer_mode mode);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
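
/*
 * Illustrative sketch (my_stack_fn is a hypothetical callback): an on-stack
 * timer is paired with destroy_hrtimer_on_stack() so that DEBUG_OBJECTS can
 * track its lifetime:
 *
 *	struct hrtimer t;
 *
 *	hrtimer_setup_on_stack(&t, my_stack_fn, CLOCK_MONOTONIC,
 *			       HRTIMER_MODE_REL);
 *	hrtimer_start(&t, ms_to_ktime(5), HRTIMER_MODE_REL);
 *	...
 *	hrtimer_cancel(&t);
 *	destroy_hrtimer_on_stack(&t);
 */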

/* Basic timer operations: */
extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
				   u64 range_ns, const enum hrtimer_mode mode);

/**
 * hrtimer_start - (re)start an hrtimer
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
 *		softirq based mode is considered for debugging purposes only!
 */
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
				 const enum hrtimer_mode mode)
{
	hrtimer_start_range_ns(timer, tim, 0, mode);
}
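
/*
 * Illustrative sketch (hypothetical names) of the basic lifecycle of a
 * one-shot timer embedded in a driver structure:
 *
 *	static enum hrtimer_restart my_timeout(struct hrtimer *t)
 *	{
 *		struct my_dev *dev = container_of(t, struct my_dev, timer);
 *
 *		handle_timeout(dev);
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	hrtimer_setup(&dev->timer, my_timeout, CLOCK_MONOTONIC,
 *		      HRTIMER_MODE_REL);
 *	hrtimer_start(&dev->timer, ms_to_ktime(100), HRTIMER_MODE_REL);
 *
 * The mode given to hrtimer_setup() selects the clock base; the mode given
 * to hrtimer_start() determines how @tim is interpreted.
 */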

extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);

static inline void hrtimer_start_expires(struct hrtimer *timer,
					 enum hrtimer_mode mode)
{
	u64 delta;
	ktime_t soft, hard;

	soft = hrtimer_get_softexpires(timer);
	hard = hrtimer_get_expires(timer);
	delta = ktime_to_ns(ktime_sub(hard, soft));
	hrtimer_start_range_ns(timer, soft, delta, mode);
}

void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
				   enum hrtimer_mode mode);

static inline void hrtimer_restart(struct hrtimer *timer)
{
	hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	return __hrtimer_get_remaining(timer, false);
}

extern u64 hrtimer_get_next_event(void);
extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);

extern bool hrtimer_active(const struct hrtimer *timer);

/**
 * hrtimer_is_queued - check, whether the timer is on one of the queues
 * @timer:	Timer to check
 *
 * Returns: True if the timer is queued, false otherwise
 *
 * The function can be used lockless, but it gives only a current snapshot.
 */
static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
	/* The READ_ONCE pairs with the update functions of timer->state */
	return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
}

/*
 * Helper function to check, whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
	return timer->base->running == timer;
}

/**
 * hrtimer_update_function - Update the timer's callback function
 * @timer:	Timer to update
 * @function:	New callback function
 *
 * Only safe to call if the timer is not enqueued. Can be called in the
 * callback function if the timer is not enqueued at the same time (see
 * the comments above HRTIMER_STATE_ENQUEUED).
 */
static inline void hrtimer_update_function(struct hrtimer *timer,
					   enum hrtimer_restart (*function)(struct hrtimer *))
{
#ifdef CONFIG_PROVE_LOCKING
	guard(raw_spinlock_irqsave)(&timer->base->cpu_base->lock);

	if (WARN_ON_ONCE(hrtimer_is_queued(timer)))
		return;

	if (WARN_ON_ONCE(!function))
		return;
#endif
	ACCESS_PRIVATE(timer, function) = function;
}
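
/*
 * Illustrative sketch (my_other_fn is hypothetical): switching callbacks
 * is only legal while the timer is not queued, e.g. after a successful
 * cancel:
 *
 *	hrtimer_cancel(&dev->timer);
 *	hrtimer_update_function(&dev->timer, my_other_fn);
 *	hrtimer_start(&dev->timer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */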

/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);

/**
 * hrtimer_forward_now() - forward the timer expiry so it expires after now
 * @timer:	hrtimer to forward
 * @interval:	the interval to forward
 *
 * It is a variant of hrtimer_forward(). The timer will expire after the
 * current time of the hrtimer clock base. See hrtimer_forward() for details.
 */
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
				      ktime_t interval)
{
	return hrtimer_forward(timer, timer->base->get_time(), interval);
}
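
/*
 * Illustrative sketch of the canonical periodic-timer pattern: forwarding
 * from inside the callback and returning HRTIMER_RESTART keeps the timer
 * running on a fixed interval without drift, and hrtimer_forward_now()
 * returns the number of intervals skipped if the callback ran late
 * (names hypothetical):
 *
 *	static enum hrtimer_restart my_tick(struct hrtimer *t)
 *	{
 *		do_periodic_work();
 *		hrtimer_forward_now(t, ms_to_ktime(10));
 *		return HRTIMER_RESTART;
 *	}
 */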

/* Precise sleep: */

extern int nanosleep_copyout(struct restart_block *, struct timespec64 *);
extern long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
			      const clockid_t clockid);

extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				    const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
					  u64 delta,
					  const enum hrtimer_mode mode,
					  clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
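
/*
 * Illustrative sketch: sleeping with an hrtimer based timeout. As with
 * schedule(), the caller must set the task state first:
 *
 *	ktime_t timeout = ms_to_ktime(50);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_REL);
 *
 * ret is 0 when the timeout expired, and -EINTR when the task was woken
 * early by a signal or an explicit wakeup.
 */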

/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);

/* Bootup initialization: */
extern void __init hrtimers_init(void);

/* Show pending timers: */
extern void sysrq_timer_list_show(void);

int hrtimers_prepare_cpu(unsigned int cpu);
int hrtimers_cpu_starting(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
int hrtimers_cpu_dying(unsigned int cpu);
#else
#define hrtimers_cpu_dying	NULL
#endif

#endif