| 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | /* |
| 3 | * You SHOULD NOT be including this unless you're vsyscall |
| 4 | * handling code or timekeeping internal code! |
| 5 | */ |
| 6 | |
| 7 | #ifndef _LINUX_TIMEKEEPER_INTERNAL_H |
| 8 | #define _LINUX_TIMEKEEPER_INTERNAL_H |
| 9 | |
| 10 | #include <linux/clocksource.h> |
| 11 | #include <linux/jiffies.h> |
| 12 | #include <linux/time.h> |
| 13 | |
| 14 | /** |
| 15 | * struct tk_read_base - base structure for timekeeping readout |
| 16 | * @clock: Current clocksource used for timekeeping. |
| 17 | * @mask: Bitmask for two's complement subtraction of non 64bit clocks |
| 18 | * @cycle_last: @clock cycle value at last update |
| 19 | * @mult: (NTP adjusted) multiplier for scaled math conversion |
| 20 | * @shift: Shift value for scaled math conversion |
| 21 | * @xtime_nsec: Shifted (fractional) nano seconds offset for readout |
| 22 | * @base: ktime_t (nanoseconds) base time for readout |
| 23 | * @base_real: Nanoseconds base value for clock REALTIME readout |
| 24 | * |
 * This struct has a size of 56 bytes on 64-bit. Together with a seqcount it
 * occupies a single 64-byte cache line.
| 27 | * |
| 28 | * The struct is separate from struct timekeeper as it is also used |
| 29 | * for the fast NMI safe accessors. |
| 30 | * |
| 31 | * @base_real is for the fast NMI safe accessor to allow reading clock |
| 32 | * realtime from any context. |
| 33 | */ |
/*
 * NOTE: Field order is deliberate — together with the seqcount the core
 * prepends, this struct fills exactly one 64-byte cache line (see the
 * kerneldoc above). Do not reorder or insert members.
 */
struct tk_read_base {
	struct clocksource *clock;	/* current timekeeping clocksource */
	u64 mask;			/* mask for non-64bit clock subtraction */
	u64 cycle_last;			/* clock cycles at last update */
	u32 mult;			/* NTP-adjusted cycles->ns multiplier */
	u32 shift;			/* shift for scaled (mult/shift) math */
	u64 xtime_nsec;			/* shifted fractional ns for readout */
	ktime_t base;			/* ns base time for readout */
	u64 base_real;			/* CLOCK_REALTIME base for NMI-safe readout */
};
| 44 | |
| 45 | /** |
| 46 | * struct timekeeper - Structure holding internal timekeeping values. |
| 47 | * @tkr_mono: The readout base structure for CLOCK_MONOTONIC |
| 48 | * @xtime_sec: Current CLOCK_REALTIME time in seconds |
| 49 | * @ktime_sec: Current CLOCK_MONOTONIC time in seconds |
| 50 | * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset |
| 51 | * @offs_real: Offset clock monotonic -> clock realtime |
| 52 | * @offs_boot: Offset clock monotonic -> clock boottime |
| 53 | * @offs_tai: Offset clock monotonic -> clock tai |
| 54 | * @coarse_nsec: The nanoseconds part for coarse time getters |
| 55 | * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW |
| 56 | * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds |
| 57 | * @clock_was_set_seq: The sequence number of clock was set events |
| 58 | * @cs_was_changed_seq: The sequence number of clocksource change events |
| 59 | * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset |
| 60 | * @cycle_interval: Number of clock cycles in one NTP interval |
| 61 | * @xtime_interval: Number of clock shifted nano seconds in one NTP |
| 62 | * interval. |
| 63 | * @xtime_remainder: Shifted nano seconds left over when rounding |
| 64 | * @cycle_interval |
| 65 | * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. |
| 66 | * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second |
| 67 | * @ntp_tick: The ntp_tick_length() value currently being |
| 68 | * used. This cached copy ensures we consistently |
| 69 | * apply the tick length for an entire tick, as |
| 70 | * ntp_tick_length may change mid-tick, and we don't |
| 71 | * want to apply that new value to the tick in |
| 72 | * progress. |
| 73 | * @ntp_error: Difference between accumulated time and NTP time in ntp |
| 74 | * shifted nano seconds. |
| 75 | * @ntp_error_shift: Shift conversion between clock shifted nano seconds and |
| 76 | * ntp shifted nano seconds. |
| 77 | * @ntp_err_mult: Multiplication factor for scaled math conversion |
| 78 | * @skip_second_overflow: Flag used to avoid updating NTP twice with same second |
| 79 | * @tai_offset: The current UTC to TAI offset in seconds |
| 80 | * |
| 81 | * Note: For timespec(64) based interfaces wall_to_monotonic is what |
| 82 | * we need to add to xtime (or xtime corrected for sub jiffy times) |
| 83 | * to get to monotonic time. Monotonic is pegged at zero at system |
| 84 | * boot time, so wall_to_monotonic will be negative, however, we will |
| 85 | * ALWAYS keep the tv_nsec part positive so we can use the usual |
| 86 | * normalization. |
| 87 | * |
| 88 | * wall_to_monotonic is moved after resume from suspend for the |
| 89 | * monotonic time not to jump. We need to add total_sleep_time to |
| 90 | * wall_to_monotonic to get the real boot based time offset. |
| 91 | * |
| 92 | * wall_to_monotonic is no longer the boot time, getboottime must be |
| 93 | * used instead. |
| 94 | * |
 * @monotonic_to_boot is a timespec64 representation of @offs_boot to
 * accelerate the VDSO update for CLOCK_BOOTTIME.
| 97 | * |
| 98 | * The cacheline ordering of the structure is optimized for in kernel usage of |
| 99 | * the ktime_get() and ktime_get_ts64() family of time accessors. Struct |
| 100 | * timekeeper is prepended in the core timekeeping code with a sequence count, |
| 101 | * which results in the following cacheline layout: |
| 102 | * |
| 103 | * 0: seqcount, tkr_mono |
| 104 | * 1: xtime_sec ... coarse_nsec |
| 105 | * 2: tkr_raw, raw_sec |
| 106 | * 3,4: Internal variables |
| 107 | * |
| 108 | * Cacheline 0,1 contain the data which is used for accessing |
| 109 | * CLOCK_MONOTONIC/REALTIME/BOOTTIME/TAI, while cacheline 2 contains the |
| 110 | * data for accessing CLOCK_MONOTONIC_RAW. Cacheline 3,4 are internal |
| 111 | * variables which are only accessed during timekeeper updates once per |
| 112 | * tick. |
| 113 | */ |
struct timekeeper {
	/* Cacheline 0 (together with prepended seqcount of timekeeper core): */
	struct tk_read_base	tkr_mono;

	/* Cacheline 1: */
	u64			xtime_sec;
	unsigned long		ktime_sec;
	struct timespec64	wall_to_monotonic;
	ktime_t			offs_real;
	ktime_t			offs_boot;
	ktime_t			offs_tai;
	u32			coarse_nsec;

	/* Cacheline 2: */
	struct tk_read_base	tkr_raw;
	u64			raw_sec;

	/* Cacheline 3 and 4 (timekeeping internal variables): */
	unsigned int		clock_was_set_seq;
	u8			cs_was_changed_seq;

	struct timespec64	monotonic_to_boot;

	/* NTP accumulation interval state, updated once per tick: */
	u64			cycle_interval;
	u64			xtime_interval;
	s64			xtime_remainder;
	u64			raw_interval;

	/* NTP adjustment and leap-second state: */
	ktime_t			next_leap_ktime;
	u64			ntp_tick;
	s64			ntp_error;
	u32			ntp_error_shift;
	u32			ntp_err_mult;
	u32			skip_second_overflow;
	s32			tai_offset;
};
| 150 | |
#ifdef CONFIG_GENERIC_TIME_VSYSCALL

/*
 * Architecture hooks invoked by the timekeeping core to propagate time
 * updates (and timezone changes) into the vsyscall/VDSO data pages.
 */
extern void update_vsyscall(struct timekeeper *tk);
extern void update_vsyscall_tz(void);

#else

/* No generic vsyscall support: the update hooks compile away to no-ops. */
static inline void update_vsyscall(struct timekeeper *tk)
{
}
static inline void update_vsyscall_tz(void)
{
}
#endif
| 165 | |
| 166 | #endif /* _LINUX_TIMEKEEPER_INTERNAL_H */ |
| 167 | |