/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H

#include <uapi/linux/kernel.h>
#include <vdso/cache.h>
#include <asm/cache.h>

#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif

/**
 * SMP_CACHE_ALIGN - align a value to the L2 cacheline size
 * @x: value to align
 *
 * On some architectures, the L2 ("SMP") cacheline size is bigger than the
 * L1 one, and sometimes this difference needs to be accounted for.
 *
 * Return: aligned value.
 */
#ifndef SMP_CACHE_ALIGN
#define SMP_CACHE_ALIGN(x) ALIGN(x, SMP_CACHE_BYTES)
#endif
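
/*
 * A minimal usage sketch (illustrative only; 'desc_count' and
 * 'desc_size' are hypothetical): round a per-queue byte size up so
 * each queue starts on an SMP cacheline boundary and never shares a
 * line with its neighbour:
 *
 *	size_t per_queue = SMP_CACHE_ALIGN(desc_count * desc_size);
 */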

/*
 * ``__aligned_largest`` aligns a field to the value most optimal for the
 * target architecture to perform memory operations. ``__LARGEST_ALIGN``
 * exposes that value so it can be used anywhere else.
 */
#ifndef __LARGEST_ALIGN
#define __LARGEST_ALIGN sizeof(struct { long x; } __aligned_largest)
#endif

#ifndef LARGEST_ALIGN
#define LARGEST_ALIGN(x) ALIGN(x, __LARGEST_ALIGN)
#endif
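
/*
 * An illustrative sketch (the struct name is hypothetical): round a
 * header length up so the payload following it can be moved with the
 * widest loads/stores the architecture prefers:
 *
 *	size_t hdr_len = LARGEST_ALIGN(sizeof(struct foo_hdr));
 */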

/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. Its use should be reserved for data that is used
 * frequently in hot paths. Performance traces can help decide when to use
 * this. You want __read_mostly data to be tightly packed, so that in the
 * best case multiple frequently read variables for a hot path will be next
 * to each other in order to reduce the number of cachelines needed to
 * execute a critical path. We should be mindful and selective of its use:
 * i.e., if you're going to use it, please supply a *good* justification in
 * your commit log.
 */
#ifndef __read_mostly
#define __read_mostly
#endif
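
/*
 * A typical candidate (illustrative; 'sysctl_foo_enabled' is a
 * hypothetical variable): a tunable read on every hot-path invocation
 * but written only via sysctl:
 *
 *	static int sysctl_foo_enabled __read_mostly = 1;
 */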

/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
#ifndef __ro_after_init
#define __ro_after_init __section(".data..ro_after_init")
#endif
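
/*
 * A minimal sketch (the names are hypothetical): a pointer chosen once
 * during boot and never written again, so it can be write-protected
 * after init:
 *
 *	static const struct foo_ops *foo_ops __ro_after_init;
 *
 *	static int __init foo_setup(void)
 *	{
 *		foo_ops = &generic_foo_ops;	// last write, during init
 *		return 0;
 *	}
 */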

#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

#ifndef __cacheline_aligned
#define __cacheline_aligned					\
	__attribute__((__aligned__(SMP_CACHE_BYTES),		\
		       __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */

#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
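
/*
 * A hypothetical sketch of both variants: ____cacheline_aligned_in_smp
 * only pads a struct member out to a cacheline boundary, while
 * __cacheline_aligned_in_smp, with the generic definition above, also
 * places a variable in the .data..cacheline_aligned section:
 *
 *	struct foo_queue {
 *		u32 flags;		// rarely written configuration
 *
 *		// hot, lock-protected state on its own cacheline
 *		spinlock_t lock ____cacheline_aligned_in_smp;
 *		u32 head, tail;
 *	};
 *
 *	static struct foo_queue foo_q __cacheline_aligned_in_smp;
 */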

/*
 * The maximum alignment needed for some critical structures.
 * These could be inter-node cacheline sizes, the L3 cacheline
 * size, etc. Define this in asm/cache.h for your arch.
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif

#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif
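
/*
 * An illustrative sketch (the struct is hypothetical): keep two
 * independently updated counters on separate internode cachelines so
 * CPUs on remote nodes never false-share them:
 *
 *	struct foo_stats {
 *		atomic_long_t rx ____cacheline_internodealigned_in_smp;
 *		atomic_long_t tx ____cacheline_internodealigned_in_smp;
 *	};
 */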

#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
#define cache_line_size() L1_CACHE_BYTES
#endif

#ifndef __cacheline_group_begin
#define __cacheline_group_begin(GROUP) \
	__u8 __cacheline_group_begin__##GROUP[0]
#endif

#ifndef __cacheline_group_end
#define __cacheline_group_end(GROUP) \
	__u8 __cacheline_group_end__##GROUP[0]
#endif

/**
 * __cacheline_group_begin_aligned - declare an aligned group start
 * @GROUP: name of the group
 * @...: optional group alignment
 *
 * The following block inside a struct:
 *
 *	__cacheline_group_begin_aligned(grp);
 *	field a;
 *	field b;
 *	__cacheline_group_end_aligned(grp);
 *
 * will always be aligned to either the specified alignment or
 * ``SMP_CACHE_BYTES``.
 */
#define __cacheline_group_begin_aligned(GROUP, ...)		\
	__cacheline_group_begin(GROUP)				\
	__aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)

/**
 * __cacheline_group_end_aligned - declare an aligned group end
 * @GROUP: name of the group
 * @...: optional alignment (same as was passed to
 *	 __cacheline_group_begin_aligned())
 *
 * Note that the end marker is aligned to sizeof(long) to allow more precise
 * size assertion. It also declares padding at the end to avoid the next
 * field falling into this cacheline.
 */
#define __cacheline_group_end_aligned(GROUP, ...)		\
	__cacheline_group_end(GROUP) __aligned(sizeof(long));	\
	struct { } __cacheline_group_pad__##GROUP		\
	__aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
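
/*
 * A hypothetical sketch, modelled on how networking structures group
 * their fast-path fields ('struct foo_dev' and its members are made
 * up for illustration):
 *
 *	struct foo_dev {
 *		struct mutex lock;		// cold, before the group
 *
 *		__cacheline_group_begin_aligned(foo_fastpath);
 *		u32 rx_pending;
 *		u32 tx_pending;
 *		__cacheline_group_end_aligned(foo_fastpath);
 *
 *		struct dentry *debugfs;		// cold, after the pad
 *	};
 */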

#ifndef CACHELINE_ASSERT_GROUP_MEMBER
#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
	BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
		       offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
		       offsetofend(TYPE, MEMBER) <= \
		       offsetof(TYPE, __cacheline_group_end__##GROUP)))
#endif

#ifndef CACHELINE_ASSERT_GROUP_SIZE
#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
	BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
		     offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
		     SIZE)
#endif
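
/*
 * Continuing the hypothetical 'struct foo_dev' sketch above: since
 * these expand to BUILD_BUG_ON(), they must live in function scope.
 * A dummy function checks at build time that the fields stayed inside
 * the group and that the group still fits in one cacheline:
 *
 *	static void foo_dev_layout_asserts(void)
 *	{
 *		CACHELINE_ASSERT_GROUP_MEMBER(struct foo_dev, foo_fastpath,
 *					      rx_pending);
 *		CACHELINE_ASSERT_GROUP_MEMBER(struct foo_dev, foo_fastpath,
 *					      tx_pending);
 *		CACHELINE_ASSERT_GROUP_SIZE(struct foo_dev, foo_fastpath,
 *					    SMP_CACHE_BYTES);
 *	}
 */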

/*
 * Helper to add padding within a struct to ensure data falls into separate
 * cachelines.
 */
#if defined(CONFIG_SMP)
struct cacheline_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define CACHELINE_PADDING(name) struct cacheline_padding name
#else
#define CACHELINE_PADDING(name)
#endif
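
/*
 * An illustrative sketch, mirroring the pattern mm's struct zone uses:
 * separate read-mostly fields from write-heavy counters ('struct
 * foo_zone' is hypothetical):
 *
 *	struct foo_zone {
 *		unsigned long flags;		// read-mostly
 *		CACHELINE_PADDING(_pad1_);
 *		atomic_long_t nr_allocated;	// written constantly
 *	};
 */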

#ifdef ARCH_DMA_MINALIGN
#define ARCH_HAS_DMA_MINALIGN
#else
#define ARCH_DMA_MINALIGN __alignof__(unsigned long long)
#endif
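
/*
 * A hedged sketch (the struct and sizes are hypothetical): align an
 * embedded buffer that a device writes via DMA so it does not share a
 * cacheline with CPU-written fields on non-coherent platforms:
 *
 *	struct foo_ctrl {
 *		spinlock_t lock;
 *		u8 dma_buf[64] __aligned(ARCH_DMA_MINALIGN);
 *	};
 */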

#endif /* __LINUX_CACHE_H */