// SPDX-License-Identifier: GPL-2.0

#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace_output.h"

struct recursed_functions {
	unsigned long		ip;
	unsigned long		parent_ip;
};

static struct recursed_functions recursed_functions[CONFIG_FTRACE_RECORD_RECURSION_SIZE];
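/* Number of recorded entries; set to -1 while the records are being cleared */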
static atomic_t nr_records;

/*
 * Cache the last found function. Yes, updates to this are racy, but
 * so is memory cache ;-)
 */
static unsigned long cached_function;

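/*
 * Record a function that was caught by the ftrace recursion protection.
 * The ip/parent_ip pair is stored (only once per function) in the
 * recursed_functions array so it can be reported through the
 * "recursed_functions" tracefs file created below.
 */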
void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
{
	int index = 0;
	int i;
	unsigned long old;

 again:
	/* First check the last one recorded */
	if (ip == cached_function)
		return;

	i = atomic_read(&nr_records);
	/* nr_records is -1 when clearing records */
	smp_mb__after_atomic();
	if (i < 0)
		return;

	/*
	 * If there are two writers and this writer comes in second,
	 * the cmpxchg() below to update the ip will fail. Then this
	 * writer will try again. It is possible that index will now
	 * be greater than nr_records, because the writer that
	 * succeeded has not updated nr_records yet.
	 * This writer could keep trying again until the other writer
	 * updates nr_records. But if the other writer takes an
	 * interrupt, and that interrupt locks up that CPU, we do
	 * not want this CPU to lock up due to the recursion protection,
	 * and have a bug report showing this CPU as the cause of
	 * locking up the computer. To not lose this record, this
	 * writer will simply use the next position to update the
	 * recursed_functions, and it will update nr_records
	 * accordingly.
	 */
	if (index < i)
		index = i;
	if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
		return;

	for (i = index - 1; i >= 0; i--) {
		if (recursed_functions[i].ip == ip) {
			cached_function = ip;
			return;
		}
	}

	cached_function = ip;

	/*
	 * We only want to add a function if it hasn't been added before.
	 * Add to the current location before incrementing the count.
	 * If it fails to add, then increment the index and try again.
	 */
	old = cmpxchg(&recursed_functions[index].ip, 0, ip);
	if (old != 0) {
		/* Did something else already add this for us? */
		if (old == ip)
			return;
		/* Try the next location */
		index++;
		goto again;
	}

	recursed_functions[index].parent_ip = parent_ip;

	/*
	 * It's still possible that we could race with the clearing
	 *    CPU0                                    CPU1
	 *    ----                                    ----
	 *    ip = func
	 *                                  nr_records = -1;
	 *                                  recursed_functions[0] = 0;
	 *    i = -1
	 *    if (i < 0)
	 *        nr_records = 0;
	 *    (new recursion detected)
	 *        recursed_functions[0] = func
	 *                                  cmpxchg(recursed_functions[0],
	 *                                          func, 0)
	 *
	 * But the worst that could happen is that we get a zero in
	 * the recursed_functions array, and it's likely that "func" will
	 * be recorded again.
	 */
	i = atomic_read(&nr_records);
	smp_mb__after_atomic();
	if (i < 0)
		cmpxchg(&recursed_functions[index].ip, ip, 0);
	else if (i <= index)
		atomic_cmpxchg(&nr_records, i, index + 1);
}
EXPORT_SYMBOL_GPL(ftrace_record_recursion);

static DEFINE_MUTEX(recursed_function_lock);
static struct trace_seq *tseq;

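/*
 * seq_file start callback: takes recursed_function_lock (released in the
 * stop callback) and allocates the trace_seq used to format the output.
 */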
static void *recursed_function_seq_start(struct seq_file *m, loff_t *pos)
{
	void *ret = NULL;
	int index;

	mutex_lock(&recursed_function_lock);
	index = atomic_read(&nr_records);
	if (*pos < index)
		ret = &recursed_functions[*pos];

	tseq = kzalloc(sizeof(*tseq), GFP_KERNEL);
	if (!tseq)
		return ERR_PTR(-ENOMEM);

	trace_seq_init(tseq);

	return ret;
}

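/* seq_file next callback: advance to the next recorded function, if any */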
static void *recursed_function_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	int index;
	int p;

	index = atomic_read(&nr_records);
	p = ++(*pos);

	return p < index ? &recursed_functions[p] : NULL;
}

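/* seq_file stop callback: free the trace_seq and drop the lock taken in start */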
static void recursed_function_seq_stop(struct seq_file *m, void *v)
{
	kfree(tseq);
	mutex_unlock(&recursed_function_lock);
}

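/*
 * seq_file show callback: print one record as "parent_ip:\tip",
 * with both addresses resolved to symbols.
 */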
static int recursed_function_seq_show(struct seq_file *m, void *v)
{
	struct recursed_functions *record = v;
	int ret = 0;

	if (record) {
		trace_seq_print_sym(tseq, record->parent_ip, true);
		trace_seq_puts(tseq, ":\t");
		trace_seq_print_sym(tseq, record->ip, true);
		trace_seq_putc(tseq, '\n');
		ret = trace_print_seq(m, tseq);
	}

	return ret;
}

static const struct seq_operations recursed_function_seq_ops = {
	.start = recursed_function_seq_start,
	.next = recursed_function_seq_next,
	.stop = recursed_function_seq_stop,
	.show = recursed_function_seq_show
};

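/*
 * Open callback: opening for write with O_TRUNC clears all records
 * (updates are disabled while clearing); opening for read starts the
 * seq_file iteration.
 */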
static int recursed_function_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&recursed_function_lock);
	/* If this file was opened for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		/* disable updating records */
		atomic_set(&nr_records, -1);
		smp_mb__after_atomic();
		memset(recursed_functions, 0, sizeof(recursed_functions));
		smp_wmb();
		/* enable them again */
		atomic_set(&nr_records, 0);
	}
	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &recursed_function_seq_ops);
	mutex_unlock(&recursed_function_lock);

	return ret;
}

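/* Writes are accepted but ignored; clearing is done by the O_TRUNC open above */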
static ssize_t recursed_function_write(struct file *file,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return count;
}

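/* Release callback: tear down the seq_file state for readers */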
static int recursed_function_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
	return 0;
}

static const struct file_operations recursed_functions_fops = {
	.open = recursed_function_open,
	.write = recursed_function_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = recursed_function_release,
};

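/* Create the "recursed_functions" file in the tracing directory at boot */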
__init static int create_recursed_functions(void)
{
	trace_create_file("recursed_functions", TRACE_MODE_WRITE,
			  NULL, NULL, &recursed_functions_fops);
	return 0;
}

fs_initcall(create_recursed_functions);