// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "rethook: " fmt

#include <linux/bug.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/rethook.h>
#include <linux/slab.h>

/* Return hook list (shadow stack by list) */

/*
 * This function is called from delayed_put_task_struct() when a task is
 * dead and cleaned up to recycle any kretprobe instances associated with
 * this task. These leftover instances represent probed functions that
 * have been called but will never return.
 */
void rethook_flush_task(struct task_struct *tk)
{
	struct rethook_node *rhn;
	struct llist_node *node;

	node = __llist_del_all(&tk->rethooks);
	while (node) {
		rhn = container_of(node, struct rethook_node, llist);
		node = node->next;
		preempt_disable();
		rethook_recycle(rhn);
		preempt_enable();
	}
}

static void rethook_free_rcu(struct rcu_head *head)
{
	struct rethook *rh = container_of(head, struct rethook, rcu);
	objpool_fini(&rh->pool);
}

/**
 * rethook_stop() - Stop using a rethook.
 * @rh: the struct rethook to stop.
 *
 * Stop using a rethook to prepare for freeing it. If you want to wait for
 * all running rethook handlers to finish before calling rethook_free(), you
 * need to call this first, wait for an RCU grace period, and then call
 * rethook_free().
 */
void rethook_stop(struct rethook *rh)
{
	rcu_assign_pointer(rh->handler, NULL);
}

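/*
 * A teardown sketch (not part of this file): a hypothetical caller that
 * wants every in-flight handler to finish before the final release can
 * follow the protocol documented above; 'my_rh' is an assumed pointer
 * previously returned by rethook_alloc().
 *
 *	rethook_stop(my_rh);
 *	synchronize_rcu();	// one way to wait for an RCU grace period
 *	rethook_free(my_rh);	// my_rh must not be touched after this
 */
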
/**
 * rethook_free() - Free struct rethook.
 * @rh: the struct rethook to be freed.
 *
 * Free the rethook. Before calling this function, the user must ensure that
 * @rh::data is cleaned up if needed (otherwise, the handler can still access
 * it after this function is called). This function schedules @rh to be freed
 * after all rethook_node objects are freed (not immediately), and the caller
 * must not touch @rh after calling this.
 */
void rethook_free(struct rethook *rh)
{
	rethook_stop(rh);

	call_rcu(&rh->rcu, rethook_free_rcu);
}

static int rethook_init_node(void *nod, void *context)
{
	struct rethook_node *node = nod;

	node->rethook = context;
	return 0;
}

static int rethook_fini_pool(struct objpool_head *head, void *context)
{
	kfree(context);
	return 0;
}

static inline rethook_handler_t rethook_get_handler(struct rethook *rh)
{
	return (rethook_handler_t)rcu_dereference_check(rh->handler,
							rcu_read_lock_any_held());
}

/**
 * rethook_alloc() - Allocate struct rethook.
 * @data: the data to pass to @handler when hooking the return.
 * @handler: the return hook callback function, must NOT be NULL
 * @size: node size: rethook node and additional data
 * @num: number of rethook nodes to be preallocated
 *
 * Allocate and initialize a new rethook with @data and @handler.
 * Return a pointer to the new rethook, or an error pointer on failure.
 *
 * Note that @handler == NULL means this rethook is going to be freed.
 */
struct rethook *rethook_alloc(void *data, rethook_handler_t handler,
			      int size, int num)
{
	struct rethook *rh;

	if (!handler || num <= 0 || size < sizeof(struct rethook_node))
		return ERR_PTR(-EINVAL);

	rh = kzalloc(sizeof(struct rethook), GFP_KERNEL);
	if (!rh)
		return ERR_PTR(-ENOMEM);

	rh->data = data;
	rcu_assign_pointer(rh->handler, handler);

	/* initialize the objpool for rethook nodes */
	if (objpool_init(&rh->pool, num, size, GFP_KERNEL, rh,
			 rethook_init_node, rethook_fini_pool)) {
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}
	return rh;
}

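/*
 * A usage sketch (not part of this file): a hypothetical caller allocates a
 * rethook with a handler matching rethook_handler_t; 'my_hook_data' and
 * 'my_ret_handler' are assumed names, and 16 nodes with no extra per-node
 * data are used here only for illustration.
 *
 *	static void my_ret_handler(struct rethook_node *rhn, void *data,
 *				   unsigned long ret_addr, struct pt_regs *regs)
 *	{
 *		// inspect regs or ret_addr; 'data' is my_hook_data
 *	}
 *
 *	struct rethook *rh = rethook_alloc(my_hook_data, my_ret_handler,
 *					   sizeof(struct rethook_node), 16);
 *	if (IS_ERR(rh))
 *		return PTR_ERR(rh);
 */
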
static void free_rethook_node_rcu(struct rcu_head *head)
{
	struct rethook_node *node = container_of(head, struct rethook_node, rcu);
	struct rethook *rh = node->rethook;

	objpool_drop(node, &rh->pool);
}

/**
 * rethook_recycle() - return the node to rethook.
 * @node: The struct rethook_node to be returned.
 *
 * Return the @node to @node::rethook. If the @node::rethook is already
 * marked as freed, this will free the @node.
 */
void rethook_recycle(struct rethook_node *node)
{
	rethook_handler_t handler;

	handler = rethook_get_handler(node->rethook);
	if (likely(handler))
		objpool_push(node, &node->rethook->pool);
	else
		call_rcu(&node->rcu, free_rethook_node_rcu);
}
NOKPROBE_SYMBOL(rethook_recycle);

/**
 * rethook_try_get() - get an unused rethook node.
 * @rh: The struct rethook which pools the nodes.
 *
 * Get an unused rethook node from @rh. If the node pool is empty, this
 * will return NULL. The caller must disable preemption.
 */
struct rethook_node *rethook_try_get(struct rethook *rh)
{
	rethook_handler_t handler = rethook_get_handler(rh);

	/* Check whether @rh is going to be freed. */
	if (unlikely(!handler))
		return NULL;

#if defined(CONFIG_FTRACE_VALIDATE_RCU_IS_WATCHING) || defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
	/*
	 * This expects that the caller sets up a rethook on a function entry.
	 * When the function returns, the rethook will eventually be reclaimed
	 * or released in rethook_recycle() with call_rcu().
	 * This means the caller must run in an RCU-available context.
	 */
	if (unlikely(!rcu_is_watching()))
		return NULL;
#endif

	return (struct rethook_node *)objpool_pop(&rh->pool);
}
NOKPROBE_SYMBOL(rethook_try_get);

/**
 * rethook_hook() - Hook the current function return.
 * @node: The struct rethook node to hook the function return.
 * @regs: The struct pt_regs for the function entry.
 * @mcount: True if this is called from mcount (ftrace) context.
 *
 * Hook the return of the currently running function. This must be called at
 * the function entry (or at least @regs must be the registers of the function
 * entry). @mcount is used for identifying the context: if this is called from
 * an ftrace (mcount) callback, @mcount must be true; if this is called from
 * the real function entry (e.g. kprobes), @mcount must be false. This is
 * because the way to hook the function return depends on the context.
 */
void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount)
{
	arch_rethook_prepare(node, regs, mcount);
	__llist_add(&node->llist, &current->rethooks);
}
NOKPROBE_SYMBOL(rethook_hook);

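/*
 * An entry-side sketch (not part of this file): a hypothetical function-entry
 * callback grabs a node and arms the return hook. 'my_rh' is an assumed
 * rethook from rethook_alloc(), and the callback is assumed to run with
 * preemption disabled (as e.g. kprobe callbacks do).
 *
 *	static void my_entry_handler(struct rethook *my_rh, struct pt_regs *regs)
 *	{
 *		struct rethook_node *rhn = rethook_try_get(my_rh);
 *
 *		if (!rhn)	// pool exhausted or rethook being freed
 *			return;
 *		// optionally stash per-call data in the area following 'rhn'
 *		rethook_hook(rhn, regs, false);	// false: not an mcount context
 *	}
 */
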
/* This assumes the 'tsk' is the current task or is not running. */
static unsigned long __rethook_find_ret_addr(struct task_struct *tsk,
					     struct llist_node **cur)
{
	struct rethook_node *rh = NULL;
	struct llist_node *node = *cur;

	if (!node)
		node = tsk->rethooks.first;
	else
		node = node->next;

	while (node) {
		rh = container_of(node, struct rethook_node, llist);
		if (rh->ret_addr != (unsigned long)arch_rethook_trampoline) {
			*cur = node;
			return rh->ret_addr;
		}
		node = node->next;
	}
	return 0;
}
NOKPROBE_SYMBOL(__rethook_find_ret_addr);

/**
 * rethook_find_ret_addr -- Find correct return address modified by rethook
 * @tsk: Target task
 * @frame: A frame pointer
 * @cur: a storage of the loop cursor llist_node pointer for next call
 *
 * Find the correct return address modified by a rethook on @tsk in unsigned
 * long type.
 * The @tsk must be 'current' or a task which is not running. @frame is a hint
 * to get the correct return address - which is compared with the
 * rethook::frame field. The @cur is a loop cursor for searching the
 * kretprobe return addresses on the @tsk. The '*@cur' should be NULL at the
 * first call, but '@cur' itself must NOT be NULL.
 *
 * Returns the found address value, or zero if not found.
 */
unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame,
				    struct llist_node **cur)
{
	struct rethook_node *rhn = NULL;
	unsigned long ret;

	if (WARN_ON_ONCE(!cur))
		return 0;

	if (tsk != current && task_is_running(tsk))
		return 0;

	do {
		ret = __rethook_find_ret_addr(tsk, cur);
		if (!ret)
			break;
		rhn = container_of(*cur, struct rethook_node, llist);
	} while (rhn->frame != frame);

	return ret;
}
NOKPROBE_SYMBOL(rethook_find_ret_addr);

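/*
 * An unwinder-side sketch (not part of this file): a hypothetical stack
 * walker replaces each trampoline address it meets with the real return
 * address; 'fp' stands in for the frame identifier the architecture stored
 * in rethook_node::frame (typically a stack/frame pointer at entry).
 *
 *	struct llist_node *cur = NULL;
 *	unsigned long real_addr;
 *
 *	if (addr == (unsigned long)arch_rethook_trampoline) {
 *		real_addr = rethook_find_ret_addr(task, fp, &cur);
 *		if (real_addr)
 *			addr = real_addr;	// use the original return address
 *	}
 */
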
void __weak arch_rethook_fixup_return(struct pt_regs *regs,
				      unsigned long correct_ret_addr)
{
	/*
	 * Do nothing by default. An architecture that uses a frame pointer
	 * to record the real return address on the stack should implement
	 * this function to fix up the return address so that stacktrace
	 * works from the rethook handler.
	 */
}

/* This function will be called from each arch-defined trampoline. */
unsigned long rethook_trampoline_handler(struct pt_regs *regs,
					 unsigned long frame)
{
	struct llist_node *first, *node = NULL;
	unsigned long correct_ret_addr;
	rethook_handler_t handler;
	struct rethook_node *rhn;

	correct_ret_addr = __rethook_find_ret_addr(current, &node);
	if (!correct_ret_addr) {
		pr_err("rethook: Return address not found! Maybe there is a bug in the kernel\n");
		BUG_ON(1);
	}

	instruction_pointer_set(regs, correct_ret_addr);

	/*
	 * These loops must be protected from rethook_free_rcu() because they
	 * access 'rhn->rethook'.
	 */
	preempt_disable_notrace();

	/*
	 * Run the handlers on the shadow stack. Do not unlink the list here
	 * because a stackdump inside the handlers needs to decode it.
	 */
	first = current->rethooks.first;
	while (first) {
		rhn = container_of(first, struct rethook_node, llist);
		if (WARN_ON_ONCE(rhn->frame != frame))
			break;
		handler = rethook_get_handler(rhn->rethook);
		if (handler)
			handler(rhn, rhn->rethook->data,
				correct_ret_addr, regs);

		if (first == node)
			break;
		first = first->next;
	}

	/* Fixup registers for returning to the correct address. */
	arch_rethook_fixup_return(regs, correct_ret_addr);

	/* Unlink the used part of the shadow stack. */
	first = current->rethooks.first;
	current->rethooks.first = node->next;
	node->next = NULL;

	while (first) {
		rhn = container_of(first, struct rethook_node, llist);
		first = first->next;
		rethook_recycle(rhn);
	}
	preempt_enable_notrace();

	return correct_ret_addr;
}
NOKPROBE_SYMBOL(rethook_trampoline_handler);