/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
/* size of tmp_str_buf in bpf_verifier.
 * we need at least 306 bytes to fit full stack mask representation
 * (in the "-8,-16,...,-512" form)
 */
#define TMP_STR_BUF_LEN		320
/* Patch buffer size */
#define INSN_BUF_SIZE		32

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent. See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
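
/* Illustration only (this helper does not exist in verifier.c): the liveness
 * marks are bit flags and compose. A hypothetical predicate asking "does this
 * read need to propagate to the parent state?" would test them like so:
 */
static inline bool example_read_escapes_to_parent(enum bpf_reg_liveness live)
{
	/* a write on this path screens off any later reads */
	if (live & REG_LIVE_WRITTEN)
		return false;
	/* either a 32-bit or a full 64-bit read counts */
	return live & REG_LIVE_READ;
}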

#define ITER_PREFIX "bpf_iter_"

enum bpf_iter_state {
	BPF_ITER_STATE_INVALID, /* for non-first slot */
	BPF_ITER_STATE_ACTIVE,
	BPF_ITER_STATE_DRAINED,
};

struct bpf_reg_state {
	/* Ordering of fields matters. See states_equal() */
	enum bpf_reg_type type;
	/*
	 * Fixed part of pointer offset, pointer types only.
	 * Or constant delta between "linked" scalars with the same ID.
	 */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish map lookups from outer map
			 * the map_uid is non-zero for registers
			 * pointing to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
			u32 mem_size;
			u32 dynptr_id; /* for dynptr slices */
		};

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of the
			 * dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* For bpf_iter stack slots */
		struct {
			/* BTF container and BTF type ID describing
			 * struct bpf_iter_<type> of an iterator state
			 */
			struct btf *btf;
			u32 btf_id;
			/* packing following two fields to fit iter state into 16 bytes */
			enum bpf_iter_state state:2;
			int depth:30;
		} iter;

		/* For irq stack slots */
		struct {
			enum {
				IRQ_NATIVE_KFUNC,
				IRQ_LOCK_KFUNC,
			} kfunc_class;
		} irq;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 * Similarly to dynptrs, we use ID to track "belonging" of a reference
	 * to a specific instance of bpf_iter.
	 */
	/*
	 * Upper bit of ID is used to remember relationship between "linked"
	 * registers. Example:
	 * r1 = r2;    both will have r1->id == r2->id == N
	 * r1 += 10;   r1->id == N | BPF_ADD_CONST and r1->off == 10
	 */
#define BPF_ADD_CONST (1U << 31)
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
	 * the "tp" ptr must be invalidated as well. In order to do that,
	 * the regs holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type. In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function's stack
	 * while the other points to the caller's stack. To differentiate them,
	 * 'frameno' is used; it is an index into the
	 * bpf_verifier_state->frame[] array of bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching, which only happens after main verification has finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};
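
/* A minimal sketch (this exact helper is not part of the verifier): for
 * SCALAR_VALUE registers, var_off and the min/max bounds describe the same
 * value, so a fully-known scalar can be detected with tnum_is_const() from
 * <linux/tnum.h> and read out of var_off.value.
 */
static inline bool example_reg_is_known_const(const struct bpf_reg_state *reg,
					      u64 *val)
{
	if (reg->type != SCALAR_VALUE || !tnum_is_const(reg->var_off))
		return false;
	*val = reg->var_off.value;	/* mask == 0: every bit is known */
	return true;
}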

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
	STACK_ITER,
	STACK_IRQ_FLAG,
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
			  (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
			  (1 << BPF_REG_5))

#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS	(BPF_DYNPTR_SIZE / BPF_REG_SIZE)

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};
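
/* Illustrative helper (verifier.c has its own internal equivalent): whether a
 * slot holds a spilled register is decided by the type of its last byte, and
 * only then is spilled_ptr meaningful.
 */
static inline bool example_is_spilled_reg(const struct bpf_stack_state *stack)
{
	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}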

struct bpf_reference_state {
	/* Each reference object has a type. The types are bit flags so that
	 * related kinds can be tested together with a single mask (see
	 * REF_TYPE_LOCK_MASK below).
	 */
	enum ref_state_type {
		REF_TYPE_PTR		= (1 << 1),
		REF_TYPE_IRQ		= (1 << 2),
		REF_TYPE_LOCK		= (1 << 3),
		REF_TYPE_RES_LOCK	= (1 << 4),
		REF_TYPE_RES_LOCK_IRQ	= (1 << 5),
		REF_TYPE_LOCK_MASK	= REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
	} type;
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (eg, via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
	/* Used to keep track of the source object of a lock, to ensure
	 * it matches on unlock.
	 */
	void *ptr;
};

struct bpf_retval_range {
	s32 minval;
	s32 maxval;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	struct bpf_retval_range callback_ret_range;
	bool in_callback_fn;
	bool in_async_callback_fn;
	bool in_exception_callback_fn;
	/* For callback-calling functions that limit the number of possible
	 * callback executions (e.g. bpf_loop), this tracks the current
	 * simulated iteration number.
	 * The value in frame N is the number of times the callback with frame
	 * N+1 was simulated, e.g. for the following call:
	 *
	 * bpf_loop(..., fn, ...); | suppose current frame is N
	 *                         | fn would be simulated in frame N+1
	 *                         | number of simulations is tracked in frame N
	 */
	u32 callback_depth;

	/* The following fields should be last. See copy_func_state() */
	/* The state of the stack. Each element of the array describes BPF_REG_SIZE
	 * (i.e. 8) bytes worth of stack memory.
	 * stack[0] represents bytes [*(r10-8)..*(r10-1)]
	 * stack[1] represents bytes [*(r10-16)..*(r10-9)]
	 * ...
	 * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
	 */
	struct bpf_stack_state *stack;
	/* Size of the current stack, in bytes. The stack state is tracked above,
	 * in `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
	 */
	int allocated_stack;
};
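
/* Illustration of the stack[] indexing documented above (hypothetical helper;
 * verifier.c computes this the same way internally): a negative frame-pointer
 * offset maps to a slot index, e.g. off == -8 gives spi == 0 and off == -16
 * gives spi == 1.
 */
static inline int example_get_spi(int off)
{
	return (-off - 1) / BPF_REG_SIZE;
}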

#define MAX_CALL_FRAMES 8

/* instruction history flags, used in bpf_insn_hist_entry.flags field */
enum {
	/* instruction references stack slot through PTR_TO_STACK register;
	 * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
	 * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
	 * 8 bytes per slot, so slot index (spi) is [0, 63])
	 */
	INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */

	INSN_F_SPI_MASK = 0x3f, /* 6 bits */
	INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */

	INSN_F_STACK_ACCESS = BIT(9),

	INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
	INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
	/* total 12 bits are used now. */
};

static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);

struct bpf_insn_hist_entry {
	u32 idx;
	/* insn idx can't be bigger than 1 million */
	u32 prev_idx : 20;
	/* special INSN_F_xxx flags */
	u32 flags : 12;
	/* additional registers that need precision tracking when this
	 * jump is backtracked, vector of six 10-bit records
	 */
	u64 linked_regs;
};
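
/* Hypothetical pack/unpack helpers showing the INSN_F_* bit layout above
 * (these functions do not exist in verifier.c; only the layout is real):
 */
static inline u32 example_pack_stack_access(u32 frameno, u32 spi)
{
	return INSN_F_STACK_ACCESS |
	       (frameno & INSN_F_FRAMENO_MASK) |
	       ((spi & INSN_F_SPI_MASK) << INSN_F_SPI_SHIFT);
}

static inline u32 example_unpack_spi(u32 flags)
{
	return (flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
}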

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/* Acquired reference states */
	struct bpf_reference_state *refs;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 *     One is fallthrough branch with branches==1 and another
	 *     state is pushed into stack (to be explored later) also with
	 *     branches==1. The parent of this state has branches==1.
	 *     The verifier state tree connected via 'parent' pointer looks like:
	 *     1
	 *     1
	 *     2 -> 1 (first 'if' pushed into stack)
	 *     1
	 *     2 -> 1 (second 'if' pushed into stack)
	 *     1
	 *     1
	 *     1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look:
	 *     1
	 *     1
	 *     2 -> 1 (first 'if' pushed into stack)
	 *     1
	 *     1 -> 1 (second 'if' pushed into stack)
	 *     0
	 *     0
	 *     0 bpf_exit.
	 * After pop_stack() the do_check() will resume at second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such state is exactly equal to the current state
	 * it's an infinite loop. Note states_equal() checks for states
	 * equivalency, so two states being 'states_equal' does not mean
	 * infinite loop. The exact comparison is provided by
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops or
	 * loop iteration count may be too high.
	 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;

	u32 acquired_refs;
	u32 active_locks;
	u32 active_preempt_locks;
	u32 active_irq_id;
	u32 active_lock_id;
	void *active_lock_ptr;
	bool active_rcu_lock;

	bool speculative;
	bool in_sleepable;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* If this state is a part of states loop this field points to some
	 * parent of this state such that:
	 * - it is also a member of the same states loop;
	 * - DFS states traversal starting from initial state visits loop_entry
	 *   state before this state.
	 * Used to compute topmost loop entry for state loops.
	 * State loops might appear because of open coded iterators logic.
	 * See get_loop_entry() for more information.
	 */
	struct bpf_verifier_state *loop_entry;
	/* Sub-range of env->insn_hist[] corresponding to this state's
	 * instruction history.
	 * Backtracking uses it to go from last to first.
	 * For most states instruction history is short, 0-3 instructions.
	 * For loops it can go up to ~40.
	 */
	u32 insn_hist_start;
	u32 insn_hist_end;
	u32 dfs_depth;
	u32 callback_unroll_depth;
	u32 may_goto_depth;
	/* If this state was ever pointed-to by other state's loop_entry field
	 * this flag would be set to true. Used to avoid freeing such states
	 * while they are still in use.
	 */
	u32 used_as_loop_entry;
};
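
/* Condensed sketch of the update_branch_counts() walk described above (the
 * real function in kernel/bpf/verifier.c also maintains loop_entry state):
 * when a path reaches bpf_exit, fully-explored states percolate a zero
 * branch count up the parent chain.
 */
static inline void example_update_branch_counts(struct bpf_verifier_state *st)
{
	while (st) {
		if (--st->branches)
			break;	/* this parent still has unexplored branches */
		st = st->parent;
	}
}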

#define bpf_get_spilled_reg(slot, frame, mask)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg, mask)		\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask);	\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame, mask))

#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \
	({								\
		struct bpf_verifier_state *___vstate = __vst;		\
		int ___i, ___j;						\
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {	\
			struct bpf_reg_state *___regs;			\
			__state = ___vstate->frame[___i];		\
			___regs = __state->regs;			\
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {	\
				__reg = &___regs[___j];			\
				(void)(__expr);				\
			}						\
			bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
				if (!__reg)				\
					continue;			\
				(void)(__expr);				\
			}						\
		}							\
	})

/* Invoke __expr over registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
	bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
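
/* Usage sketch (hypothetical function, assuming a non-zero ref_obj_id): this
 * is the shape in which reference-release handling walks every register and
 * every spilled register of the current state.
 */
static inline void example_invalidate_released_regs(struct bpf_verifier_state *vst,
						    u32 ref_obj_id)
{
	struct bpf_func_state *state;
	struct bpf_reg_state *reg;

	bpf_for_each_reg_in_vstate(vst, state, reg, ({
		if (reg->ref_obj_id == ref_obj_id)
			reg->type = NOT_INIT;	/* simplified; the verifier does more */
	}));
}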

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct list_head node;
	u32 miss_cnt;
	u32 hit_cnt:31;
	u32 in_free_list:1;
};

struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};

/* pointer and state for maps */
struct bpf_map_ptr_state {
	struct bpf_map *map_ptr;
	bool poison;
	bool unpriv;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC		(1U << 0)
#define BPF_ALU_SANITIZE_DST		(1U << 1)
#define BPF_ALU_NEG_VALUE		(1U << 2)
#define BPF_ALU_NON_POINTER		(1U << 3)
#define BPF_ALU_IMMEDIATE		(1U << 4)
#define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
					 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		struct bpf_map_ptr_state map_ptr_state;
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
		/* if instruction is a call to bpf_loop this field tracks
		 * the state of the relevant registers to make decision about inlining
		 */
		struct bpf_loop_inline_state loop_inline_state;
	};
	union {
		/* remember the size of type passed to bpf_obj_new to rewrite R1 */
		u64 obj_new_size;
		/* remember the offset of node field within type to rewrite */
		u64 insert_off;
	};
	struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
	bool zext_dst; /* this insn zero extends dst reg */
	bool needs_zext; /* alu op needs to clear upper bits */
	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
	u8 alu_state; /* used in combination with alu_limit */
	/* true if STX or LDX instruction is a part of a spill/fill
	 * pattern for a bpf_fastcall call.
	 */
	u8 fastcall_pattern:1;
	/* for CALL instructions, a number of spill/fill pairs in the
	 * bpf_fastcall pattern.
	 */
	u8 fastcall_spills_num:3;
	u8 arg_prog:4;

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool jmp_point;
	bool prune_point;
	/* ensure we check state equivalence and save state checkpoint at
	 * this instruction, regardless of any heuristics
	 */
	bool force_checkpoint;
	/* true if instruction is a call to a helper function that
	 * accepts callback function as a parameter.
	 */
	bool calls_callback;
	/* registers alive before this instruction. */
	u16 live_regs_before;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	/* Logical start and end positions of a "log window" of the verifier log.
	 * start_pos == 0 means we haven't truncated anything.
	 * Once truncation starts to happen, start_pos + len_total == end_pos,
	 * except during log reset situations, in which (end_pos - start_pos)
	 * might get smaller than len_total (see bpf_vlog_reset()).
	 * Generally, (end_pos - start_pos) gives the amount of useful data in
	 * the user log buffer.
	 */
	u64 start_pos;
	u64 end_pos;
	char __user *ubuf;
	u32 level;
	u32 len_total;
	u32 len_max;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};
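
/* Worked example of the log window fields, assuming the default rotating
 * (non-BPF_LOG_FIXED) mode and len_total == 1024: after 1500 bytes have been
 * written, end_pos == 1500 and start_pos == 476, so the buffer holds the last
 * end_pos - start_pos == 1024 bytes; before any truncation start_pos stays 0.
 * A hypothetical helper reading out the useful length:
 */
static inline u64 example_log_content_len(const struct bpf_verifier_log *log)
{
	return log->end_pos - log->start_pos;
}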

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_FIXED	8
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log && log->level;
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_arg_info {
	enum bpf_arg_type arg_type;
	union {
		u32 mem_size;
		u32 btf_id;
	};
};

enum priv_stack_mode {
	PRIV_STACK_UNKNOWN,
	NO_PRIV_STACK,
	PRIV_STACK_ADAPTIVE,
};

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	u16 stack_extra;
	/* offsets in range [stack_depth .. fastcall_stack_off)
	 * are used for bpf_fastcall spills and fills.
	 */
	s16 fastcall_stack_off;
	bool has_tail_call: 1;
	bool tail_call_reachable: 1;
	bool has_ld_abs: 1;
	bool is_cb: 1;
	bool is_async_cb: 1;
	bool is_exception_cb: 1;
	bool args_cached: 1;
	/* true if bpf_fastcall stack region is used by functions that can't be inlined */
	bool keep_fastcall_stack: 1;
	bool changes_pkt_data: 1;
	bool might_sleep: 1;

	enum priv_stack_mode priv_stack_mode;
	u8 arg_cnt;
	struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};

struct bpf_verifier_env;

struct backtrack_state {
	struct bpf_verifier_env *env;
	u32 frame;
	u32 reg_masks[MAX_CALL_FRAMES];
	u64 stack_masks[MAX_CALL_FRAMES];
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

struct bpf_idmap {
	u32 tmp_id_gen;
	struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};

struct bpf_idset {
	u32 count;
	u32 ids[BPF_ID_MAP_SIZE];
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct module *attach_btf_mod;	/* The owner module of prog->aux->attach_btf */
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	bool test_reg_invariants;	/* fail verification on register invariants violations */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	/* Search pruning optimization, array of list_heads for
	 * lists of struct bpf_verifier_state_list.
	 */
	struct list_head *explored_states;
	struct list_head free_list;	/* list of struct bpf_verifier_state_list */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	u32 hidden_subprog_cnt;		/* number of hidden subprogs */
	int exception_callback_subprog;
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	/* Allow access to uninitialized stack memory. Writes with fixed offset are
	 * always allowed, so this refers to reads (with fixed or variable offset),
	 * to writes with variable offset and to indirect (helper) accesses.
	 */
	bool allow_uninit_stack;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	bool seen_exception;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
	union {
		struct bpf_idmap idmap_scratch;
		struct bpf_idset idset_scratch;
	};
	struct {
		int *insn_state;
		int *insn_stack;
		/* vector of instruction indexes sorted in post-order */
		int *insn_postorder;
		int cur_stack;
		/* current position in the insn_postorder vector */
		int cur_postorder;
	} cfg;
	struct backtrack_state bt;
	struct bpf_insn_hist_entry *insn_hist;
	struct bpf_insn_hist_entry *cur_hist_ent;
	u32 insn_hist_cap;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is peak number of states. this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	u32 free_list_size;
	u32 explored_states_size;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u64 prev_log_pos, prev_insn_print_pos;
	/* buffer used to temporarily hold constants as scalar registers */
	struct bpf_reg_state fake_reg[2];
	/* buffer used to generate temporary string representations,
	 * e.g., in reg_type_str() to generate reg_type string
	 */
	char tmp_str_buf[TMP_STR_BUF_LEN];
	struct bpf_insn insn_buf[INSN_BUF_SIZE];
	struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
};

static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
{
	return &env->prog->aux->func_info_aux[subprog];
}

static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
{
	return &env->subprog_info[subprog];
}

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size);
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);

__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
				  u32 insn_off,
				  const char *prefix_fmt, ...);

#define verifier_bug_if(cond, env, fmt, args...)				\
	({									\
		bool __cond = (cond);						\
		if (unlikely(__cond)) {						\
			BPF_WARN_ONCE(1, "verifier bug: " fmt "(" #cond ")\n", ##args); \
			bpf_log(&env->log, "verifier bug: " fmt "(" #cond ")\n", ##args); \
		}								\
		(__cond);							\
	})
#define verifier_bug(env, fmt, args...) verifier_bug_if(1, env, fmt, ##args)

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
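
/* Illustrative round trip (not used anywhere; assumes btf_id < BIT(31) so the
 * 0x80000000 raw-BTF marker bit doesn't collide): unpacking recovers what
 * bpf_trampoline_compute_key() packed.
 */
static inline bool example_trampoline_key_roundtrip(struct btf *btf, u32 id)
{
	u64 key = bpf_trampoline_compute_key(NULL, btf, id);
	u32 obj_id, btf_id;

	bpf_trampoline_unpack_key(key, &obj_id, &btf_id);
	return obj_id == btf_obj_id(btf) && btf_id == id;
}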

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

int mark_chain_precision(struct bpf_verifier_env *env, int regno);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
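
/* Example decomposition (hypothetical helper): a reg->type such as
 * PTR_TO_MAP_VALUE | PTR_MAYBE_NULL splits into a base type plus modifier
 * flags, which is how most verifier checks consume extended types.
 */
static inline bool example_is_nullable_map_value(u32 type)
{
	return base_type(type) == PTR_TO_MAP_VALUE &&
	       (type_flag(type) & PTR_MAYBE_NULL);
}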

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
		prog->aux->saved_dst_prog_type : prog->type;
}

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
		return prog->aux->jits_use_priv_stack;
	case BPF_PROG_TYPE_LSM:
		return false;
	default:
		return true;
	}
}

#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)

static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
	return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}

static inline bool type_is_ptr_alloc_obj(u32 type)
{
	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
}

static inline bool type_is_non_owning_ref(u32 type)
{
	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
}

static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	type = base_type(type);
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static inline bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCK_COMMON ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_XDP_SOCK;
}

static inline bool type_may_be_null(u32 type)
{
	return type & PTR_MAYBE_NULL;
}

static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
	env->scratched_regs |= 1U << regno;
}

static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
	env->scratched_stack_slots |= 1ULL << spi;
}

static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
	return (env->scratched_regs >> regno) & 1;
}

static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
	return (env->scratched_stack_slots >> regno) & 1;
}

static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
	return env->scratched_regs || env->scratched_stack_slots;
}

static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
	env->scratched_regs = 0U;
	env->scratched_stack_slots = 0ULL;
}

/* Used for printing the entire verifier state. */
static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
	env->scratched_regs = ~0U;
	env->scratched_stack_slots = ~0ULL;
}

static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
{
#ifdef __BIG_ENDIAN
	off -= spill_size - fill_size;
#endif

	return !(off % BPF_REG_SIZE);
}
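
/* Worked example for bpf_stack_narrow_access_ok() (illustration only): for an
 * 8-byte spill occupying stack bytes [-8, -1], a 4-byte fill of the low half
 * starts at the slot base on little-endian, but at the upper addresses on
 * big-endian, which is what the offset adjustment above accounts for:
 *
 *	bpf_stack_narrow_access_ok(-8, 4, 8);	// true on LE, false on BE
 *	bpf_stack_narrow_access_ok(-4, 4, 8);	// false on LE, true on BE
 */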

const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);

void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
			  u32 frameno, bool print_all);
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
		      u32 frameno);

#endif /* _LINUX_BPF_VERIFIER_H */