/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Include this file directly only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/rw_hint.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)
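/*
 * Illustrative sketch, not part of the header proper: two trivial helpers
 * showing how SECTOR_SHIFT relates sector counts to byte counts. The
 * example_* names are made up for illustration only.
 */
static inline u64 example_sectors_to_bytes(sector_t nr_sects)
{
	return (u64)nr_sects << SECTOR_SHIFT;
}

static inline sector_t example_bytes_to_sectors(u64 bytes)
{
	/* callers are expected to pass a multiple of SECTOR_SIZE */
	return bytes >> SECTOR_SHIFT;
}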

struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk *	bd_disk;
	struct request_queue *	bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	atomic_t		__bd_flags;	// partition number + flags
#define BD_PARTNO		255	// lower 8 bits; assign-once
#define BD_READ_ONLY		(1u<<8)	// read-only policy
#define BD_WRITE_HOLDER		(1u<<9)
#define BD_HAS_SUBMIT_BIO	(1u<<10)
#define BD_RO_WARNED		(1u<<11)
#ifdef CONFIG_FAIL_MAKE_REQUEST
#define BD_MAKE_IT_FAIL		(1u<<12)
#endif
	dev_t			bd_dev;
	struct address_space	*bd_mapping;	/* page cache */

	atomic_t		bd_openers;
	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	void *			bd_claiming;
	void *			bd_holder;
	const struct blk_holder_ops *bd_holder_ops;
	struct mutex		bd_holder_lock;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	atomic_t		bd_fsfreeze_count; /* number of freeze requests */
	struct mutex		bd_fsfreeze_mutex; /* serialize freeze/thaw */

	struct partition_meta_info *bd_meta_info;
	int			bd_writers;
#ifdef CONFIG_SECURITY
	void			*bd_security;
#endif
	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))
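/*
 * Illustrative sketch, not part of the header proper: because bd_device is
 * embedded in struct block_device, the container_of() behind dev_to_bdev()
 * recovers the block_device from a struct device pointer, e.g. inside a
 * device attribute callback. The example_* name is made up for illustration
 * only.
 */
static inline sector_t example_bdev_capacity(struct device *dev)
{
	return dev_to_bdev(dev)->bd_nr_sectors;
}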

/*
 * Block error status values. See block/blk-core:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)16)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT	((__force blk_status_t)17)

/*
 * Invalid size or alignment.
 */
#define BLK_STS_INVAL		((__force blk_status_t)19)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
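/*
 * Illustrative sketch, not part of the header proper: how a hypothetical
 * failover-capable completion handler could use blk_path_error() to decide
 * whether an I/O is worth retrying on another path. The example_* name is
 * made up for illustration only.
 */
static inline bool example_should_retry_on_other_path(blk_status_t status)
{
	if (status == BLK_STS_OK)
		return false;	/* completed fine, nothing to retry */

	/* Only errors that may be path related justify a failover retry. */
	return blk_path_error(status);
}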

struct bio_issue {
	u64 value;
};

typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	enum rw_hint		bi_write_hint;
	u8			bi_write_stream;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	union {
		/* for polled bios: */
		blk_qc_t		bi_cookie;
		/* for plugged zoned writes only: */
		unsigned int		__bi_nr_segments;
	};
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)

/*
 * bio flags
 */
enum {
	BIO_PAGE_PINNED,	/* Unpin pages in bio_release_pages() */
	BIO_CLONED,		/* doesn't own data */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	/*
	 * This bio has completed bps throttling at the single tg granularity,
	 * which is different from BIO_BPS_THROTTLED. When the bio is enqueued
	 * into the sq->queued of the upper tg, or is about to be dispatched,
	 * this flag needs to be cleared. Since blk-throttle and rq_qos are not
	 * on the same hierarchical level, reuse the value.
	 */
	BIO_TG_BPS_THROTTLED = BIO_QOS_THROTTLED,
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
	BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
	BIO_FLAG_LAST
};
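/*
 * Illustrative sketch, not part of the header proper: the BIO_* values above
 * are bit numbers within bio->bi_flags, so testing a flag is a shift and
 * mask. The real accessors (bio_flagged() and friends) live in
 * <linux/bio.h>; the example_* name is made up for illustration only.
 */
static inline bool example_bio_flag_test(const struct bio *bio, unsigned int bit)
{
	return bio->bi_flags & (1U << bit);
}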

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)13,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)15,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};

/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */
	__REQ_ATOMIC,		/* for atomic write operations */
	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
#define REQ_ATOMIC	(__force blk_opf_t)(1ULL << __REQ_ATOMIC)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
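/*
 * Illustrative sketch, not part of the header proper: a blk_opf_t is one
 * REQ_OP_* value in the low REQ_OP_BITS OR-ed with any number of the REQ_*
 * flags above, e.g. a synchronous write with forced unit access. The
 * EXAMPLE_* name is made up for illustration only; the op can be recovered
 * with the REQ_OP_MASK masking that bio_op() below performs.
 */
#define EXAMPLE_SYNC_FUA_WRITE	(REQ_OP_WRITE | REQ_SYNC | REQ_FUA)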

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	/* op_is_write() returns 0 or 1, matching STAT_READ/STAT_WRITE above */
	return op_is_write(op);
}

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */