| 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | |
| 3 | #ifndef __KVM_TYPES_H__ |
| 4 | #define __KVM_TYPES_H__ |
| 5 | |
/*
 * Forward declarations: users of this header only ever refer to these
 * types by pointer, so declaring the tags here avoids pulling the full
 * definitions (and their heavyweight include chains) into every file
 * that needs the basic KVM types below.
 */
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
struct kvm_gfn_range;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
struct kvm_one_reg;
struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
struct kvm_memslots;

enum kvm_mr_change;
| 21 | |
| 22 | #include <linux/bits.h> |
| 23 | #include <linux/mutex.h> |
| 24 | #include <linux/types.h> |
| 25 | #include <linux/spinlock_types.h> |
| 26 | |
| 27 | #include <asm/kvm_types.h> |
| 28 | |
| 29 | /* |
| 30 | * Address types: |
| 31 | * |
| 32 | * gva - guest virtual address |
| 33 | * gpa - guest physical address |
| 34 | * gfn - guest frame number |
| 35 | * hva - host virtual address |
| 36 | * hpa - host physical address |
| 37 | * hfn - host frame number |
| 38 | */ |
| 39 | |
typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef u64 gfn_t;

/*
 * All-ones sentinel for "no/invalid guest physical address".  Written as
 * ~(gpa_t)0 so the value tracks the width of gpa_t.
 */
#define INVALID_GPA (~(gpa_t)0)

typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef u64 hfn_t;

/* KVM's "pfn" is a host frame number (see the glossary above). */
typedef hfn_t kvm_pfn_t;
| 51 | |
/*
 * Cached gpa => hva translation for a range of guest physical memory.
 * NOTE(review): @generation presumably snapshots the memslots generation
 * at init time so stale translations can be detected after a memslot
 * update — confirm against the cache init/refresh helpers in kvm_main.c.
 */
struct gfn_to_hva_cache {
	u64 generation;			/* memslots generation this was built from */
	gpa_t gpa;			/* base guest physical address of the range */
	unsigned long hva;		/* host virtual address backing @gpa */
	unsigned long len;		/* length of the cached range, in bytes */
	struct kvm_memory_slot *memslot; /* memslot that contains @gpa */
};
| 59 | |
/*
 * Cached gpa => pfn translation, including a kernel mapping of the page.
 * NOTE(review): field semantics below are inferred from naming and the
 * sibling gfn_to_hva_cache — verify against pfncache.c before relying on
 * them.  @lock vs. @refresh_lock coverage in particular is not visible
 * from this header alone.
 */
struct gfn_to_pfn_cache {
	u64 generation;			/* memslots generation this was built from */
	gpa_t gpa;			/* guest physical address being cached */
	unsigned long uhva;		/* userspace ("u") host virtual address */
	struct kvm_memory_slot *memslot; /* memslot that contains @gpa */
	struct kvm *kvm;		/* VM this cache belongs to */
	struct list_head list;		/* linkage, presumably on a per-VM list */
	rwlock_t lock;			/* protects the cached translation */
	struct mutex refresh_lock;	/* serializes refresh operations */
	void *khva;			/* kernel ("k") virtual mapping of the page */
	kvm_pfn_t pfn;			/* host frame number backing @gpa */
	bool active;			/* cache is in use */
	bool valid;			/* cached translation is currently valid */
};
| 74 | |
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
 * Memory caches are used to preallocate memory ahead of various MMU flows,
 * e.g. page fault handlers.  Gracefully handling allocation failures deep in
 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
 * holding MMU locks.  Note, these caches act more like prefetch buffers than
 * classical caches, i.e. objects are not returned to the cache on being freed.
 *
 * The @capacity field and @objects array are lazily initialized when the cache
 * is topped up (__kvm_mmu_topup_memory_cache()).
 */
struct kvm_mmu_memory_cache {
	gfp_t gfp_zero;			/* NOTE(review): presumably __GFP_ZERO or 0 — confirm */
	gfp_t gfp_custom;		/* extra GFP flags for allocations, if any */
	u64 init_value;			/* value to initialize new objects with */
	struct kmem_cache *kmem_cache;	/* slab to allocate from; semantics of NULL not visible here */
	int capacity;			/* size of @objects, set on first topup (see above) */
	int nobjs;			/* number of preallocated objects currently held */
	void **objects;			/* lazily allocated array of preallocated objects */
};
#endif
| 96 | |
/* Number of buckets in the halt-polling latency histograms below. */
#define HALT_POLL_HIST_COUNT 32

/* VM-wide stats common to all architectures (arch code embeds this). */
struct kvm_vm_stat_generic {
	u64 remote_tlb_flush;		/* remote TLB flushes performed */
	u64 remote_tlb_flush_requests;	/* remote TLB flushes requested */
};
| 103 | |
/*
 * Per-vCPU stats common to all architectures, mostly tracking halt-polling
 * behavior.  The *_hist arrays are histograms with HALT_POLL_HIST_COUNT
 * buckets; the *_ns fields are cumulative times in nanoseconds.
 */
struct kvm_vcpu_stat_generic {
	u64 halt_successful_poll;	/* polls that avoided a full halt */
	u64 halt_attempted_poll;	/* halt-poll attempts */
	u64 halt_poll_invalid;		/* NOTE(review): polls aborted as invalid — confirm exact trigger */
	u64 halt_wakeup;		/* wakeups from a halted state */
	u64 halt_poll_success_ns;	/* total time spent in successful polls */
	u64 halt_poll_fail_ns;		/* total time spent in failed polls */
	u64 halt_wait_ns;		/* total time spent blocked waiting */
	u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT]; /* successful-poll latency histogram */
	u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];	  /* failed-poll latency histogram */
	u64 halt_wait_hist[HALT_POLL_HIST_COUNT];	  /* blocking-wait latency histogram */
	u64 blocking;			/* vCPU is currently blocking (flag-like counter) */
};
| 117 | |
/*
 * Size of a stats descriptor name field.  NOTE(review): presumably includes
 * the NUL terminator — confirm against struct kvm_stats_desc in the UAPI.
 */
#define KVM_STATS_NAME_SIZE 48
| 119 | |
| 120 | #endif /* __KVM_TYPES_H__ */ |
| 121 | |