| 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ |
| 2 | /* |
| 3 | * Framework for buffer objects that can be shared across devices/subsystems. |
| 4 | * |
| 5 | * Copyright(C) 2015 Intel Ltd |
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify it |
| 8 | * under the terms of the GNU General Public License version 2 as published by |
| 9 | * the Free Software Foundation. |
| 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, but WITHOUT |
| 12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 14 | * more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU General Public License along with |
| 17 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | */ |
| 19 | |
| 20 | #ifndef _DMA_BUF_UAPI_H_ |
| 21 | #define _DMA_BUF_UAPI_H_ |
| 22 | |
| 23 | #include <linux/types.h> |
| 24 | |
| 25 | /** |
| 26 | * struct dma_buf_sync - Synchronize with CPU access. |
| 27 | * |
| 28 | * When a DMA buffer is accessed from the CPU via mmap, it is not always |
| 29 | * possible to guarantee coherency between the CPU-visible map and underlying |
| 30 | * memory. To manage coherency, DMA_BUF_IOCTL_SYNC must be used to bracket |
| 31 | * any CPU access to give the kernel the chance to shuffle memory around if |
| 32 | * needed. |
| 33 | * |
| 34 | * Prior to accessing the map, the client must call DMA_BUF_IOCTL_SYNC |
| 35 | * with DMA_BUF_SYNC_START and the appropriate read/write flags. Once the |
| 36 | * access is complete, the client should call DMA_BUF_IOCTL_SYNC with |
| 37 | * DMA_BUF_SYNC_END and the same read/write flags. |
| 38 | * |
| 39 | * The synchronization provided via DMA_BUF_IOCTL_SYNC only provides cache |
| 40 | * coherency. It does not prevent other processes or devices from |
| 41 | * accessing the memory at the same time. If synchronization with a GPU or |
| 42 | * other device driver is required, it is the client's responsibility to |
| 43 | * wait for buffer to be ready for reading or writing before calling this |
| 44 | * ioctl with DMA_BUF_SYNC_START. Likewise, the client must ensure that |
| 45 | * follow-up work is not submitted to GPU or other device driver until |
 * after this ioctl has been called with DMA_BUF_SYNC_END.
| 47 | * |
| 48 | * If the driver or API with which the client is interacting uses implicit |
| 49 | * synchronization, waiting for prior work to complete can be done via |
| 50 | * poll() on the DMA buffer file descriptor. If the driver or API requires |
| 51 | * explicit synchronization, the client may have to wait on a sync_file or |
| 52 | * other synchronization primitive outside the scope of the DMA buffer API. |
| 53 | */ |
struct dma_buf_sync {
	/**
	 * @flags: Set of access flags
	 *
	 * Combine DMA_BUF_SYNC_START or DMA_BUF_SYNC_END with at least one
	 * of the read/write flags (see the DMA_BUF_IOCTL_SYNC usage notes
	 * above); flags outside DMA_BUF_SYNC_VALID_FLAGS_MASK are invalid.
	 *
	 * DMA_BUF_SYNC_START:
	 *     Indicates the start of a map access session.
	 *
	 * DMA_BUF_SYNC_END:
	 *     Indicates the end of a map access session.
	 *
	 * DMA_BUF_SYNC_READ:
	 *     Indicates that the mapped DMA buffer will be read by the
	 *     client via the CPU map.
	 *
	 * DMA_BUF_SYNC_WRITE:
	 *     Indicates that the mapped DMA buffer will be written by the
	 *     client via the CPU map.
	 *
	 * DMA_BUF_SYNC_RW:
	 *     An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE.
	 */
	__u64 flags;
};
| 77 | |
/*
 * Flag values for struct dma_buf_sync::flags. These are part of the
 * userspace ABI; the numeric values must never be changed or reused.
 * Note DMA_BUF_SYNC_START is deliberately zero — "start" is the default
 * direction bit state, and only DMA_BUF_SYNC_END sets a bit.
 */
#define DMA_BUF_SYNC_READ      (1 << 0)
#define DMA_BUF_SYNC_WRITE     (2 << 0)
#define DMA_BUF_SYNC_RW        (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
#define DMA_BUF_SYNC_START     (0 << 2)
#define DMA_BUF_SYNC_END       (1 << 2)
/* Every bit DMA_BUF_IOCTL_SYNC accepts (START contributes no bits). */
#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
	(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)

/*
 * Buffer name length limit for the DMA_BUF_SET_NAME* ioctls.
 * NOTE(review): whether this count includes the terminating NUL is not
 * visible in this header — confirm against the dma-buf core implementation.
 */
#define DMA_BUF_NAME_LEN	32
| 87 | |
| 88 | /** |
| 89 | * struct dma_buf_export_sync_file - Get a sync_file from a dma-buf |
| 90 | * |
| 91 | * Userspace can perform a DMA_BUF_IOCTL_EXPORT_SYNC_FILE to retrieve the |
| 92 | * current set of fences on a dma-buf file descriptor as a sync_file. CPU |
| 93 | * waits via poll() or other driver-specific mechanisms typically wait on |
| 94 | * whatever fences are on the dma-buf at the time the wait begins. This |
| 95 | * is similar except that it takes a snapshot of the current fences on the |
| 96 | * dma-buf for waiting later instead of waiting immediately. This is |
| 97 | * useful for modern graphics APIs such as Vulkan which assume an explicit |
| 98 | * synchronization model but still need to inter-operate with dma-buf. |
| 99 | * |
| 100 | * The intended usage pattern is the following: |
| 101 | * |
| 102 | * 1. Export a sync_file with flags corresponding to the expected GPU usage |
| 103 | * via DMA_BUF_IOCTL_EXPORT_SYNC_FILE. |
| 104 | * |
| 105 | * 2. Submit rendering work which uses the dma-buf. The work should wait on |
| 106 | * the exported sync file before rendering and produce another sync_file |
| 107 | * when complete. |
| 108 | * |
| 109 | * 3. Import the rendering-complete sync_file into the dma-buf with flags |
| 110 | * corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE. |
| 111 | * |
| 112 | * Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl, |
| 113 | * the above is not a single atomic operation. If userspace wants to ensure |
 * ordering via these fences, it is the responsibility of userspace to use
| 115 | * locks or other mechanisms to ensure that no other context adds fences or |
| 116 | * submits work between steps 1 and 3 above. |
| 117 | */ |
struct dma_buf_export_sync_file {
	/**
	 * @flags: Read/write flags
	 *
	 * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
	 * (DMA_BUF_SYNC_START/END have no meaning for this ioctl.)
	 *
	 * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
	 * the returned sync file waits on any writers of the dma-buf to
	 * complete.  Waiting on the returned sync file is equivalent to
	 * poll() with POLLIN.
	 *
	 * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on
	 * any users of the dma-buf (read or write) to complete.  Waiting
	 * on the returned sync file is equivalent to poll() with POLLOUT.
	 * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this
	 * is equivalent to just DMA_BUF_SYNC_WRITE.
	 */
	__u32 flags;
	/**
	 * @fd: Returned sync file descriptor
	 *
	 * Out parameter filled in by the kernel; the caller owns the
	 * descriptor and should close() it when no longer needed.
	 */
	__s32 fd;
};
| 139 | |
| 140 | /** |
| 141 | * struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf |
| 142 | * |
| 143 | * Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a |
| 144 | * sync_file into a dma-buf for the purposes of implicit synchronization |
| 145 | * with other dma-buf consumers. This allows clients using explicitly |
| 146 | * synchronized APIs such as Vulkan to inter-op with dma-buf consumers |
| 147 | * which expect implicit synchronization such as OpenGL or most media |
| 148 | * drivers/video. |
| 149 | */ |
struct dma_buf_import_sync_file {
	/**
	 * @flags: Read/write flags
	 *
	 * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
	 * (DMA_BUF_SYNC_START/END have no meaning for this ioctl.)
	 *
	 * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
	 * this inserts the sync_file as a read-only fence.  Any subsequent
	 * implicitly synchronized writes to this dma-buf will wait on this
	 * fence but reads will not.
	 *
	 * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a
	 * write fence.  All subsequent implicitly synchronized access to
	 * this dma-buf will wait on this fence.
	 */
	__u32 flags;
	/**
	 * @fd: Sync file descriptor
	 *
	 * In parameter: the sync_file whose fences are inserted into the
	 * dma-buf.  The descriptor itself remains owned by the caller.
	 */
	__s32 fd;
};
| 169 | |
/* ioctl type ('b') shared by all dma-buf ioctls below. */
#define DMA_BUF_BASE		'b'
/* Bracket CPU access to a mapped dma-buf; see struct dma_buf_sync above. */
#define DMA_BUF_IOCTL_SYNC	_IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)

/* 32/64bitness of this uapi was botched in android, there's no difference
 * between them in actual uapi, they're just different numbers.
 */
#define DMA_BUF_SET_NAME	_IOW(DMA_BUF_BASE, 1, const char *)
#define DMA_BUF_SET_NAME_A	_IOW(DMA_BUF_BASE, 1, __u32)
#define DMA_BUF_SET_NAME_B	_IOW(DMA_BUF_BASE, 1, __u64)
/* Snapshot the buffer's current fences as a sync_file; see struct above. */
#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE	_IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file)
/* Insert a sync_file's fences into the buffer; see struct above. */
#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE	_IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file)
| 181 | |
| 182 | #endif |
| 183 | |