#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <alloca.h>	/* alloca() is used in vku_create_dev */
#include "vku.h"

static const char *get_device_name(VkPhysicalDeviceType type);
static const char *get_mem_prop_flag_string(VkMemoryPropertyFlags flags);
static const char *get_queue_flag_string(VkQueueFlagBits flags);
static int ver_major(uint32_t ver);
static int ver_minor(uint32_t ver);
static int ver_patch(uint32_t ver);
static const char *mem_size_str(long sz);

static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback_thunk(VkDebugReportFlagsEXT flags,
		VkDebugReportObjectTypeEXT otype, uint64_t obj, size_t loc, int32_t code,
		const char *layer_prefix, const char *msg, void *udata);

VkInstance vk;
VkDevice vkdev;
VkQueue vkq;

static VkPhysicalDevice *phys_devices;
static int sel_dev, sel_qfamily;

static VkExtensionProperties *vkext, *vkdevext;
static uint32_t vkext_count, vkdevext_count;

static VkDebugReportCallbackEXT debug_callback_obj;
static PFN_vkCreateDebugReportCallbackEXT vk_create_debug_report_callback;
static PFN_vkDestroyDebugReportCallbackEXT vk_destroy_debug_report_callback;
static void (*user_dbg_callback)(const char*, void*);
static void *user_dbg_callback_data;


int vku_have_extension(const char *name)
{
	int i;

	if(!vkext) {
		vkext_count = 0;
		vkEnumerateInstanceExtensionProperties(0, &vkext_count, 0);
		if(vkext_count) {
			if(!(vkext = malloc(vkext_count * sizeof *vkext))) {
				perror("failed to allocate instance extension list");
				return 0;
			}
			vkEnumerateInstanceExtensionProperties(0, &vkext_count, vkext);

			printf("instance extensions:\n");
			for(i=0; i<(int)vkext_count; i++) {
				printf(" %s (ver: %u)\n", vkext[i].extensionName, (unsigned int)vkext[i].specVersion);
			}
		}
	}

	for(i=0; i<(int)vkext_count; i++) {
		if(strcmp(vkext[i].extensionName, name) == 0) {
			return 1;
		}
	}
	return 0;
}

int vku_have_device_extension(const char *name)
{
	int i;

	if(sel_dev < 0) return 0;

	if(!vkdevext) {
		vkdevext_count = 0;
		vkEnumerateDeviceExtensionProperties(phys_devices[sel_dev], 0, &vkdevext_count, 0);
		if(vkdevext_count) {
			if(!(vkdevext = malloc(vkdevext_count * sizeof *vkdevext))) {
				perror("failed to allocate device extension list");
				return 0;
			}
			vkEnumerateDeviceExtensionProperties(phys_devices[sel_dev], 0, &vkdevext_count, vkdevext);

			printf("selected device extensions:\n");
			for(i=0; i<(int)vkdevext_count; i++) {
				printf(" %s (ver: %u)\n", vkdevext[i].extensionName, (unsigned int)vkdevext[i].specVersion);
			}
		}
	}

	for(i=0; i<(int)vkdevext_count; i++) {
		if(strcmp(vkdevext[i].extensionName, name) == 0) {
			return 1;
		}
	}
	return 0;
}

void vku_set_debug_callback(void (*func)(const char*, void*), void *cls)
{
	if(!debug_callback_obj && vk_create_debug_report_callback) {
		VkDebugReportCallbackCreateInfoEXT cbinf;

		memset(&cbinf, 0, sizeof cbinf);
		cbinf.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
		cbinf.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
		cbinf.pfnCallback = debug_callback_thunk;

		vk_create_debug_report_callback(vk, &cbinf, 0, &debug_callback_obj);
	}

	user_dbg_callback = func;
	user_dbg_callback_data = cls;
}


int vku_create_dev(void)
{
	int i, j;
	uint32_t nlayers;
	VkInstanceCreateInfo inst_info;
	VkLayerProperties *layers;
	VkDeviceCreateInfo dev_info;
	VkDeviceQueueCreateInfo queue_info;
	VkCommandPoolCreateInfo cmdpool_info;
	uint32_t num_devices;
	float qprio = 0.0f;

	static const char *ext_names[] = {
#ifdef VK_USE_PLATFORM_XLIB_KHR
		"VK_KHR_xlib_surface",
#endif
		"VK_KHR_surface",
		"VK_EXT_debug_report"
	};
	static const char *devext_names[] = {
		"VK_KHR_swapchain"
	};
	static const char *layer_names[] = {
		"VK_LAYER_LUNARG_standard_validation",
		"VK_LAYER_LUNARG_parameter_validation",
		"VK_LAYER_LUNARG_core_validation",
		"VK_LAYER_LUNARG_image"
	};

	sel_dev = -1;
	sel_qfamily = -1;

	for(i=0; i<(int)(sizeof ext_names / sizeof *ext_names); i++) {
		if(!vku_have_extension(ext_names[i])) {
			fprintf(stderr, "required extension (%s) not found\n", ext_names[i]);
			return -1;
		}
	}

	/* enumerate available validation layers */
	vkEnumerateInstanceLayerProperties(&nlayers, 0);
	layers = alloca(nlayers * sizeof *layers);
	vkEnumerateInstanceLayerProperties(&nlayers, layers);

	printf("Available validation layers:\n");
	for(i=0; i<(int)nlayers; i++) {
		printf(" %s\n", layers[i].layerName);
	}

	memset(&inst_info, 0, sizeof inst_info);
	inst_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
	inst_info.ppEnabledExtensionNames = ext_names;
	inst_info.enabledExtensionCount = sizeof ext_names / sizeof *ext_names;
	inst_info.ppEnabledLayerNames = layer_names;
	inst_info.enabledLayerCount = sizeof layer_names / sizeof *layer_names;

	if(vkCreateInstance(&inst_info, 0, &vk) != 0) {
		fprintf(stderr, "failed to create vulkan instance\n");
		return -1;
	}
	printf("created vulkan instance\n");

	if(!(vk_create_debug_report_callback = (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(vk, "vkCreateDebugReportCallbackEXT"))) {
		fprintf(stderr, "failed to load vkCreateDebugReportCallbackEXT\n");
		return -1;
	}
	vk_destroy_debug_report_callback = (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr(vk, "vkDestroyDebugReportCallbackEXT");
	vku_set_debug_callback(user_dbg_callback, user_dbg_callback_data);	/* set debug callback */


	if(vkEnumeratePhysicalDevices(vk, &num_devices, 0) != 0) {
		fprintf(stderr, "failed to enumerate vulkan physical devices\n");
		return -1;
	}
	phys_devices = malloc(num_devices * sizeof *phys_devices);
	if(vkEnumeratePhysicalDevices(vk, &num_devices, phys_devices) != 0) {
		fprintf(stderr, "failed to enumerate vulkan physical devices\n");
		return -1;
	}
	printf("found %u physical device(s)\n", (unsigned int)num_devices);

	for(i=0; i<(int)num_devices; i++) {
		VkPhysicalDeviceProperties dev_prop;
		VkPhysicalDeviceMemoryProperties mem_prop;
		VkQueueFamilyProperties *qprop;
		uint32_t qprop_count;

		vkGetPhysicalDeviceProperties(phys_devices[i], &dev_prop);

		printf("Device %d: %s\n", i, dev_prop.deviceName);
		printf(" type: %s\n", get_device_name(dev_prop.deviceType));
		printf(" API version: %d.%d.%d\n", ver_major(dev_prop.apiVersion), ver_minor(dev_prop.apiVersion),
				ver_patch(dev_prop.apiVersion));
		printf(" driver version: %d.%d.%d\n", ver_major(dev_prop.driverVersion), ver_minor(dev_prop.driverVersion),
				ver_patch(dev_prop.driverVersion));
		printf(" vendor id: %x device id: %x\n", dev_prop.vendorID, dev_prop.deviceID);


		vkGetPhysicalDeviceMemoryProperties(phys_devices[i], &mem_prop);
		printf(" %d memory heaps:\n", mem_prop.memoryHeapCount);
		for(j=0; j<(int)mem_prop.memoryHeapCount; j++) {
			VkMemoryHeap heap = mem_prop.memoryHeaps[j];
			printf(" Heap %d - size: %s, flags: %s\n", j, mem_size_str(heap.size),
					heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT ? "device-local" : "-");
		}
		printf(" %d memory types:\n", mem_prop.memoryTypeCount);
		for(j=0; j<(int)mem_prop.memoryTypeCount; j++) {
			VkMemoryType type = mem_prop.memoryTypes[j];
			printf(" Type %d - heap: %d, flags: %s\n", j, type.heapIndex,
					get_mem_prop_flag_string(type.propertyFlags));
		}

		vkGetPhysicalDeviceQueueFamilyProperties(phys_devices[i], &qprop_count, 0);
		if(qprop_count <= 0) {
			continue;
		}
		qprop = malloc(qprop_count * sizeof *qprop);
		vkGetPhysicalDeviceQueueFamilyProperties(phys_devices[i], &qprop_count, qprop);

		for(j=0; j<(int)qprop_count; j++) {
			printf(" Queue family %d:\n", j);
			printf("  flags: %s\n", get_queue_flag_string(qprop[j].queueFlags));
			printf("  num queues: %u\n", qprop[j].queueCount);

			if(qprop[j].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
				sel_dev = i;
				sel_qfamily = j;
			}
		}
		free(qprop);
	}

	if(sel_dev < 0 || sel_qfamily < 0) {
		fprintf(stderr, "failed to find any device with a graphics-capable command queue\n");
		return -1;
	}

	for(i=0; i<(int)(sizeof devext_names / sizeof *devext_names); i++) {
		if(!vku_have_device_extension(devext_names[i])) {
			fprintf(stderr, "required extension (%s) not found on the selected device (%d)\n",
					devext_names[i], sel_dev);
			return -1;
		}
	}

	/* create device & command queue */
	memset(&queue_info, 0, sizeof queue_info);
	queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
	queue_info.queueFamilyIndex = sel_qfamily;
	queue_info.queueCount = 1;
	queue_info.pQueuePriorities = &qprio;

	memset(&dev_info, 0, sizeof dev_info);
	dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
	dev_info.queueCreateInfoCount = 1;
	dev_info.pQueueCreateInfos = &queue_info;
	dev_info.enabledExtensionCount = sizeof devext_names / sizeof *devext_names;
	dev_info.ppEnabledExtensionNames = devext_names;

	if(vkCreateDevice(phys_devices[sel_dev], &dev_info, 0, &vkdev) != 0) {
		fprintf(stderr, "failed to create device %d\n", sel_dev);
		return -1;
	}
	printf("created device %d\n", sel_dev);

	vkGetDeviceQueue(vkdev, sel_qfamily, 0, &vkq);

	/* create command buffer pool */
	memset(&cmdpool_info, 0, sizeof cmdpool_info);
	cmdpool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
	cmdpool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
	cmdpool_info.queueFamilyIndex = sel_qfamily;

	if(vkCreateCommandPool(vkdev, &cmdpool_info, 0, &vkcmdpool) != 0) {
		fprintf(stderr, "failed to create command pool\n");
		return -1;
	}

	/* XXX hardcoded 2 command buffers for a swapchain with 2 images */
	if(!(swapchain_cmdbuf = malloc(2 * sizeof *swapchain_cmdbuf))) {
		fprintf(stderr, "failed to allocate command buffer array\n");
		return -1;
	}

	for(i=0; i<2; i++) {
		if(!(swapchain_cmdbuf[i] = vku_alloc_cmdbuf(vkcmdpool, VK_COMMAND_BUFFER_LEVEL_PRIMARY))) {
			fprintf(stderr, "failed to create primary command buffer\n");
			return -1;
		}
	}
	vkcmdbuf = swapchain_cmdbuf[0];

	return 0;
}
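
/* Usage sketch (illustrative only): one possible way to bring the library up,
 * assuming an X11 window `win` on display `dpy` already exists. All functions
 * are the ones defined in this file; error checks are elided.
 *
 *   vku_create_dev();
 *   VkSurfaceKHR surf = vku_xlib_create_surface(dpy, win);
 *   VkSwapchainKHR sc = vku_create_swapchain(surf, 800, 600, 2,
 *           VK_FORMAT_B8G8R8A8_UNORM, VK_PRESENT_MODE_FIFO_KHR, 0);
 *   int nimg;
 *   VkImage *images = vku_get_swapchain_images(sc, &nimg);
 */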

void vku_cleanup(void)
{
	if(vk) {
		vkDeviceWaitIdle(vkdev);
		vkDestroyCommandPool(vkdev, vkcmdpool, 0);
		vkDestroyDevice(vkdev, 0);
		if(vk_destroy_debug_report_callback) {
			vk_destroy_debug_report_callback(vk, debug_callback_obj, 0);
		}
		vkDestroyInstance(vk, 0);
		vk = 0;
	}

	free(phys_devices);
	phys_devices = 0;
}

VkCommandBuffer vku_alloc_cmdbuf(VkCommandPool pool, VkCommandBufferLevel level)
{
	VkCommandBuffer cmdbuf;
	VkCommandBufferAllocateInfo inf;

	memset(&inf, 0, sizeof inf);
	inf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
	inf.commandPool = pool;
	inf.level = level;
	inf.commandBufferCount = 1;

	if(vkAllocateCommandBuffers(vkdev, &inf, &cmdbuf) != 0) {
		return 0;
	}
	return cmdbuf;
}

void vku_free_cmdbuf(VkCommandPool pool, VkCommandBuffer buf)
{
	vkFreeCommandBuffers(vkdev, pool, 1, &buf);
}

void vku_begin_cmdbuf(VkCommandBuffer buf, unsigned int flags)
{
	VkCommandBufferBeginInfo inf;

	memset(&inf, 0, sizeof inf);
	inf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
	inf.flags = flags;

	vkBeginCommandBuffer(buf, &inf);
}


void vku_end_cmdbuf(VkCommandBuffer buf)
{
	vkEndCommandBuffer(buf);
}

void vku_reset_cmdbuf(VkCommandBuffer buf)
{
	vkResetCommandBuffer(buf, 0);
}

void vku_submit_cmdbuf(VkQueue q, VkCommandBuffer buf, VkSemaphore sem_wait,
		VkSemaphore sem_done, VkFence done_fence)
{
	VkSubmitInfo info;

	memset(&info, 0, sizeof info);
	info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
	info.commandBufferCount = 1;
	info.pCommandBuffers = &buf;
	if(sem_wait) {
		/* a wait stage mask is required whenever a wait semaphore is supplied;
		 * waiting at the color attachment output stage is a reasonable default
		 * for a swapchain-acquire semaphore */
		static const VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
		info.pWaitSemaphores = &sem_wait;
		info.waitSemaphoreCount = 1;
		info.pWaitDstStageMask = &wait_stage;
	}
	if(sem_done) {
		info.pSignalSemaphores = &sem_done;
		info.signalSemaphoreCount = 1;
	}

	vkQueueSubmit(q, 1, &info, done_fence);
}

VkSwapchainKHR vku_create_swapchain(VkSurfaceKHR surf, int xsz, int ysz, int n,
		VkFormat fmt, VkPresentModeKHR pmode, VkSwapchainKHR prev)
{
	VkSwapchainKHR sc;
	VkSwapchainCreateInfoKHR inf;

	memset(&inf, 0, sizeof inf);
	inf.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
	inf.surface = surf;
	inf.minImageCount = n;
	inf.imageFormat = fmt;
	inf.imageColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
	inf.imageExtent.width = xsz;
	inf.imageExtent.height = ysz;
	inf.imageArrayLayers = 1;
	inf.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
	inf.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;	/* XXX make this an option? */
	inf.preTransform = VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR;
	inf.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
	inf.presentMode = pmode;
	inf.oldSwapchain = prev;

	if(vkCreateSwapchainKHR(vkdev, &inf, 0, &sc) != 0) {
		return 0;
	}
	return sc;
}

VkImage *vku_get_swapchain_images(VkSwapchainKHR sc, int *count)
{
	uint32_t nimg;
	VkImage *images;

	if(vkGetSwapchainImagesKHR(vkdev, sc, &nimg, 0) != 0) {
		return 0;
	}
	if(!(images = malloc(nimg * sizeof *images))) {
		return 0;
	}
	vkGetSwapchainImagesKHR(vkdev, sc, &nimg, images);

	if(count) *count = (int)nimg;
	return images;
}

int vku_get_next_image(VkSwapchainKHR sc, VkSemaphore semdone)
{
	uint32_t next;

	if(vkAcquireNextImageKHR(vkdev, sc, UINT64_MAX, semdone, 0, &next) != 0) {
		return -1;
	}
	vkcmdbuf = swapchain_cmdbuf[next];
	return (int)next;
}

VkImageView vku_create_view(VkImage img, VkFormat fmt)
{
	VkImageView view;
	VkImageViewCreateInfo iv;

	memset(&iv, 0, sizeof iv);
	iv.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
	iv.image = img;
	iv.viewType = VK_IMAGE_VIEW_TYPE_2D;
	iv.format = fmt;
	iv.components.r = iv.components.g = iv.components.b = iv.components.a =
		VK_COMPONENT_SWIZZLE_IDENTITY;
	iv.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
	iv.subresourceRange.baseMipLevel = 0;
	iv.subresourceRange.levelCount = 1;
	iv.subresourceRange.baseArrayLayer = 0;
	iv.subresourceRange.layerCount = 1;

	if(vkCreateImageView(vkdev, &iv, 0, &view) != 0) {
		fprintf(stderr, "vku_create_view failed\n");
		return 0;
	}
	return view;
}

void vku_destroy_view(VkImageView view)
{
	vkDestroyImageView(vkdev, view, 0);
}

VkFramebuffer vku_create_framebuffer(VkImageView view, int width, int height, VkRenderPass rpass)
{
	VkFramebuffer fb;
	VkFramebufferCreateInfo fbinf;

	memset(&fbinf, 0, sizeof fbinf);
	fbinf.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
	fbinf.renderPass = rpass;
	fbinf.attachmentCount = 1;
	fbinf.pAttachments = &view;
	fbinf.width = width;
	fbinf.height = height;
	fbinf.layers = 1;

	if(vkCreateFramebuffer(vkdev, &fbinf, 0, &fb) != 0) {
		fprintf(stderr, "vku_create_framebuffer failed\n");
		return 0;
	}
	return fb;
}

void vku_destroy_framebuffer(VkFramebuffer fb)
{
	vkDestroyFramebuffer(vkdev, fb, 0);
}

void vku_present(VkSwapchainKHR sc, int img_idx, VkSemaphore sem_wait)
{
	VkPresentInfoKHR inf;
	VkResult res;
	uint32_t index = img_idx;

	memset(&inf, 0, sizeof inf);
	inf.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
	inf.swapchainCount = 1;
	inf.pSwapchains = &sc;
	inf.pImageIndices = &index;
	inf.pResults = &res;
	if(sem_wait) {
		inf.pWaitSemaphores = &sem_wait;
		inf.waitSemaphoreCount = 1;
	}

	vkQueuePresentKHR(vkq, &inf);
}

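/* Usage sketch (illustrative only): one possible per-frame flow using the helpers
 * above, assuming `sc` is a swapchain and `sem_acq`/`sem_done` were made with
 * vku_create_semaphore(). Error checks and drawing commands are elided.
 *
 *   int idx = vku_get_next_image(sc, sem_acq);          // also selects vkcmdbuf
 *   vku_begin_cmdbuf(vkcmdbuf, 0);
 *   ... record drawing commands into vkcmdbuf ...
 *   vku_end_cmdbuf(vkcmdbuf);
 *   vku_submit_cmdbuf(vkq, vkcmdbuf, sem_acq, sem_done, 0);
 *   vku_present(sc, idx, sem_done);
 */
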
struct vku_buffer *vku_create_buffer(int sz, unsigned int usage)
{
	struct vku_buffer *buf;
	VkBufferCreateInfo binfo;

	if(!(buf = malloc(sizeof *buf))) {
		perror("failed to allocate vk_buffer structure");
		return 0;
	}

	memset(&binfo, 0, sizeof binfo);
	binfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	binfo.size = sz;
	binfo.usage = usage;
	binfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

	if(vkCreateBuffer(vkdev, &binfo, 0, &buf->buf) != 0) {
		fprintf(stderr, "failed to create %d byte buffer (usage: %x)\n", sz, usage);
		free(buf);
		return 0;
	}
	// TODO back with memory
	return buf;
}

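/* Sketch for the "back with memory" TODO above (illustrative only): before the
 * buffer can be used it needs a VkDeviceMemory allocation bound to it. Assuming
 * struct vku_buffer gained a `VkDeviceMemory mem` member (it does not have one
 * here), something along these lines could work:
 *
 *   VkMemoryRequirements mr;
 *   VkMemoryAllocateInfo ainf;
 *   vkGetBufferMemoryRequirements(vkdev, buf->buf, &mr);
 *   memset(&ainf, 0, sizeof ainf);
 *   ainf.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
 *   ainf.allocationSize = mr.size;
 *   ainf.memoryTypeIndex = ...;  // pick a type allowed by mr.memoryTypeBits with
 *                                // the desired VkMemoryPropertyFlags
 *   vkAllocateMemory(vkdev, &ainf, 0, &buf->mem);
 *   vkBindBufferMemory(vkdev, buf->buf, buf->mem, 0);
 */
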
void vku_destroy_buffer(struct vku_buffer *buf)
{
	if(buf) {
		vkDestroyBuffer(vkdev, buf->buf, 0);
		free(buf);
	}
}

void vku_cmd_copybuf(VkCommandBuffer cmdbuf, VkBuffer dest, int doffs,
		VkBuffer src, int soffs, int size)
{
	VkBufferCopy copy;
	copy.size = size;
	copy.srcOffset = soffs;
	copy.dstOffset = doffs;

	vkCmdCopyBuffer(cmdbuf, src, dest, 1, &copy);
}


VkRenderPass vku_create_renderpass(VkFormat cfmt, VkFormat dsfmt)
{
	int count = 1;	/* always assume we have a color attachment for now */
	VkAttachmentDescription at[2];
	VkAttachmentReference colref, dsref;
	VkSubpassDescription subpass;
	VkRenderPass pass;
	VkRenderPassCreateInfo rpinf;

	/* zero the attachment descriptions so the flags fields don't carry garbage */
	memset(at, 0, sizeof at);

	colref.attachment = 0;
	colref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
	dsref.attachment = 1;
	dsref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

	memset(&subpass, 0, sizeof subpass);
	subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
	subpass.colorAttachmentCount = 1;
	subpass.pColorAttachments = &colref;

	at[0].format = cfmt;
	at[0].samples = VK_SAMPLE_COUNT_1_BIT;	/* TODO multisampling */
	at[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
	at[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
	at[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
	at[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
	at[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
	at[0].finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

	if(dsfmt != VK_FORMAT_UNDEFINED) {
		at[1].format = dsfmt;
		at[1].samples = VK_SAMPLE_COUNT_1_BIT;
		at[1].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
		at[1].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
		at[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
		at[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
		at[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
		/* finalLayout is not allowed to be UNDEFINED */
		at[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

		subpass.pDepthStencilAttachment = &dsref;
		count++;
	}

	memset(&rpinf, 0, sizeof rpinf);
	rpinf.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
	rpinf.attachmentCount = count;
	rpinf.pAttachments = at;
	rpinf.subpassCount = 1;
	rpinf.pSubpasses = &subpass;

	if(vkCreateRenderPass(vkdev, &rpinf, 0, &pass) != 0) {
		fprintf(stderr, "vku_create_renderpass: failed to create renderpass\n");
		return 0;
	}

	return pass;
}

void vku_destroy_renderpass(VkRenderPass rpass)
{
	vkDestroyRenderPass(vkdev, rpass, 0);
}

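/* Usage sketch (illustrative only): a color-only render pass plus one image view
 * and framebuffer per swapchain image, using the helpers in this file. The format
 * and window size are placeholders; error checks are elided.
 *
 *   VkRenderPass rpass = vku_create_renderpass(VK_FORMAT_B8G8R8A8_UNORM,
 *           VK_FORMAT_UNDEFINED);  // no depth/stencil attachment
 *   for(i=0; i<nimg; i++) {
 *       views[i] = vku_create_view(images[i], VK_FORMAT_B8G8R8A8_UNORM);
 *       fbs[i] = vku_create_framebuffer(views[i], 800, 600, rpass);
 *   }
 */
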
void vku_begin_renderpass(VkCommandBuffer cmdbuf, VkRenderPass rpass, VkFramebuffer fb,
		VkSubpassContents cont)
{
	VkRenderPassBeginInfo rpinf;

	memset(&rpinf, 0, sizeof rpinf);
	rpinf.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
	rpinf.renderPass = rpass;
	rpinf.framebuffer = fb;
	rpinf.renderArea.offset.x = vkvport.x;
	rpinf.renderArea.offset.y = vkvport.y;
	rpinf.renderArea.extent.width = vkvport.width;
	rpinf.renderArea.extent.height = vkvport.height;

	vkCmdBeginRenderPass(cmdbuf, &rpinf, cont);
}

void vku_end_renderpass(VkCommandBuffer cmdbuf)
{
	vkCmdEndRenderPass(cmdbuf);
}

VkSemaphore vku_create_semaphore(void)
{
	VkSemaphore s;
	VkSemaphoreCreateInfo sinf;

	memset(&sinf, 0, sizeof sinf);
	sinf.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;

	if(vkCreateSemaphore(vkdev, &sinf, 0, &s) != 0) {
		fprintf(stderr, "vku_create_semaphore failed\n");
		return 0;
	}
	return s;
}

void vku_destroy_semaphore(VkSemaphore s)
{
	vkDestroySemaphore(vkdev, s, 0);
}

VkFence vku_create_fence(void)
{
	VkFence f;
	VkFenceCreateInfo finf;

	memset(&finf, 0, sizeof finf);
	finf.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;

	if(vkCreateFence(vkdev, &finf, 0, &f) != 0) {
		fprintf(stderr, "vku_create_fence failed\n");
		return 0;
	}
	return f;
}

void vku_destroy_fence(VkFence f)
{
	vkDestroyFence(vkdev, f, 0);
}

void vku_wait_fence(VkFence f)
{
	vkWaitForFences(vkdev, 1, &f, VK_TRUE, UINT64_MAX);
	vkResetFences(vkdev, 1, &f);
}


#ifdef VK_USE_PLATFORM_XLIB_KHR
int vku_xlib_usable_visual(Display *dpy, VisualID vid)
{
	return vkGetPhysicalDeviceXlibPresentationSupportKHR(phys_devices[sel_dev],
			sel_qfamily, dpy, vid);
}

VkSurfaceKHR vku_xlib_create_surface(Display *dpy, Window win)
{
	VkSurfaceKHR surf;
	VkXlibSurfaceCreateInfoKHR inf;

	memset(&inf, 0, sizeof inf);
	inf.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
	inf.dpy = dpy;
	inf.window = win;

	if(vkCreateXlibSurfaceKHR(vk, &inf, 0, &surf) != 0) {
		return 0;
	}
	return surf;
}

#endif	/* VK_USE_PLATFORM_XLIB_KHR */

static const char *get_device_name(VkPhysicalDeviceType type)
{
	switch(type) {
	case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
		return "integrated GPU";
	case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
		return "discrete GPU";
	case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU:
		return "virtual GPU";
	case VK_PHYSICAL_DEVICE_TYPE_CPU:
		return "CPU";
	default:
		break;
	}
	return "unknown";
}

static const char *get_mem_prop_flag_string(VkMemoryPropertyFlags flags)
{
	static char str[128];

	str[0] = 0;
	if(flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
		strcat(str, "device-local ");
	}
	if(flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
		strcat(str, "host-visible ");
	}
	if(flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
		strcat(str, "host-coherent ");
	}
	if(flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) {
		strcat(str, "host-cached ");
	}
	if(flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) {
		strcat(str, "lazily-allocated ");
	}

	if(!*str) {
		strcat(str, "-");
	}
	return str;
}

static const char *get_queue_flag_string(VkQueueFlagBits flags)
{
	static char str[128];

	str[0] = 0;
	if(flags & VK_QUEUE_GRAPHICS_BIT) {
		strcat(str, "graphics ");
	}
	if(flags & VK_QUEUE_COMPUTE_BIT) {
		strcat(str, "compute ");
	}
	if(flags & VK_QUEUE_TRANSFER_BIT) {
		strcat(str, "transfer ");
	}
	if(flags & VK_QUEUE_SPARSE_BINDING_BIT) {
		strcat(str, "sparse-binding ");
	}
	if(!*str) {
		strcat(str, "-");
	}
	return str;
}

static int ver_major(uint32_t ver)
{
	return (ver >> 22) & 0x3ff;
}

static int ver_minor(uint32_t ver)
{
	return (ver >> 12) & 0x3ff;
}

static int ver_patch(uint32_t ver)
{
	return ver & 0xfff;
}
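
/* The three helpers above unpack the Vulkan version encoding, which packs
 * major/minor/patch as (major << 22) | (minor << 12) | patch. For example,
 * version 1.0.3 is encoded as 0x00400003, so ver_major() returns 1,
 * ver_minor() returns 0 and ver_patch() returns 3. */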

static const char *mem_size_str(long sz)
{
	static char str[64];
	static const char *unitstr[] = { "bytes", "KB", "MB", "GB", "TB", "PB", 0 };
	int uidx = 0;
	sz *= 10;

	while(sz >= 10240 && unitstr[uidx + 1]) {
		sz /= 1024;
		++uidx;
	}
	sprintf(str, "%ld.%ld %s", sz / 10, sz % 10, unitstr[uidx]);
	return str;
}

static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback_thunk(VkDebugReportFlagsEXT flags,
		VkDebugReportObjectTypeEXT otype, uint64_t obj, size_t loc, int32_t code,
		const char *layer_prefix, const char *msg, void *udata)
{
	if(user_dbg_callback) {
		user_dbg_callback(msg, user_dbg_callback_data);
	} else {
		fprintf(stderr, "VK DEBUG (%s): %s\n", layer_prefix, msg);
	}

	return VK_TRUE;
}