vulkan_test2

view src/vku.c @ 16:236f923a00a3

more validation crap
author John Tsiombikas <nuclear@member.fsf.org>
date Tue, 26 Jun 2018 08:42:12 +0300
parents 196122a2b8c2
children f8bd29f124a8
line source
1 #include <stdio.h>
2 #include <stdlib.h>
3 #include <string.h>
4 #include <stdint.h>
5 #include "vku.h"
/* pretty-printing helpers for device / memory / queue information */
static const char *get_device_name(VkPhysicalDeviceType type);
static const char *get_mem_prop_flag_string(VkMemoryPropertyFlags flags);
static const char *get_queue_flag_string(VkQueueFlagBits flags);
/* extract the major/minor/patch fields from a packed vulkan version word */
static int ver_major(uint32_t ver);
static int ver_minor(uint32_t ver);
static int ver_patch(uint32_t ver);
/* human-readable size string (static buffer, overwritten per call) */
static const char *mem_size_str(long sz);

/* thunk registered with the VK_EXT_debug_report extension; forwards
 * messages to the callback set through vku_set_debug_callback */
static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback_thunk(VkDebugReportFlagsEXT flags,
		VkDebugReportObjectTypeEXT otype, uint64_t obj, size_t loc, int32_t code,
		const char *layer_prefix, const char *msg, void *udata);
VkInstance vk;		/* vulkan instance, created by vku_create_dev */
VkDevice vkdev;		/* logical device */
VkQueue vkq;		/* graphics-capable command queue */

static VkPhysicalDevice *phys_devices;	/* enumerated physical devices */
static int sel_dev, sel_qfamily;	/* indices selected by vku_create_dev */

/* lazily-populated caches of instance/device extension properties */
static VkExtensionProperties *vkext, *vkdevext;
static uint32_t vkext_count, vkdevext_count;

static VkDebugReportCallbackEXT debug_callback_obj;
/* fetched with vkGetInstanceProcAddr in vku_create_dev */
static VkResult (*vk_create_debug_report_callback)(VkInstance,
		const VkDebugReportCallbackCreateInfoEXT*, const VkAllocationCallbacks*,
		VkDebugReportCallbackEXT*);
/* user-supplied debug callback and its closure pointer */
static void (*user_dbg_callback)(const char*, void*);
static void *user_dbg_callback_data;
37 int vku_have_extension(const char *name)
38 {
39 int i;
41 if(!vkext) {
42 vkext_count = 0;
43 vkEnumerateInstanceExtensionProperties(0, &vkext_count, 0);
44 if(vkext_count) {
45 if(!(vkext = malloc(vkext_count * sizeof *vkext))) {
46 perror("failed to allocate instance extension list");
47 return 0;
48 }
49 vkEnumerateInstanceExtensionProperties(0, &vkext_count, vkext);
51 printf("instance extensions:\n");
52 for(i=0; i<(int)vkext_count; i++) {
53 printf(" %s (ver: %u)\n", vkext[i].extensionName, (unsigned int)vkext[i].specVersion);
54 }
55 }
56 }
58 for(i=0; i<(int)vkext_count; i++) {
59 if(strcmp(vkext[i].extensionName, name) == 0) {
60 return 1;
61 }
62 }
63 return 0;
64 }
66 int vku_have_device_extension(const char *name)
67 {
68 int i;
70 if(sel_dev < 0) return 0;
72 if(!vkdevext) {
73 vkdevext_count = 0;
74 vkEnumerateDeviceExtensionProperties(phys_devices[sel_dev], 0, &vkdevext_count, 0);
75 if(vkdevext_count) {
76 if(!(vkdevext = malloc(vkdevext_count * sizeof *vkdevext))) {
77 perror("failed to allocate device extension list");
78 return 0;
79 }
80 vkEnumerateDeviceExtensionProperties(phys_devices[sel_dev], 0, &vkdevext_count, vkdevext);
82 printf("selected device extensions:\n");
83 for(i=0; i<(int)vkdevext_count; i++) {
84 printf(" %s (ver: %u)\n", vkdevext[i].extensionName, (unsigned int)vkdevext[i].specVersion);
85 }
86 }
87 }
89 for(i=0; i<(int)vkdevext_count; i++) {
90 if(strcmp(vkdevext[i].extensionName, name) == 0) {
91 return 1;
92 }
93 }
94 return 0;
95 }
97 void vku_set_debug_callback(void (*func)(const char*, void*), void *cls)
98 {
99 if(!debug_callback_obj && vk_create_debug_report_callback) {
100 VkDebugReportCallbackCreateInfoEXT foo;
102 memset(&foo, 0, sizeof foo);
103 foo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
104 foo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
105 foo.pfnCallback = debug_callback_thunk;
107 vk_create_debug_report_callback(vk, &foo, 0, &debug_callback_obj);
108 }
110 user_dbg_callback = func;
111 user_dbg_callback_data = cls;
112 }
115 int vku_create_dev(void)
116 {
117 int i, j;
118 uint32_t nlayers;
119 VkInstanceCreateInfo inst_info;
120 VkLayerProperties *layers;
121 VkDeviceCreateInfo dev_info;
122 VkDeviceQueueCreateInfo queue_info;
123 VkCommandPoolCreateInfo cmdpool_info;
124 uint32_t num_devices;
125 float qprio = 0.0f;
127 static const char *ext_names[] = {
128 #ifdef VK_USE_PLATFORM_XLIB_KHR
129 "VK_KHR_xlib_surface",
130 #endif
131 "VK_KHR_surface",
132 "VK_EXT_debug_report"
133 };
134 static const char *devext_names[] = {
135 "VK_KHR_swapchain"
136 };
137 static const char *layer_names[] = {
138 "VK_LAYER_LUNARG_standard_validation",
139 "VK_LAYER_LUNARG_parameter_validation",
140 "VK_LAYER_LUNARG_core_validation"
141 };
143 sel_dev = -1;
144 sel_qfamily = -1;
146 for(i=0; i<sizeof ext_names / sizeof *ext_names; i++) {
147 if(!vku_have_extension(ext_names[i])) {
148 fprintf(stderr, "required extension (%s) not found\n", ext_names[i]);
149 return -1;
150 }
151 }
153 /* enumerate available validation layers */
154 vkEnumerateInstanceLayerProperties(&nlayers, 0);
155 layers = alloca(nlayers * sizeof *layers);
156 vkEnumerateInstanceLayerProperties(&nlayers, layers);
158 printf("Available validation layers:\n");
159 for(i=0; i<(int)nlayers; i++) {
160 printf(" %s\n", layers[i].layerName);
161 }
163 memset(&inst_info, 0, sizeof inst_info);
164 inst_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
165 inst_info.ppEnabledExtensionNames = ext_names;
166 inst_info.enabledExtensionCount = sizeof ext_names / sizeof *ext_names;
167 inst_info.ppEnabledLayerNames = layer_names;
168 inst_info.enabledLayerCount = sizeof layer_names / sizeof *layer_names;
170 if(vkCreateInstance(&inst_info, 0, &vk) != 0) {
171 fprintf(stderr, "failed to create vulkan instance\n");
172 return -1;
173 }
174 printf("created vulkan instance\n");
176 if(!(vk_create_debug_report_callback = (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(vk, "vkCreateDebugReportCallbackEXT"))) {
177 fprintf(stderr, "FUCK EVERYTHING\n");
178 return -1;
179 }
180 vku_set_debug_callback(user_dbg_callback, user_dbg_callback_data); /* set debug callback */
183 if(vkEnumeratePhysicalDevices(vk, &num_devices, 0) != 0) {
184 fprintf(stderr, "failed to enumerate vulkan physical devices\n");
185 return -1;
186 }
187 phys_devices = malloc(num_devices * sizeof *phys_devices);
188 if(vkEnumeratePhysicalDevices(vk, &num_devices, phys_devices) != 0) {
189 fprintf(stderr, "failed to enumerate vulkan physical devices\n");
190 return -1;
191 }
192 printf("found %u physical device(s)\n", (unsigned int)num_devices);
194 for(i=0; i<(int)num_devices; i++) {
195 VkPhysicalDeviceProperties dev_prop;
196 VkPhysicalDeviceMemoryProperties mem_prop;
197 VkQueueFamilyProperties *qprop;
198 uint32_t qprop_count;
200 vkGetPhysicalDeviceProperties(phys_devices[i], &dev_prop);
202 printf("Device %d: %s\n", i, dev_prop.deviceName);
203 printf(" type: %s\n", get_device_name(dev_prop.deviceType));
204 printf(" API version: %d.%d.%d\n", ver_major(dev_prop.apiVersion), ver_minor(dev_prop.apiVersion),
205 ver_patch(dev_prop.apiVersion));
206 printf(" driver version: %d.%d.%d\n", ver_major(dev_prop.driverVersion), ver_minor(dev_prop.driverVersion),
207 ver_patch(dev_prop.driverVersion));
208 printf(" vendor id: %x device id: %x\n", dev_prop.vendorID, dev_prop.deviceID);
211 vkGetPhysicalDeviceMemoryProperties(phys_devices[i], &mem_prop);
212 printf(" %d memory heaps:\n", mem_prop.memoryHeapCount);
213 for(j=0; j<mem_prop.memoryHeapCount; j++) {
214 VkMemoryHeap heap = mem_prop.memoryHeaps[j];
215 printf(" Heap %d - size: %s, flags: %s\n", j, mem_size_str(heap.size),
216 heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT ? "device-local" : "-");
217 }
218 printf(" %d memory types:\n", mem_prop.memoryTypeCount);
219 for(j=0; j<mem_prop.memoryTypeCount; j++) {
220 VkMemoryType type = mem_prop.memoryTypes[j];
221 printf(" Type %d - heap: %d, flags: %s\n", j, type.heapIndex,
222 get_mem_prop_flag_string(type.propertyFlags));
223 }
225 vkGetPhysicalDeviceQueueFamilyProperties(phys_devices[i], &qprop_count, 0);
226 if(qprop_count <= 0) {
227 continue;
228 }
229 qprop = malloc(qprop_count * sizeof *qprop);
230 vkGetPhysicalDeviceQueueFamilyProperties(phys_devices[i], &qprop_count, qprop);
232 for(j=0; j<qprop_count; j++) {
233 printf(" Queue family %d:\n", j);
234 printf(" flags: %s\n", get_queue_flag_string(qprop[j].queueFlags));
235 printf(" num queues: %u\n", qprop[j].queueCount);
237 if(qprop[j].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
238 sel_dev = i;
239 sel_qfamily = j;
240 }
241 }
242 free(qprop);
243 }
245 if(sel_dev < 0 || sel_qfamily < 0) {
246 fprintf(stderr, "failed to find any device with a graphics-capable command queue\n");
247 vkDestroyDevice(vkdev, 0);
248 return -1;
249 }
251 for(i=0; i<sizeof devext_names / sizeof *devext_names; i++) {
252 if(!vku_have_device_extension(devext_names[i])) {
253 fprintf(stderr, "required extension (%s) not found on the selected device (%d)\n",
254 ext_names[i], sel_dev);
255 return -1;
256 }
257 }
259 /* create device & command queue */
260 memset(&queue_info, 0, sizeof queue_info);
261 queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
262 queue_info.queueFamilyIndex = sel_qfamily;
263 queue_info.queueCount = 1;
264 queue_info.pQueuePriorities = &qprio;
266 memset(&dev_info, 0, sizeof dev_info);
267 dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
268 dev_info.queueCreateInfoCount = 1;
269 dev_info.pQueueCreateInfos = &queue_info;
270 dev_info.enabledExtensionCount = sizeof devext_names / sizeof *devext_names;
271 dev_info.ppEnabledExtensionNames = devext_names;
273 if(vkCreateDevice(phys_devices[sel_dev], &dev_info, 0, &vkdev) != 0) {
274 fprintf(stderr, "failed to create device %d\n", sel_dev);
275 return -1;
276 }
277 printf("created device %d\n", sel_dev);
279 vkGetDeviceQueue(vkdev, sel_qfamily, 0, &vkq);
281 /* create command buffer pool */
282 memset(&cmdpool_info, 0, sizeof cmdpool_info);
283 cmdpool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
284 cmdpool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
285 cmdpool_info.queueFamilyIndex = sel_qfamily;
287 if(vkCreateCommandPool(vkdev, &cmdpool_info, 0, &vkcmdpool) != 0) {
288 fprintf(stderr, "failed to get command quque!\n");
289 return -1;
290 }
292 if(!(vkcmdbuf = vku_alloc_cmdbuf(vkcmdpool, VK_COMMAND_BUFFER_LEVEL_PRIMARY))) {
293 fprintf(stderr, "failed to create primary command buffer\n");
294 return -1;
295 }
297 return 0;
298 }
300 void vku_cleanup(void)
301 {
302 if(vk) {
303 vkDeviceWaitIdle(vkdev);
304 vkDestroyCommandPool(vkdev, vkcmdpool, 0);
305 vkDestroyDevice(vkdev, 0);
306 vkDestroyInstance(vk, 0);
307 vk = 0;
308 }
310 free(phys_devices);
311 phys_devices = 0;
312 }
314 VkCommandBuffer vku_alloc_cmdbuf(VkCommandPool pool, VkCommandBufferLevel level)
315 {
316 VkCommandBuffer cmdbuf;
317 VkCommandBufferAllocateInfo inf;
319 memset(&inf, 0, sizeof inf);
320 inf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
321 inf.commandPool = pool;
322 inf.level = level;
323 inf.commandBufferCount = 1;
325 if(vkAllocateCommandBuffers(vkdev, &inf, &cmdbuf) != 0) {
326 return 0;
327 }
328 return cmdbuf;
329 }
331 void vku_free_cmdbuf(VkCommandPool pool, VkCommandBuffer buf)
332 {
333 vkFreeCommandBuffers(vkdev, pool, 1, &buf);
334 }
336 void vku_begin_cmdbuf(VkCommandBuffer buf, unsigned int flags)
337 {
338 VkCommandBufferBeginInfo inf;
340 memset(&inf, 0, sizeof inf);
341 inf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
342 inf.flags = flags;
344 vkBeginCommandBuffer(buf, &inf);
345 }
348 void vku_end_cmdbuf(VkCommandBuffer buf)
349 {
350 vkEndCommandBuffer(buf);
351 }
353 void vku_reset_cmdbuf(VkCommandBuffer buf)
354 {
355 vkResetCommandBuffer(buf, 0);
356 }
358 void vku_submit_cmdbuf(VkQueue q, VkCommandBuffer buf, VkFence done_fence)
359 {
360 VkSubmitInfo info;
362 memset(&info, 0, sizeof info);
363 info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
364 info.commandBufferCount = 1;
365 info.pCommandBuffers = &buf;
367 vkQueueSubmit(q, 1, &info, done_fence);
368 }
370 VkSwapchainKHR vku_create_swapchain(VkSurfaceKHR surf, int xsz, int ysz, int n,
371 VkFormat fmt, VkPresentModeKHR pmode, VkSwapchainKHR prev)
372 {
373 VkSwapchainKHR sc;
374 VkSwapchainCreateInfoKHR inf;
376 memset(&inf, 0, sizeof inf);
377 inf.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
378 inf.surface = surf;
379 inf.minImageCount = n;
380 inf.imageFormat = fmt;
381 inf.imageColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
382 inf.imageExtent.width = xsz;
383 inf.imageExtent.height = ysz;
384 inf.imageArrayLayers = 1;
385 inf.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
386 inf.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; /* XXX make this an option? */
387 inf.preTransform = VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR;
388 inf.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
389 inf.presentMode = pmode;
390 inf.oldSwapchain = prev;
392 if(vkCreateSwapchainKHR(vkdev, &inf, 0, &sc) != 0) {
393 return 0;
394 }
395 return sc;
396 }
398 VkImage *vku_get_swapchain_images(VkSwapchainKHR sc, int *count)
399 {
400 uint32_t nimg;
401 VkImage *images;
403 if(vkGetSwapchainImagesKHR(vkdev, sc, &nimg, 0) != 0) {
404 return 0;
405 }
406 if(!(images = malloc(nimg * sizeof *images))) {
407 return 0;
408 }
409 vkGetSwapchainImagesKHR(vkdev, sc, &nimg, images);
411 if(count) *count = (int)nimg;
412 return images;
413 }
/* Acquire the next presentable image from swapchain `sc`.
 * Returns the image index, or -1 on failure.
 * NOTE(review): both the semaphore and the fence arguments are 0 here;
 * the spec requires at least one valid synchronization handle — confirm
 * how callers synchronize with the acquired image.
 */
int vku_get_next_image(VkSwapchainKHR sc)
{
	uint32_t next;

	if(vkAcquireNextImageKHR(vkdev, sc, UINT64_MAX, 0, 0, &next) != 0) {
		return -1;
	}
	return (int)next;
}
425 VkImageView vku_create_view(VkImage img, VkFormat fmt)
426 {
427 VkImageView view;
428 VkImageViewCreateInfo iv;
430 memset(&iv, 0, sizeof iv);
431 iv.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
432 iv.image = img;
433 iv.viewType = VK_IMAGE_VIEW_TYPE_2D;
434 iv.format = fmt;
435 iv.components.r = iv.components.g = iv.components.b = iv.components.a =
436 VK_COMPONENT_SWIZZLE_IDENTITY;
437 iv.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
438 iv.subresourceRange.baseMipLevel = 0;
439 iv.subresourceRange.levelCount = 1;
440 iv.subresourceRange.baseArrayLayer = 0;
441 iv.subresourceRange.layerCount = 1;
443 if(vkCreateImageView(vkdev, &iv, 0, &view) != 0) {
444 fprintf(stderr, "vku_create_view failed\n");
445 return 0;
446 }
447 return view;
448 }
450 void vku_destroy_view(VkImageView view)
451 {
452 vkDestroyImageView(vkdev, view, 0);
453 }
455 VkFramebuffer vku_create_framebuffer(VkImageView view, int width, int height, VkRenderPass rpass)
456 {
457 VkFramebuffer fb;
458 VkFramebufferCreateInfo fbinf;
460 memset(&fbinf, 0, sizeof fbinf);
461 fbinf.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
462 fbinf.renderPass = rpass;
463 fbinf.attachmentCount = 1;
464 fbinf.pAttachments = &view;
465 fbinf.width = width;
466 fbinf.height = height;
467 fbinf.layers = 1;
469 if(vkCreateFramebuffer(vkdev, &fbinf, 0, &fb) != 0) {
470 fprintf(stderr, "vku_create_framebuffer failed\n");
471 return 0;
472 }
473 return fb;
474 }
476 void vku_destroy_framebuffer(VkFramebuffer fb)
477 {
478 vkDestroyFramebuffer(vkdev, fb, 0);
479 }
481 void vku_present(VkSwapchainKHR sc, int img_idx)
482 {
483 VkPresentInfoKHR inf;
484 VkResult res;
485 uint32_t index = img_idx;
487 memset(&inf, 0, sizeof inf);
488 inf.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
489 inf.swapchainCount = 1;
490 inf.pSwapchains = &sc;
491 inf.pImageIndices = &index;
492 inf.pResults = &res;
494 vkQueuePresentKHR(vkq, &inf);
495 }
497 struct vku_buffer *vku_create_buffer(int sz, unsigned int usage)
498 {
499 struct vku_buffer *buf;
500 VkBufferCreateInfo binfo;
502 if(!(buf = malloc(sizeof *buf))) {
503 perror("failed to allocate vk_buffer structure");
504 return 0;
505 }
507 memset(&binfo, 0, sizeof binfo);
508 binfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
509 binfo.size = sz;
510 binfo.usage = usage;
511 binfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
513 if(vkCreateBuffer(vkdev, &binfo, 0, &buf->buf) != 0) {
514 fprintf(stderr, "failed to create %d byte buffer (usage: %x)\n", sz, usage);
515 return 0;
516 }
517 // TODO back with memory
518 return buf;
519 }
521 void vku_destroy_buffer(struct vku_buffer *buf)
522 {
523 if(buf) {
524 vkDestroyBuffer(vkdev, buf->buf, 0);
525 free(buf);
526 }
527 }
529 void vku_cmd_copybuf(VkCommandBuffer cmdbuf, VkBuffer dest, int doffs,
530 VkBuffer src, int soffs, int size)
531 {
532 VkBufferCopy copy;
533 copy.size = size;
534 copy.srcOffset = soffs;
535 copy.dstOffset = doffs;
537 vkCmdCopyBuffer(cmdbuf, src, dest, 1, &copy);
538 }
541 VkRenderPass vku_create_renderpass(VkFormat cfmt, VkFormat dsfmt)
542 {
543 int count = 1; /* always assume we have a color attachment for now */
544 VkAttachmentDescription at[2];
545 VkAttachmentReference colref, dsref;
546 VkSubpassDescription subpass;
547 VkRenderPass pass;
548 VkRenderPassCreateInfo rpinf;
550 colref.attachment = 0;
551 colref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
552 dsref.attachment = 1;
553 dsref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
555 memset(&subpass, 0, sizeof subpass);
556 subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
557 subpass.colorAttachmentCount = 1;
558 subpass.pColorAttachments = &colref;
560 at[0].format = cfmt;
561 at[0].samples = VK_SAMPLE_COUNT_1_BIT; /* TODO multisampling */
562 at[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
563 at[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
564 at[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
565 at[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
566 at[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
567 at[0].finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
569 if(dsfmt != VK_FORMAT_UNDEFINED) {
570 at[1].format = dsfmt;
571 at[1].samples = VK_SAMPLE_COUNT_1_BIT;
572 at[1].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
573 at[1].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
574 at[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
575 at[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
576 at[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
577 at[1].finalLayout = VK_IMAGE_LAYOUT_UNDEFINED;
579 subpass.pDepthStencilAttachment = &dsref;
580 count++;
581 }
583 memset(&rpinf, 0, sizeof rpinf);
584 rpinf.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
585 rpinf.attachmentCount = count;
586 rpinf.pAttachments = at;
587 rpinf.subpassCount = 1;
588 rpinf.pSubpasses = &subpass;
590 if(vkCreateRenderPass(vkdev, &rpinf, 0, &pass) != 0) {
591 fprintf(stderr, "vku_create_renderpass: failed to create renderpass\n");
592 return 0;
593 }
595 return pass;
596 }
598 void vku_destroy_renderpass(VkRenderPass rpass)
599 {
600 vkDestroyRenderPass(vkdev, rpass, 0);
601 }
603 void vku_begin_renderpass(VkCommandBuffer cmdbuf, VkRenderPass rpass, VkFramebuffer fb,
604 VkSubpassContents cont)
605 {
606 VkRenderPassBeginInfo rpinf;
608 memset(&rpinf, 0, sizeof rpinf);
609 rpinf.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
610 rpinf.renderPass = rpass;
611 rpinf.framebuffer = fb;
612 rpinf.renderArea.offset.x = vkvport.x;
613 rpinf.renderArea.offset.y = vkvport.y;
614 rpinf.renderArea.extent.width = vkvport.width;
615 rpinf.renderArea.extent.height = vkvport.height;
617 vkCmdBeginRenderPass(cmdbuf, &rpinf, cont);
618 }
620 void vku_end_renderpass(VkCommandBuffer cmdbuf)
621 {
622 vkCmdEndRenderPass(cmdbuf);
623 }
625 #ifdef VK_USE_PLATFORM_XLIB_KHR
/* Return non-zero if the selected device/queue family can present to X11
 * windows created with the given visual. Must be called after
 * vku_create_dev (uses phys_devices[sel_dev]). */
int vku_xlib_usable_visual(Display *dpy, VisualID vid)
{
	return vkGetPhysicalDeviceXlibPresentationSupportKHR(phys_devices[sel_dev],
			sel_qfamily, dpy, vid);
}
632 VkSurfaceKHR vku_xlib_create_surface(Display *dpy, Window win)
633 {
634 VkSurfaceKHR surf;
635 VkXlibSurfaceCreateInfoKHR inf;
637 memset(&inf, 0, sizeof inf);
638 inf.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
639 inf.dpy = dpy;
640 inf.window = win;
642 if(vkCreateXlibSurfaceKHR(vk, &inf, 0, &surf) != 0) {
643 return 0;
644 }
645 return surf;
646 }
648 #endif /* VK_USE_PLATFORM_XLIB_KHR */
650 static const char *get_device_name(VkPhysicalDeviceType type)
651 {
652 switch(type) {
653 case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
654 return "integrated GPU";
655 case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
656 return "discrete GPU";
657 case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU:
658 return "virtual GPU";
659 case VK_PHYSICAL_DEVICE_TYPE_CPU:
660 return "CPU";
661 default:
662 break;
663 }
664 return "unknown";
665 }
667 static const char *get_mem_prop_flag_string(VkMemoryPropertyFlags flags)
668 {
669 static char str[128];
671 str[0] = 0;
672 if(flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
673 strcat(str, "device-local ");
674 }
675 if(flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
676 strcat(str, "host-visible ");
677 }
678 if(flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
679 strcat(str, "host-coherent ");
680 }
681 if(flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) {
682 strcat(str, "host-cached ");
683 }
684 if(flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) {
685 strcat(str, "lazily-allocated ");
686 }
688 if(!*str) {
689 strcat(str, "-");
690 }
691 return str;
692 }
694 static const char *get_queue_flag_string(VkQueueFlagBits flags)
695 {
696 static char str[128];
698 str[0] = 0;
699 if(flags & VK_QUEUE_GRAPHICS_BIT) {
700 strcat(str, "graphics ");
701 }
702 if(flags & VK_QUEUE_COMPUTE_BIT) {
703 strcat(str, "compute ");
704 }
705 if(flags & VK_QUEUE_TRANSFER_BIT) {
706 strcat(str, "transfer ");
707 }
708 if(flags & VK_QUEUE_SPARSE_BINDING_BIT) {
709 strcat(str, "sparse-binding ");
710 }
711 if(!*str) {
712 strcat(str, "-");
713 }
714 return str;
715 }
/* major field of a packed VK_MAKE_VERSION value: bits 22..31 */
static int ver_major(uint32_t ver)
{
	return (int)((ver >> 22) & 0x3ff);
}
/* minor field of a packed VK_MAKE_VERSION value: bits 12..21 */
static int ver_minor(uint32_t ver)
{
	return (int)((ver >> 12) & 0x3ff);
}
/* patch field of a packed VK_MAKE_VERSION value: bits 0..11 */
static int ver_patch(uint32_t ver)
{
	return (int)(ver & 0xfff);
}
/* Format a (non-negative) memory size as a human-readable string with one
 * decimal digit, e.g. "1.5 KB". Returns a static buffer overwritten on
 * each call.
 */
static const char *mem_size_str(long sz)
{
	static char str[64];
	static const char *unitstr[] = { "bytes", "KB", "MB", "GB", "TB", "PB", 0 };
	int uidx = 0;
	/* bugfix: scale in tenths using an unsigned 64-bit accumulator; the
	 * old `sz *= 10` could overflow a signed long (undefined behavior)
	 * for large heap sizes */
	unsigned long long val = (unsigned long long)sz * 10;

	while(val >= 10240 && unitstr[uidx + 1]) {
		val /= 1024;
		++uidx;
	}
	sprintf(str, "%llu.%llu %s", val / 10, val % 10, unitstr[uidx]);
	return str;
}
747 static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback_thunk(VkDebugReportFlagsEXT flags,
748 VkDebugReportObjectTypeEXT otype, uint64_t obj, size_t loc, int32_t code,
749 const char *layer_prefix, const char *msg, void *udata)
750 {
751 if(user_dbg_callback) {
752 user_dbg_callback(msg, user_dbg_callback_data);
753 } else {
754 fprintf(stderr, "VK DEBUG (%s): %s\n", layer_prefix, msg);
755 }
757 return VK_TRUE;
758 }