vulkan_test2

view src/vku.c @ 15:196122a2b8c2

author John Tsiombikas <nuclear@member.fsf.org>
date Tue, 26 Jun 2018 08:39:30 +0300
parents 9fb6c24691ea
children 236f923a00a3
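
/*
 * vku.c - small Vulkan utility layer for the vulkan_test2 demo: instance and
 * device setup, swapchain handling, command buffers, render passes, and
 * assorted debug/print helpers. The globals vkcmdpool, vkcmdbuf and vkvport
 * are used here but defined elsewhere (presumably declared in vku.h).
 */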
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <alloca.h>	/* alloca() is used for the validation layer list below */
#include "vku.h"

static const char *get_device_name(VkPhysicalDeviceType type);
static const char *get_mem_prop_flag_string(VkMemoryPropertyFlags flags);
static const char *get_queue_flag_string(VkQueueFlagBits flags);
static int ver_major(uint32_t ver);
static int ver_minor(uint32_t ver);
static int ver_patch(uint32_t ver);
static const char *mem_size_str(long sz);

static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback_thunk(VkDebugReportFlagsEXT flags,
        VkDebugReportObjectTypeEXT otype, uint64_t obj, size_t loc, int32_t code,
        const char *layer_prefix, const char *msg, void *udata);

VkInstance vk;
VkDevice vkdev;
VkQueue vkq;

static VkPhysicalDevice *phys_devices;
static int sel_dev, sel_qfamily;

static VkExtensionProperties *vkext, *vkdevext;
static uint32_t vkext_count, vkdevext_count;

static VkDebugReportCallbackEXT debug_callback_obj;
static PFN_vkCreateDebugReportCallbackEXT vk_create_debug_report_callback;
static void (*user_dbg_callback)(const char*, void*);
static void *user_dbg_callback_data;
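
/* Lazily enumerates (and prints) the instance extension list on first call,
 * then reports whether the named extension is available. */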
int vku_have_extension(const char *name)
{
    int i;

    if(!vkext) {
        vkext_count = 0;
        vkEnumerateInstanceExtensionProperties(0, &vkext_count, 0);
        if(vkext_count) {
            if(!(vkext = malloc(vkext_count * sizeof *vkext))) {
                perror("failed to allocate instance extension list");
                return 0;
            }
            vkEnumerateInstanceExtensionProperties(0, &vkext_count, vkext);

            printf("instance extensions:\n");
            for(i=0; i<(int)vkext_count; i++) {
                printf(" %s (ver: %u)\n", vkext[i].extensionName, (unsigned int)vkext[i].specVersion);
            }
        }
    }

    for(i=0; i<(int)vkext_count; i++) {
        if(strcmp(vkext[i].extensionName, name) == 0) {
            return 1;
        }
    }
    return 0;
}
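
/* Like vku_have_extension, but checks the device extensions of the currently
 * selected physical device; returns 0 if no device has been selected yet. */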
int vku_have_device_extension(const char *name)
{
    int i;

    if(sel_dev < 0) return 0;

    if(!vkdevext) {
        vkdevext_count = 0;
        vkEnumerateDeviceExtensionProperties(phys_devices[sel_dev], 0, &vkdevext_count, 0);
        if(vkdevext_count) {
            if(!(vkdevext = malloc(vkdevext_count * sizeof *vkdevext))) {
                perror("failed to allocate device extension list");
                return 0;
            }
            vkEnumerateDeviceExtensionProperties(phys_devices[sel_dev], 0, &vkdevext_count, vkdevext);

            printf("selected device extensions:\n");
            for(i=0; i<(int)vkdevext_count; i++) {
                printf(" %s (ver: %u)\n", vkdevext[i].extensionName, (unsigned int)vkdevext[i].specVersion);
            }
        }
    }

    for(i=0; i<(int)vkdevext_count; i++) {
        if(strcmp(vkdevext[i].extensionName, name) == 0) {
            return 1;
        }
    }
    return 0;
}
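
/* Registers func as the recipient of debug report messages (cls is passed
 * back to it). The debug report callback object itself is created on first
 * use, once the VK_EXT_debug_report entry point has been resolved, and only
 * listens for errors and warnings. */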
void vku_set_debug_callback(void (*func)(const char*, void*), void *cls)
{
    if(!debug_callback_obj && vk_create_debug_report_callback) {
        VkDebugReportCallbackCreateInfoEXT cbinf;

        memset(&cbinf, 0, sizeof cbinf);
        cbinf.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
        cbinf.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
        cbinf.pfnCallback = debug_callback_thunk;

        vk_create_debug_report_callback(vk, &cbinf, 0, &debug_callback_obj);
    }

    user_dbg_callback = func;
    user_dbg_callback_data = cls;
}
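
/* One-stop initialization: creates the Vulkan instance with the validation
 * layer and the required extensions, picks a physical device exposing a
 * graphics-capable queue family, creates the logical device, grabs a queue,
 * and sets up the global command pool and primary command buffer.
 * Returns 0 on success, -1 on failure. */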
int vku_create_dev(void)
{
    int i, j;
    uint32_t nlayers;
    VkInstanceCreateInfo inst_info;
    VkLayerProperties *layers;
    VkDeviceCreateInfo dev_info;
    VkDeviceQueueCreateInfo queue_info;
    VkCommandPoolCreateInfo cmdpool_info;
    uint32_t num_devices;
    float qprio = 0.0f;

    static const char *ext_names[] = {
#ifdef VK_USE_PLATFORM_XLIB_KHR
        "VK_KHR_xlib_surface",
#endif
        "VK_KHR_surface",
        "VK_EXT_debug_report"
    };
    static const char *devext_names[] = {
        "VK_KHR_swapchain"
    };
    static const char *layer_names[] = {
        "VK_LAYER_LUNARG_standard_validation"
    };

    sel_dev = -1;
    sel_qfamily = -1;

    for(i=0; i<sizeof ext_names / sizeof *ext_names; i++) {
        if(!vku_have_extension(ext_names[i])) {
            fprintf(stderr, "required extension (%s) not found\n", ext_names[i]);
            return -1;
        }
    }

    /* enumerate available validation layers */
    vkEnumerateInstanceLayerProperties(&nlayers, 0);
    layers = alloca(nlayers * sizeof *layers);
    vkEnumerateInstanceLayerProperties(&nlayers, layers);

    printf("Available validation layers:\n");
    for(i=0; i<(int)nlayers; i++) {
        printf(" %s\n", layers[i].layerName);
    }

    memset(&inst_info, 0, sizeof inst_info);
    inst_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
    inst_info.ppEnabledExtensionNames = ext_names;
    inst_info.enabledExtensionCount = sizeof ext_names / sizeof *ext_names;
    inst_info.ppEnabledLayerNames = layer_names;
    inst_info.enabledLayerCount = sizeof layer_names / sizeof *layer_names;

    if(vkCreateInstance(&inst_info, 0, &vk) != 0) {
        fprintf(stderr, "failed to create vulkan instance\n");
        return -1;
    }
    printf("created vulkan instance\n");

    if(!(vk_create_debug_report_callback = (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(vk, "vkCreateDebugReportCallbackEXT"))) {
        fprintf(stderr, "failed to retrieve the vkCreateDebugReportCallbackEXT entry point\n");
        return -1;
    }
    vku_set_debug_callback(user_dbg_callback, user_dbg_callback_data);	/* create the debug report callback now that we have an instance */

    if(vkEnumeratePhysicalDevices(vk, &num_devices, 0) != 0) {
        fprintf(stderr, "failed to enumerate vulkan physical devices\n");
        return -1;
    }
    phys_devices = malloc(num_devices * sizeof *phys_devices);
    if(vkEnumeratePhysicalDevices(vk, &num_devices, phys_devices) != 0) {
        fprintf(stderr, "failed to enumerate vulkan physical devices\n");
        return -1;
    }
    printf("found %u physical device(s)\n", (unsigned int)num_devices);

    for(i=0; i<(int)num_devices; i++) {
        VkPhysicalDeviceProperties dev_prop;
        VkPhysicalDeviceMemoryProperties mem_prop;
        VkQueueFamilyProperties *qprop;
        uint32_t qprop_count;

        vkGetPhysicalDeviceProperties(phys_devices[i], &dev_prop);

        printf("Device %d: %s\n", i, dev_prop.deviceName);
        printf(" type: %s\n", get_device_name(dev_prop.deviceType));
        printf(" API version: %d.%d.%d\n", ver_major(dev_prop.apiVersion), ver_minor(dev_prop.apiVersion),
                ver_patch(dev_prop.apiVersion));
        printf(" driver version: %d.%d.%d\n", ver_major(dev_prop.driverVersion), ver_minor(dev_prop.driverVersion),
                ver_patch(dev_prop.driverVersion));
        printf(" vendor id: %x device id: %x\n", dev_prop.vendorID, dev_prop.deviceID);

        vkGetPhysicalDeviceMemoryProperties(phys_devices[i], &mem_prop);
        printf(" %d memory heaps:\n", mem_prop.memoryHeapCount);
        for(j=0; j<mem_prop.memoryHeapCount; j++) {
            VkMemoryHeap heap = mem_prop.memoryHeaps[j];
            printf(" Heap %d - size: %s, flags: %s\n", j, mem_size_str(heap.size),
                    heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT ? "device-local" : "-");
        }
        printf(" %d memory types:\n", mem_prop.memoryTypeCount);
        for(j=0; j<mem_prop.memoryTypeCount; j++) {
            VkMemoryType type = mem_prop.memoryTypes[j];
            printf(" Type %d - heap: %d, flags: %s\n", j, type.heapIndex,
                    get_mem_prop_flag_string(type.propertyFlags));
        }

        vkGetPhysicalDeviceQueueFamilyProperties(phys_devices[i], &qprop_count, 0);
        if(qprop_count <= 0) {
            continue;
        }
        qprop = malloc(qprop_count * sizeof *qprop);
        vkGetPhysicalDeviceQueueFamilyProperties(phys_devices[i], &qprop_count, qprop);

        for(j=0; j<qprop_count; j++) {
            printf(" Queue family %d:\n", j);
            printf(" flags: %s\n", get_queue_flag_string(qprop[j].queueFlags));
            printf(" num queues: %u\n", qprop[j].queueCount);

            if(qprop[j].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                sel_dev = i;
                sel_qfamily = j;
            }
        }
        free(qprop);
    }

    if(sel_dev < 0 || sel_qfamily < 0) {
        fprintf(stderr, "failed to find any device with a graphics-capable command queue\n");
        return -1;
    }

    for(i=0; i<sizeof devext_names / sizeof *devext_names; i++) {
        if(!vku_have_device_extension(devext_names[i])) {
            fprintf(stderr, "required device extension (%s) not found on the selected device (%d)\n",
                    devext_names[i], sel_dev);
            return -1;
        }
    }

    /* create device & command queue */
    memset(&queue_info, 0, sizeof queue_info);
    queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queue_info.queueFamilyIndex = sel_qfamily;
    queue_info.queueCount = 1;
    queue_info.pQueuePriorities = &qprio;

    memset(&dev_info, 0, sizeof dev_info);
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.queueCreateInfoCount = 1;
    dev_info.pQueueCreateInfos = &queue_info;
    dev_info.enabledExtensionCount = sizeof devext_names / sizeof *devext_names;
    dev_info.ppEnabledExtensionNames = devext_names;

    if(vkCreateDevice(phys_devices[sel_dev], &dev_info, 0, &vkdev) != 0) {
        fprintf(stderr, "failed to create device %d\n", sel_dev);
        return -1;
    }
    printf("created device %d\n", sel_dev);

    vkGetDeviceQueue(vkdev, sel_qfamily, 0, &vkq);

    /* create command buffer pool */
    memset(&cmdpool_info, 0, sizeof cmdpool_info);
    cmdpool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    cmdpool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
    cmdpool_info.queueFamilyIndex = sel_qfamily;

    if(vkCreateCommandPool(vkdev, &cmdpool_info, 0, &vkcmdpool) != 0) {
        fprintf(stderr, "failed to create the command pool\n");
        return -1;
    }

    if(!(vkcmdbuf = vku_alloc_cmdbuf(vkcmdpool, VK_COMMAND_BUFFER_LEVEL_PRIMARY))) {
        fprintf(stderr, "failed to create primary command buffer\n");
        return -1;
    }

    return 0;
}
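
/* Tears down everything created by vku_create_dev after waiting for the
 * device to go idle. Safe to call even if initialization never happened. */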
void vku_cleanup(void)
{
    if(vk) {
        vkDeviceWaitIdle(vkdev);
        vkDestroyCommandPool(vkdev, vkcmdpool, 0);
        vkDestroyDevice(vkdev, 0);
        vkDestroyInstance(vk, 0);
        vk = 0;
    }

    free(phys_devices);
    phys_devices = 0;
}
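
/* Thin wrappers around command buffer allocation, recording, resetting and
 * submission. vku_alloc_cmdbuf returns 0 on failure. */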
VkCommandBuffer vku_alloc_cmdbuf(VkCommandPool pool, VkCommandBufferLevel level)
{
    VkCommandBuffer cmdbuf;
    VkCommandBufferAllocateInfo inf;

    memset(&inf, 0, sizeof inf);
    inf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    inf.commandPool = pool;
    inf.level = level;
    inf.commandBufferCount = 1;

    if(vkAllocateCommandBuffers(vkdev, &inf, &cmdbuf) != 0) {
        return 0;
    }
    return cmdbuf;
}

void vku_free_cmdbuf(VkCommandPool pool, VkCommandBuffer buf)
{
    vkFreeCommandBuffers(vkdev, pool, 1, &buf);
}

void vku_begin_cmdbuf(VkCommandBuffer buf, unsigned int flags)
{
    VkCommandBufferBeginInfo inf;

    memset(&inf, 0, sizeof inf);
    inf.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    inf.flags = flags;

    vkBeginCommandBuffer(buf, &inf);
}

void vku_end_cmdbuf(VkCommandBuffer buf)
{
    vkEndCommandBuffer(buf);
}

void vku_reset_cmdbuf(VkCommandBuffer buf)
{
    vkResetCommandBuffer(buf, 0);
}
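
/* Submits a single command buffer to queue q. done_fence may be
 * VK_NULL_HANDLE if the caller does not need to know when execution
 * finished. */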
void vku_submit_cmdbuf(VkQueue q, VkCommandBuffer buf, VkFence done_fence)
{
    VkSubmitInfo info;

    memset(&info, 0, sizeof info);
    info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    info.commandBufferCount = 1;
    info.pCommandBuffers = &buf;

    vkQueueSubmit(q, 1, &info, done_fence);
}
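
/* Creates a swapchain of n images of the given format and size for surf.
 * Pass the previous swapchain in prev when recreating (e.g. after a resize),
 * or 0 otherwise. Returns 0 on failure.
 *
 * Typical usage (a sketch; assumes a surface from vku_xlib_create_surface and
 * a format/present mode the device actually supports):
 *
 *   VkSwapchainKHR sc = vku_create_swapchain(surf, 800, 600, 2,
 *           VK_FORMAT_B8G8R8A8_UNORM, VK_PRESENT_MODE_FIFO_KHR, 0);
 *   int nimg;
 *   VkImage *imgs = vku_get_swapchain_images(sc, &nimg);
 */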
VkSwapchainKHR vku_create_swapchain(VkSurfaceKHR surf, int xsz, int ysz, int n,
        VkFormat fmt, VkPresentModeKHR pmode, VkSwapchainKHR prev)
{
    VkSwapchainKHR sc;
    VkSwapchainCreateInfoKHR inf;

    memset(&inf, 0, sizeof inf);
    inf.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    inf.surface = surf;
    inf.minImageCount = n;
    inf.imageFormat = fmt;
    inf.imageColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
    inf.imageExtent.width = xsz;
    inf.imageExtent.height = ysz;
    inf.imageArrayLayers = 1;
    inf.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    inf.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;	/* XXX make this an option? */
    inf.preTransform = VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR;
    inf.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
    inf.presentMode = pmode;
    inf.oldSwapchain = prev;

    if(vkCreateSwapchainKHR(vkdev, &inf, 0, &sc) != 0) {
        return 0;
    }
    return sc;
}
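
/* Returns a malloc'd array of the swapchain images (the caller frees it), and
 * stores the image count through count if it's not null. */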
VkImage *vku_get_swapchain_images(VkSwapchainKHR sc, int *count)
{
    uint32_t nimg;
    VkImage *images;

    if(vkGetSwapchainImagesKHR(vkdev, sc, &nimg, 0) != 0) {
        return 0;
    }
    if(!(images = malloc(nimg * sizeof *images))) {
        return 0;
    }
    vkGetSwapchainImagesKHR(vkdev, sc, &nimg, images);

    if(count) *count = (int)nimg;
    return images;
}
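
/* Acquires the next swapchain image index, or -1 on failure. Note that no
 * semaphore or fence is passed to vkAcquireNextImageKHR here (the spec
 * expects at least one of the two), so callers must ensure synchronization
 * some other way. */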
int vku_get_next_image(VkSwapchainKHR sc)
{
    uint32_t next;

    if(vkAcquireNextImageKHR(vkdev, sc, UINT64_MAX, 0, 0, &next) != 0) {
        return -1;
    }
    return (int)next;
}
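
/* Creates a 2D color image view with identity swizzle over the first mip
 * level and array layer of img. */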
VkImageView vku_create_view(VkImage img, VkFormat fmt)
{
    VkImageView view;
    VkImageViewCreateInfo iv;

    memset(&iv, 0, sizeof iv);
    iv.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    iv.image = img;
    iv.viewType = VK_IMAGE_VIEW_TYPE_2D;
    iv.format = fmt;
    iv.components.r = iv.components.g = iv.components.b = iv.components.a =
            VK_COMPONENT_SWIZZLE_IDENTITY;
    iv.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    iv.subresourceRange.baseMipLevel = 0;
    iv.subresourceRange.levelCount = 1;
    iv.subresourceRange.baseArrayLayer = 0;
    iv.subresourceRange.layerCount = 1;

    if(vkCreateImageView(vkdev, &iv, 0, &view) != 0) {
        fprintf(stderr, "vku_create_view failed\n");
        return 0;
    }
    return view;
}

void vku_destroy_view(VkImageView view)
{
    vkDestroyImageView(vkdev, view, 0);
}
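
/* Wraps a single image view in a framebuffer of the given size, for use with
 * the given render pass. Returns 0 on failure. */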
VkFramebuffer vku_create_framebuffer(VkImageView view, int width, int height, VkRenderPass rpass)
{
    VkFramebuffer fb;
    VkFramebufferCreateInfo fbinf;

    memset(&fbinf, 0, sizeof fbinf);
    fbinf.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
    fbinf.renderPass = rpass;
    fbinf.attachmentCount = 1;
    fbinf.pAttachments = &view;
    fbinf.width = width;
    fbinf.height = height;
    fbinf.layers = 1;

    if(vkCreateFramebuffer(vkdev, &fbinf, 0, &fb) != 0) {
        fprintf(stderr, "vku_create_framebuffer failed\n");
        return 0;
    }
    return fb;
}

void vku_destroy_framebuffer(VkFramebuffer fb)
{
    vkDestroyFramebuffer(vkdev, fb, 0);
}
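
/* Queues image img_idx of swapchain sc for presentation on the global queue.
 * The per-swapchain present result is currently ignored. */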
void vku_present(VkSwapchainKHR sc, int img_idx)
{
    VkPresentInfoKHR inf;
    VkResult res;
    uint32_t index = img_idx;

    memset(&inf, 0, sizeof inf);
    inf.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
    inf.swapchainCount = 1;
    inf.pSwapchains = &sc;
    inf.pImageIndices = &index;
    inf.pResults = &res;

    vkQueuePresentKHR(vkq, &inf);
}
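
/* Creates a VkBuffer of sz bytes with the given usage flags. The buffer is
 * not yet backed by device memory (see the TODO below), so it can't be used
 * until memory allocation and binding are added.
 *
 * Intended usage (a sketch, once memory backing is implemented):
 *
 *   struct vku_buffer *vbuf = vku_create_buffer(4096, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
 *   ...
 *   vku_destroy_buffer(vbuf);
 */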
struct vku_buffer *vku_create_buffer(int sz, unsigned int usage)
{
    struct vku_buffer *buf;
    VkBufferCreateInfo binfo;

    if(!(buf = malloc(sizeof *buf))) {
        perror("failed to allocate vk_buffer structure");
        return 0;
    }

    memset(&binfo, 0, sizeof binfo);
    binfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    binfo.size = sz;
    binfo.usage = usage;
    binfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    if(vkCreateBuffer(vkdev, &binfo, 0, &buf->buf) != 0) {
        fprintf(stderr, "failed to create %d byte buffer (usage: %x)\n", sz, usage);
        free(buf);
        return 0;
    }
    // TODO back with memory
    return buf;
}

void vku_destroy_buffer(struct vku_buffer *buf)
{
    if(buf) {
        vkDestroyBuffer(vkdev, buf->buf, 0);
        free(buf);
    }
}

void vku_cmd_copybuf(VkCommandBuffer cmdbuf, VkBuffer dest, int doffs,
        VkBuffer src, int soffs, int size)
{
    VkBufferCopy copy;
    copy.size = size;
    copy.srcOffset = soffs;
    copy.dstOffset = doffs;

    vkCmdCopyBuffer(cmdbuf, src, dest, 1, &copy);
}
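
/* Creates a single-subpass render pass with one color attachment of format
 * cfmt, plus a depth/stencil attachment if dsfmt is not VK_FORMAT_UNDEFINED.
 * The color attachment ends up in PRESENT_SRC_KHR layout so the image can be
 * handed straight to vku_present. Returns 0 on failure. */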
VkRenderPass vku_create_renderpass(VkFormat cfmt, VkFormat dsfmt)
{
    int count = 1;	/* always assume we have a color attachment for now */
    VkAttachmentDescription at[2];
    VkAttachmentReference colref, dsref;
    VkSubpassDescription subpass;
    VkRenderPass pass;
    VkRenderPassCreateInfo rpinf;

    colref.attachment = 0;
    colref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    dsref.attachment = 1;
    dsref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

    memset(&subpass, 0, sizeof subpass);
    subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &colref;

    memset(at, 0, sizeof at);	/* make sure the flags members don't contain garbage */
    at[0].format = cfmt;
    at[0].samples = VK_SAMPLE_COUNT_1_BIT;	/* TODO multisampling */
    at[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    at[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    at[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    at[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    at[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    at[0].finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    if(dsfmt != VK_FORMAT_UNDEFINED) {
        at[1].format = dsfmt;
        at[1].samples = VK_SAMPLE_COUNT_1_BIT;
        at[1].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        at[1].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
        at[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        at[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
        at[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        at[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;	/* finalLayout must not be UNDEFINED */

        subpass.pDepthStencilAttachment = &dsref;
        count++;
    }

    memset(&rpinf, 0, sizeof rpinf);
    rpinf.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    rpinf.attachmentCount = count;
    rpinf.pAttachments = at;
    rpinf.subpassCount = 1;
    rpinf.pSubpasses = &subpass;

    if(vkCreateRenderPass(vkdev, &rpinf, 0, &pass) != 0) {
        fprintf(stderr, "vku_create_renderpass: failed to create renderpass\n");
        return 0;
    }

    return pass;
}

void vku_destroy_renderpass(VkRenderPass rpass)
{
    vkDestroyRenderPass(vkdev, rpass, 0);
}
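
/* Begins rpass on fb, using the current global viewport (vkvport) as the
 * render area. No clear values are passed, which matches the DONT_CARE load
 * ops used by vku_create_renderpass. */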
void vku_begin_renderpass(VkCommandBuffer cmdbuf, VkRenderPass rpass, VkFramebuffer fb,
        VkSubpassContents cont)
{
    VkRenderPassBeginInfo rpinf;

    memset(&rpinf, 0, sizeof rpinf);
    rpinf.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    rpinf.renderPass = rpass;
    rpinf.framebuffer = fb;
    rpinf.renderArea.offset.x = vkvport.x;
    rpinf.renderArea.offset.y = vkvport.y;
    rpinf.renderArea.extent.width = vkvport.width;
    rpinf.renderArea.extent.height = vkvport.height;

    vkCmdBeginRenderPass(cmdbuf, &rpinf, cont);
}

void vku_end_renderpass(VkCommandBuffer cmdbuf)
{
    vkCmdEndRenderPass(cmdbuf);
}
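
/* X11/Xlib presentation support: vku_xlib_usable_visual checks whether the
 * selected device/queue family can present to windows of the given visual,
 * and vku_xlib_create_surface wraps an X window in a VkSurfaceKHR. */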
#ifdef VK_USE_PLATFORM_XLIB_KHR
int vku_xlib_usable_visual(Display *dpy, VisualID vid)
{
    return vkGetPhysicalDeviceXlibPresentationSupportKHR(phys_devices[sel_dev],
            sel_qfamily, dpy, vid);
}

VkSurfaceKHR vku_xlib_create_surface(Display *dpy, Window win)
{
    VkSurfaceKHR surf;
    VkXlibSurfaceCreateInfoKHR inf;

    memset(&inf, 0, sizeof inf);
    inf.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
    inf.dpy = dpy;
    inf.window = win;

    if(vkCreateXlibSurfaceKHR(vk, &inf, 0, &surf) != 0) {
        return 0;
    }
    return surf;
}

#endif	/* VK_USE_PLATFORM_XLIB_KHR */
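
/* Helpers for the device information dump: human-readable names for device
 * types, memory property flags and queue flags. The flag-string helpers
 * return a static buffer, so they are not reentrant. */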
static const char *get_device_name(VkPhysicalDeviceType type)
{
    switch(type) {
    case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
        return "integrated GPU";
    case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
        return "discrete GPU";
    case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU:
        return "virtual GPU";
    case VK_PHYSICAL_DEVICE_TYPE_CPU:
        return "CPU";
    default:
        break;
    }
    return "unknown";
}

static const char *get_mem_prop_flag_string(VkMemoryPropertyFlags flags)
{
    static char str[128];

    str[0] = 0;
    if(flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
        strcat(str, "device-local ");
    }
    if(flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
        strcat(str, "host-visible ");
    }
    if(flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
        strcat(str, "host-coherent ");
    }
    if(flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) {
        strcat(str, "host-cached ");
    }
    if(flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) {
        strcat(str, "lazily-allocated ");
    }

    if(!*str) {
        strcat(str, "-");
    }
    return str;
}

static const char *get_queue_flag_string(VkQueueFlagBits flags)
{
    static char str[128];

    str[0] = 0;
    if(flags & VK_QUEUE_GRAPHICS_BIT) {
        strcat(str, "graphics ");
    }
    if(flags & VK_QUEUE_COMPUTE_BIT) {
        strcat(str, "compute ");
    }
    if(flags & VK_QUEUE_TRANSFER_BIT) {
        strcat(str, "transfer ");
    }
    if(flags & VK_QUEUE_SPARSE_BINDING_BIT) {
        strcat(str, "sparse-binding ");
    }
    if(!*str) {
        strcat(str, "-");
    }
    return str;
}
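
/* Decode the major.minor.patch fields packed into Vulkan version numbers
 * (10 bits major, 10 bits minor, 12 bits patch, as in VK_MAKE_VERSION). */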
static int ver_major(uint32_t ver)
{
    return (ver >> 22) & 0x3ff;
}

static int ver_minor(uint32_t ver)
{
    return (ver >> 12) & 0x3ff;
}

static int ver_patch(uint32_t ver)
{
    return ver & 0xfff;
}
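
/* Formats a byte count as a human-readable string with one decimal digit:
 * the value is scaled by 10 up front so the fractional digit survives the
 * repeated integer divisions by 1024. Returns a static buffer. */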
static const char *mem_size_str(long sz)
{
    static char str[64];
    static const char *unitstr[] = { "bytes", "KB", "MB", "GB", "TB", "PB", 0 };
    int uidx = 0;
    sz *= 10;

    while(sz >= 10240 && unitstr[uidx + 1]) {
        sz /= 1024;
        ++uidx;
    }
    sprintf(str, "%ld.%ld %s", sz / 10, sz % 10, unitstr[uidx]);
    return str;
}
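
/* Receives debug report messages from the validation layers and forwards
 * them to the user callback if one was registered, otherwise prints them to
 * stderr. Returning VK_TRUE asks the layer to abort the triggering call. */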
static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback_thunk(VkDebugReportFlagsEXT flags,
        VkDebugReportObjectTypeEXT otype, uint64_t obj, size_t loc, int32_t code,
        const char *layer_prefix, const char *msg, void *udata)
{
    if(user_dbg_callback) {
        user_dbg_callback(msg, user_dbg_callback_data);
    } else {
        fprintf(stderr, "VK DEBUG (%s): %s\n", layer_prefix, msg);
    }

    return VK_TRUE;
}