Added clang format with mozilla code style

This commit is contained in:
Alejandro Saucedo 2020-07-30 09:06:52 +01:00
parent a012551d00
commit 859ca1e0fd
5 changed files with 1613 additions and 1421 deletions

View file

@ -2,6 +2,7 @@
####### SRC Build Params #######
CC=/c/Users/axsau/scoop/apps/gcc/current/bin/g++.exe
CF=~/Programming/lib/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang-format
####### Shader Build Params #######
@ -25,6 +26,9 @@ build: build_shaders
build_shaders:
$(SCMP) -V shaders/glsl/computeheadless.comp -o shaders/glsl/computeheadless.comp.spv
format:
$(CF) -i -style=mozilla src/*.cpp src/*.h src/*.hpp
clean:
rm ./bin/main.exe;

File diff suppressed because it is too large Load diff

View file

@ -1,343 +1,367 @@
/*
* Assorted commonly used Vulkan helper functions
*
* Copyright (C) 2016 by Sascha Willems - www.saschawillems.de
*
* This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT)
*/
* Assorted commonly used Vulkan helper functions
*
* Copyright (C) 2016 by Sascha Willems - www.saschawillems.de
*
* This code is licensed under the MIT license (MIT)
* (http://opensource.org/licenses/MIT)
*/
#include "VulkanTools.h"
const std::string getAssetPath()
const std::string
getAssetPath()
{
#if defined(VK_EXAMPLE_DATA_DIR)
return VK_EXAMPLE_DATA_DIR;
return VK_EXAMPLE_DATA_DIR;
#else
return "./../";
return "./../";
#endif
}
namespace vks
namespace vks {
namespace tools {
bool errorModeSilent = false;
std::string
errorString(VkResult errorCode)
{
namespace tools
{
bool errorModeSilent = false;
std::string errorString(VkResult errorCode)
{
switch (errorCode)
{
#define STR(r) case VK_ ##r: return #r
STR(NOT_READY);
STR(TIMEOUT);
STR(EVENT_SET);
STR(EVENT_RESET);
STR(INCOMPLETE);
STR(ERROR_OUT_OF_HOST_MEMORY);
STR(ERROR_OUT_OF_DEVICE_MEMORY);
STR(ERROR_INITIALIZATION_FAILED);
STR(ERROR_DEVICE_LOST);
STR(ERROR_MEMORY_MAP_FAILED);
STR(ERROR_LAYER_NOT_PRESENT);
STR(ERROR_EXTENSION_NOT_PRESENT);
STR(ERROR_FEATURE_NOT_PRESENT);
STR(ERROR_INCOMPATIBLE_DRIVER);
STR(ERROR_TOO_MANY_OBJECTS);
STR(ERROR_FORMAT_NOT_SUPPORTED);
STR(ERROR_SURFACE_LOST_KHR);
STR(ERROR_NATIVE_WINDOW_IN_USE_KHR);
STR(SUBOPTIMAL_KHR);
STR(ERROR_OUT_OF_DATE_KHR);
STR(ERROR_INCOMPATIBLE_DISPLAY_KHR);
STR(ERROR_VALIDATION_FAILED_EXT);
STR(ERROR_INVALID_SHADER_NV);
switch (errorCode) {
#define STR(r) \
case VK_##r: \
return #r
STR(NOT_READY);
STR(TIMEOUT);
STR(EVENT_SET);
STR(EVENT_RESET);
STR(INCOMPLETE);
STR(ERROR_OUT_OF_HOST_MEMORY);
STR(ERROR_OUT_OF_DEVICE_MEMORY);
STR(ERROR_INITIALIZATION_FAILED);
STR(ERROR_DEVICE_LOST);
STR(ERROR_MEMORY_MAP_FAILED);
STR(ERROR_LAYER_NOT_PRESENT);
STR(ERROR_EXTENSION_NOT_PRESENT);
STR(ERROR_FEATURE_NOT_PRESENT);
STR(ERROR_INCOMPATIBLE_DRIVER);
STR(ERROR_TOO_MANY_OBJECTS);
STR(ERROR_FORMAT_NOT_SUPPORTED);
STR(ERROR_SURFACE_LOST_KHR);
STR(ERROR_NATIVE_WINDOW_IN_USE_KHR);
STR(SUBOPTIMAL_KHR);
STR(ERROR_OUT_OF_DATE_KHR);
STR(ERROR_INCOMPATIBLE_DISPLAY_KHR);
STR(ERROR_VALIDATION_FAILED_EXT);
STR(ERROR_INVALID_SHADER_NV);
#undef STR
default:
return "UNKNOWN_ERROR";
}
}
std::string physicalDeviceTypeString(VkPhysicalDeviceType type)
{
switch (type)
{
#define STR(r) case VK_PHYSICAL_DEVICE_TYPE_ ##r: return #r
STR(OTHER);
STR(INTEGRATED_GPU);
STR(DISCRETE_GPU);
STR(VIRTUAL_GPU);
#undef STR
default: return "UNKNOWN_DEVICE_TYPE";
}
}
VkBool32 getSupportedDepthFormat(VkPhysicalDevice physicalDevice, VkFormat *depthFormat)
{
// Since all depth formats may be optional, we need to find a suitable depth format to use
// Start with the highest precision packed format
std::vector<VkFormat> depthFormats = {
VK_FORMAT_D32_SFLOAT_S8_UINT,
VK_FORMAT_D32_SFLOAT,
VK_FORMAT_D24_UNORM_S8_UINT,
VK_FORMAT_D16_UNORM_S8_UINT,
VK_FORMAT_D16_UNORM
};
for (auto& format : depthFormats)
{
VkFormatProperties formatProps;
vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &formatProps);
// Format must support depth stencil attachment for optimal tiling
if (formatProps.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)
{
*depthFormat = format;
return true;
}
}
return false;
}
// Returns whether a given format supports LINEAR filtering
VkBool32 formatIsFilterable(VkPhysicalDevice physicalDevice, VkFormat format, VkImageTiling tiling)
{
VkFormatProperties formatProps;
vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &formatProps);
if (tiling == VK_IMAGE_TILING_OPTIMAL)
return formatProps.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
if (tiling == VK_IMAGE_TILING_LINEAR)
return formatProps.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
return false;
}
// Create an image memory barrier for changing the layout of
// an image and put it into an active command buffer
// See chapter 11.4 "Image Layout" for details
void setImageLayout(
VkCommandBuffer cmdbuffer,
VkImage image,
VkImageLayout oldImageLayout,
VkImageLayout newImageLayout,
VkImageSubresourceRange subresourceRange,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask)
{
// Create an image barrier object
VkImageMemoryBarrier imageMemoryBarrier = vks::initializers::imageMemoryBarrier();
imageMemoryBarrier.oldLayout = oldImageLayout;
imageMemoryBarrier.newLayout = newImageLayout;
imageMemoryBarrier.image = image;
imageMemoryBarrier.subresourceRange = subresourceRange;
// Source layouts (old)
// Source access mask controls actions that have to be finished on the old layout
// before it will be transitioned to the new layout
switch (oldImageLayout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
// Image layout is undefined (or does not matter)
// Only valid as initial layout
// No flags required, listed only for completeness
imageMemoryBarrier.srcAccessMask = 0;
break;
case VK_IMAGE_LAYOUT_PREINITIALIZED:
// Image is preinitialized
// Only valid as initial layout for linear images, preserves memory contents
// Make sure host writes have been finished
imageMemoryBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// Image is a color attachment
// Make sure any writes to the color buffer have been finished
imageMemoryBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
// Image is a depth/stencil attachment
// Make sure any writes to the depth/stencil buffer have been finished
imageMemoryBarrier.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
// Image is a transfer source
// Make sure any reads from the image have been finished
imageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
// Image is a transfer destination
// Make sure any writes to the image have been finished
imageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// Image is read by a shader
// Make sure any shader reads from the image have been finished
imageMemoryBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
break;
default:
// Other source layouts aren't handled (yet)
break;
}
// Target layouts (new)
// Destination access mask controls the dependency for the new image layout
switch (newImageLayout)
{
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
// Image will be used as a transfer destination
// Make sure any writes to the image have been finished
imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
// Image will be used as a transfer source
// Make sure any reads from the image have been finished
imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// Image will be used as a color attachment
// Make sure any writes to the color buffer have been finished
imageMemoryBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
// Image layout will be used as a depth/stencil attachment
// Make sure any writes to depth/stencil buffer have been finished
imageMemoryBarrier.dstAccessMask = imageMemoryBarrier.dstAccessMask | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// Image will be read in a shader (sampler, input attachment)
// Make sure any writes to the image have been finished
if (imageMemoryBarrier.srcAccessMask == 0)
{
imageMemoryBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
}
imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
break;
default:
// Other source layouts aren't handled (yet)
break;
}
// Put barrier inside setup command buffer
vkCmdPipelineBarrier(
cmdbuffer,
srcStageMask,
dstStageMask,
0,
0, nullptr,
0, nullptr,
1, &imageMemoryBarrier);
}
// Fixed sub resource on first mip level and layer
void setImageLayout(
VkCommandBuffer cmdbuffer,
VkImage image,
VkImageAspectFlags aspectMask,
VkImageLayout oldImageLayout,
VkImageLayout newImageLayout,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask)
{
VkImageSubresourceRange subresourceRange = {};
subresourceRange.aspectMask = aspectMask;
subresourceRange.baseMipLevel = 0;
subresourceRange.levelCount = 1;
subresourceRange.layerCount = 1;
setImageLayout(cmdbuffer, image, oldImageLayout, newImageLayout, subresourceRange, srcStageMask, dstStageMask);
}
void insertImageMemoryBarrier(
VkCommandBuffer cmdbuffer,
VkImage image,
VkAccessFlags srcAccessMask,
VkAccessFlags dstAccessMask,
VkImageLayout oldImageLayout,
VkImageLayout newImageLayout,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
VkImageSubresourceRange subresourceRange)
{
VkImageMemoryBarrier imageMemoryBarrier = vks::initializers::imageMemoryBarrier();
imageMemoryBarrier.srcAccessMask = srcAccessMask;
imageMemoryBarrier.dstAccessMask = dstAccessMask;
imageMemoryBarrier.oldLayout = oldImageLayout;
imageMemoryBarrier.newLayout = newImageLayout;
imageMemoryBarrier.image = image;
imageMemoryBarrier.subresourceRange = subresourceRange;
vkCmdPipelineBarrier(
cmdbuffer,
srcStageMask,
dstStageMask,
0,
0, nullptr,
0, nullptr,
1, &imageMemoryBarrier);
}
void exitFatal(std::string message, int32_t exitCode)
{
#if defined(_WIN32)
if (!errorModeSilent) {
MessageBox(NULL, message.c_str(), NULL, MB_OK | MB_ICONERROR);
}
#endif
std::cerr << message << "\n";
}
void exitFatal(std::string message, VkResult resultCode)
{
exitFatal(message, (int32_t)resultCode);
}
VkShaderModule loadShader(const char *fileName, VkDevice device)
{
std::ifstream is(fileName, std::ios::binary | std::ios::in | std::ios::ate);
if (is.is_open())
{
size_t size = is.tellg();
is.seekg(0, std::ios::beg);
char* shaderCode = new char[size];
is.read(shaderCode, size);
is.close();
assert(size > 0);
VkShaderModule shaderModule;
VkShaderModuleCreateInfo moduleCreateInfo{};
moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
moduleCreateInfo.codeSize = size;
moduleCreateInfo.pCode = (uint32_t*)shaderCode;
VK_CHECK_RESULT(vkCreateShaderModule(device, &moduleCreateInfo, NULL, &shaderModule));
delete[] shaderCode;
return shaderModule;
}
else
{
std::cerr << "Error: Could not open shader file \"" << fileName << "\"" << std::endl;
return VK_NULL_HANDLE;
}
}
bool fileExists(const std::string &filename)
{
std::ifstream f(filename.c_str());
return !f.fail();
}
}
default:
return "UNKNOWN_ERROR";
}
}
std::string
physicalDeviceTypeString(VkPhysicalDeviceType type)
{
switch (type) {
#define STR(r) \
case VK_PHYSICAL_DEVICE_TYPE_##r: \
return #r
STR(OTHER);
STR(INTEGRATED_GPU);
STR(DISCRETE_GPU);
STR(VIRTUAL_GPU);
#undef STR
default:
return "UNKNOWN_DEVICE_TYPE";
}
}
VkBool32
getSupportedDepthFormat(VkPhysicalDevice physicalDevice, VkFormat* depthFormat)
{
// Since all depth formats may be optional, we need to find a suitable depth
// format to use. Start with the highest precision packed format
std::vector<VkFormat> depthFormats = { VK_FORMAT_D32_SFLOAT_S8_UINT,
VK_FORMAT_D32_SFLOAT,
VK_FORMAT_D24_UNORM_S8_UINT,
VK_FORMAT_D16_UNORM_S8_UINT,
VK_FORMAT_D16_UNORM };
for (auto& format : depthFormats) {
VkFormatProperties formatProps;
vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &formatProps);
// Format must support depth stencil attachment for optimal tiling
if (formatProps.optimalTilingFeatures &
VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
*depthFormat = format;
return true;
}
}
return false;
}
// Returns whether a given format supports LINEAR filtering
VkBool32
formatIsFilterable(VkPhysicalDevice physicalDevice,
VkFormat format,
VkImageTiling tiling)
{
VkFormatProperties formatProps;
vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &formatProps);
if (tiling == VK_IMAGE_TILING_OPTIMAL)
return formatProps.optimalTilingFeatures &
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
if (tiling == VK_IMAGE_TILING_LINEAR)
return formatProps.linearTilingFeatures &
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
return false;
}
// Create an image memory barrier for changing the layout of
// an image and put it into an active command buffer
// See chapter 11.4 "Image Layout" for details
void
setImageLayout(VkCommandBuffer cmdbuffer,
VkImage image,
VkImageLayout oldImageLayout,
VkImageLayout newImageLayout,
VkImageSubresourceRange subresourceRange,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask)
{
// Create an image barrier object
VkImageMemoryBarrier imageMemoryBarrier =
vks::initializers::imageMemoryBarrier();
imageMemoryBarrier.oldLayout = oldImageLayout;
imageMemoryBarrier.newLayout = newImageLayout;
imageMemoryBarrier.image = image;
imageMemoryBarrier.subresourceRange = subresourceRange;
// Source layouts (old)
// Source access mask controls actions that have to be finished on the old
// layout before it will be transitioned to the new layout
switch (oldImageLayout) {
case VK_IMAGE_LAYOUT_UNDEFINED:
// Image layout is undefined (or does not matter)
// Only valid as initial layout
// No flags required, listed only for completeness
imageMemoryBarrier.srcAccessMask = 0;
break;
case VK_IMAGE_LAYOUT_PREINITIALIZED:
// Image is preinitialized
// Only valid as initial layout for linear images, preserves memory
// contents. Make sure host writes have been finished
imageMemoryBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// Image is a color attachment
// Make sure any writes to the color buffer have been finished
imageMemoryBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
// Image is a depth/stencil attachment
// Make sure any writes to the depth/stencil buffer have been finished
imageMemoryBarrier.srcAccessMask =
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
// Image is a transfer source
// Make sure any reads from the image have been finished
imageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
// Image is a transfer destination
// Make sure any writes to the image have been finished
imageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// Image is read by a shader
// Make sure any shader reads from the image have been finished
imageMemoryBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
break;
default:
// Other source layouts aren't handled (yet)
break;
}
// Target layouts (new)
// Destination access mask controls the dependency for the new image layout
switch (newImageLayout) {
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
// Image will be used as a transfer destination
// Make sure any writes to the image have been finished
imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
// Image will be used as a transfer source
// Make sure any reads from the image have been finished
imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// Image will be used as a color attachment
// Make sure any writes to the color buffer have been finished
imageMemoryBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
// Image layout will be used as a depth/stencil attachment
// Make sure any writes to depth/stencil buffer have been finished
imageMemoryBarrier.dstAccessMask =
imageMemoryBarrier.dstAccessMask |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// Image will be read in a shader (sampler, input attachment)
// Make sure any writes to the image have been finished
if (imageMemoryBarrier.srcAccessMask == 0) {
imageMemoryBarrier.srcAccessMask =
VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
}
imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
break;
default:
// Other source layouts aren't handled (yet)
break;
}
// Put barrier inside setup command buffer
vkCmdPipelineBarrier(cmdbuffer,
srcStageMask,
dstStageMask,
0,
0,
nullptr,
0,
nullptr,
1,
&imageMemoryBarrier);
}
// Fixed sub resource on first mip level and layer
void
setImageLayout(VkCommandBuffer cmdbuffer,
VkImage image,
VkImageAspectFlags aspectMask,
VkImageLayout oldImageLayout,
VkImageLayout newImageLayout,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask)
{
VkImageSubresourceRange subresourceRange = {};
subresourceRange.aspectMask = aspectMask;
subresourceRange.baseMipLevel = 0;
subresourceRange.levelCount = 1;
subresourceRange.layerCount = 1;
setImageLayout(cmdbuffer,
image,
oldImageLayout,
newImageLayout,
subresourceRange,
srcStageMask,
dstStageMask);
}
void
insertImageMemoryBarrier(VkCommandBuffer cmdbuffer,
VkImage image,
VkAccessFlags srcAccessMask,
VkAccessFlags dstAccessMask,
VkImageLayout oldImageLayout,
VkImageLayout newImageLayout,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
VkImageSubresourceRange subresourceRange)
{
VkImageMemoryBarrier imageMemoryBarrier =
vks::initializers::imageMemoryBarrier();
imageMemoryBarrier.srcAccessMask = srcAccessMask;
imageMemoryBarrier.dstAccessMask = dstAccessMask;
imageMemoryBarrier.oldLayout = oldImageLayout;
imageMemoryBarrier.newLayout = newImageLayout;
imageMemoryBarrier.image = image;
imageMemoryBarrier.subresourceRange = subresourceRange;
vkCmdPipelineBarrier(cmdbuffer,
srcStageMask,
dstStageMask,
0,
0,
nullptr,
0,
nullptr,
1,
&imageMemoryBarrier);
}
void
exitFatal(std::string message, int32_t exitCode)
{
#if defined(_WIN32)
if (!errorModeSilent) {
MessageBox(NULL, message.c_str(), NULL, MB_OK | MB_ICONERROR);
}
#endif
std::cerr << message << "\n";
}
void
exitFatal(std::string message, VkResult resultCode)
{
exitFatal(message, (int32_t)resultCode);
}
VkShaderModule
loadShader(const char* fileName, VkDevice device)
{
std::ifstream is(fileName, std::ios::binary | std::ios::in | std::ios::ate);
if (is.is_open()) {
size_t size = is.tellg();
is.seekg(0, std::ios::beg);
char* shaderCode = new char[size];
is.read(shaderCode, size);
is.close();
assert(size > 0);
VkShaderModule shaderModule;
VkShaderModuleCreateInfo moduleCreateInfo{};
moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
moduleCreateInfo.codeSize = size;
moduleCreateInfo.pCode = (uint32_t*)shaderCode;
VK_CHECK_RESULT(
vkCreateShaderModule(device, &moduleCreateInfo, NULL, &shaderModule));
delete[] shaderCode;
return shaderModule;
} else {
std::cerr << "Error: Could not open shader file \"" << fileName << "\""
<< std::endl;
return VK_NULL_HANDLE;
}
}
bool
fileExists(const std::string& filename)
{
std::ifstream f(filename.c_str());
return !f.fail();
}
} // namespace tools
} // namespace vks

View file

@ -1,31 +1,31 @@
/*
* Assorted Vulkan helper functions
*
* Copyright (C) 2016 by Sascha Willems - www.saschawillems.de
*
* This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT)
*/
* Assorted Vulkan helper functions
*
* Copyright (C) 2016 by Sascha Willems - www.saschawillems.de
*
* This code is licensed under the MIT license (MIT)
* (http://opensource.org/licenses/MIT)
*/
#pragma once
#include "vulkan/vulkan.h"
#include "VulkanInitializers.hpp"
#include "vulkan/vulkan.h"
#include <math.h>
#include <stdlib.h>
#include <string>
#include <assert.h>
#include <cstring>
#include <fstream>
#include <assert.h>
#include <stdio.h>
#include <vector>
#include <iostream>
#include <math.h>
#include <stdexcept>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#if defined(_WIN32)
#include <windows.h>
#include <fcntl.h>
#include <io.h>
#include <windows.h>
#endif
// Custom define for better code readability
@ -34,77 +34,91 @@
#define DEFAULT_FENCE_TIMEOUT 100000000000
// Macro to check and display Vulkan return results
#define VK_CHECK_RESULT(f) \
{ \
VkResult res = (f); \
if (res != VK_SUCCESS) \
{ \
std::cout << "Fatal : VkResult is \"" << vks::tools::errorString(res) << "\" in " << __FILE__ << " at line " << __LINE__ << std::endl; \
assert(res == VK_SUCCESS); \
} \
}
#define VK_CHECK_RESULT(f) \
{ \
VkResult res = (f); \
if (res != VK_SUCCESS) { \
std::cout << "Fatal : VkResult is \"" << vks::tools::errorString(res) \
<< "\" in " << __FILE__ << " at line " << __LINE__ \
<< std::endl; \
assert(res == VK_SUCCESS); \
} \
}
const std::string getAssetPath();
const std::string
getAssetPath();
namespace vks
{
namespace tools
{
/** @brief Disable message boxes on fatal errors */
extern bool errorModeSilent;
namespace vks {
namespace tools {
/** @brief Disable message boxes on fatal errors */
extern bool errorModeSilent;
/** @brief Returns an error code as a string */
std::string errorString(VkResult errorCode);
/** @brief Returns an error code as a string */
std::string
errorString(VkResult errorCode);
/** @brief Returns the device type as a string */
std::string physicalDeviceTypeString(VkPhysicalDeviceType type);
/** @brief Returns the device type as a string */
std::string
physicalDeviceTypeString(VkPhysicalDeviceType type);
// Selects a suitable supported depth format starting with 32 bit down to 16 bit
// Returns false if none of the depth formats in the list is supported by the device
VkBool32 getSupportedDepthFormat(VkPhysicalDevice physicalDevice, VkFormat *depthFormat);
// Selects a suitable supported depth format starting with 32 bit down to 16
// bit. Returns false if none of the depth formats in the list is supported by
// the device
VkBool32
getSupportedDepthFormat(VkPhysicalDevice physicalDevice, VkFormat* depthFormat);
// Returns whether a given format supports LINEAR filtering
VkBool32 formatIsFilterable(VkPhysicalDevice physicalDevice, VkFormat format, VkImageTiling tiling);
// Returns whether a given format supports LINEAR filtering
VkBool32
formatIsFilterable(VkPhysicalDevice physicalDevice,
VkFormat format,
VkImageTiling tiling);
// Put an image memory barrier for setting an image layout on the sub resource into the given command buffer
void setImageLayout(
VkCommandBuffer cmdbuffer,
VkImage image,
VkImageLayout oldImageLayout,
VkImageLayout newImageLayout,
VkImageSubresourceRange subresourceRange,
VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
// Uses a fixed sub resource layout with first mip level and layer
void setImageLayout(
VkCommandBuffer cmdbuffer,
VkImage image,
VkImageAspectFlags aspectMask,
VkImageLayout oldImageLayout,
VkImageLayout newImageLayout,
VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
// Put an image memory barrier for setting an image layout on the sub resource
// into the given command buffer
void
setImageLayout(
VkCommandBuffer cmdbuffer,
VkImage image,
VkImageLayout oldImageLayout,
VkImageLayout newImageLayout,
VkImageSubresourceRange subresourceRange,
VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
// Uses a fixed sub resource layout with first mip level and layer
void
setImageLayout(
VkCommandBuffer cmdbuffer,
VkImage image,
VkImageAspectFlags aspectMask,
VkImageLayout oldImageLayout,
VkImageLayout newImageLayout,
VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
/** @brief Insert an image memory barrier into the command buffer */
void insertImageMemoryBarrier(
VkCommandBuffer cmdbuffer,
VkImage image,
VkAccessFlags srcAccessMask,
VkAccessFlags dstAccessMask,
VkImageLayout oldImageLayout,
VkImageLayout newImageLayout,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
VkImageSubresourceRange subresourceRange);
/** @brief Insert an image memory barrier into the command buffer */
void
insertImageMemoryBarrier(VkCommandBuffer cmdbuffer,
VkImage image,
VkAccessFlags srcAccessMask,
VkAccessFlags dstAccessMask,
VkImageLayout oldImageLayout,
VkImageLayout newImageLayout,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
VkImageSubresourceRange subresourceRange);
// Display error message and exit on fatal error
void exitFatal(std::string message, int32_t exitCode);
void exitFatal(std::string message, VkResult resultCode);
// Display error message and exit on fatal error
void
exitFatal(std::string message, int32_t exitCode);
void
exitFatal(std::string message, VkResult resultCode);
// Load a SPIR-V shader (binary)
VkShaderModule loadShader(const char *fileName, VkDevice device);
// Load a SPIR-V shader (binary)
VkShaderModule
loadShader(const char* fileName, VkDevice device);
/** @brief Checks if a file exists */
bool fileExists(const std::string &filename);
}
}
/** @brief Checks if a file exists */
bool
fileExists(const std::string& filename);
} // namespace tools
} // namespace vks

View file

@ -2,16 +2,16 @@
#pragma comment(linker, "/subsystem:console")
#endif
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <vector>
#include <iostream>
#include <algorithm>
#include <vulkan/vulkan.h>
#include "VulkanTools.h"
#include <vulkan/vulkan.h>
#define DEBUG (!NDEBUG)
@ -19,486 +19,575 @@
#define LOG(...) printf(__VA_ARGS__)
static VKAPI_ATTR VkBool32 VKAPI_CALL debugMessageCallback(
VkDebugReportFlagsEXT flags,
VkDebugReportObjectTypeEXT objectType,
uint64_t object,
size_t location,
int32_t messageCode,
const char* pLayerPrefix,
const char* pMessage,
void* pUserData)
static VKAPI_ATTR VkBool32 VKAPI_CALL
debugMessageCallback(VkDebugReportFlagsEXT flags,
VkDebugReportObjectTypeEXT objectType,
uint64_t object,
size_t location,
int32_t messageCode,
const char* pLayerPrefix,
const char* pMessage,
void* pUserData)
{
LOG("[VALIDATION]: %s - %s\n", pLayerPrefix, pMessage);
return VK_FALSE;
LOG("[VALIDATION]: %s - %s\n", pLayerPrefix, pMessage);
return VK_FALSE;
}
class VulkanExample
{
public:
VkInstance instance;
VkPhysicalDevice physicalDevice;
VkDevice device;
uint32_t queueFamilyIndex;
VkPipelineCache pipelineCache;
VkQueue queue;
VkCommandPool commandPool;
VkCommandBuffer commandBuffer;
VkFence fence;
VkDescriptorPool descriptorPool;
VkDescriptorSetLayout descriptorSetLayout;
VkDescriptorSet descriptorSet;
VkPipelineLayout pipelineLayout;
VkPipeline pipeline;
VkShaderModule shaderModule;
VkInstance instance;
VkPhysicalDevice physicalDevice;
VkDevice device;
uint32_t queueFamilyIndex;
VkPipelineCache pipelineCache;
VkQueue queue;
VkCommandPool commandPool;
VkCommandBuffer commandBuffer;
VkFence fence;
VkDescriptorPool descriptorPool;
VkDescriptorSetLayout descriptorSetLayout;
VkDescriptorSet descriptorSet;
VkPipelineLayout pipelineLayout;
VkPipeline pipeline;
VkShaderModule shaderModule;
VkDebugReportCallbackEXT debugReportCallback{};
VkDebugReportCallbackEXT debugReportCallback{};
VkResult createBuffer(VkBufferUsageFlags usageFlags, VkMemoryPropertyFlags memoryPropertyFlags, VkBuffer *buffer, VkDeviceMemory *memory, VkDeviceSize size, void *data = nullptr)
{
// Create the buffer handle
VkBufferCreateInfo bufferCreateInfo = vks::initializers::bufferCreateInfo(usageFlags, size);
bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VK_CHECK_RESULT(vkCreateBuffer(device, &bufferCreateInfo, nullptr, buffer));
VkResult createBuffer(VkBufferUsageFlags usageFlags,
VkMemoryPropertyFlags memoryPropertyFlags,
VkBuffer* buffer,
VkDeviceMemory* memory,
VkDeviceSize size,
void* data = nullptr)
{
// Create the buffer handle
VkBufferCreateInfo bufferCreateInfo =
vks::initializers::bufferCreateInfo(usageFlags, size);
bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VK_CHECK_RESULT(vkCreateBuffer(device, &bufferCreateInfo, nullptr, buffer));
// Create the memory backing up the buffer handle
VkPhysicalDeviceMemoryProperties deviceMemoryProperties;
vkGetPhysicalDeviceMemoryProperties(physicalDevice, &deviceMemoryProperties);
VkMemoryRequirements memReqs;
VkMemoryAllocateInfo memAlloc = vks::initializers::memoryAllocateInfo();
vkGetBufferMemoryRequirements(device, *buffer, &memReqs);
memAlloc.allocationSize = memReqs.size;
// Find a memory type index that fits the properties of the buffer
bool memTypeFound = false;
for (uint32_t i = 0; i < deviceMemoryProperties.memoryTypeCount; i++) {
if ((memReqs.memoryTypeBits & 1) == 1) {
if ((deviceMemoryProperties.memoryTypes[i].propertyFlags & memoryPropertyFlags) == memoryPropertyFlags) {
memAlloc.memoryTypeIndex = i;
memTypeFound = true;
}
}
memReqs.memoryTypeBits >>= 1;
}
assert(memTypeFound);
VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, memory));
// Create the memory backing up the buffer handle
VkPhysicalDeviceMemoryProperties deviceMemoryProperties;
vkGetPhysicalDeviceMemoryProperties(physicalDevice,
&deviceMemoryProperties);
VkMemoryRequirements memReqs;
VkMemoryAllocateInfo memAlloc = vks::initializers::memoryAllocateInfo();
vkGetBufferMemoryRequirements(device, *buffer, &memReqs);
memAlloc.allocationSize = memReqs.size;
// Find a memory type index that fits the properties of the buffer
bool memTypeFound = false;
for (uint32_t i = 0; i < deviceMemoryProperties.memoryTypeCount; i++) {
if ((memReqs.memoryTypeBits & 1) == 1) {
if ((deviceMemoryProperties.memoryTypes[i].propertyFlags &
memoryPropertyFlags) == memoryPropertyFlags) {
memAlloc.memoryTypeIndex = i;
memTypeFound = true;
}
}
memReqs.memoryTypeBits >>= 1;
}
assert(memTypeFound);
VK_CHECK_RESULT(vkAllocateMemory(device, &memAlloc, nullptr, memory));
if (data != nullptr) {
void *mapped;
VK_CHECK_RESULT(vkMapMemory(device, *memory, 0, size, 0, &mapped));
memcpy(mapped, data, size);
vkUnmapMemory(device, *memory);
}
if (data != nullptr) {
void* mapped;
VK_CHECK_RESULT(vkMapMemory(device, *memory, 0, size, 0, &mapped));
memcpy(mapped, data, size);
vkUnmapMemory(device, *memory);
}
VK_CHECK_RESULT(vkBindBufferMemory(device, *buffer, *memory, 0));
VK_CHECK_RESULT(vkBindBufferMemory(device, *buffer, *memory, 0));
return VK_SUCCESS;
}
return VK_SUCCESS;
}
VulkanExample()
{
LOG("Running headless compute example\n");
VulkanExample()
{
LOG("Running headless compute example\n");
VkApplicationInfo appInfo = {};
appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
appInfo.pApplicationName = "Vulkan headless example";
appInfo.pEngineName = "VulkanExample";
appInfo.apiVersion = VK_API_VERSION_1_0;
VkApplicationInfo appInfo = {};
appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
appInfo.pApplicationName = "Vulkan headless example";
appInfo.pEngineName = "VulkanExample";
appInfo.apiVersion = VK_API_VERSION_1_0;
/*
Vulkan instance creation (without surface extensions)
*/
VkInstanceCreateInfo instanceCreateInfo = {};
instanceCreateInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
instanceCreateInfo.pApplicationInfo = &appInfo;
/*
Vulkan instance creation (without surface extensions)
*/
VkInstanceCreateInfo instanceCreateInfo = {};
instanceCreateInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
instanceCreateInfo.pApplicationInfo = &appInfo;
uint32_t layerCount = 0;
const char* validationLayers[] = { "VK_LAYER_LUNARG_standard_validation" };
layerCount = 1;
uint32_t layerCount = 0;
const char* validationLayers[] = { "VK_LAYER_LUNARG_standard_validation" };
layerCount = 1;
#if DEBUG
// Check if layers are available
uint32_t instanceLayerCount;
vkEnumerateInstanceLayerProperties(&instanceLayerCount, nullptr);
std::vector<VkLayerProperties> instanceLayers(instanceLayerCount);
vkEnumerateInstanceLayerProperties(&instanceLayerCount, instanceLayers.data());
// Check if layers are available
uint32_t instanceLayerCount;
vkEnumerateInstanceLayerProperties(&instanceLayerCount, nullptr);
std::vector<VkLayerProperties> instanceLayers(instanceLayerCount);
vkEnumerateInstanceLayerProperties(&instanceLayerCount,
instanceLayers.data());
bool layersAvailable = true;
for (auto layerName : validationLayers) {
bool layerAvailable = false;
for (auto instanceLayer : instanceLayers) {
if (strcmp(instanceLayer.layerName, layerName) == 0) {
layerAvailable = true;
break;
}
}
if (!layerAvailable) {
layersAvailable = false;
break;
}
}
bool layersAvailable = true;
for (auto layerName : validationLayers) {
bool layerAvailable = false;
for (auto instanceLayer : instanceLayers) {
if (strcmp(instanceLayer.layerName, layerName) == 0) {
layerAvailable = true;
break;
}
}
if (!layerAvailable) {
layersAvailable = false;
break;
}
}
if (layersAvailable) {
instanceCreateInfo.ppEnabledLayerNames = validationLayers;
const char *validationExt = VK_EXT_DEBUG_REPORT_EXTENSION_NAME;
instanceCreateInfo.enabledLayerCount = layerCount;
instanceCreateInfo.enabledExtensionCount = 1;
instanceCreateInfo.ppEnabledExtensionNames = &validationExt;
}
if (layersAvailable) {
instanceCreateInfo.ppEnabledLayerNames = validationLayers;
const char* validationExt = VK_EXT_DEBUG_REPORT_EXTENSION_NAME;
instanceCreateInfo.enabledLayerCount = layerCount;
instanceCreateInfo.enabledExtensionCount = 1;
instanceCreateInfo.ppEnabledExtensionNames = &validationExt;
}
#endif
VK_CHECK_RESULT(vkCreateInstance(&instanceCreateInfo, nullptr, &instance));
VK_CHECK_RESULT(vkCreateInstance(&instanceCreateInfo, nullptr, &instance));
#if DEBUG
if (layersAvailable) {
VkDebugReportCallbackCreateInfoEXT debugReportCreateInfo = {};
debugReportCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
debugReportCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
debugReportCreateInfo.pfnCallback = (PFN_vkDebugReportCallbackEXT)debugMessageCallback;
if (layersAvailable) {
VkDebugReportCallbackCreateInfoEXT debugReportCreateInfo = {};
debugReportCreateInfo.sType =
VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
debugReportCreateInfo.flags =
VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
debugReportCreateInfo.pfnCallback =
(PFN_vkDebugReportCallbackEXT)debugMessageCallback;
// We have to explicitly load this function.
PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT = reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(vkGetInstanceProcAddr(instance, "vkCreateDebugReportCallbackEXT"));
assert(vkCreateDebugReportCallbackEXT);
VK_CHECK_RESULT(vkCreateDebugReportCallbackEXT(instance, &debugReportCreateInfo, nullptr, &debugReportCallback));
}
// We have to explicitly load this function.
PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT =
reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(
vkGetInstanceProcAddr(instance, "vkCreateDebugReportCallbackEXT"));
assert(vkCreateDebugReportCallbackEXT);
VK_CHECK_RESULT(vkCreateDebugReportCallbackEXT(
instance, &debugReportCreateInfo, nullptr, &debugReportCallback));
}
#endif
/*
Vulkan device creation
*/
// Physical device (always use first)
uint32_t deviceCount = 0;
VK_CHECK_RESULT(vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr));
std::vector<VkPhysicalDevice> physicalDevices(deviceCount);
VK_CHECK_RESULT(vkEnumeratePhysicalDevices(instance, &deviceCount, physicalDevices.data()));
physicalDevice = physicalDevices[0];
/*
Vulkan device creation
*/
// Physical device (always use first)
uint32_t deviceCount = 0;
VK_CHECK_RESULT(
vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr));
std::vector<VkPhysicalDevice> physicalDevices(deviceCount);
VK_CHECK_RESULT(vkEnumeratePhysicalDevices(
instance, &deviceCount, physicalDevices.data()));
physicalDevice = physicalDevices[0];
VkPhysicalDeviceProperties deviceProperties;
vkGetPhysicalDeviceProperties(physicalDevice, &deviceProperties);
LOG("GPU: %s\n", deviceProperties.deviceName);
VkPhysicalDeviceProperties deviceProperties;
vkGetPhysicalDeviceProperties(physicalDevice, &deviceProperties);
LOG("GPU: %s\n", deviceProperties.deviceName);
// Request a single compute queue
const float defaultQueuePriority(0.0f);
VkDeviceQueueCreateInfo queueCreateInfo = {};
uint32_t queueFamilyCount;
vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &queueFamilyCount, nullptr);
std::vector<VkQueueFamilyProperties> queueFamilyProperties(queueFamilyCount);
vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &queueFamilyCount, queueFamilyProperties.data());
for (uint32_t i = 0; i < static_cast<uint32_t>(queueFamilyProperties.size()); i++) {
if (queueFamilyProperties[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
queueFamilyIndex = i;
queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queueCreateInfo.queueFamilyIndex = i;
queueCreateInfo.queueCount = 1;
queueCreateInfo.pQueuePriorities = &defaultQueuePriority;
break;
}
}
// Create logical device
VkDeviceCreateInfo deviceCreateInfo = {};
deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
deviceCreateInfo.queueCreateInfoCount = 1;
deviceCreateInfo.pQueueCreateInfos = &queueCreateInfo;
VK_CHECK_RESULT(vkCreateDevice(physicalDevice, &deviceCreateInfo, nullptr, &device));
// Request a single compute queue
const float defaultQueuePriority(0.0f);
VkDeviceQueueCreateInfo queueCreateInfo = {};
uint32_t queueFamilyCount;
vkGetPhysicalDeviceQueueFamilyProperties(
physicalDevice, &queueFamilyCount, nullptr);
std::vector<VkQueueFamilyProperties> queueFamilyProperties(
queueFamilyCount);
vkGetPhysicalDeviceQueueFamilyProperties(
physicalDevice, &queueFamilyCount, queueFamilyProperties.data());
for (uint32_t i = 0;
i < static_cast<uint32_t>(queueFamilyProperties.size());
i++) {
if (queueFamilyProperties[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
queueFamilyIndex = i;
queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queueCreateInfo.queueFamilyIndex = i;
queueCreateInfo.queueCount = 1;
queueCreateInfo.pQueuePriorities = &defaultQueuePriority;
break;
}
}
// Create logical device
VkDeviceCreateInfo deviceCreateInfo = {};
deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
deviceCreateInfo.queueCreateInfoCount = 1;
deviceCreateInfo.pQueueCreateInfos = &queueCreateInfo;
VK_CHECK_RESULT(
vkCreateDevice(physicalDevice, &deviceCreateInfo, nullptr, &device));
// Get a compute queue
vkGetDeviceQueue(device, queueFamilyIndex, 0, &queue);
// Get a compute queue
vkGetDeviceQueue(device, queueFamilyIndex, 0, &queue);
// Compute command pool
VkCommandPoolCreateInfo cmdPoolInfo = {};
cmdPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
cmdPoolInfo.queueFamilyIndex = queueFamilyIndex;
cmdPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
VK_CHECK_RESULT(vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &commandPool));
// Compute command pool
VkCommandPoolCreateInfo cmdPoolInfo = {};
cmdPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
cmdPoolInfo.queueFamilyIndex = queueFamilyIndex;
cmdPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
VK_CHECK_RESULT(
vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &commandPool));
/*
Prepare storage buffers
*/
std::vector<uint32_t> computeInput(BUFFER_ELEMENTS);
std::vector<uint32_t> computeOutput(BUFFER_ELEMENTS);
/*
Prepare storage buffers
*/
std::vector<uint32_t> computeInput(BUFFER_ELEMENTS);
std::vector<uint32_t> computeOutput(BUFFER_ELEMENTS);
// Fill input data
uint32_t n = 0;
std::generate(computeInput.begin(), computeInput.end(), [&n] { return n++; });
// Fill input data
uint32_t n = 0;
std::generate(
computeInput.begin(), computeInput.end(), [&n] { return n++; });
const VkDeviceSize bufferSize = BUFFER_ELEMENTS * sizeof(uint32_t);
const VkDeviceSize bufferSize = BUFFER_ELEMENTS * sizeof(uint32_t);
VkBuffer deviceBuffer, hostBuffer;
VkDeviceMemory deviceMemory, hostMemory;
VkBuffer deviceBuffer, hostBuffer;
VkDeviceMemory deviceMemory, hostMemory;
// Copy input data to VRAM using a staging buffer
{
createBuffer(
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
&hostBuffer,
&hostMemory,
bufferSize,
computeInput.data());
// Copy input data to VRAM using a staging buffer
{
createBuffer(VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
&hostBuffer,
&hostMemory,
bufferSize,
computeInput.data());
// Flush writes to host visible buffer
void* mapped;
vkMapMemory(device, hostMemory, 0, VK_WHOLE_SIZE, 0, &mapped);
VkMappedMemoryRange mappedRange = vks::initializers::mappedMemoryRange();
mappedRange.memory = hostMemory;
mappedRange.offset = 0;
mappedRange.size = VK_WHOLE_SIZE;
vkFlushMappedMemoryRanges(device, 1, &mappedRange);
vkUnmapMemory(device, hostMemory);
// Flush writes to host visible buffer
void* mapped;
vkMapMemory(device, hostMemory, 0, VK_WHOLE_SIZE, 0, &mapped);
VkMappedMemoryRange mappedRange = vks::initializers::mappedMemoryRange();
mappedRange.memory = hostMemory;
mappedRange.offset = 0;
mappedRange.size = VK_WHOLE_SIZE;
vkFlushMappedMemoryRanges(device, 1, &mappedRange);
vkUnmapMemory(device, hostMemory);
createBuffer(
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
&deviceBuffer,
&deviceMemory,
bufferSize);
createBuffer(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
&deviceBuffer,
&deviceMemory,
bufferSize);
// Copy to staging buffer
VkCommandBufferAllocateInfo cmdBufAllocateInfo = vks::initializers::commandBufferAllocateInfo(commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, 1);
VkCommandBuffer copyCmd;
VK_CHECK_RESULT(vkAllocateCommandBuffers(device, &cmdBufAllocateInfo, &copyCmd));
VkCommandBufferBeginInfo cmdBufInfo = vks::initializers::commandBufferBeginInfo();
VK_CHECK_RESULT(vkBeginCommandBuffer(copyCmd, &cmdBufInfo));
// Copy to staging buffer
VkCommandBufferAllocateInfo cmdBufAllocateInfo =
vks::initializers::commandBufferAllocateInfo(
commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, 1);
VkCommandBuffer copyCmd;
VK_CHECK_RESULT(
vkAllocateCommandBuffers(device, &cmdBufAllocateInfo, &copyCmd));
VkCommandBufferBeginInfo cmdBufInfo =
vks::initializers::commandBufferBeginInfo();
VK_CHECK_RESULT(vkBeginCommandBuffer(copyCmd, &cmdBufInfo));
VkBufferCopy copyRegion = {};
copyRegion.size = bufferSize;
vkCmdCopyBuffer(copyCmd, hostBuffer, deviceBuffer, 1, &copyRegion);
VK_CHECK_RESULT(vkEndCommandBuffer(copyCmd));
VkBufferCopy copyRegion = {};
copyRegion.size = bufferSize;
vkCmdCopyBuffer(copyCmd, hostBuffer, deviceBuffer, 1, &copyRegion);
VK_CHECK_RESULT(vkEndCommandBuffer(copyCmd));
VkSubmitInfo submitInfo = vks::initializers::submitInfo();
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &copyCmd;
VkFenceCreateInfo fenceInfo = vks::initializers::fenceCreateInfo(VK_FLAGS_NONE);
VkFence fence;
VK_CHECK_RESULT(vkCreateFence(device, &fenceInfo, nullptr, &fence));
VkSubmitInfo submitInfo = vks::initializers::submitInfo();
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &copyCmd;
VkFenceCreateInfo fenceInfo =
vks::initializers::fenceCreateInfo(VK_FLAGS_NONE);
VkFence fence;
VK_CHECK_RESULT(vkCreateFence(device, &fenceInfo, nullptr, &fence));
// Submit to the queue
VK_CHECK_RESULT(vkQueueSubmit(queue, 1, &submitInfo, fence));
VK_CHECK_RESULT(vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX));
// Submit to the queue
VK_CHECK_RESULT(vkQueueSubmit(queue, 1, &submitInfo, fence));
VK_CHECK_RESULT(vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX));
vkDestroyFence(device, fence, nullptr);
vkFreeCommandBuffers(device, commandPool, 1, &copyCmd);
}
vkDestroyFence(device, fence, nullptr);
vkFreeCommandBuffers(device, commandPool, 1, &copyCmd);
}
/*
Prepare compute pipeline
*/
{
std::vector<VkDescriptorPoolSize> poolSizes = {
vks::initializers::descriptorPoolSize(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1),
};
/*
Prepare compute pipeline
*/
{
std::vector<VkDescriptorPoolSize> poolSizes = {
vks::initializers::descriptorPoolSize(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
1),
};
VkDescriptorPoolCreateInfo descriptorPoolInfo =
vks::initializers::descriptorPoolCreateInfo(static_cast<uint32_t>(poolSizes.size()), poolSizes.data(), 1);
VK_CHECK_RESULT(vkCreateDescriptorPool(device, &descriptorPoolInfo, nullptr, &descriptorPool));
VkDescriptorPoolCreateInfo descriptorPoolInfo =
vks::initializers::descriptorPoolCreateInfo(
static_cast<uint32_t>(poolSizes.size()), poolSizes.data(), 1);
VK_CHECK_RESULT(vkCreateDescriptorPool(
device, &descriptorPoolInfo, nullptr, &descriptorPool));
std::vector<VkDescriptorSetLayoutBinding> setLayoutBindings = {
vks::initializers::descriptorSetLayoutBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT, 0),
};
VkDescriptorSetLayoutCreateInfo descriptorLayout =
vks::initializers::descriptorSetLayoutCreateInfo(setLayoutBindings);
VK_CHECK_RESULT(vkCreateDescriptorSetLayout(device, &descriptorLayout, nullptr, &descriptorSetLayout));
std::vector<VkDescriptorSetLayoutBinding> setLayoutBindings = {
vks::initializers::descriptorSetLayoutBinding(
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT, 0),
};
VkDescriptorSetLayoutCreateInfo descriptorLayout =
vks::initializers::descriptorSetLayoutCreateInfo(setLayoutBindings);
VK_CHECK_RESULT(vkCreateDescriptorSetLayout(
device, &descriptorLayout, nullptr, &descriptorSetLayout));
VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
vks::initializers::pipelineLayoutCreateInfo(&descriptorSetLayout, 1);
VK_CHECK_RESULT(vkCreatePipelineLayout(device, &pipelineLayoutCreateInfo, nullptr, &pipelineLayout));
VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
vks::initializers::pipelineLayoutCreateInfo(&descriptorSetLayout, 1);
VK_CHECK_RESULT(vkCreatePipelineLayout(
device, &pipelineLayoutCreateInfo, nullptr, &pipelineLayout));
VkDescriptorSetAllocateInfo allocInfo =
vks::initializers::descriptorSetAllocateInfo(descriptorPool, &descriptorSetLayout, 1);
VK_CHECK_RESULT(vkAllocateDescriptorSets(device, &allocInfo, &descriptorSet));
VkDescriptorSetAllocateInfo allocInfo =
vks::initializers::descriptorSetAllocateInfo(
descriptorPool, &descriptorSetLayout, 1);
VK_CHECK_RESULT(
vkAllocateDescriptorSets(device, &allocInfo, &descriptorSet));
VkDescriptorBufferInfo bufferDescriptor = { deviceBuffer, 0, VK_WHOLE_SIZE };
std::vector<VkWriteDescriptorSet> computeWriteDescriptorSets = {
vks::initializers::writeDescriptorSet(descriptorSet, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 0, &bufferDescriptor),
};
vkUpdateDescriptorSets(device, static_cast<uint32_t>(computeWriteDescriptorSets.size()), computeWriteDescriptorSets.data(), 0, NULL);
VkDescriptorBufferInfo bufferDescriptor = { deviceBuffer,
0,
VK_WHOLE_SIZE };
std::vector<VkWriteDescriptorSet> computeWriteDescriptorSets = {
vks::initializers::writeDescriptorSet(descriptorSet,
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
0,
&bufferDescriptor),
};
vkUpdateDescriptorSets(
device,
static_cast<uint32_t>(computeWriteDescriptorSets.size()),
computeWriteDescriptorSets.data(),
0,
NULL);
VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {};
pipelineCacheCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
VK_CHECK_RESULT(vkCreatePipelineCache(device, &pipelineCacheCreateInfo, nullptr, &pipelineCache));
VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {};
pipelineCacheCreateInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
VK_CHECK_RESULT(vkCreatePipelineCache(
device, &pipelineCacheCreateInfo, nullptr, &pipelineCache));
// Create pipeline
VkComputePipelineCreateInfo computePipelineCreateInfo = vks::initializers::computePipelineCreateInfo(pipelineLayout, 0);
// Create pipeline
VkComputePipelineCreateInfo computePipelineCreateInfo =
vks::initializers::computePipelineCreateInfo(pipelineLayout, 0);
// Pass SSBO size via specialization constant
struct SpecializationData {
uint32_t BUFFER_ELEMENT_COUNT = BUFFER_ELEMENTS;
} specializationData;
VkSpecializationMapEntry specializationMapEntry = vks::initializers::specializationMapEntry(0, 0, sizeof(uint32_t));
VkSpecializationInfo specializationInfo = vks::initializers::specializationInfo(1, &specializationMapEntry, sizeof(SpecializationData), &specializationData);
// Pass SSBO size via specialization constant
struct SpecializationData
{
uint32_t BUFFER_ELEMENT_COUNT = BUFFER_ELEMENTS;
} specializationData;
VkSpecializationMapEntry specializationMapEntry =
vks::initializers::specializationMapEntry(0, 0, sizeof(uint32_t));
VkSpecializationInfo specializationInfo =
vks::initializers::specializationInfo(1,
&specializationMapEntry,
sizeof(SpecializationData),
&specializationData);
// TODO: There is no command line arguments parsing (nor Android settings) for this
// example, so we have no way of picking between GLSL or HLSL shaders.
// Hard-code to glsl for now.
const std::string shadersPath = getAssetPath() + "shaders/glsl/";
std::cout << "Shader path: " << shadersPath << std::endl;
// TODO: There is no command line arguments parsing (nor Android settings)
// for this example, so we have no way of picking between GLSL or HLSL
// shaders. Hard-code to glsl for now.
const std::string shadersPath = getAssetPath() + "shaders/glsl/";
std::cout << "Shader path: " << shadersPath << std::endl;
VkPipelineShaderStageCreateInfo shaderStage = {};
shaderStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
VkPipelineShaderStageCreateInfo shaderStage = {};
shaderStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
shaderStage.module = vks::tools::loadShader((shadersPath + "computeheadless.comp.spv").c_str(), device);
shaderStage.module = vks::tools::loadShader(
(shadersPath + "computeheadless.comp.spv").c_str(), device);
shaderStage.pName = "main";
shaderStage.pSpecializationInfo = &specializationInfo;
shaderModule = shaderStage.module;
shaderStage.pName = "main";
shaderStage.pSpecializationInfo = &specializationInfo;
shaderModule = shaderStage.module;
assert(shaderStage.module != VK_NULL_HANDLE);
computePipelineCreateInfo.stage = shaderStage;
VK_CHECK_RESULT(vkCreateComputePipelines(device, pipelineCache, 1, &computePipelineCreateInfo, nullptr, &pipeline));
assert(shaderStage.module != VK_NULL_HANDLE);
computePipelineCreateInfo.stage = shaderStage;
VK_CHECK_RESULT(vkCreateComputePipelines(device,
pipelineCache,
1,
&computePipelineCreateInfo,
nullptr,
&pipeline));
// Create a command buffer for compute operations
VkCommandBufferAllocateInfo cmdBufAllocateInfo =
vks::initializers::commandBufferAllocateInfo(commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, 1);
VK_CHECK_RESULT(vkAllocateCommandBuffers(device, &cmdBufAllocateInfo, &commandBuffer));
// Create a command buffer for compute operations
VkCommandBufferAllocateInfo cmdBufAllocateInfo =
vks::initializers::commandBufferAllocateInfo(
commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, 1);
VK_CHECK_RESULT(
vkAllocateCommandBuffers(device, &cmdBufAllocateInfo, &commandBuffer));
// Fence for compute CB sync
VkFenceCreateInfo fenceCreateInfo = vks::initializers::fenceCreateInfo(VK_FENCE_CREATE_SIGNALED_BIT);
VK_CHECK_RESULT(vkCreateFence(device, &fenceCreateInfo, nullptr, &fence));
}
// Fence for compute CB sync
VkFenceCreateInfo fenceCreateInfo =
vks::initializers::fenceCreateInfo(VK_FENCE_CREATE_SIGNALED_BIT);
VK_CHECK_RESULT(vkCreateFence(device, &fenceCreateInfo, nullptr, &fence));
}
/*
Command buffer creation (for compute work submission)
*/
{
VkCommandBufferBeginInfo cmdBufInfo = vks::initializers::commandBufferBeginInfo();
/*
Command buffer creation (for compute work submission)
*/
{
VkCommandBufferBeginInfo cmdBufInfo =
vks::initializers::commandBufferBeginInfo();
VK_CHECK_RESULT(vkBeginCommandBuffer(commandBuffer, &cmdBufInfo));
VK_CHECK_RESULT(vkBeginCommandBuffer(commandBuffer, &cmdBufInfo));
// Barrier to ensure that input buffer transfer is finished before compute shader reads from it
VkBufferMemoryBarrier bufferBarrier = vks::initializers::bufferMemoryBarrier();
bufferBarrier.buffer = deviceBuffer;
bufferBarrier.size = VK_WHOLE_SIZE;
bufferBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
bufferBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
bufferBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
bufferBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
// Barrier to ensure that input buffer transfer is finished before compute
// shader reads from it
VkBufferMemoryBarrier bufferBarrier =
vks::initializers::bufferMemoryBarrier();
bufferBarrier.buffer = deviceBuffer;
bufferBarrier.size = VK_WHOLE_SIZE;
bufferBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
bufferBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
bufferBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
bufferBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
vkCmdPipelineBarrier(
commandBuffer,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_FLAGS_NONE,
0, nullptr,
1, &bufferBarrier,
0, nullptr);
vkCmdPipelineBarrier(commandBuffer,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_FLAGS_NONE,
0,
nullptr,
1,
&bufferBarrier,
0,
nullptr);
vkCmdBindPipeline(commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0, 1, &descriptorSet, 0, 0);
vkCmdBindPipeline(
commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
vkCmdBindDescriptorSets(commandBuffer,
VK_PIPELINE_BIND_POINT_COMPUTE,
pipelineLayout,
0,
1,
&descriptorSet,
0,
0);
vkCmdDispatch(commandBuffer, BUFFER_ELEMENTS, 1, 1);
vkCmdDispatch(commandBuffer, BUFFER_ELEMENTS, 1, 1);
// Barrier to ensure that shader writes are finished before buffer is read back from GPU
bufferBarrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
bufferBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
bufferBarrier.buffer = deviceBuffer;
bufferBarrier.size = VK_WHOLE_SIZE;
bufferBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
bufferBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
// Barrier to ensure that shader writes are finished before buffer is read
// back from GPU
bufferBarrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
bufferBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
bufferBarrier.buffer = deviceBuffer;
bufferBarrier.size = VK_WHOLE_SIZE;
bufferBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
bufferBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
vkCmdPipelineBarrier(
commandBuffer,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_FLAGS_NONE,
0, nullptr,
1, &bufferBarrier,
0, nullptr);
vkCmdPipelineBarrier(commandBuffer,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_FLAGS_NONE,
0,
nullptr,
1,
&bufferBarrier,
0,
nullptr);
// Read back to host visible buffer
VkBufferCopy copyRegion = {};
copyRegion.size = bufferSize;
vkCmdCopyBuffer(commandBuffer, deviceBuffer, hostBuffer, 1, &copyRegion);
// Read back to host visible buffer
VkBufferCopy copyRegion = {};
copyRegion.size = bufferSize;
vkCmdCopyBuffer(commandBuffer, deviceBuffer, hostBuffer, 1, &copyRegion);
// Barrier to ensure that buffer copy is finished before host reading from it
bufferBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
bufferBarrier.dstAccessMask = VK_ACCESS_HOST_READ_BIT;
bufferBarrier.buffer = hostBuffer;
bufferBarrier.size = VK_WHOLE_SIZE;
bufferBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
bufferBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
// Barrier to ensure that buffer copy is finished before host reading from
// it
bufferBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
bufferBarrier.dstAccessMask = VK_ACCESS_HOST_READ_BIT;
bufferBarrier.buffer = hostBuffer;
bufferBarrier.size = VK_WHOLE_SIZE;
bufferBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
bufferBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
vkCmdPipelineBarrier(
commandBuffer,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_HOST_BIT,
VK_FLAGS_NONE,
0, nullptr,
1, &bufferBarrier,
0, nullptr);
vkCmdPipelineBarrier(commandBuffer,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_HOST_BIT,
VK_FLAGS_NONE,
0,
nullptr,
1,
&bufferBarrier,
0,
nullptr);
VK_CHECK_RESULT(vkEndCommandBuffer(commandBuffer));
VK_CHECK_RESULT(vkEndCommandBuffer(commandBuffer));
// Submit compute work
vkResetFences(device, 1, &fence);
const VkPipelineStageFlags waitStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
VkSubmitInfo computeSubmitInfo = vks::initializers::submitInfo();
computeSubmitInfo.pWaitDstStageMask = &waitStageMask;
computeSubmitInfo.commandBufferCount = 1;
computeSubmitInfo.pCommandBuffers = &commandBuffer;
VK_CHECK_RESULT(vkQueueSubmit(queue, 1, &computeSubmitInfo, fence));
VK_CHECK_RESULT(vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX));
// Submit compute work
vkResetFences(device, 1, &fence);
const VkPipelineStageFlags waitStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
VkSubmitInfo computeSubmitInfo = vks::initializers::submitInfo();
computeSubmitInfo.pWaitDstStageMask = &waitStageMask;
computeSubmitInfo.commandBufferCount = 1;
computeSubmitInfo.pCommandBuffers = &commandBuffer;
VK_CHECK_RESULT(vkQueueSubmit(queue, 1, &computeSubmitInfo, fence));
VK_CHECK_RESULT(vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX));
// Make device writes visible to the host
void *mapped;
vkMapMemory(device, hostMemory, 0, VK_WHOLE_SIZE, 0, &mapped);
VkMappedMemoryRange mappedRange = vks::initializers::mappedMemoryRange();
mappedRange.memory = hostMemory;
mappedRange.offset = 0;
mappedRange.size = VK_WHOLE_SIZE;
vkInvalidateMappedMemoryRanges(device, 1, &mappedRange);
// Make device writes visible to the host
void* mapped;
vkMapMemory(device, hostMemory, 0, VK_WHOLE_SIZE, 0, &mapped);
VkMappedMemoryRange mappedRange = vks::initializers::mappedMemoryRange();
mappedRange.memory = hostMemory;
mappedRange.offset = 0;
mappedRange.size = VK_WHOLE_SIZE;
vkInvalidateMappedMemoryRanges(device, 1, &mappedRange);
// Copy to output
memcpy(computeOutput.data(), mapped, bufferSize);
vkUnmapMemory(device, hostMemory);
}
// Copy to output
memcpy(computeOutput.data(), mapped, bufferSize);
vkUnmapMemory(device, hostMemory);
}
vkQueueWaitIdle(queue);
vkQueueWaitIdle(queue);
// Output buffer contents
LOG("Compute input:\n");
for (auto v : computeInput) {
LOG("%d ", v);
}
std::cout << std::endl;
// Output buffer contents
LOG("Compute input:\n");
for (auto v : computeInput) {
LOG("%d ", v);
}
std::cout << std::endl;
LOG("Compute output:\n");
for (auto v : computeOutput) {
LOG("%d ", v);
}
std::cout << std::endl;
LOG("Compute output:\n");
for (auto v : computeOutput) {
LOG("%d ", v);
}
std::cout << std::endl;
// Clean up
vkDestroyBuffer(device, deviceBuffer, nullptr);
vkFreeMemory(device, deviceMemory, nullptr);
vkDestroyBuffer(device, hostBuffer, nullptr);
vkFreeMemory(device, hostMemory, nullptr);
}
// Clean up
vkDestroyBuffer(device, deviceBuffer, nullptr);
vkFreeMemory(device, deviceMemory, nullptr);
vkDestroyBuffer(device, hostBuffer, nullptr);
vkFreeMemory(device, hostMemory, nullptr);
}
/*
 * Destructor: releases every Vulkan handle created by the constructor, in
 * reverse dependency order (device-level objects first, then the device,
 * then the optional debug callback, then the instance).
 *
 * NOTE(review): deduplicated from diff residue — the body appeared twice
 * (pre- and post-clang-format copies); one clean copy is kept.
 */
~VulkanExample()
{
  vkDestroyPipelineLayout(device, pipelineLayout, nullptr);
  vkDestroyDescriptorSetLayout(device, descriptorSetLayout, nullptr);
  vkDestroyDescriptorPool(device, descriptorPool, nullptr);
  vkDestroyPipeline(device, pipeline, nullptr);
  vkDestroyPipelineCache(device, pipelineCache, nullptr);
  vkDestroyFence(device, fence, nullptr);
  vkDestroyCommandPool(device, commandPool, nullptr);
  vkDestroyShaderModule(device, shaderModule, nullptr);
  vkDestroyDevice(device, nullptr);
#if DEBUG
  if (debugReportCallback) {
    // Extension entry point must be loaded explicitly, like at creation time
    PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallback =
      reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(
        vkGetInstanceProcAddr(instance, "vkDestroyDebugReportCallbackEXT"));
    assert(vkDestroyDebugReportCallback);
    vkDestroyDebugReportCallback(instance, debugReportCallback, nullptr);
  }
#endif
  vkDestroyInstance(instance, nullptr);
}
};
/*
 * Entry point: runs the whole example inside the VulkanExample constructor,
 * then waits for a key press so console output stays visible.
 *
 * NOTE(review): deduplicated from diff residue — two copies of main() were
 * interleaved; the clang-formatted copy is kept, and the redundant
 * parentheses around the delete operand are dropped.
 */
int
main()
{
  VulkanExample* vulkanExample = new VulkanExample();
  std::cout << "Finished. Press enter to terminate...";
  getchar();
  delete vulkanExample;
  return 0;
}