engine/src/gfx_device_vulkan.cpp

1615 lines
61 KiB
C++
Raw Normal View History

// The implementation of the graphics layer using Vulkan 1.3.
2022-10-01 14:44:12 +00:00
2023-03-15 23:11:24 +00:00
/* IMPORTANT INFORMATION
*
* When allocating memory with VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, always set a memory priority.
* This feature uses the device extension VK_EXT_memory_priority. Depth buffers have a priority of 1.0f.
2023-03-15 23:11:24 +00:00
* Other, non-essential allocations will have a priority of 0.5f.
*
* Call vkResetCommandPool before reusing it in another frame.
* Otherwise, the pool will keep on growing until you run out of memory.
* - NVIDIA Vulkan Dos and Don'ts
*
2023-03-15 23:11:24 +00:00
*/
2023-03-21 15:21:24 +00:00
/* TODO
*
* - Support index buffers of both UINT16 and UINT32 types
* - Allow descriptor sets and layouts to also be combined texture samplers (maybe support other descriptor types?)
* - Use pipeline cache
*
*/
2022-09-13 18:25:18 +00:00
#include <assert.h>
#include <unordered_set>
#include <array>
#include <fstream>
#include <filesystem>
2022-10-21 11:31:46 +00:00
#include <optional>
2023-03-13 17:10:46 +00:00
#include <deque>
2022-10-23 23:19:07 +00:00
#include <map>
2022-11-08 15:34:59 +00:00
#include <iostream>
2022-09-13 21:43:24 +00:00
#include <SDL_vulkan.h>
#include <shaderc/shaderc.hpp>
#include <volk.h>
#include "gfx_device.hpp"
#include "vulkan/instance.h"
#include "vulkan/device.h"
#include "vulkan/gpu_allocator.h"
#include "vulkan/swapchain.h"
#include "util.hpp"
#include "config.h"
#include "log.hpp"
#include "util/files.hpp"
2023-03-12 17:11:13 +00:00
// Throws std::runtime_error when a Vulkan call fails, naming the source line
// and the VkResult value so the failure can be diagnosed from the message.
// Used via the VKCHECK macro below, which supplies __LINE__ automatically.
inline static void checkVulkanError(VkResult errorCode, int lineNo)
{
    if (errorCode != VK_SUCCESS) {
        // FIX: include the VkResult value in the message; previously only the
        // line number was reported, which made failures hard to diagnose.
        const std::string message("VULKAN ERROR ON LINE " + std::to_string(lineNo) + " (VkResult: " + std::to_string(errorCode) + ")");
        throw std::runtime_error(message);
    }
}
#undef VKCHECK
#define VKCHECK(ErrCode) \
    checkVulkanError(ErrCode, __LINE__)
2022-10-02 15:34:51 +00:00
namespace engine {
2022-09-13 18:25:18 +00:00
2023-03-21 11:03:20 +00:00
// Number of frames the CPU may record while the GPU works on earlier ones.
static constexpr uint32_t FRAMES_IN_FLIGHT = 2; // This improved FPS by 5x! (on Intel IGPU)

// Upper bound on push constant range size used by pipelines in this file.
static constexpr size_t PUSH_CONSTANT_MAX_SIZE = 128; // bytes

// All index buffers use 32-bit indices (UINT16 support is a TODO, see top of file).
static constexpr VkIndexType INDEX_TYPE = VK_INDEX_TYPE_UINT32;
// structures and enums
2022-10-14 12:56:28 +00:00
2023-03-12 20:39:11 +00:00
// Synchronisation primitives and command buffers owned by one frame-in-flight.
// One instance per FRAMES_IN_FLIGHT lives in GFXDevice::Impl.
struct FrameData {
    VkFence renderFence = VK_NULL_HANDLE;           // signalled by the draw submit; beginRender() waits on it

    VkSemaphore transferSemaphore = VK_NULL_HANDLE; // signalled by the per-frame transfer submit, waited on by the draw submit
    VkSemaphore renderSemaphore = VK_NULL_HANDLE;   // signalled by the draw submit, waited on by present
    VkSemaphore presentSemaphore = VK_NULL_HANDLE;  // signalled when the swapchain image is acquired, waited on by the draw submit

    VkCommandPool graphicsPool = VK_NULL_HANDLE;    // pool for drawBuf; reset each frame (see file-top note)
    VkCommandBuffer drawBuf = VK_NULL_HANDLE;       // records the render pass for this frame

    VkCommandPool transferPool = VK_NULL_HANDLE;    // pool for transferBuf; reset each frame
    VkCommandBuffer transferBuf = VK_NULL_HANDLE;   // records pending uniform buffer uploads
};
2022-10-21 16:03:36 +00:00
// handles
2023-03-13 01:19:32 +00:00
2022-10-24 00:10:48 +00:00
// A VMA-managed GPU buffer (vertex, index, or uniform).
struct gfx::Buffer {
    gfx::BufferType type;

    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = nullptr; // VMA allocation backing `buffer`
    VkDeviceSize size = 0;              // buffer size in bytes
};
2022-10-23 23:19:07 +00:00
// A graphics pipeline together with its layout (needed for push constants
// and descriptor set binding).
struct gfx::Pipeline {
    VkPipelineLayout layout = VK_NULL_HANDLE;
    VkPipeline handle = VK_NULL_HANDLE;
};
2023-03-21 23:52:52 +00:00
// A VMA-managed image plus the view used to sample it.
struct gfx::Image {
    VkImage image = VK_NULL_HANDLE;
    VkImageView view = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE; // VMA allocation backing `image`
};
// Thin wrapper over a VkSampler handle.
struct gfx::Sampler {
    VkSampler sampler = VK_NULL_HANDLE;
};
2023-03-12 20:39:11 +00:00
// Handed to the caller by beginRender() and consumed by finishRender();
// identifies the in-flight frame being recorded.
struct gfx::DrawBuffer {
    FrameData frameData{};
    uint32_t currentFrameIndex = 0; // corresponds to the frameData
    uint32_t imageIndex = 0; // for swapchain present
};
2023-03-13 01:19:32 +00:00
// Thin wrapper over a VkDescriptorSetLayout handle.
struct gfx::DescriptorSetLayout {
    // FIX: default to VK_NULL_HANDLE for consistency with the other handle
    // wrappers in this file (previously left uninitialised).
    VkDescriptorSetLayout layout = VK_NULL_HANDLE;
};
// One descriptor set per frame-in-flight.
struct gfx::DescriptorSet {
    std::array<VkDescriptorSet, FRAMES_IN_FLIGHT> sets; // frames in flight cannot use the same descriptor set in case the buffer needs updating
};
// A uniform buffer with one CPU-side staging buffer and one GPU buffer per
// frame in flight. Pending staging->GPU copies are queued in
// Impl::uniformBufferWriteQueues and recorded by beginRender().
struct gfx::UniformBuffer {
    gfx::Buffer stagingBuffer{};
    std::array<gfx::Buffer, FRAMES_IN_FLIGHT> gpuBuffers;
};
2022-10-22 12:15:25 +00:00
// enum converters
2023-03-21 15:21:24 +00:00
namespace converters {
2022-10-22 12:15:25 +00:00
static VkFormat getVertexAttribFormat(gfx::VertexAttribFormat fmt)
{
switch (fmt) {
2023-01-20 16:30:35 +00:00
case gfx::VertexAttribFormat::FLOAT2:
2022-10-22 12:15:25 +00:00
return VK_FORMAT_R32G32_SFLOAT;
2023-01-20 16:30:35 +00:00
case gfx::VertexAttribFormat::FLOAT3:
2022-10-22 12:15:25 +00:00
return VK_FORMAT_R32G32B32_SFLOAT;
2023-01-20 16:30:35 +00:00
case gfx::VertexAttribFormat::FLOAT4:
return VK_FORMAT_R32G32B32A32_SFLOAT;
2022-10-22 12:15:25 +00:00
}
2022-10-22 12:19:47 +00:00
throw std::runtime_error("Unknown vertex attribute format");
2022-10-22 12:15:25 +00:00
}
2022-10-24 00:10:48 +00:00
static VkBufferUsageFlagBits getBufferUsageFlag(gfx::BufferType type)
{
switch (type) {
case gfx::BufferType::VERTEX:
return VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
case gfx::BufferType::INDEX:
return VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
2023-03-13 01:19:32 +00:00
case gfx::BufferType::UNIFORM:
return VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
2023-01-05 13:21:33 +00:00
default:
throw std::runtime_error("This buffer type does not have usage bits");
2022-10-24 00:10:48 +00:00
}
}
[[maybe_unused]] static VkFilter getFilter(gfx::Filter filter)
2022-11-15 13:59:43 +00:00
{
2023-02-19 13:55:08 +00:00
switch (filter) {
case gfx::Filter::LINEAR:
2022-11-15 13:59:43 +00:00
return VK_FILTER_LINEAR;
case gfx::Filter::NEAREST:
2022-11-15 13:59:43 +00:00
return VK_FILTER_NEAREST;
}
throw std::runtime_error("Unknown filter");
}
[[maybe_unused]] static VkSamplerMipmapMode getSamplerMipmapMode(gfx::Filter filter)
{
switch (filter) {
case gfx::Filter::LINEAR:
return VK_SAMPLER_MIPMAP_MODE_LINEAR;
case gfx::Filter::NEAREST:
return VK_SAMPLER_MIPMAP_MODE_NEAREST;
}
throw std::runtime_error("Unknown filter");
2022-11-15 13:59:43 +00:00
}
2023-03-13 17:35:22 +00:00
[[maybe_unused]] static VkSampleCountFlags getSampleCountFlags(gfx::MSAALevel level)
2023-02-19 13:55:08 +00:00
{
switch (level) {
case gfx::MSAALevel::MSAA_OFF:
return VK_SAMPLE_COUNT_1_BIT;
case gfx::MSAALevel::MSAA_2X:
return VK_SAMPLE_COUNT_2_BIT;
case gfx::MSAALevel::MSAA_4X:
return VK_SAMPLE_COUNT_4_BIT;
case gfx::MSAALevel::MSAA_8X:
return VK_SAMPLE_COUNT_8_BIT;
case gfx::MSAALevel::MSAA_16X:
return VK_SAMPLE_COUNT_16_BIT;
default:
throw std::runtime_error("Unknown MSAA level");
}
}
static VkDescriptorType getDescriptorType(gfx::DescriptorType type)
{
switch (type) {
case gfx::DescriptorType::UNIFORM_BUFFER:
return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
case gfx::DescriptorType::COMBINED_IMAGE_SAMPLER:
return VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
default:
throw std::runtime_error("Unknown descriptor type");
}
}
static VkShaderStageFlags getShaderStageFlags(gfx::ShaderStageFlags::Flags flags)
{
VkShaderStageFlags out = 0;
if (flags & gfx::ShaderStageFlags::VERTEX) out |= VK_SHADER_STAGE_VERTEX_BIT;
if (flags & gfx::ShaderStageFlags::FRAGMENT) out |= VK_SHADER_STAGE_FRAGMENT_BIT;
return out;
}
2022-10-22 12:15:25 +00:00
}
2022-10-14 12:56:28 +00:00
// functions
2022-11-08 13:42:07 +00:00
// Compiles GLSL source to SPIR-V with shaderc and wraps the result in a new
// VkShaderModule.
// - device:   device used to create the module
// - kind:     shaderc stage (e.g. shaderc_vertex_shader)
// - source:   GLSL source text
// - filename: used by shaderc for diagnostics
// Throws std::runtime_error if preprocessing, compilation, or module creation
// fails. The caller owns the returned module (vkDestroyShaderModule).
static VkShaderModule compileShader(VkDevice device, shaderc_shader_kind kind, const std::string& source, const char* filename)
{
    shaderc::Compiler compiler;
    shaderc::CompileOptions options;

    options.SetSourceLanguage(shaderc_source_language_glsl);
    options.SetTargetEnvironment(shaderc_target_env_vulkan, shaderc_env_version_vulkan_1_3);
    options.SetOptimizationLevel(shaderc_optimization_level_performance);
    options.SetTargetSpirv(shaderc_spirv_version_1_6);
    options.SetAutoBindUniforms(false);

    // preprocess
    shaderc::PreprocessedSourceCompilationResult preprocessed = compiler.PreprocessGlsl(source, kind, filename, options);
    if (preprocessed.GetCompilationStatus() != shaderc_compilation_status_success)
    {
        throw std::runtime_error("PREPROCESS ERR " + preprocessed.GetErrorMessage());
    }
    std::string shaderStr{ preprocessed.cbegin(), preprocessed.cend() };

    // compile
    shaderc::SpvCompilationResult compiledShader = compiler.CompileGlslToSpv(shaderStr.c_str(), kind, filename, options);
    if (compiledShader.GetCompilationStatus() != shaderc_compilation_status_success)
    {
        throw std::runtime_error("COMPILE ERR " + compiledShader.GetErrorMessage());
    }

    // Copy the SPIR-V words into a vector so that codeSize and pCode are
    // derived from the same container.
    std::vector<uint32_t> shaderBytecode = { compiledShader.cbegin(), compiledShader.cend() };

    VkShaderModuleCreateInfo createInfo{};
    createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    createInfo.codeSize = shaderBytecode.size() * sizeof(uint32_t);
    // FIX: previously pCode pointed at compiledShader's storage while codeSize
    // was computed from the copied vector; take both from the vector.
    createInfo.pCode = shaderBytecode.data();

    VkShaderModule shaderModule;
    if (vkCreateShaderModule(device, &createInfo, nullptr, &shaderModule) != VK_SUCCESS) {
        throw std::runtime_error("Failed to create shader module!");
    }
    return shaderModule;
}
2022-10-23 11:05:09 +00:00
// Records, submits, and synchronously completes a one-off buffer-to-buffer
// copy of `size` bytes on `queue`, using a transient command buffer allocated
// from `commandPool`. Blocks until the copy has finished (vkQueueWaitIdle).
static void copyBuffer(VkDevice device, VkCommandPool commandPool, VkQueue queue, VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size)
{
    [[maybe_unused]] VkResult res;

    // allocate a temporary primary command buffer from the supplied pool
    const VkCommandBufferAllocateInfo cmdAllocInfo{
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
        .pNext = nullptr,
        .commandPool = commandPool,
        .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
        .commandBufferCount = 1,
    };
    VkCommandBuffer cmdBuf;
    res = vkAllocateCommandBuffers(device, &cmdAllocInfo, &cmdBuf);
    assert(res == VK_SUCCESS);

    // record the single copy command
    const VkCommandBufferBeginInfo beginInfo{
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        .pNext = nullptr,
        .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
        .pInheritanceInfo = nullptr,
    };
    res = vkBeginCommandBuffer(cmdBuf, &beginInfo);
    assert(res == VK_SUCCESS);

    const VkBufferCopy copyRegion{
        .srcOffset = 0,
        .dstOffset = 0,
        .size = size,
    };
    vkCmdCopyBuffer(cmdBuf, srcBuffer, dstBuffer, 1, &copyRegion);

    res = vkEndCommandBuffer(cmdBuf);
    assert(res == VK_SUCCESS);

    // submit and wait for completion before freeing the command buffer
    const VkSubmitInfo submitInfo{
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .pNext = nullptr,
        .waitSemaphoreCount = 0,
        .pWaitSemaphores = nullptr,
        .pWaitDstStageMask = nullptr,
        .commandBufferCount = 1,
        .pCommandBuffers = &cmdBuf,
        .signalSemaphoreCount = 0,
        .pSignalSemaphores = nullptr,
    };
    res = vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE);
    assert(res == VK_SUCCESS);
    res = vkQueueWaitIdle(queue);
    assert(res == VK_SUCCESS);

    vkFreeCommandBuffers(device, commandPool, 1, &cmdBuf);
}
// class definitions
2022-10-14 12:56:28 +00:00
// Private state of GFXDevice (pimpl).
struct GFXDevice::Impl {

    // device settings
    gfx::GraphicsSettings graphicsSettings;

    SDL_Window* window = nullptr;           // non-owning; supplied by the application
    Instance instance{};
    VkSurfaceKHR surface = VK_NULL_HANDLE;
    Device device{};
    VmaAllocator allocator{};
    SwapchainInfo swapchainInfo{};
    Swapchain swapchain{};

    // FIX: default to VK_NULL_HANDLE for consistency with the other handle
    // members (previously left uninitialised).
    VkDescriptorPool descriptorPool = VK_NULL_HANDLE;

    // Uniform buffers with pending staging->GPU writes, one queue per
    // frame-in-flight; drained by beginRender().
    std::array<std::unordered_set<gfx::UniformBuffer*>, FRAMES_IN_FLIGHT> uniformBufferWriteQueues{};

    // For one-off transfer operations not bound to a specific frame-in-flight
    VkCommandPool transferCommandPool = VK_NULL_HANDLE;
    // For one-off operation on the draw queue family not bound to a specific frame-in-flight
    VkCommandPool graphicsCommandPool = VK_NULL_HANDLE;

    // Total frames begun since device creation; selects the frame-in-flight.
    uint64_t FRAMECOUNT = 0;

    FrameData frameData[FRAMES_IN_FLIGHT] = {};
    bool swapchainIsOutOfDate = false; // set when present reports suboptimal/out-of-date
};
2022-09-17 00:22:35 +00:00
2023-02-19 13:55:08 +00:00
// Initialises the Vulkan 1.3 graphics device: loads the loader via volk,
// creates the instance and window surface, selects/creates the logical
// device, sets up the VMA allocator, swapchain, per-frame synchronisation
// objects and command pools, and a global descriptor pool.
// Throws std::runtime_error if Vulkan cannot be loaded, the loader is older
// than 1.3, or any creation step fails.
GFXDevice::GFXDevice(const char* appName, const char* appVersion, SDL_Window* window, gfx::GraphicsSettings settings)
{
    pimpl = std::make_unique<Impl>();

    VkResult res;

    pimpl->window = window;
    pimpl->graphicsSettings = settings;

    // initialise vulkan

    res = volkInitialize();
    if (res != VK_SUCCESS) {
        throw std::runtime_error("Unable to load vulkan, is it installed?");
    }

    uint32_t vulkanVersion = volkGetInstanceVersion();
    assert(vulkanVersion != 0);
    if (vulkanVersion < VK_API_VERSION_1_3) {
        throw std::runtime_error("The loaded Vulkan version must be at least 1.3");
    }

    pimpl->instance = createVulkanInstance(pimpl->window, appName, appVersion, pimpl->graphicsSettings.enableValidation, MessageSeverity::SEV_WARNING);

    if (SDL_Vulkan_CreateSurface(pimpl->window, pimpl->instance.instance, &pimpl->surface) == false) {
        throw std::runtime_error("Unable to create window surface");
    };

    // Declare required/optional extensions, features, and format support
    // before choosing a physical device.
    DeviceRequirements deviceRequirements{};
    deviceRequirements.requiredExtensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
    // memory priority / budget are optional; see the note at the top of the file
    deviceRequirements.optionalExtensions.push_back(VK_EXT_MEMORY_PRIORITY_EXTENSION_NAME);
    deviceRequirements.optionalExtensions.push_back(VK_EXT_MEMORY_BUDGET_EXTENSION_NAME);
    deviceRequirements.requiredFeatures.samplerAnisotropy = VK_TRUE;
    //deviceRequirements.requiredFeatures.fillModeNonSolid = VK_TRUE;
    // Texture format: must be sampleable with linear filtering and usable as
    // both transfer source and destination.
    deviceRequirements.formats.push_back(
        FormatRequirements{
            .format = VK_FORMAT_R8G8B8A8_SRGB,
            .properties = VkFormatProperties{
                .linearTilingFeatures = {},
                .optimalTilingFeatures =
                    VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT |
                    VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT,
                .bufferFeatures = {},
            }
        }
    );
    // Vertex attribute formats (see converters::getVertexAttribFormat)
    deviceRequirements.formats.push_back(
        FormatRequirements{
            .format = VK_FORMAT_R32G32_SFLOAT,
            .properties = VkFormatProperties{
                .linearTilingFeatures = {},
                .optimalTilingFeatures = {},
                .bufferFeatures = VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT,
            }
        }
    );
    deviceRequirements.formats.push_back(
        FormatRequirements{
            .format = VK_FORMAT_R32G32B32_SFLOAT,
            .properties = VkFormatProperties{
                .linearTilingFeatures = {},
                .optimalTilingFeatures = {},
                .bufferFeatures = VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT,
            }
        }
    );
    deviceRequirements.formats.push_back(
        FormatRequirements{
            .format = VK_FORMAT_R32G32B32A32_SFLOAT,
            .properties = VkFormatProperties{
                .linearTilingFeatures = {},
                .optimalTilingFeatures = {},
                .bufferFeatures = VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT,
            }
        }
    );
    deviceRequirements.formats.push_back( // Depth buffer format
        FormatRequirements{
            .format = VK_FORMAT_D16_UNORM,
            .properties = VkFormatProperties{
                .linearTilingFeatures = {},
                .optimalTilingFeatures = VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT,
                .bufferFeatures = {},
            }
        }
    );

    pimpl->device = createDevice(pimpl->instance.instance, deviceRequirements, pimpl->surface);

    pimpl->allocator = createAllocator(pimpl->instance.instance, pimpl->device);

    pimpl->swapchainInfo.device = pimpl->device.device;
    pimpl->swapchainInfo.allocator = pimpl->allocator;
    pimpl->swapchainInfo.physicalDevice = pimpl->device.physicalDevice;
    pimpl->swapchainInfo.surface = pimpl->surface;
    pimpl->swapchainInfo.window = pimpl->window;
    pimpl->swapchainInfo.vsync = pimpl->graphicsSettings.vsync;
    pimpl->swapchainInfo.waitForPresent = pimpl->graphicsSettings.waitForPresent;
    createSwapchain(&pimpl->swapchain, pimpl->swapchainInfo);

    /* make synchronisation primitives for rendering and allocate command buffers */

    for (uint32_t i = 0; i < FRAMES_IN_FLIGHT; i++) {
        // created signalled so the first beginRender() does not block on the fence
        VkFenceCreateInfo fenceInfo{
            .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
            .pNext = nullptr,
            .flags = VK_FENCE_CREATE_SIGNALED_BIT
        };
        res = vkCreateFence(pimpl->device.device, &fenceInfo, nullptr, &pimpl->frameData[i].renderFence);
        if (res != VK_SUCCESS) throw std::runtime_error("Failed to create fence!");

        VkSemaphoreCreateInfo smphInfo{
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
            .pNext = nullptr,
            .flags = 0
        };
        VKCHECK(vkCreateSemaphore(pimpl->device.device, &smphInfo, nullptr, &pimpl->frameData[i].transferSemaphore));
        VKCHECK(vkCreateSemaphore(pimpl->device.device, &smphInfo, nullptr, &pimpl->frameData[i].renderSemaphore));
        VKCHECK(vkCreateSemaphore(pimpl->device.device, &smphInfo, nullptr, &pimpl->frameData[i].presentSemaphore));

        VkCommandPoolCreateInfo poolInfo{
            .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
            .pNext = nullptr,
            .flags = 0, // Command buffers cannot be individually reset (more performant this way)
            .queueFamilyIndex = pimpl->device.queues.presentAndDrawQueueFamily
        };
        VKCHECK(vkCreateCommandPool(pimpl->device.device, &poolInfo, nullptr, &pimpl->frameData[i].graphicsPool));
        // same create info, different queue family, for the transfer pool
        poolInfo.queueFamilyIndex = pimpl->device.queues.transferQueueFamily;
        VKCHECK(vkCreateCommandPool(pimpl->device.device, &poolInfo, nullptr, &pimpl->frameData[i].transferPool));

        VkCommandBufferAllocateInfo cmdAllocInfo{
            .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
            .pNext = nullptr,
            .commandPool = pimpl->frameData[i].graphicsPool,
            .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
            .commandBufferCount = 1
        };
        VKCHECK(vkAllocateCommandBuffers(pimpl->device.device, &cmdAllocInfo, &pimpl->frameData[i].drawBuf));
        cmdAllocInfo.commandPool = pimpl->frameData[i].transferPool;
        VKCHECK(vkAllocateCommandBuffers(pimpl->device.device, &cmdAllocInfo, &pimpl->frameData[i].transferBuf));
    }

    /* create command pool for one-off transfer operations */
    VkCommandPoolCreateInfo transferPoolInfo{
        .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
        .pNext = nullptr,
        .flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, // These command buffers don't last very long
        .queueFamilyIndex = pimpl->device.queues.transferQueueFamily
    };
    VKCHECK(vkCreateCommandPool(pimpl->device.device, &transferPoolInfo, nullptr, &pimpl->transferCommandPool));

    /* create command pool for one-off draw queue operations */
    VkCommandPoolCreateInfo graphicsPoolInfo{
        .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
        .pNext = nullptr,
        .flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, // These command buffers don't last very long
        .queueFamilyIndex = pimpl->device.queues.presentAndDrawQueueFamily
    };
    VKCHECK(vkCreateCommandPool(pimpl->device.device, &graphicsPoolInfo, nullptr, &pimpl->graphicsCommandPool));

    /* create a global descriptor pool */
    // NOTE(review): pool sizes (100 per type, 1000 sets) are hard-coded caps;
    // allocation will fail beyond these — confirm they suit the application.
    std::vector<VkDescriptorPoolSize> poolSizes{};
    poolSizes.push_back({ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 100u });
    poolSizes.push_back({ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 100u });

    VkDescriptorPoolCreateInfo descriptorPoolInfo{};
    descriptorPoolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    descriptorPoolInfo.pNext = nullptr;
    descriptorPoolInfo.flags = 0;
    descriptorPoolInfo.maxSets = 1000u;
    descriptorPoolInfo.poolSizeCount = (uint32_t)poolSizes.size();
    descriptorPoolInfo.pPoolSizes = poolSizes.data();
    VKCHECK(vkCreateDescriptorPool(pimpl->device.device, &descriptorPoolInfo, nullptr, &pimpl->descriptorPool));
}
2022-09-21 19:52:26 +00:00
// Destroys all Vulkan objects in reverse order of creation.
// NOTE(review): there is no vkDeviceWaitIdle here — presumably the caller
// guarantees the GPU is idle before destroying the device; confirm.
GFXDevice::~GFXDevice()
{
    vkDestroyDescriptorPool(pimpl->device.device, pimpl->descriptorPool, nullptr);

    vkDestroyCommandPool(pimpl->device.device, pimpl->graphicsCommandPool, nullptr);
    vkDestroyCommandPool(pimpl->device.device, pimpl->transferCommandPool, nullptr);

    // per-frame pools, semaphores, and fences
    for (uint32_t i = 0; i < FRAMES_IN_FLIGHT; i++) {
        vkDestroyCommandPool(pimpl->device.device, pimpl->frameData[i].transferPool, nullptr);
        vkDestroyCommandPool(pimpl->device.device, pimpl->frameData[i].graphicsPool, nullptr);
        vkDestroySemaphore(pimpl->device.device, pimpl->frameData[i].presentSemaphore, nullptr);
        vkDestroySemaphore(pimpl->device.device, pimpl->frameData[i].renderSemaphore, nullptr);
        vkDestroySemaphore(pimpl->device.device, pimpl->frameData[i].transferSemaphore, nullptr);
        vkDestroyFence(pimpl->device.device, pimpl->frameData[i].renderFence, nullptr);
    }

    destroySwapchain(pimpl->swapchain);
    destroyAllocator(pimpl->allocator);
    destroyDevice(pimpl->device);
    vkDestroySurfaceKHR(pimpl->instance.instance, pimpl->surface, nullptr);
    destroyVulkanInstance(pimpl->instance);
}
2023-03-13 01:19:32 +00:00
// Reports the current drawable size in pixels. Falls back to the swapchain
// extent when SDL reports a zero-sized drawable.
void GFXDevice::getViewportSize(uint32_t* w, uint32_t* h)
{
    int drawableW = 0;
    int drawableH = 0;
    SDL_Vulkan_GetDrawableSize(pimpl->window, &drawableW, &drawableH);
    const bool drawableSizeUnavailable = (drawableW == 0 || drawableH == 0);
    if (drawableSizeUnavailable) {
        *w = static_cast<uint32_t>(pimpl->swapchain.extent.width);
        *h = static_cast<uint32_t>(pimpl->swapchain.extent.height);
    }
    else {
        *w = static_cast<uint32_t>(drawableW);
        *h = static_cast<uint32_t>(drawableH);
    }
}
2023-03-12 20:39:11 +00:00
// Begins a frame: waits for the frame-in-flight's fence, submits queued
// uniform-buffer uploads on the transfer queue (releasing ownership of the
// written GPU buffers to the draw queue family), acquires a swapchain image,
// and begins recording the draw command buffer with the render pass,
// viewport, and scissor already set.
// Returns a heap-allocated DrawBuffer that the caller must pass to
// finishRender(), which frees it.
gfx::DrawBuffer* GFXDevice::beginRender()
{
    VkResult res;

    if (pimpl->FRAMECOUNT == 0) {
        // perform clean-up before any rendering
        vkResetCommandPool(pimpl->device.device, pimpl->transferCommandPool, VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT);
        vkResetCommandPool(pimpl->device.device, pimpl->graphicsCommandPool, VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT);
    }

    const uint32_t currentFrameIndex = pimpl->FRAMECOUNT % FRAMES_IN_FLIGHT;
    const FrameData frameData = pimpl->frameData[currentFrameIndex];

    vmaSetCurrentFrameIndex(pimpl->allocator, (uint32_t)pimpl->FRAMECOUNT);

    /* wait until the previous frame RENDERING has finished */
    res = vkWaitForFences(pimpl->device.device, 1, &frameData.renderFence, VK_TRUE, 1000000000LL); // 1 second timeout
    VKCHECK(res);
    res = vkResetFences(pimpl->device.device, 1, &frameData.renderFence);
    VKCHECK(res);

    /* perform any pending uniform buffer writes */

    VKCHECK(vkResetCommandPool(pimpl->device.device, frameData.transferPool, 0));

    VkCommandBufferBeginInfo transferBeginInfo{
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        .pNext = nullptr,
        .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
        .pInheritanceInfo = nullptr // ignored
    };
    VKCHECK(vkBeginCommandBuffer(frameData.transferBuf, &transferBeginInfo));

    // transfer cmds...
    // Record one staging->GPU copy per queued uniform buffer, and build the
    // matching release-half barriers for the queue family ownership transfer.
    std::vector<VkBufferMemoryBarrier2> barriers{};
    for (gfx::UniformBuffer* uniformBuffer : pimpl->uniformBufferWriteQueues[currentFrameIndex]) {
        VkBufferCopy copyRegion{};
        copyRegion.srcOffset = 0;
        copyRegion.dstOffset = 0;
        copyRegion.size = uniformBuffer->stagingBuffer.size;
        vkCmdCopyBuffer(
            frameData.transferBuf,
            uniformBuffer->stagingBuffer.buffer,
            uniformBuffer->gpuBuffers[currentFrameIndex].buffer,
            1,
            &copyRegion
        );

        // release ownership from the transfer family to the draw family
        VkBufferMemoryBarrier2& barrier = barriers.emplace_back();
        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2;
        barrier.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT;
        barrier.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT;
        barrier.dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT;
        barrier.dstAccessMask = 0;
        barrier.srcQueueFamilyIndex = pimpl->device.queues.transferQueueFamily;
        barrier.dstQueueFamilyIndex = pimpl->device.queues.presentAndDrawQueueFamily;
        barrier.buffer = uniformBuffer->gpuBuffers[currentFrameIndex].buffer;
        barrier.offset = 0;
        barrier.size = uniformBuffer->gpuBuffers[currentFrameIndex].size;
    }
    pimpl->uniformBufferWriteQueues[currentFrameIndex].clear();

    // dependencyInfo points into `barriers`; it is reused below (with the
    // barriers rewritten in place) for the acquire half on the draw queue.
    VkDependencyInfo dependencyInfo{};
    dependencyInfo.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO;
    dependencyInfo.bufferMemoryBarrierCount = (uint32_t)barriers.size();
    dependencyInfo.pBufferMemoryBarriers = barriers.data();
    vkCmdPipelineBarrier2(frameData.transferBuf, &dependencyInfo);

    VKCHECK(vkEndCommandBuffer(frameData.transferBuf));

    VkSubmitInfo transferSubmitInfo{
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .pNext = nullptr,
        .waitSemaphoreCount = 0, // needs to wait for render but the fence does that
        .pWaitSemaphores = nullptr,
        .pWaitDstStageMask = nullptr,
        .commandBufferCount = 1,
        .pCommandBuffers = &frameData.transferBuf,
        .signalSemaphoreCount = 1,
        .pSignalSemaphores = &frameData.transferSemaphore, // waited on by the draw submit in finishRender()
    };
    res = vkQueueSubmit(pimpl->device.queues.transferQueues[0], 1, &transferSubmitInfo, VK_NULL_HANDLE);
    assert(res == VK_SUCCESS);

    // Acquire a swapchain image, re-creating the swapchain first if the last
    // present flagged it out of date.
    uint32_t swapchainImageIndex;
    do {
        if (pimpl->swapchainIsOutOfDate) {
            // re-create swapchain
            vkQueueWaitIdle(pimpl->device.queues.drawQueues[0]);
            vkQueueWaitIdle(pimpl->device.queues.presentQueue);
            createSwapchain(&pimpl->swapchain, pimpl->swapchainInfo);
        }
        // THIS FUNCTION BLOCKS UNTIL AN IMAGE IS AVAILABLE (it waits for vsync)
        res = vkAcquireNextImageKHR(
            pimpl->device.device, pimpl->swapchain.swapchain, 1000000000LL,
            frameData.presentSemaphore, VK_NULL_HANDLE, &swapchainImageIndex);
        if (res != VK_SUBOPTIMAL_KHR && res != VK_ERROR_OUT_OF_DATE_KHR) VKCHECK(res);
        if (res == VK_SUCCESS) pimpl->swapchainIsOutOfDate = false;
    } while (pimpl->swapchainIsOutOfDate);

    /* record command buffer */
    res = vkResetCommandPool(pimpl->device.device, frameData.graphicsPool, 0);
    VKCHECK(res);

    VkCommandBufferBeginInfo beginInfo{
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        .pNext = nullptr,
        .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
        .pInheritanceInfo = nullptr // ignored
    };
    res = vkBeginCommandBuffer(frameData.drawBuf, &beginInfo);
    VKCHECK(res);

    { // RECORDING

        /* change barriers to perform a queue ownership acquire operation */
        // NOTE(review): the acquire half sets srcStageMask to the vertex
        // shader stage with srcAccessMask 0 — confirm this matches the
        // intended pairing with the release barriers recorded above.
        for (VkBufferMemoryBarrier2& barrier : barriers) {
            barrier.srcStageMask = VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT;
            barrier.srcAccessMask = 0;
            barrier.dstStageMask = VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT;
            barrier.dstAccessMask = VK_ACCESS_2_UNIFORM_READ_BIT;
        }
        vkCmdPipelineBarrier2(frameData.drawBuf, &dependencyInfo);

        std::array<VkClearValue, 2> clearValues{}; // Using same value for all components enables compression according to NVIDIA Best Practices
        clearValues[0].color.float32[0] = 1.0f;
        clearValues[0].color.float32[1] = 1.0f;
        clearValues[0].color.float32[2] = 1.0f;
        clearValues[0].color.float32[3] = 1.0f;
        clearValues[1].depthStencil.depth = 1.0f;

        VkRenderPassBeginInfo passBegin{};
        passBegin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
        passBegin.pNext = nullptr;
        passBegin.renderPass = pimpl->swapchain.renderpass;
        passBegin.framebuffer = pimpl->swapchain.framebuffers[swapchainImageIndex];
        passBegin.renderArea.extent = pimpl->swapchain.extent;
        passBegin.renderArea.offset = { 0, 0 };
        passBegin.clearValueCount = (uint32_t)clearValues.size();
        passBegin.pClearValues = clearValues.data();
        vkCmdBeginRenderPass(frameData.drawBuf, &passBegin, VK_SUBPASS_CONTENTS_INLINE);

        // negative-height viewport flips Y so clip space matches GL conventions
        VkViewport viewport{};
        viewport.x = 0.0f;
        viewport.y = (float)pimpl->swapchain.extent.height;
        viewport.width = (float)pimpl->swapchain.extent.width;
        viewport.height = -(float)pimpl->swapchain.extent.height;
        viewport.minDepth = 0.0f;
        viewport.maxDepth = 1.0f;
        vkCmdSetViewport(frameData.drawBuf, 0, 1, &viewport);

        VkRect2D scissor{};
        scissor.offset = { 0, 0 };
        scissor.extent = pimpl->swapchain.extent;
        vkCmdSetScissor(frameData.drawBuf, 0, 1, &scissor);
    }

    // hand command buffer over to caller
    gfx::DrawBuffer* drawBuffer = new gfx::DrawBuffer;
    drawBuffer->frameData = frameData;
    drawBuffer->currentFrameIndex = currentFrameIndex;
    drawBuffer->imageIndex = swapchainImageIndex;
    return drawBuffer;
}
2023-03-12 20:39:11 +00:00
// Ends the frame begun by beginRender(): ends the render pass and command
// buffer, submits it (waiting on the image-acquire and transfer semaphores,
// signalling the render semaphore and fence), presents the swapchain image,
// and frees the DrawBuffer. A suboptimal/out-of-date present flags the
// swapchain for re-creation on the next beginRender().
void GFXDevice::finishRender(gfx::DrawBuffer* drawBuffer)
{
    assert(drawBuffer != nullptr);

    uint32_t swapchainImageIndex = drawBuffer->imageIndex;
    VkResult res;

    vkCmdEndRenderPass(drawBuffer->frameData.drawBuf);

    res = vkEndCommandBuffer(drawBuffer->frameData.drawBuf);
    VKCHECK(res);

    // SUBMIT
    // Wait on image acquisition before colour output, and on the uniform
    // transfer before the vertex shader reads uniforms.
    std::vector<VkSemaphore> waitSemaphores{};
    std::vector<VkPipelineStageFlags> waitDstStageMasks{};

    waitSemaphores.push_back(drawBuffer->frameData.presentSemaphore);
    waitDstStageMasks.push_back(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);

    waitSemaphores.push_back(drawBuffer->frameData.transferSemaphore);
    waitDstStageMasks.push_back(VK_PIPELINE_STAGE_VERTEX_SHADER_BIT);

    VkSubmitInfo submitInfo{
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .pNext = nullptr,
        .waitSemaphoreCount = (uint32_t)waitSemaphores.size(),
        .pWaitSemaphores = waitSemaphores.data(),
        .pWaitDstStageMask = waitDstStageMasks.data(),
        .commandBufferCount = 1,
        .pCommandBuffers = &drawBuffer->frameData.drawBuf,
        .signalSemaphoreCount = 1,
        .pSignalSemaphores = &drawBuffer->frameData.renderSemaphore,
    };
    // the fence is what beginRender() waits on for this frame-in-flight
    res = vkQueueSubmit(pimpl->device.queues.drawQueues[0], 1, &submitInfo, drawBuffer->frameData.renderFence);
    assert(res == VK_SUCCESS);
    // VKCHECK(res); // expensive operation for some reason

    // PRESENT
    VkPresentInfoKHR presentInfo{
        .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
        .pNext = nullptr,
        .waitSemaphoreCount = 1,
        .pWaitSemaphores = &drawBuffer->frameData.renderSemaphore,
        .swapchainCount = 1,
        .pSwapchains = &pimpl->swapchain.swapchain,
        .pImageIndices = &swapchainImageIndex,
        .pResults = nullptr
    };
    res = vkQueuePresentKHR(pimpl->device.queues.presentQueue, &presentInfo);
    if (res == VK_SUBOPTIMAL_KHR || res == VK_ERROR_OUT_OF_DATE_KHR) {
        // flag to re-create the swapchain before next render
        pimpl->swapchainIsOutOfDate = true;
    }
    else if (res != VK_SUCCESS) throw std::runtime_error("Failed to queue present! Code: " + std::to_string(res));

    pimpl->FRAMECOUNT++;

    // frees the DrawBuffer allocated by beginRender()
    delete drawBuffer;
}
// Bind a graphics pipeline on this frame's command buffer; subsequent draw
// commands recorded into drawBuffer will use it.
void GFXDevice::cmdBindPipeline(gfx::DrawBuffer* drawBuffer, const gfx::Pipeline* pipeline)
{
	assert(drawBuffer != nullptr);
	VkCommandBuffer cmd = drawBuffer->frameData.drawBuf;
	vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline->handle);
}
// Bind a single vertex buffer to the given binding slot, always at offset 0.
// The buffer must have been created with gfx::BufferType::VERTEX.
void GFXDevice::cmdBindVertexBuffer(gfx::DrawBuffer* drawBuffer, uint32_t binding, const gfx::Buffer* buffer)
{
	assert(drawBuffer != nullptr);
	assert(buffer != nullptr);
	assert(buffer->type == gfx::BufferType::VERTEX);
	constexpr VkDeviceSize kZeroOffset = 0;
	VkCommandBuffer cmd = drawBuffer->frameData.drawBuf;
	vkCmdBindVertexBuffers(cmd, binding, 1, &buffer->buffer, &kZeroOffset);
}
// Bind an index buffer at offset 0. The element type is fixed engine-wide by
// the INDEX_TYPE constant. The buffer must be of gfx::BufferType::INDEX.
void GFXDevice::cmdBindIndexBuffer(gfx::DrawBuffer* drawBuffer, const gfx::Buffer* buffer)
{
	assert(drawBuffer != nullptr);
	assert(buffer != nullptr);
	assert(buffer->type == gfx::BufferType::INDEX);
	VkCommandBuffer cmd = drawBuffer->frameData.drawBuf;
	vkCmdBindIndexBuffer(cmd, buffer->buffer, 0, INDEX_TYPE);
}
// Record an indexed draw into this frame's command buffer. Thin wrapper over
// vkCmdDrawIndexed; the currently bound pipeline/buffers/sets are used.
void GFXDevice::cmdDrawIndexed(gfx::DrawBuffer* drawBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance)
{
	assert(drawBuffer != nullptr);
	VkCommandBuffer cmd = drawBuffer->frameData.drawBuf;
	vkCmdDrawIndexed(cmd, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
}
2023-03-13 17:10:46 +00:00
// Upload push-constant data for the currently bound pipeline. The engine only
// exposes push constants to the vertex stage (matching the single
// VkPushConstantRange declared in createPipeline).
void GFXDevice::cmdPushConstants(gfx::DrawBuffer* drawBuffer, const gfx::Pipeline* pipeline, uint32_t offset, uint32_t size, const void* data)
{
	assert(drawBuffer != nullptr);
	VkCommandBuffer cmd = drawBuffer->frameData.drawBuf;
	vkCmdPushConstants(cmd, pipeline->layout, VK_SHADER_STAGE_VERTEX_BIT, offset, size, data);
}
2023-03-13 01:19:32 +00:00
// Bind the per-frame copy of a descriptor set at the given set number.
// Indexing by drawBuffer->currentFrameIndex ensures that updating a set for
// the current frame cannot clash with a previous frame still executing.
void GFXDevice::cmdBindDescriptorSet(gfx::DrawBuffer* drawBuffer, const gfx::Pipeline* pipeline, const gfx::DescriptorSet* set, uint32_t setNumber)
{
	// Null-check the handles for consistency with the other cmd* helpers.
	assert(drawBuffer != nullptr);
	assert(pipeline != nullptr);
	assert(set != nullptr);
	vkCmdBindDescriptorSets(drawBuffer->frameData.drawBuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline->layout, setNumber, 1, &set->sets[drawBuffer->currentFrameIndex], 0, nullptr);
}
// Build a complete graphics pipeline (and its layout) for the swapchain's
// render pass. Shaders are read from disk and compiled with shaderc at
// pipeline-creation time. Viewport and scissor are dynamic state, so the
// returned pipeline survives window resizes. Caller owns the returned object
// and must release it with destroyPipeline().
gfx::Pipeline* GFXDevice::createPipeline(const gfx::PipelineInfo& info)
{
2023-01-05 13:21:33 +00:00
[[maybe_unused]] VkResult res;
2022-10-23 23:19:07 +00:00
gfx::Pipeline* pipeline = new gfx::Pipeline;
2023-03-13 01:19:32 +00:00
// Load GLSL source text and compile both stages to SPIR-V modules now.
auto vertShaderCode = util::readTextFile(info.vertShaderPath);
auto fragShaderCode = util::readTextFile(info.fragShaderPath);
2022-11-08 15:34:59 +00:00
2023-03-13 01:19:32 +00:00
VkShaderModule vertShaderModule = compileShader(pimpl->device.device, shaderc_vertex_shader, vertShaderCode->data(), info.vertShaderPath);
VkShaderModule fragShaderModule = compileShader(pimpl->device.device, shaderc_fragment_shader, fragShaderCode->data(), info.fragShaderPath);
2022-10-24 14:16:04 +00:00
2022-10-22 12:15:25 +00:00
// get vertex attrib layout:
// All attributes come from a single interleaved vertex buffer at binding 0.
VkVertexInputBindingDescription bindingDescription{ };
bindingDescription.binding = 0;
2023-03-13 01:19:32 +00:00
bindingDescription.stride = info.vertexFormat.stride;
2022-10-22 12:15:25 +00:00
bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
std::vector<VkVertexInputAttributeDescription> attribDescs{};
2023-03-13 01:19:32 +00:00
attribDescs.reserve(info.vertexFormat.attributeDescriptions.size());
for (const auto& desc : info.vertexFormat.attributeDescriptions) {
2022-10-22 12:15:25 +00:00
VkVertexInputAttributeDescription vulkanAttribDesc{};
vulkanAttribDesc.location = desc.location;
vulkanAttribDesc.binding = 0;
2023-03-21 15:21:24 +00:00
vulkanAttribDesc.format = converters::getVertexAttribFormat(desc.format);
vulkanAttribDesc.offset = desc.offset;
2022-10-22 12:15:25 +00:00
attribDescs.push_back(vulkanAttribDesc);
}
2023-01-05 13:21:33 +00:00
VkPipelineShaderStageCreateInfo vertShaderStageInfo{};
vertShaderStageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
vertShaderStageInfo.stage = VK_SHADER_STAGE_VERTEX_BIT;
vertShaderStageInfo.module = vertShaderModule;
vertShaderStageInfo.pName = "main";
vertShaderStageInfo.pSpecializationInfo = nullptr;
2023-01-05 13:21:33 +00:00
VkPipelineShaderStageCreateInfo fragShaderStageInfo{};
fragShaderStageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
fragShaderStageInfo.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
fragShaderStageInfo.module = fragShaderModule;
fragShaderStageInfo.pName = "main";
fragShaderStageInfo.pSpecializationInfo = nullptr;
VkPipelineShaderStageCreateInfo shaderStages[2] = { vertShaderStageInfo, fragShaderStageInfo };
2023-03-12 17:11:13 +00:00
// set the vertex input layout
VkPipelineVertexInputStateCreateInfo vertexInputInfo{};
vertexInputInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
2022-10-22 12:15:25 +00:00
vertexInputInfo.vertexBindingDescriptionCount = 1;
vertexInputInfo.pVertexBindingDescriptions = &bindingDescription;
2022-11-28 15:02:08 +00:00
vertexInputInfo.vertexAttributeDescriptionCount = (uint32_t)attribDescs.size();
2022-10-22 12:15:25 +00:00
vertexInputInfo.pVertexAttributeDescriptions = attribDescs.data();
VkPipelineInputAssemblyStateCreateInfo inputAssembly{};
inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
inputAssembly.primitiveRestartEnable = VK_FALSE;
// Viewport/scissor are dynamic state (set per-frame), so the values below are
// only the initial defaults. y = height with a negative height flips the Y
// axis, per the Vulkan spec's negative-viewport-height behaviour.
VkViewport viewport{};
viewport.x = 0.0f;
2022-11-11 16:18:22 +00:00
viewport.y = (float)pimpl->swapchain.extent.height;
viewport.width = (float)pimpl->swapchain.extent.width;
2022-11-11 16:18:22 +00:00
viewport.height = -(float)pimpl->swapchain.extent.height;
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
VkRect2D scissor{};
scissor.offset = { 0, 0 };
scissor.extent = pimpl->swapchain.extent;
2023-03-12 17:11:13 +00:00
// Dynamic states removes the need to re-create pipelines whenever the window size changes
std::vector<VkDynamicState> dynamicStates = {
VK_DYNAMIC_STATE_VIEWPORT,
VK_DYNAMIC_STATE_SCISSOR
};
VkPipelineDynamicStateCreateInfo dynamicState{};
dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
2022-11-28 15:02:08 +00:00
dynamicState.dynamicStateCount = (uint32_t)dynamicStates.size();
dynamicState.pDynamicStates = dynamicStates.data();
VkPipelineViewportStateCreateInfo viewportState{};
viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewportState.viewportCount = 1;
viewportState.pViewports = &viewport;
viewportState.scissorCount = 1;
viewportState.pScissors = &scissor;
VkPipelineRasterizationStateCreateInfo rasterizer{};
rasterizer.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterizer.depthClampEnable = VK_FALSE;
rasterizer.rasterizerDiscardEnable = VK_FALSE; // enabling this will not run the fragment shaders at all
rasterizer.polygonMode = VK_POLYGON_MODE_FILL;
rasterizer.lineWidth = 1.0f;
2023-03-13 01:19:32 +00:00
// Culling is opt-in per pipeline via PipelineInfo.
if (info.backfaceCulling == true) {
2022-11-27 14:35:41 +00:00
rasterizer.cullMode = VK_CULL_MODE_BACK_BIT;
}
else {
rasterizer.cullMode = VK_CULL_MODE_NONE;
}
2022-10-31 16:21:07 +00:00
rasterizer.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rasterizer.depthBiasEnable = VK_FALSE;
rasterizer.depthBiasConstantFactor = 0.0f; // ignored
rasterizer.depthBiasClamp = 0.0f; // ignored
rasterizer.depthBiasSlopeFactor = 0.0f; // ignored
// No multisampling (1 sample per pixel).
VkPipelineMultisampleStateCreateInfo multisampling{};
multisampling.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
2023-03-12 17:11:13 +00:00
multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
multisampling.sampleShadingEnable = VK_FALSE;
multisampling.minSampleShading = 1.0f; // ignored
multisampling.pSampleMask = nullptr; // ignored
multisampling.alphaToCoverageEnable = VK_FALSE; // ignored
multisampling.alphaToOneEnable = VK_FALSE; // ignored
VkPipelineColorBlendAttachmentState colorBlendAttachment{};
colorBlendAttachment.colorWriteMask =
VK_COLOR_COMPONENT_R_BIT |
VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT |
VK_COLOR_COMPONENT_A_BIT;
2023-03-13 01:19:32 +00:00
// Optional standard alpha blending: out = src.a * src + (1 - src.a) * dst.
if (info.alphaBlending) {
2022-11-27 14:35:41 +00:00
colorBlendAttachment.blendEnable = VK_TRUE;
colorBlendAttachment.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
colorBlendAttachment.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
colorBlendAttachment.colorBlendOp = VK_BLEND_OP_ADD;
colorBlendAttachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
colorBlendAttachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
colorBlendAttachment.alphaBlendOp = VK_BLEND_OP_ADD;
}
else {
colorBlendAttachment.blendEnable = VK_FALSE;
}
2023-03-13 01:19:32 +00:00
VkPipelineColorBlendStateCreateInfo colorBlending{};
colorBlending.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
colorBlending.logicOpEnable = VK_FALSE;
colorBlending.logicOp = VK_LOGIC_OP_COPY; // ignored
colorBlending.attachmentCount = 1;
colorBlending.pAttachments = &colorBlendAttachment;
colorBlending.blendConstants[0] = 0.0f; // ignored
colorBlending.blendConstants[1] = 0.0f; // ignored
colorBlending.blendConstants[2] = 0.0f; // ignored
colorBlending.blendConstants[3] = 0.0f; // ignored
2022-10-31 16:21:07 +00:00
// Depth test/write always on; stencil unused.
VkPipelineDepthStencilStateCreateInfo depthStencil{};
depthStencil.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
depthStencil.depthTestEnable = VK_TRUE;
depthStencil.depthWriteEnable = VK_TRUE;
2023-03-23 19:07:10 +00:00
depthStencil.depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL;
2022-10-31 16:21:07 +00:00
depthStencil.depthBoundsTestEnable = VK_FALSE;
depthStencil.minDepthBounds = 0.0f;
depthStencil.maxDepthBounds = 1.0f;
depthStencil.stencilTestEnable = VK_FALSE;
depthStencil.front = {};
depthStencil.back = {};
// One fixed push-constant range; must agree with cmdPushConstants, which
// pushes with VK_SHADER_STAGE_VERTEX_BIT.
VkPushConstantRange pushConstantRange{};
pushConstantRange.offset = 0;
pushConstantRange.size = PUSH_CONSTANT_MAX_SIZE;
pushConstantRange.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
2023-03-13 01:19:32 +00:00
// Gather the raw Vulkan layout handles from the engine-side wrappers.
std::vector<VkDescriptorSetLayout> descriptorSetLayouts(info.descriptorSetLayouts.size());
for (size_t i = 0; i < descriptorSetLayouts.size(); i++) {
descriptorSetLayouts[i] = info.descriptorSetLayouts[i]->layout;
}
VkPipelineLayoutCreateInfo layoutInfo{};
layoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
layoutInfo.setLayoutCount = (uint32_t)descriptorSetLayouts.size();
2023-03-13 01:19:32 +00:00
layoutInfo.pSetLayouts = descriptorSetLayouts.data();
2022-10-31 16:21:07 +00:00
layoutInfo.pushConstantRangeCount = 1;
layoutInfo.pPushConstantRanges = &pushConstantRange;
2023-03-12 17:11:13 +00:00
res = vkCreatePipelineLayout(pimpl->device.device, &layoutInfo, nullptr, &pipeline->layout);
assert(res == VK_SUCCESS);
VkGraphicsPipelineCreateInfo createInfo{};
createInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
createInfo.stageCount = 2;
createInfo.pStages = shaderStages;
createInfo.pVertexInputState = &vertexInputInfo;
createInfo.pInputAssemblyState = &inputAssembly;
2023-03-12 17:11:13 +00:00
createInfo.pViewportState = &viewportState; // TODO: maybe this isn't needed?
createInfo.pRasterizationState = &rasterizer;
createInfo.pMultisampleState = &multisampling;
2022-10-31 16:21:07 +00:00
createInfo.pDepthStencilState = &depthStencil;
createInfo.pColorBlendState = &colorBlending;
createInfo.pDynamicState = &dynamicState;
2022-10-23 23:19:07 +00:00
createInfo.layout = pipeline->layout;
createInfo.renderPass = pimpl->swapchain.renderpass;
createInfo.subpass = 0;
createInfo.basePipelineHandle = VK_NULL_HANDLE;
createInfo.basePipelineIndex = -1;
2023-03-12 17:11:13 +00:00
res = vkCreateGraphicsPipelines(pimpl->device.device, VK_NULL_HANDLE, 1, &createInfo, nullptr, &pipeline->handle);
assert(res == VK_SUCCESS);
2023-03-12 17:11:13 +00:00
// The shader modules are no longer needed once the pipeline object exists.
vkDestroyShaderModule(pimpl->device.device, fragShaderModule, nullptr);
vkDestroyShaderModule(pimpl->device.device, vertShaderModule, nullptr);
2022-10-23 23:19:07 +00:00
return pipeline;
}
// Destroy a pipeline created by createPipeline(): first the Vulkan pipeline
// object, then its layout, then the heap-allocated wrapper.
void GFXDevice::destroyPipeline(const gfx::Pipeline* pipeline)
{
	VkDevice dev = pimpl->device.device;
	vkDestroyPipeline(dev, pipeline->handle, nullptr);
	vkDestroyPipelineLayout(dev, pipeline->layout, nullptr);
	delete pipeline;
}
// Create a descriptor set layout from the engine-side binding descriptions.
// Binding numbers are assigned sequentially starting at 0 (keeping them as low
// as possible avoids wasting memory). Each binding holds exactly one
// descriptor (count > 1 would appear as an array in the shader).
gfx::DescriptorSetLayout* GFXDevice::createDescriptorSetLayout(const std::vector<gfx::DescriptorSetLayoutBinding>& bindings)
{
	gfx::DescriptorSetLayout* out = new gfx::DescriptorSetLayout{};

	std::vector<VkDescriptorSetLayoutBinding> vulkanBindings(bindings.size());
	for (uint32_t bindingNumber = 0; bindingNumber < (uint32_t)bindings.size(); bindingNumber++) {
		VkDescriptorSetLayoutBinding& vkBinding = vulkanBindings[bindingNumber];
		vkBinding.binding = bindingNumber;
		vkBinding.descriptorType = converters::getDescriptorType(bindings[bindingNumber].descriptorType);
		vkBinding.descriptorCount = 1;
		vkBinding.stageFlags = converters::getShaderStageFlags(bindings[bindingNumber].stageFlags);
	}

	const VkDescriptorSetLayoutCreateInfo info{
		.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
		.pNext = nullptr,
		.flags = 0,
		.bindingCount = (uint32_t)vulkanBindings.size(),
		.pBindings = vulkanBindings.data()
	};

	VKCHECK(vkCreateDescriptorSetLayout(pimpl->device.device, &info, nullptr, &out->layout));

	return out;
}
// Destroy a layout created by createDescriptorSetLayout() and free its wrapper.
void GFXDevice::destroyDescriptorSetLayout(const gfx::DescriptorSetLayout* layout)
{
	VkDevice dev = pimpl->device.device;
	vkDestroyDescriptorSetLayout(dev, layout->layout, nullptr);
	delete layout;
}
// Allocate one descriptor set per frame in flight from the shared pool, all
// with the same layout. Having a copy per frame allows a set to be written for
// one frame while another frame's copy is still being read by the GPU.
// Throws if the pool is fragmented or exhausted.
gfx::DescriptorSet* GFXDevice::allocateDescriptorSet(const gfx::DescriptorSetLayout* layout)
{
	gfx::DescriptorSet* set = new gfx::DescriptorSet{};

	// The allocation info is identical for every frame, so build it once.
	VkDescriptorSetAllocateInfo allocInfo{};
	allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
	allocInfo.pNext = nullptr;
	allocInfo.descriptorPool = pimpl->descriptorPool;
	allocInfo.descriptorSetCount = 1;
	allocInfo.pSetLayouts = &layout->layout;

	for (uint32_t frame = 0; frame < FRAMES_IN_FLIGHT; frame++) {
		const VkResult res = vkAllocateDescriptorSets(pimpl->device.device, &allocInfo, &set->sets[frame]);
		switch (res) {
		case VK_ERROR_FRAGMENTED_POOL:
			throw std::runtime_error("Descriptor pool is fragmented!");
		case VK_ERROR_OUT_OF_POOL_MEMORY:
			throw std::runtime_error("Descriptor pool is out of memory!");
		default:
			VKCHECK(res);
		}
	}

	return set;
}
// Point `binding` of every per-frame copy of `set` at the sub-range
// [offset, offset + range) of the matching per-frame GPU buffer of `buffer`.
// Only permitted before any frame has been rendered, because descriptor sets
// that may be in use by the GPU must not be written.
void GFXDevice::updateDescriptorUniformBuffer(const gfx::DescriptorSet* set, uint32_t binding, const gfx::UniformBuffer* buffer, size_t offset, size_t range)
2023-03-13 01:19:32 +00:00
{
2023-03-13 17:10:46 +00:00
assert(pimpl->FRAMECOUNT == 0); // descriptor writes are only allowed before the first frame
for (uint32_t i = 0; i < FRAMES_IN_FLIGHT; i++) {
// Frame i's set copy points at frame i's device-local buffer copy.
VkDescriptorBufferInfo bufferInfo{
2023-03-21 15:21:24 +00:00
.buffer = buffer->gpuBuffers[i].buffer,
.offset = offset,
.range = range
2023-03-13 17:10:46 +00:00
};
VkWriteDescriptorSet descriptorWrite{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.pNext = nullptr,
.dstSet = set->sets[i],
.dstBinding = binding,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.pImageInfo = nullptr,
.pBufferInfo = &bufferInfo,
.pTexelBufferView = nullptr
};
vkUpdateDescriptorSets(pimpl->device.device, 1, &descriptorWrite, 0, nullptr);
}
2023-03-13 01:19:32 +00:00
}
2023-03-21 23:52:52 +00:00
// Bind `image` + `sampler` as a combined image sampler at `binding` in every
// per-frame copy of `set`. Only permitted before any frame has been rendered
// (same restriction as updateDescriptorUniformBuffer).
void GFXDevice::updateDescriptorCombinedImageSampler(const gfx::DescriptorSet *set, uint32_t binding, const gfx::Image* image, const gfx::Sampler* sampler)
{
assert(pimpl->FRAMECOUNT == 0); // descriptor writes are only allowed before the first frame
// The same image/sampler pair is shared by all frames, so build the info once.
// imageLayout describes the layout the image will be in when it is sampled.
VkDescriptorImageInfo imageInfo{};
2023-03-21 23:52:52 +00:00
imageInfo.sampler = sampler->sampler;
imageInfo.imageView = image->view;
imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
for (uint32_t i = 0; i < FRAMES_IN_FLIGHT; i++) {
VkWriteDescriptorSet descriptorWrite{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.pNext = nullptr,
.dstSet = set->sets[i],
.dstBinding = binding,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &imageInfo,
.pBufferInfo = nullptr,
.pTexelBufferView = nullptr
};
vkUpdateDescriptorSets(pimpl->device.device, 1, &descriptorWrite, 0, nullptr);
}
}
/* Create a uniform buffer consisting of one persistent host-visible staging
 * buffer plus FRAMES_IN_FLIGHT device-local buffers, all initialised with
 * `size` bytes from `initialData`. The staging buffer is kept alive so that
 * writeUniformBuffer() can update it later without reallocating.
 * Caller owns the result; release with destroyUniformBuffer(). */
gfx::UniformBuffer* GFXDevice::createUniformBuffer(uint64_t size, const void* initialData)
{
	// The memcpy below dereferences initialData unconditionally; assert like
	// createImage() does for its image data instead of invoking UB on null.
	assert(initialData != nullptr);

	gfx::UniformBuffer* out = new gfx::UniformBuffer{};

	/* first make staging buffer */
	out->stagingBuffer.size = size;
	out->stagingBuffer.type = gfx::BufferType::UNIFORM;
	{
		VkBufferCreateInfo stagingBufferInfo{};
		stagingBufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
		stagingBufferInfo.size = out->stagingBuffer.size;
		stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
		stagingBufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
		stagingBufferInfo.flags = 0;

		// Host-visible + coherent so the CPU can write it directly.
		VmaAllocationCreateInfo stagingAllocInfo{};
		stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
		stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
		stagingAllocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

		VKCHECK(vmaCreateBuffer(pimpl->allocator, &stagingBufferInfo, &stagingAllocInfo, &out->stagingBuffer.buffer, &out->stagingBuffer.allocation, nullptr));

		void* dataDest;
		VKCHECK(vmaMapMemory(pimpl->allocator, out->stagingBuffer.allocation, &dataDest));
		memcpy(dataDest, initialData, out->stagingBuffer.size);
		vmaUnmapMemory(pimpl->allocator, out->stagingBuffer.allocation);
	}

	/* create the device-local set of buffers */
	for (uint32_t i = 0; i < FRAMES_IN_FLIGHT; i++) {
		out->gpuBuffers[i].size = out->stagingBuffer.size;
		out->gpuBuffers[i].type = gfx::BufferType::UNIFORM;

		VkBufferCreateInfo gpuBufferInfo{};
		gpuBufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
		gpuBufferInfo.size = out->gpuBuffers[i].size;
		gpuBufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
		gpuBufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
		gpuBufferInfo.flags = 0;

		VmaAllocationCreateInfo gpuAllocationInfo{};
		gpuAllocationInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
		gpuAllocationInfo.flags = 0;

		VKCHECK(vmaCreateBuffer(pimpl->allocator, &gpuBufferInfo, &gpuAllocationInfo, &out->gpuBuffers[i].buffer, &out->gpuBuffers[i].allocation, nullptr));

		/* copy the staging buffer's contents into this frame's buffer */
		copyBuffer(pimpl->device.device, pimpl->transferCommandPool, pimpl->device.queues.transferQueues[0], out->stagingBuffer.buffer, out->gpuBuffers[i].buffer, out->stagingBuffer.size);
	}

	return out;
}
// Destroy a uniform buffer created by createUniformBuffer(): the per-frame
// device-local buffers, then the persistent staging buffer, then the wrapper.
void GFXDevice::destroyUniformBuffer(const gfx::UniformBuffer* uniformBuffer)
{
	for (uint32_t frame = 0; frame < FRAMES_IN_FLIGHT; frame++) {
		const auto& gpuBuf = uniformBuffer->gpuBuffers[frame];
		vmaDestroyBuffer(pimpl->allocator, gpuBuf.buffer, gpuBuf.allocation);
	}
	const auto& staging = uniformBuffer->stagingBuffer;
	vmaDestroyBuffer(pimpl->allocator, staging.buffer, staging.allocation);
	delete uniformBuffer;
}
/* Write `size` bytes from `data` into the uniform buffer at byte `offset`.
 * Only the host-visible staging buffer is written immediately; each per-frame
 * device-local buffer is queued for upload instead, because a buffer cannot
 * be updated while a frame that reads it is still in flight. */
void GFXDevice::writeUniformBuffer(gfx::UniformBuffer* buffer, uint64_t offset, uint64_t size, const void* data)
{
	assert(offset + size <= buffer->stagingBuffer.size);

	/* first update the staging buffer */
	void* dataDest;
	VKCHECK(vmaMapMemory(pimpl->allocator, buffer->stagingBuffer.allocation, &dataDest));
	// BUGFIX: `offset` is an offset into the *destination* buffer (that is
	// what the assert above range-checks), so it must be applied to the
	// mapped pointer — not to `data`, which previously wrote the bytes to the
	// wrong location and read past the end of the caller's data.
	memcpy((uint8_t*)dataDest + offset, data, size);
	vmaUnmapMemory(pimpl->allocator, buffer->stagingBuffer.allocation);

	/* queue the writes to each gpu buffer */
	// This is required as buffers cannot be updated if they are currently in use
	for (uint32_t i = 0; i < FRAMES_IN_FLIGHT; i++) {
		pimpl->uniformBufferWriteQueues[i].insert(buffer);
	}
}
2022-10-24 00:10:48 +00:00
/* Create a device-local buffer of the given type and upload `size` bytes from
 * `data` into it via a temporary host-visible staging buffer. The copy is
 * performed on the dedicated transfer queue and the staging buffer is
 * destroyed afterwards. Caller owns the result; release with destroyBuffer(). */
gfx::Buffer* GFXDevice::createBuffer(gfx::BufferType type, uint64_t size, const void* data)
{
	// The memcpy below dereferences `data` unconditionally; assert like
	// createImage() does for its image data instead of invoking UB on null.
	assert(data != nullptr);

	auto out = new gfx::Buffer{};
	out->size = size;
	out->type = type;

	VkBuffer stagingBuffer;
	VmaAllocation stagingAllocation;

	// first create the staging buffer and fill it with `data`
	{
		VkBufferCreateInfo stagingBufferInfo{};
		stagingBufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
		stagingBufferInfo.size = out->size;
		stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
		stagingBufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
		stagingBufferInfo.flags = 0;

		// Host-visible + coherent so the CPU can write it directly.
		VmaAllocationCreateInfo stagingAllocInfo{};
		stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
		stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
		stagingAllocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

		VKCHECK(vmaCreateBuffer(pimpl->allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr));

		void* dataDest;
		VKCHECK(vmaMapMemory(pimpl->allocator, stagingAllocation, &dataDest));
		memcpy(dataDest, data, out->size);
		vmaUnmapMemory(pimpl->allocator, stagingAllocation);
	}

	// create the actual buffer on the GPU
	{
		VkBufferCreateInfo gpuBufferInfo{};
		gpuBufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
		gpuBufferInfo.size = out->size;
		// usage depends on the buffer type; always a transfer destination
		gpuBufferInfo.usage = converters::getBufferUsageFlag(type) | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
		gpuBufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
		gpuBufferInfo.flags = 0;

		VmaAllocationCreateInfo gpuAllocationInfo{};
		gpuAllocationInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
		gpuAllocationInfo.flags = 0;

		VKCHECK(vmaCreateBuffer(pimpl->allocator, &gpuBufferInfo, &gpuAllocationInfo, &out->buffer, &out->allocation, nullptr));
	}

	// copy the data from the staging buffer to the gpu buffer
	copyBuffer(pimpl->device.device, pimpl->transferCommandPool, pimpl->device.queues.transferQueues[0], stagingBuffer, out->buffer, out->size);

	// destroy staging buffer
	vmaDestroyBuffer(pimpl->allocator, stagingBuffer, stagingAllocation);

	return out;
}
2022-10-24 00:10:48 +00:00
// Destroy a buffer created by createBuffer(): free the GPU allocation via VMA,
// then the heap-allocated wrapper.
void GFXDevice::destroyBuffer(const gfx::Buffer* buffer)
{
	const auto& buf = *buffer;
	vmaDestroyBuffer(pimpl->allocator, buf.buffer, buf.allocation);
	delete buffer;
}
2023-03-22 10:42:07 +00:00
// imageData must have pixel format R8G8B8A8_SRGB
gfx::Image* GFXDevice::createImage(uint32_t w, uint32_t h, const void* imageData)
{
2023-03-22 10:42:07 +00:00
assert(imageData != nullptr);
2023-03-21 23:52:52 +00:00
assert(pimpl->FRAMECOUNT == 0);
gfx::Image* out = new gfx::Image{};
2023-03-30 10:20:25 +00:00
uint32_t mipLevels = static_cast<uint32_t>(std::floor(std::log2(std::max(w, h)))) + 1;
VkFormat imageFormat = VK_FORMAT_R8G8B8A8_SRGB;
2023-03-22 10:42:07 +00:00
VkBuffer stagingBuffer = VK_NULL_HANDLE;
VmaAllocation stagingAllocation = VK_NULL_HANDLE;
VkDeviceSize stagingBufferSize = (VkDeviceSize)w * (VkDeviceSize)h * 4;
/* create staging buffer */
{
VkBufferCreateInfo stagingBufferInfo{};
stagingBufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
stagingBufferInfo.size = stagingBufferSize;
stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
stagingBufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
stagingBufferInfo.flags = 0;
VmaAllocationCreateInfo stagingAllocInfo{};
stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
stagingAllocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
VKCHECK(vmaCreateBuffer(pimpl->allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr));
void* dataDest;
VKCHECK(vmaMapMemory(pimpl->allocator, stagingAllocation, &dataDest));
memcpy(dataDest, imageData, stagingBufferSize);
vmaUnmapMemory(pimpl->allocator, stagingAllocation);
}
2023-03-21 23:52:52 +00:00
VkImageCreateInfo imageInfo{};
imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageInfo.flags = 0;
imageInfo.imageType = VK_IMAGE_TYPE_2D;
2023-03-30 10:20:25 +00:00
imageInfo.format = imageFormat;
2023-03-21 23:52:52 +00:00
imageInfo.extent.width = w;
imageInfo.extent.height = h;
imageInfo.extent.depth = 1;
2023-03-30 10:20:25 +00:00
imageInfo.mipLevels = mipLevels;
2023-03-21 23:52:52 +00:00
imageInfo.arrayLayers = 1;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
2023-03-30 10:20:25 +00:00
imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
2023-03-21 23:52:52 +00:00
imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VmaAllocationCreateInfo allocCreateInfo{};
allocCreateInfo.flags = 0;
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
allocCreateInfo.priority = 0.5f;
VKCHECK(vmaCreateImage(pimpl->allocator, &imageInfo, &allocCreateInfo, &out->image, &out->allocation, nullptr));
VkImageViewCreateInfo viewInfo{};
viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewInfo.image = out->image;
viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
2023-03-30 10:20:25 +00:00
viewInfo.format = imageFormat;
2023-03-21 23:52:52 +00:00
viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
viewInfo.subresourceRange.baseMipLevel = 0;
2023-03-30 10:20:25 +00:00
viewInfo.subresourceRange.levelCount = mipLevels;
2023-03-21 23:52:52 +00:00
viewInfo.subresourceRange.baseArrayLayer = 0;
viewInfo.subresourceRange.layerCount = 1;
VKCHECK(vkCreateImageView(pimpl->device.device, &viewInfo, nullptr, &out->view));
2023-03-30 10:20:25 +00:00
/* begin command buffer */
2023-03-21 23:52:52 +00:00
VkCommandBufferAllocateInfo allocInfo{};
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
2023-03-23 19:07:10 +00:00
allocInfo.commandPool = pimpl->graphicsCommandPool;
2023-03-21 23:52:52 +00:00
allocInfo.commandBufferCount = 1;
VkCommandBuffer commandBuffer;
VKCHECK(vkAllocateCommandBuffers(pimpl->device.device, &allocInfo, &commandBuffer));
{ // record the command buffer
VkCommandBufferBeginInfo beginInfo{};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
VKCHECK(vkBeginCommandBuffer(commandBuffer, &beginInfo));
2023-03-30 10:20:25 +00:00
// barrier: (all mip levels): UNDEFINED -> TRANSFER_DST_OPTIMAL
// Used for copying staging buffer AND blitting mipmaps
// Must happen before vkCmdCopyBufferToImage performs a TRANSFER_WRITE in the COPY stage.
VkImageMemoryBarrier2 beforeCopyBarrier{};
beforeCopyBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2;
beforeCopyBarrier.srcStageMask = VK_PIPELINE_STAGE_2_NONE;
beforeCopyBarrier.srcAccessMask = VK_ACCESS_2_NONE;
beforeCopyBarrier.dstStageMask = VK_PIPELINE_STAGE_2_COPY_BIT;
beforeCopyBarrier.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT;
beforeCopyBarrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
beforeCopyBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
beforeCopyBarrier.srcQueueFamilyIndex = pimpl->device.queues.presentAndDrawQueueFamily;
beforeCopyBarrier.dstQueueFamilyIndex = pimpl->device.queues.presentAndDrawQueueFamily;
beforeCopyBarrier.image = out->image;
beforeCopyBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
beforeCopyBarrier.subresourceRange.baseMipLevel = 0;
beforeCopyBarrier.subresourceRange.levelCount = mipLevels;
beforeCopyBarrier.subresourceRange.baseArrayLayer = 0;
beforeCopyBarrier.subresourceRange.layerCount = 1;
VkDependencyInfo beforeCopyDependency{};
beforeCopyDependency.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO;
beforeCopyDependency.imageMemoryBarrierCount = 1;
beforeCopyDependency.pImageMemoryBarriers = &beforeCopyBarrier;
vkCmdPipelineBarrier2(commandBuffer, &beforeCopyDependency);
// copy staging buffer to mipLevel 0 (full res image)
2023-03-22 10:42:07 +00:00
VkBufferImageCopy region{};
region.bufferOffset = 0;
region.bufferRowLength = 0;
region.bufferImageHeight = 0;
2023-03-30 10:20:25 +00:00
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
2023-03-22 10:42:07 +00:00
region.imageSubresource.mipLevel = 0;
region.imageSubresource.baseArrayLayer = 0;
region.imageSubresource.layerCount = 1;
region.imageOffset.x = 0;
region.imageOffset.y = 0;
region.imageOffset.z = 0;
region.imageExtent = imageInfo.extent;
vkCmdCopyBufferToImage(commandBuffer, stagingBuffer, out->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
2023-03-30 10:20:25 +00:00
int32_t mipWidth = w;
int32_t mipHeight = h;
for (uint32_t i = 1; i < mipLevels; i++) {
// barrier: (i - 1) TRANSFER_DST_OPTIMAL -> TRANSFER_SRC_OPTIMAL
// Must happen after TRANSFER_WRITE in the COPY stage and BLIT stage.
VkImageMemoryBarrier2 beforeBlitBarrier{};
beforeBlitBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2;
beforeBlitBarrier.srcStageMask = VK_PIPELINE_STAGE_2_COPY_BIT | VK_PIPELINE_STAGE_2_BLIT_BIT;
beforeBlitBarrier.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT;
beforeBlitBarrier.dstStageMask = VK_PIPELINE_STAGE_2_BLIT_BIT;
beforeBlitBarrier.dstAccessMask = VK_ACCESS_2_TRANSFER_READ_BIT;
beforeBlitBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
beforeBlitBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
beforeBlitBarrier.srcQueueFamilyIndex = pimpl->device.queues.presentAndDrawQueueFamily;
beforeBlitBarrier.dstQueueFamilyIndex = pimpl->device.queues.presentAndDrawQueueFamily;
beforeBlitBarrier.image = out->image;
beforeBlitBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
beforeBlitBarrier.subresourceRange.baseMipLevel = (i - 1);
beforeBlitBarrier.subresourceRange.levelCount = 1;
beforeBlitBarrier.subresourceRange.baseArrayLayer = 0;
beforeBlitBarrier.subresourceRange.layerCount = 1;
VkDependencyInfo beforeBlitDependency{};
beforeBlitDependency.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO;
beforeBlitDependency.imageMemoryBarrierCount = 1;
beforeBlitDependency.pImageMemoryBarriers = &beforeBlitBarrier;
vkCmdPipelineBarrier2(commandBuffer, &beforeBlitDependency);
VkImageBlit blit{};
blit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit.srcSubresource.mipLevel = i - 1;
blit.srcSubresource.baseArrayLayer = 0;
blit.srcSubresource.layerCount = 1;
blit.srcOffsets[0] = { 0, 0, 0 };
blit.srcOffsets[1] = { mipWidth, mipHeight, 1 };
blit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit.dstSubresource.mipLevel = i;
blit.dstSubresource.baseArrayLayer = 0;
blit.dstSubresource.layerCount = 1;
blit.dstOffsets[0] = { 0, 0, 0};
blit.dstOffsets[1] = { mipWidth > 1 ? mipWidth / 2 : 1, mipHeight > 1 ? mipHeight / 2 : 1, 1 };
vkCmdBlitImage(commandBuffer,
out->image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
out->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1, &blit, VK_FILTER_LINEAR);
// barrier: (i - 1) TRANSFER_SRC_OPTIMAL -> SHADER_READ_ONLY_OPTIMALs
// Must happen after usage in the BLIT stage.
// Must happen before SHADER_SAMPLED_READ in the FRAGMENT_SHADER stage
VkImageMemoryBarrier2 afterBlitBarrier{};
afterBlitBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2;
afterBlitBarrier.srcStageMask = VK_PIPELINE_STAGE_2_BLIT_BIT;
afterBlitBarrier.srcAccessMask = 0;
afterBlitBarrier.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT;
afterBlitBarrier.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT;
afterBlitBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
afterBlitBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
afterBlitBarrier.srcQueueFamilyIndex = pimpl->device.queues.presentAndDrawQueueFamily;
afterBlitBarrier.dstQueueFamilyIndex = pimpl->device.queues.presentAndDrawQueueFamily;
afterBlitBarrier.image = out->image;
afterBlitBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
afterBlitBarrier.subresourceRange.baseMipLevel = (i - 1);
afterBlitBarrier.subresourceRange.levelCount = 1;
afterBlitBarrier.subresourceRange.baseArrayLayer = 0;
afterBlitBarrier.subresourceRange.layerCount = 1;
VkDependencyInfo afterBlitDependency{};
afterBlitDependency.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO;
afterBlitDependency.imageMemoryBarrierCount = 1;
afterBlitDependency.pImageMemoryBarriers = &afterBlitBarrier;
vkCmdPipelineBarrier2(commandBuffer, &afterBlitDependency);
if (mipWidth > 1) mipWidth /= 2;
if (mipHeight > 2) mipHeight /= 2;
}
// Final mipLevel is never transitioned from TRANSFER_DST_OPTIMAL
// barrier: (mipLevels - 1) TRANSFER_DST_OPTIMAL -> SHADER_READ_ONLY_OPTIMAL
VkImageMemoryBarrier2 finalBlitBarrier{};
finalBlitBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2;
finalBlitBarrier.srcStageMask = VK_PIPELINE_STAGE_2_BLIT_BIT | VK_PIPELINE_STAGE_2_COPY_BIT;
finalBlitBarrier.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT;
finalBlitBarrier.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT;
finalBlitBarrier.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT;
finalBlitBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
finalBlitBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
finalBlitBarrier.srcQueueFamilyIndex = pimpl->device.queues.presentAndDrawQueueFamily;
finalBlitBarrier.dstQueueFamilyIndex = pimpl->device.queues.presentAndDrawQueueFamily;
finalBlitBarrier.image = out->image;
finalBlitBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
finalBlitBarrier.subresourceRange.baseMipLevel = (mipLevels - 1);
finalBlitBarrier.subresourceRange.levelCount = 1;
finalBlitBarrier.subresourceRange.baseArrayLayer = 0;
finalBlitBarrier.subresourceRange.layerCount = 1;
VkDependencyInfo afterBlitDependency{};
afterBlitDependency.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO;
afterBlitDependency.imageMemoryBarrierCount = 1;
afterBlitDependency.pImageMemoryBarriers = &finalBlitBarrier;
vkCmdPipelineBarrier2(commandBuffer, &afterBlitDependency);
VKCHECK(vkEndCommandBuffer(commandBuffer));
}
// submit
VkSubmitInfo submitInfo{};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffer;
VKCHECK(vkQueueSubmit(pimpl->device.queues.drawQueues[0], 1, &submitInfo, VK_NULL_HANDLE));
VKCHECK(vkQueueWaitIdle(pimpl->device.queues.drawQueues[0]));
vkFreeCommandBuffers(pimpl->device.device, pimpl->graphicsCommandPool, 1, &commandBuffer);
vmaDestroyBuffer(pimpl->allocator, stagingBuffer, stagingAllocation);
return out;
}
// Releases the Vulkan resources backing an image returned by createImage(),
// then frees the wrapper object itself. The image must no longer be in use
// by the GPU when this is called.
void GFXDevice::destroyImage(const gfx::Image *image)
{
	// Tear down in reverse creation order: the view first, then the image
	// and its VMA allocation, then the CPU-side handle object.
	VkDevice device = pimpl->device.device;
	vkDestroyImageView(device, image->view, nullptr);
	vmaDestroyImage(pimpl->allocator, image->image, image->allocation);
	delete image;
}
// Creates a Vulkan sampler from the engine-side description.
// The returned object is heap-allocated and must be released with
// destroySampler().
const gfx::Sampler *GFXDevice::createSampler(const gfx::SamplerInfo& info)
{
	gfx::Sampler* sampler = new gfx::Sampler{};

	VkSamplerCreateInfo createInfo{};
	createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;

	// Filtering behaviour comes straight from the SamplerInfo.
	createInfo.magFilter = converters::getFilter(info.magnify);
	createInfo.minFilter = converters::getFilter(info.minify);
	createInfo.mipmapMode = converters::getSamplerMipmapMode(info.mipmap);

	// Tile the texture on every axis.
	createInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
	createInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
	createInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;

	createInfo.mipLodBias = 0.0f;

	// Anisotropic filtering is optional; when enabled, request the maximum
	// level the physical device supports. (maxAnisotropy is ignored by the
	// driver when anisotropyEnable is VK_FALSE.)
	createInfo.anisotropyEnable = info.anisotropicFiltering ? VK_TRUE : VK_FALSE;
	createInfo.maxAnisotropy = pimpl->device.properties.limits.maxSamplerAnisotropy;

	// Do not clamp away any mip levels.
	createInfo.minLod = 0.0f;
	createInfo.maxLod = VK_LOD_CLAMP_NONE;

	VKCHECK(vkCreateSampler(pimpl->device.device, &createInfo, nullptr, &sampler->sampler));

	return sampler;
}
// Destroys a sampler created by createSampler() and frees its wrapper.
// The sampler must not be referenced by any in-flight GPU work.
void GFXDevice::destroySampler(const gfx::Sampler *sampler)
{
	VkDevice device = pimpl->device.device;
	vkDestroySampler(device, sampler->sampler, nullptr);
	delete sampler;
}
void GFXDevice::logPerformanceInfo()
{
VmaTotalStatistics pStats{};
vmaCalculateStatistics(pimpl->allocator, &pStats);
VkPhysicalDeviceMemoryProperties2 memProps{};
memProps.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2;
vkGetPhysicalDeviceMemoryProperties2(pimpl->device.physicalDevice, &memProps);
LOG_INFO("GPU Memory Statistics:");
for (uint32_t i = 0; i < memProps.memoryProperties.memoryHeapCount; i++) {
const VmaStatistics& statistics = pStats.memoryType[i].statistics;
VkMemoryHeap heap = memProps.memoryProperties.memoryHeaps[i];
2023-03-21 14:47:15 +00:00
LOG_INFO("Memory heap {}", i);
if (heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) {
LOG_INFO(" DEVICE_LOCAL");
}
2023-03-21 15:21:24 +00:00
LOG_INFO(" Memory blocks allocated: {} ({} MiB)", statistics.blockCount, statistics.allocationBytes / (1024 * 1024));
LOG_INFO(" Number of allocations: {} ({} MiB)", statistics.allocationCount, statistics.allocationBytes / (1024 * 1024));
LOG_INFO(" Max size: {} MiB", heap.size / (1024 * 1024));
}
}
// Returns the device's running frame counter (pimpl->FRAMECOUNT).
uint64_t GFXDevice::getFrameCount()
{
	const uint64_t framesElapsed = pimpl->FRAMECOUNT;
	return framesElapsed;
}
void GFXDevice::waitIdle()
{
vkDeviceWaitIdle(pimpl->device.device);
}
}