diff --git a/src/pt/hybrid_renderer.cpp b/src/pt/hybrid_renderer.cpp index f3de10a..7cb8736 100644 --- a/src/pt/hybrid_renderer.cpp +++ b/src/pt/hybrid_renderer.cpp @@ -68,7 +68,6 @@ HybridRenderer::HybridRenderer( mAlbedoTextureView(nullptr), mNormalTexture(nullptr), mNormalTextureView(nullptr), - mGbufferSampler(nullptr), mGbufferBindGroupLayout(), mGbufferBindGroup(), mGbufferPass(gpuContext, rendererDesc), @@ -132,49 +131,25 @@ HybridRenderer::HybridRenderer( mNormalTexture, "Gbuffer normal texture view", NORMAL_TEXTURE_FORMAT); NLRS_ASSERT(mNormalTextureView != nullptr); - { - const WGPUSamplerDescriptor desc{ - .nextInChain = nullptr, - .label = "Gbuffer sampler", - .addressModeU = WGPUAddressMode_ClampToEdge, - .addressModeV = WGPUAddressMode_ClampToEdge, - .addressModeW = WGPUAddressMode_ClampToEdge, - .magFilter = WGPUFilterMode_Nearest, - .minFilter = WGPUFilterMode_Nearest, - .mipmapFilter = WGPUMipmapFilterMode_Nearest, - .lodMinClamp = 0.f, - .lodMaxClamp = 32.f, - .compare = WGPUCompareFunction_Undefined, - .maxAnisotropy = 1, - }; - mGbufferSampler = wgpuDeviceCreateSampler(gpuContext.device, &desc); - NLRS_ASSERT(mGbufferSampler != nullptr); - } - mGbufferBindGroupLayout = GpuBindGroupLayout{ gpuContext.device, "Gbuffer bind group layout", - std::array{ - samplerBindGroupLayoutEntry(0), - textureBindGroupLayoutEntry(1), - textureBindGroupLayoutEntry(2)}}; + std::array{ + textureBindGroupLayoutEntry(0), textureBindGroupLayoutEntry(1)}}; mGbufferBindGroup = GpuBindGroup{ gpuContext.device, "Gbuffer bind group", mGbufferBindGroupLayout.ptr(), - std::array{ - samplerBindGroupEntry(0, mGbufferSampler), - textureBindGroupEntry(1, mAlbedoTextureView), - textureBindGroupEntry(2, mNormalTextureView)}}; + std::array{ + textureBindGroupEntry(0, mAlbedoTextureView), + textureBindGroupEntry(1, mNormalTextureView)}}; - mDebugPass = DebugPass{gpuContext, mGbufferBindGroupLayout}; + mDebugPass = DebugPass{gpuContext, mGbufferBindGroupLayout, 
rendererDesc.framebufferSize}; } HybridRenderer::~HybridRenderer() { - samplerSafeRelease(mGbufferSampler); - mGbufferSampler = nullptr; textureViewSafeRelease(mNormalTextureView); mNormalTextureView = nullptr; textureSafeRelease(mNormalTexture); @@ -228,6 +203,8 @@ void HybridRenderer::resize(const GpuContext& gpuContext, const Extent2u& newSiz { NLRS_ASSERT(newSize.x > 0 && newSize.y > 0); + mDebugPass.resize(gpuContext, newSize); + textureViewSafeRelease(mNormalTextureView); textureSafeRelease(mNormalTexture); textureViewSafeRelease(mAlbedoTextureView); @@ -304,10 +281,9 @@ void HybridRenderer::resize(const GpuContext& gpuContext, const Extent2u& newSiz gpuContext.device, "Gbuffer bind group", mGbufferBindGroupLayout.ptr(), - std::array{ - samplerBindGroupEntry(0, mGbufferSampler), - textureBindGroupEntry(1, mAlbedoTextureView), - textureBindGroupEntry(2, mNormalTextureView)}}; + std::array{ + textureBindGroupEntry(0, mAlbedoTextureView), + textureBindGroupEntry(1, mNormalTextureView)}}; } HybridRenderer::GbufferPass::GbufferPass( @@ -801,18 +777,42 @@ void HybridRenderer::GbufferPass::render( HybridRenderer::DebugPass::DebugPass( const GpuContext& gpuContext, - const GpuBindGroupLayout& gbufferBindGroupLayout) + const GpuBindGroupLayout& gbufferBindGroupLayout, + const Extent2u& framebufferSize) : mVertexBuffer( gpuContext.device, "Vertex buffer", WGPUBufferUsage_CopyDst | WGPUBufferUsage_Vertex, std::span(quadVertexData)), + mUniformBuffer(), + mUniformBindGroup(), mPipeline(nullptr) { + { + const auto uniformData = Extent2{framebufferSize}; + mUniformBuffer = GpuBuffer{ + gpuContext.device, + "Uniform buffer", + WGPUBufferUsage_CopyDst | WGPUBufferUsage_Uniform, + std::span(&uniformData.x, sizeof(Extent2))}; + } + + const GpuBindGroupLayout uniformBindGroupLayout{ + gpuContext.device, + "Uniform bind group layout", + mUniformBuffer.bindGroupLayoutEntry(0, WGPUShaderStage_Fragment, sizeof(Extent2))}; + + mUniformBindGroup = GpuBindGroup{ + gpuContext.device, + 
"Uniform bind group", + uniformBindGroupLayout.ptr(), + mUniformBuffer.bindGroupEntry(0)}; + { // Pipeline layout - const std::array bindGroupLayouts{gbufferBindGroupLayout.ptr()}; + const std::array bindGroupLayouts{ + uniformBindGroupLayout.ptr(), gbufferBindGroupLayout.ptr()}; const WGPUPipelineLayoutDescriptor pipelineLayoutDesc{ .nextInChain = nullptr, @@ -945,6 +945,8 @@ HybridRenderer::DebugPass::DebugPass(DebugPass&& other) noexcept if (this != &other) { mVertexBuffer = std::move(other.mVertexBuffer); + mUniformBuffer = std::move(other.mUniformBuffer); + mUniformBindGroup = std::move(other.mUniformBindGroup); mPipeline = other.mPipeline; other.mPipeline = nullptr; } @@ -955,6 +957,8 @@ HybridRenderer::DebugPass& HybridRenderer::DebugPass::operator=(DebugPass&& othe if (this != &other) { mVertexBuffer = std::move(other.mVertexBuffer); + mUniformBuffer = std::move(other.mUniformBuffer); + mUniformBindGroup = std::move(other.mUniformBindGroup); renderPipelineSafeRelease(mPipeline); mPipeline = other.mPipeline; other.mPipeline = nullptr; @@ -993,11 +997,19 @@ void HybridRenderer::DebugPass::render( NLRS_ASSERT(renderPass != nullptr); wgpuRenderPassEncoderSetPipeline(renderPass, mPipeline); - wgpuRenderPassEncoderSetBindGroup(renderPass, 0, gbufferBindGroup.ptr(), 0, nullptr); + wgpuRenderPassEncoderSetBindGroup(renderPass, 0, mUniformBindGroup.ptr(), 0, nullptr); + wgpuRenderPassEncoderSetBindGroup(renderPass, 1, gbufferBindGroup.ptr(), 0, nullptr); wgpuRenderPassEncoderSetVertexBuffer( renderPass, 0, mVertexBuffer.ptr(), 0, mVertexBuffer.byteSize()); wgpuRenderPassEncoderDraw(renderPass, 6, 1, 0, 0); wgpuRenderPassEncoderEnd(renderPass); } + +void HybridRenderer::DebugPass::resize(const GpuContext& gpuContext, const Extent2u& newSize) +{ + const auto uniformData = Extent2{newSize}; + wgpuQueueWriteBuffer( + gpuContext.queue, mUniformBuffer.ptr(), 0, &uniformData.x, sizeof(Extent2)); +} } // namespace nlrs diff --git a/src/pt/hybrid_renderer.hpp 
b/src/pt/hybrid_renderer.hpp index 8548a11..31fe5ac 100644 --- a/src/pt/hybrid_renderer.hpp +++ b/src/pt/hybrid_renderer.hpp @@ -99,11 +99,16 @@ class HybridRenderer { private: GpuBuffer mVertexBuffer = GpuBuffer{}; + GpuBuffer mUniformBuffer = GpuBuffer{}; + GpuBindGroup mUniformBindGroup = GpuBindGroup{}; WGPURenderPipeline mPipeline = nullptr; public: DebugPass() = default; - DebugPass(const GpuContext& gpuContext, const GpuBindGroupLayout& gbufferBindGroupLayout); + DebugPass( + const GpuContext& gpuContext, + const GpuBindGroupLayout& gbufferBindGroupLayout, + const Extent2u& framebufferSize); ~DebugPass(); DebugPass(const DebugPass&) = delete; @@ -116,6 +121,7 @@ class HybridRenderer const GpuBindGroup& gbufferBindGroup, WGPUCommandEncoder encoder, WGPUTextureView textureView); + void resize(const GpuContext&, const Extent2u&); }; WGPUTexture mDepthTexture; @@ -124,7 +130,6 @@ class HybridRenderer WGPUTextureView mAlbedoTextureView; WGPUTexture mNormalTexture; WGPUTextureView mNormalTextureView; - WGPUSampler mGbufferSampler; GpuBindGroupLayout mGbufferBindGroupLayout; GpuBindGroup mGbufferBindGroup; GbufferPass mGbufferPass; diff --git a/src/pt/hybrid_renderer_debug_pass.wgsl b/src/pt/hybrid_renderer_debug_pass.wgsl index 12a299a..4c2d03c 100644 --- a/src/pt/hybrid_renderer_debug_pass.wgsl +++ b/src/pt/hybrid_renderer_debug_pass.wgsl @@ -17,20 +17,18 @@ fn vsMain(in: VertexInput) -> VertexOutput { return out; } -@group(0) @binding(0) var textureSampler: sampler; -@group(0) @binding(1) var gbufferAlbedo: texture_2d<f32>; -@group(0) @binding(2) var gbufferNormal: texture_2d<f32>; +@group(0) @binding(0) var<uniform> framebufferSize: vec2f; +@group(1) @binding(0) var gbufferAlbedo: texture_2d<f32>; +@group(1) @binding(1) var gbufferNormal: texture_2d<f32>; @fragment fn fsMain(in: VertexOutput) -> @location(0) vec4f { - // NOTE: textureSample can't be called from non-uniform control flow - // TODO: replace with textureLoad calls which can be called from non-uniform control flow and get rid
of the sampler let c = in.texCoord; - let a = textureSample(gbufferAlbedo, textureSampler, c); - let n = textureSample(gbufferNormal, textureSampler, c); + let idx = vec2u(floor(c * framebufferSize)); if c.x < 0.5 { - return a; + return textureLoad(gbufferAlbedo, idx, 0); } else { + let n = textureLoad(gbufferNormal, idx, 0); return vec4(vec3(0.5) * (n.xyz + vec3(1f)), 1.0); } } diff --git a/src/pt/shader_source.hpp b/src/pt/shader_source.hpp index 263b158..02d984e 100644 --- a/src/pt/shader_source.hpp +++ b/src/pt/shader_source.hpp @@ -756,20 +756,18 @@ fn vsMain(in: VertexInput) -> VertexOutput { return out; } -@group(0) @binding(0) var textureSampler: sampler; -@group(0) @binding(1) var gbufferAlbedo: texture_2d<f32>; -@group(0) @binding(2) var gbufferNormal: texture_2d<f32>; +@group(0) @binding(0) var<uniform> framebufferSize: vec2f; +@group(1) @binding(0) var gbufferAlbedo: texture_2d<f32>; +@group(1) @binding(1) var gbufferNormal: texture_2d<f32>; @fragment fn fsMain(in: VertexOutput) -> @location(0) vec4f { - // NOTE: textureSample can't be called from non-uniform control flow - // TODO: replace with textureLoad calls which can be called from non-uniform control flow and get rid of the sampler let c = in.texCoord; - let a = textureSample(gbufferAlbedo, textureSampler, c); - let n = textureSample(gbufferNormal, textureSampler, c); + let idx = vec2u(floor(c * framebufferSize)); if c.x < 0.5 { - return a; + return textureLoad(gbufferAlbedo, idx, 0); } else { + let n = textureLoad(gbufferNormal, idx, 0); return vec4(vec3(0.5) * (n.xyz + vec3(1f)), 1.0); } }