From 7436a6a3346efaf3ef767e7dc97014d5f0060db8 Mon Sep 17 00:00:00 2001 From: Basit Ayantunde Date: Wed, 13 Dec 2023 05:12:57 +0000 Subject: [PATCH] update --- ashura/gfx.h | 25 +++- ashura/math.h | 12 ++ ashura/primitives.h | 13 +- ashura/span.h | 175 ++++++++++++++++++++++ ashura/types.h | 345 ++++++++++++++++++++++++++++--------------- ashura/vulkan_gfx.cc | 43 +++--- ashura/vulkan_gfx.h | 5 +- 7 files changed, 466 insertions(+), 152 deletions(-) create mode 100644 ashura/math.h create mode 100644 ashura/span.h diff --git a/ashura/gfx.h b/ashura/gfx.h index 9010a30ae..a21210585 100644 --- a/ashura/gfx.h +++ b/ashura/gfx.h @@ -31,7 +31,12 @@ constexpr u32 MAX_COMPUTE_GROUP_COUNT_Y = 1024; constexpr u32 MAX_COMPUTE_GROUP_COUNT_Z = 1024; constexpr u32 MAX_SWAPCHAIN_IMAGES = 8; +typedef Vec2U Offset; +typedef Vec2U Extent; +typedef Vec3U Offset3D; +typedef Vec3U Extent3D; typedef u64 FrameId; +typedef u64 Generation; typedef struct Buffer_T *Buffer; typedef struct BufferView_T *BufferView; typedef struct Image_T *Image; @@ -761,7 +766,8 @@ struct MemoryRange struct Viewport { - Rect area; + Vec2 offset; + Vec2 extent; f32 min_depth = 0; f32 max_depth = 0; }; @@ -1224,7 +1230,7 @@ struct SwapchainDesc /// avoid storing pointers to its data members. struct SwapchainInfo { - u64 generation = 0; + Generation generation = 0; Extent extent = {}; SurfaceFormat format = {}; Span images = {}; @@ -1497,7 +1503,9 @@ struct DeviceInterface Result (*get_surface_formats)( Device self, Surface surface, Span formats) = nullptr; Result (*get_surface_present_modes)( - Device self, Surface surface, Span modes) = nullptr; + Device self, Surface surface, Span modes) = nullptr; + Result (*get_surface_usage)(Device self, + Surface surface) = nullptr; Result (*get_swapchain_info)( Device self, Swapchain swapchain) = nullptr; Result (*invalidate_swapchain)( @@ -1708,6 +1716,9 @@ struct DeviceImpl // // +// +// +// struct InstanceInterface { Result (*create)(AllocatorImpl allocator) = nullptr; @@ -1719,6 +1730,14 @@ struct InstanceInterface AllocatorImpl allocator) = nullptr; Result (*create_headless_surface)(Instance instance) = nullptr; + void create_win32_surface(); + void create_x11_surface(); + void create_wayland_surface(); + void create_xlib_surface(); + void create_metal_surface(); + void create_macos_surface(); + void create_ios_surface(); + void create_android_surface(); }; } // namespace gfx diff --git a/ashura/math.h b/ashura/math.h new file mode 100644 index 000000000..907966227 --- /dev/null +++ b/ashura/math.h @@ -0,0 +1,12 @@ +#pragma once + +#include "types.h" +#include + +namespace ash +{ +namespace math +{ + +} +} // namespace ash diff --git a/ashura/primitives.h b/ashura/primitives.h index c4d785b71..447f43a0c 100644 --- a/ashura/primitives.h +++ b/ashura/primitives.h @@ -14,18 +14,6 @@ namespace ash { -template -constexpr bool has_bits(T src, T cmp) -{ - return (src & cmp) == cmp; -} - -template -constexpr bool has_any_bit(T src, T cmp) -{ - return (src & cmp) != (T) 0; -} - using Clock = std::chrono::steady_clock; // monotonic system clock using timepoint = Clock::time_point; using nanoseconds = std::chrono::nanoseconds; @@ -297,6 +285,7 @@ struct tri }; // each coordinate is an edge of the quad +// TODO(lamarrr): rename this struct Quad { Vec2 p0, p1, p2, p3; diff --git a/ashura/span.h b/ashura/span.h new file mode 100644 index 000000000..81ad39692 --- /dev/null +++ b/ashura/span.h @@ -0,0 +1,175 @@ +#pragma once +#include "ashura/types.h" + +namespace ash +{ + +template +struct Span +{ + T 
*data = nullptr; + usize size = 0; + + constexpr usize size_bytes() const + { + return sizeof(T) * size; + } + + constexpr bool is_empty() const + { + return size == 0; + } + + constexpr T *begin() const + { + return data; + } + + constexpr T *end() const + { + return data + size; + } + + constexpr T &operator[](usize index) const + { + return data[index]; + } + + constexpr Span operator[](Slice slice) const + { + // written such that overflow will not occur even if both offset and size + // are set to USIZE_MAX + slice.offset = slice.offset > size ? size : slice.offset; + slice.size = + (size - slice.offset) > slice.size ? slice.size : (size - slice.offset); + return Span{data + slice.offset, slice.size}; + } + + constexpr operator Span() const + { + return Span{data, size}; + } +}; + +// A span with bit access semantics +template +struct BitSpan +{ + static constexpr u32 NUM_BITS_PER_PACK = sizeof(UnsignedInteger) << 3; + + Span body = {}; + Slice current_slice = {}; + + constexpr bool is_empty() const + { + return current_slice.size == 0; + } + + constexpr bool operator[](usize offset) const + { + offset = current_slice.offset + offset; + bool bit = (body.data[offset / NUM_BITS_PER_PACK] >> + (offset % NUM_BITS_PER_PACK)) & + 1U; + return bit; + } + + constexpr void set(usize offset, bool bit) const + { + offset = current_slice.offset + offset; + UnsignedInteger &out = body.data[offset / NUM_BITS_PER_PACK]; + usize bit_offset = offset % NUM_BITS_PER_PACK; + out = (out & ~((UnsignedInteger) 1 << bit_offset)) | + ((UnsignedInteger) bit << bit_offset); + } + + constexpr void toggle(usize offset) const + { + offset = current_slice.offset + offset; + UnsignedInteger &out = body.data[offset / NUM_BITS_PER_PACK]; + out = out ^ (1ULL << (offset % NUM_BITS_PER_PACK)); + } + + constexpr BitSpan operator[](Slice slice) const + { + slice.offset += current_slice.offset; + slice.offset = + slice.offset > current_slice.size ? current_slice.size : slice.offset; + slice.size = (current_slice.size - slice.offset) > slice.size ? 
+ slice.size : + (current_slice.size - slice.offset); + return BitSpan{body, slice}; + } +}; + +namespace span +{ +template +constexpr Span slice(Span self, usize offset) +{ + return self[Slice{offset, USIZE_MAX}]; +} + +template +constexpr Span slice(Span self, usize offset, usize size) +{ + return self[Slice{offset, size}]; +} + +template +constexpr BitSpan slice(BitSpan span, usize offset) +{ + return span[Slice{offset, USIZE_MAX}]; +} + +template +constexpr BitSpan slice(BitSpan span, usize bit_offset, usize bit_size) +{ + return span[Slice{bit_offset, bit_size}]; +} + +template +constexpr Span as_const(Span self) +{ + return Span{self.data, self.size}; +} + +template +constexpr Span as_u8(Span self) +{ + return Span{reinterpret_cast(self.data), self.size_bytes()}; +} + +template +constexpr Span as_u8(Span self) +{ + return Span{reinterpret_cast(self.data), + self.size_bytes()}; +} + +template +constexpr Span as_char(Span self) +{ + return Span{reinterpret_cast(self.data), self.size_bytes()}; +} + +template +constexpr Span as_char(Span self) +{ + return Span{reinterpret_cast(self.data), + self.size_bytes()}; +} + +template +constexpr Span from_array(T (&array)[N]) +{ + return Span{array, N}; +} + +template +constexpr auto from_std_container(StdContainer &container) +{ + return Span{container.data(), container.size()}; +} +} // namespace span +} // namespace ash diff --git a/ashura/types.h b/ashura/types.h index 148067a8b..654ad5a9c 100644 --- a/ashura/types.h +++ b/ashura/types.h @@ -64,172 +64,283 @@ constexpr usize MAX_STANDARD_ALIGNMENT = alignof(max_align_t); constexpr f32 PI = 3.14159265358979323846f; -struct Slice +template +constexpr bool has_bits(T src, T cmp) { - usize offset = 0; - usize size = 0; -}; + return (src & cmp) == cmp; +} template -struct Span +constexpr bool has_any_bit(T src, T cmp) { - T *data = nullptr; - usize size = 0; + return (src & cmp) != (T) 0; +} - constexpr usize size_bytes() const - { - return sizeof(T) * size; - } +typedef struct Vec2 Vec2; +typedef struct Vec2 Complex; +typedef struct Vec3 Vec3; +typedef struct Vec4 Vec4; +typedef struct Vec4 Quaternion; +typedef struct Vec4 Color; +typedef struct Vec4U8 Vec4U8; +typedef struct Vec4U8 ColorU8; +typedef struct Vec2I Vec2I; +typedef struct Vec3I Vec3I; +typedef struct Vec2U Vec2U; +typedef struct Vec3U Vec3U; +typedef struct Vec4U Vec4U; +typedef struct Vec4U ColorU; +typedef struct Mat2 Mat2; +typedef struct Mat3 Mat3; +typedef struct Mat3Affine Mat3Affine; +typedef struct Mat4 Mat4; +typedef struct Mat4Affine Mat4Affine; +typedef struct Slice Slice; +template +struct Span; - constexpr bool is_empty() const +struct Vec2 +{ + union { - return size == 0; - } + f32 c = 0; + f32 i; + f32 width; + }; - constexpr T *begin() const + union { - return data; - } + f32 y = 0; + f32 j; + f32 height; + }; +}; - constexpr T *end() const +struct Vec3 +{ + union { - return data + size; - } + f32 x = 0; + f32 r; + f32 width; + }; - constexpr T &operator[](usize i) const + union { - return data[i]; - } + f32 y = 0; + f32 g; + f32 height; + }; - constexpr Span operator[](Slice slice) const + union { - // written such that overflow will not occur even if both offset and size - // are set to USIZE_MAX - slice.offset = slice.offset > size ? size : slice.offset; - slice.size = - (size - slice.offset) > slice.size ? 
slice.size : (size - slice.offset); - return Span{data + slice.offset, slice.size}; - } + f32 z = 0; + f32 b; + f32 depth; + }; +}; - constexpr operator Span() const +struct Vec4 +{ + union { - return Span{data, size}; - } + f32 x = 0; + f32 c; + f32 r; + f32 width; + }; - constexpr Span slice(usize offset) const + union { - return (*this)[Slice{offset, USIZE_MAX}]; - } + f32 y = 0; + f32 i; + f32 g; + f32 height; + }; - constexpr Span slice(usize offset, usize size) const + union { - return (*this)[Slice{offset, size}]; - } + f32 z = 0; + f32 j; + f32 b; + f32 depth; + }; - constexpr Span slice(Slice s) const + union { - return (*this)[s]; - } + f32 w = 0; + f32 k; + f32 a; + f32 hyper; + }; }; -template -constexpr Span as_const(Span span) +struct Vec4U8 { - return Span{span.data, span.size}; -} + union + { + u8 x = 0; + u8 r; + u8 width; + }; -template -constexpr Span as_u8(Span span) -{ - return Span{reinterpret_cast(span.data), span.size_bytes()}; -} + union + { + u8 y = 0; + u8 g; + u8 height; + }; -template -constexpr Span as_char(Span span) -{ - return Span{reinterpret_cast(span.data), span.size_bytes()}; -} + union + { + u8 z = 0; + u8 b; + u8 depth; + }; -template -constexpr Span as_u8(Span span) -{ - return Span{reinterpret_cast(span.data), - span.size_bytes()}; -} + union + { + u8 w = 0; + u8 a; + u8 hyper; + }; +}; -template -constexpr Span as_char(Span span) +struct Vec2I { - return Span{reinterpret_cast(span.data), - span.size_bytes()}; -} + union + { + i32 x = 0; + i32 width; + }; -template -constexpr Span span_from_array(T (&array)[N]) -{ - return Span{array, N}; -} + union + { + i32 y = 0; + i32 height; + }; +}; -template -constexpr auto span_from_std_container(StdContainer &container) +struct Vec3I { - return Span{container.data(), container.size()}; -} + union + { + i32 x = 0; + i32 width; + }; -// A span with bit access semantics -template -struct BitSpan + union + { + i32 y = 0; + i32 height; + }; + + union + { + i32 z = 0; + i32 depth; + }; +}; + +struct Vec2U { - static constexpr u32 NUM_BITS_PER_PACK = sizeof(UnsignedInteger) << 3; + union + { + u32 x = 0; + u32 width; + }; + + union + { + u32 y = 0; + u32 height; + }; +}; - Span body = {}; - Slice current_slice = {}; +struct Vec3U +{ + union + { + u32 x = 0; + u32 r; + u32 width; + }; - constexpr bool is_empty() const + union { - return current_slice.size == 0; - } + u32 y = 0; + u32 g; + u32 height; + }; - constexpr bool operator[](usize offset) const + union { - offset = current_slice.offset + offset; - bool bit = (body.data[offset / NUM_BITS_PER_PACK] >> - (offset % NUM_BITS_PER_PACK)) & - 1U; - return bit; - } + u32 z = 0; + u32 b; + u32 depth; + }; +}; - constexpr void set(usize offset, bool bit) const +struct Vec4U +{ + union { - offset = current_slice.offset + offset; - UnsignedInteger &out = body.data[offset / NUM_BITS_PER_PACK]; - usize bit_offset = offset % NUM_BITS_PER_PACK; - out = (out & ~((UnsignedInteger) 1 << bit_offset)) | - ((UnsignedInteger) bit << bit_offset); - } + u32 x = 0; + u32 r; + u32 width; + }; - constexpr void toggle(usize offset) const + union { - offset = current_slice.offset + offset; - UnsignedInteger &out = body.data[offset / NUM_BITS_PER_PACK]; - out = out ^ (1ULL << (offset % NUM_BITS_PER_PACK)); - } + u32 y = 0; + u32 g; + u32 height; + }; - constexpr BitSpan operator[](Slice slice) const + union { - slice.offset += current_slice.offset; - slice.offset = - slice.offset > current_slice.size ? 
current_slice.size : slice.offset; - slice.size = (current_slice.size - slice.offset) > slice.size ? - slice.size : - (current_slice.size - slice.offset); - return BitSpan{body, slice}; - } + u32 z = 0; + u32 b; + u32 depth; + }; - constexpr BitSpan slice(Slice s) const + union { - return (*this)[s]; - } + u32 w = 0; + u32 a; + u32 hyper; + }; +}; + +struct Mat2 +{ + Vec2 rows[2] = {}; +}; + +struct Mat3 +{ + Vec3 rows[3] = {}; +}; + +struct Mat3Affine +{ + Vec3 rows[2] = {}; +}; + +struct Mat4 +{ + Vec4 rows[3] = {}; +}; + +struct Mat4Affine +{ + Vec4 rows[3] = {}; +}; + +struct Slice +{ + usize offset = 0; + usize size = 0; }; } // namespace ash diff --git a/ashura/vulkan_gfx.cc b/ashura/vulkan_gfx.cc index 34354173d..f56489557 100644 --- a/ashura/vulkan_gfx.cc +++ b/ashura/vulkan_gfx.cc @@ -100,6 +100,7 @@ static gfx::DeviceInterface const device_interface{ .get_frame_info = DeviceInterface::get_frame_info, .get_surface_formats = DeviceInterface::get_surface_formats, .get_surface_present_modes = DeviceInterface::get_surface_present_modes, + .get_surface_usage = DeviceInterface::get_surface_usage, .get_swapchain_info = DeviceInterface::get_swapchain_info, .invalidate_swapchain = DeviceInterface::invalidate_swapchain, .begin_frame = DeviceInterface::begin_frame, @@ -184,14 +185,16 @@ void CmdDebugMarkerInsertEXT_Stub(VkCommandBuffer, { } -bool load_instance_table(VkInstance instance, InstanceTable &vk_instance_table) +bool load_instance_table(VkInstance instance, + PFN_vkGetInstanceProcAddr GetInstanceProcAddr, + InstanceTable *vk_instance_table) { bool all_loaded = true; -#define LOAD_VK(function) \ - vk_instance_table.function = \ - (PFN_vk##function) vkGetInstanceProcAddr(instance, "vk" #function); \ - all_loaded = all_loaded && (vk_instance_table.function != nullptr) +#define LOAD_VK(function) \ + vk_instance_table->function = \ + (PFN_vk##function) GetInstanceProcAddr(instance, "vk" #function); \ + all_loaded = all_loaded && (vk_instance_table->function != nullptr) LOAD_VK(CreateDebugReportCallbackEXT); LOAD_VK(CreateDebugUtilsMessengerEXT); @@ -217,20 +220,22 @@ bool load_instance_table(VkInstance instance, InstanceTable &vk_instance_table) LOAD_VK(GetPhysicalDeviceSurfaceCapabilitiesKHR); LOAD_VK(GetPhysicalDeviceSurfaceFormatsKHR); LOAD_VK(GetPhysicalDeviceSurfacePresentModesKHR); + // TODO(lamarrr): platform surface functions loaders #undef LOAD_VK return all_loaded; } -bool load_device_table(VkDevice device, DeviceTable &vk_table, - VmaVulkanFunctions &vma_table) +bool load_device_table(VkDevice device, + PFN_vkGetDeviceProcAddr GetDeviceProcAddr, + DeviceTable *vk_table, VmaVulkanFunctions *vma_table) { bool all_loaded = true; -#define LOAD_VK(function) \ - vk_table.function = \ - (PFN_vk##function) vkGetDeviceProcAddr(device, "vk" #function); \ - all_loaded = all_loaded && (vk_table.function != nullptr) +#define LOAD_VK(function) \ + vk_table->function = \ + (PFN_vk##function) GetDeviceProcAddr(device, "vk" #function); \ + all_loaded = all_loaded && (vk_table->function != nullptr) // DEVICE OBJECT FUNCTIONS LOAD_VK(AllocateCommandBuffers); @@ -362,11 +367,11 @@ bool load_device_table(VkDevice device, DeviceTable &vk_table, #undef LOAD_VK -#define LOAD_VK_STUBBED(function) \ - vk_table.function = \ - (PFN_vk##function) vkGetDeviceProcAddr(device, "vk" #function); \ - vk_table.function = \ - (vk_table.function != nullptr) ? 
vk_table.function : function##_Stub; +#define LOAD_VK_STUBBED(function) \ + vk_table->function = \ + (PFN_vk##function) GetDeviceProcAddr(device, "vk" #function); \ + vk_table->function = \ + (vk_table->function != nullptr) ? vk_table->function : function##_Stub; LOAD_VK_STUBBED(DebugMarkerSetObjectTagEXT); LOAD_VK_STUBBED(DebugMarkerSetObjectNameEXT); @@ -377,7 +382,7 @@ bool load_device_table(VkDevice device, DeviceTable &vk_table, #undef LOAD_VK_STUBBED -#define SET_VMA(function) vma_table.vk##function = vk_table.function +#define SET_VMA(function) vma_table->vk##function = vk_table->function SET_VMA(AllocateMemory); SET_VMA(FreeMemory); SET_VMA(UnmapMemory); @@ -910,8 +915,8 @@ Result { Device *const self = (Device *) self_; VkFormatProperties props; - vkGetPhysicalDeviceFormatProperties(self->vk_phy_device, (VkFormat) format, - &props); + self->vk_instance_table.GetPhysicalDeviceFormatProperties( + self->vk_phy_device, (VkFormat) format, &props); return Ok(gfx::FormatProperties{ .linear_tiling_features = (gfx::FormatFeatures) props.linearTilingFeatures, diff --git a/ashura/vulkan_gfx.h b/ashura/vulkan_gfx.h index f0a0db0cc..c150378cc 100644 --- a/ashura/vulkan_gfx.h +++ b/ashura/vulkan_gfx.h @@ -1,6 +1,7 @@ #pragma once #define VMA_STATIC_VULKAN_FUNCTIONS 0 #define VMA_DYNAMIC_VULKAN_FUNCTIONS 0 +#define VK_NO_PROTOTYPES #include "ashura/allocator.h" #include "ashura/gfx.h" @@ -454,7 +455,7 @@ struct FrameContext struct Swapchain { - u64 generation = 0; + gfx::Generation generation = 0; gfx::SwapchainDesc desc = {}; bool is_valid = false; bool is_optimal = false; @@ -597,6 +598,8 @@ struct DeviceInterface static Result get_surface_present_modes(gfx::Device self, gfx::Surface surface, Span modes); + static Result (*get_surface_usage)( + gfx::Device self, gfx::Surface surface); static Result get_swapchain_info(gfx::Device self, gfx::Swapchain swapchain); static Result
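
Note (illustrative sketch, not part of the patch): the new Span and BitSpan types added in ashura/span.h clamp Slice requests against the source extent instead of asserting, so an oversized offset or size degrades to a truncated or empty view even when both are USIZE_MAX. The snippet below shows that behaviour plus the bit-level set/toggle access of BitSpan. Only Span, BitSpan, Slice, USIZE_MAX and span::from_array are taken from this patch; the sample values and the small test harness are assumptions made for the sketch.

    #include "ashura/span.h"

    int main()
    {
      using namespace ash;

      u32       samples[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      Span<u32> s          = span::from_array(samples);

      // Slice requests are clamped, so out-of-range offsets and oversized
      // sizes cannot overflow; they just shrink the resulting view.
      Span<u32> tail = s[Slice{6, USIZE_MAX}];        // view of the last 2 elements
      Span<u32> none = s[Slice{32, 4}];               // offset past the end -> empty view

      // BitSpan addresses individual bits of a packed unsigned-integer buffer,
      // relative to current_slice.offset.
      u64          packs[2] = {};
      BitSpan<u64> bits{span::from_array(packs), Slice{0, 128}};
      bits.set(3, true);        // packs[0] bit 3 -> 1
      bits.toggle(3);           // packs[0] bit 3 -> 0

      return (tail.size == 2 && none.is_empty() && !bits[3]) ? 0 : 1;
    }

The same clamping also backs the free functions in namespace span (slice, as_const, as_u8, as_char), which forward to operator[](Slice) rather than duplicating the bounds logic.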