shader_ir: Turn classes into data structures
parent 26f2820ae3
commit 72deb773fd
13 changed files with 197 additions and 299 deletions
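The commit carries no description beyond its title, so the pattern is easiest to see side by side: the shader IR types (Sampler, Image, GlobalMemoryEntry, ConstBuffer) stop hiding their state behind trivial getters and become plain structs whose members are read directly, with default member initializers replacing empty-brace initializers. A minimal sketch of that pattern, using a hypothetical Entry type rather than one from the diff:

#include <cstdint>

// Before: trivial getters around private state.
class EntryBefore {
public:
    constexpr explicit EntryBefore(std::uint32_t offset, bool is_written)
        : offset{offset}, is_written{is_written} {}

    constexpr std::uint32_t GetOffset() const {
        return offset;
    }

    constexpr bool IsWritten() const {
        return is_written;
    }

private:
    std::uint32_t offset{};
    bool is_written{};
};

// After: a plain data structure; call sites read entry.offset instead of entry.GetOffset().
struct EntryAfter {
    constexpr explicit EntryAfter(std::uint32_t offset, bool is_written)
        : offset{offset}, is_written{is_written} {}

    std::uint32_t offset = 0;
    bool is_written = false;
};

Nothing about the stored data changes; the churn in the renderer files below is the mechanical renaming of accessor calls to member reads.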
@@ -59,14 +59,12 @@ constexpr std::size_t NumSupportedVertexAttributes = 16;
 template <typename Engine, typename Entry>
 Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
                                                ShaderType shader_type, std::size_t index = 0) {
-    if (entry.IsBindless()) {
-        const Tegra::Texture::TextureHandle tex_handle =
-            engine.AccessConstBuffer32(shader_type, entry.GetBuffer(), entry.GetOffset());
+    if (entry.is_bindless) {
+        const auto tex_handle = engine.AccessConstBuffer32(shader_type, entry.buffer, entry.offset);
         return engine.GetTextureInfo(tex_handle);
     }
     const auto& gpu_profile = engine.AccessGuestDriverProfile();
-    const u32 offset =
-        entry.GetOffset() + static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
+    const u32 offset = entry.offset + static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
     if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) {
         return engine.GetStageTexture(shader_type, offset);
     } else {

@@ -829,9 +827,9 @@ void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shad

     u32 binding = device.GetBaseBindings(stage_index).shader_storage_buffer;
     for (const auto& entry : shader->GetEntries().global_memory_entries) {
-        const auto addr{cbufs.const_buffers[entry.GetCbufIndex()].address + entry.GetCbufOffset()};
-        const auto gpu_addr{memory_manager.Read<u64>(addr)};
-        const auto size{memory_manager.Read<u32>(addr + 8)};
+        const GPUVAddr addr{cbufs.const_buffers[entry.cbuf_index].address + entry.cbuf_offset};
+        const GPUVAddr gpu_addr{memory_manager.Read<u64>(addr)};
+        const u32 size{memory_manager.Read<u32>(addr + 8)};
         SetupGlobalMemory(binding++, entry, gpu_addr, size);
     }
 }

@@ -843,7 +841,7 @@ void RasterizerOpenGL::SetupComputeGlobalMemory(const Shader& kernel) {

     u32 binding = 0;
     for (const auto& entry : kernel->GetEntries().global_memory_entries) {
-        const auto addr{cbufs[entry.GetCbufIndex()].Address() + entry.GetCbufOffset()};
+        const auto addr{cbufs[entry.cbuf_index].Address() + entry.cbuf_offset};
         const auto gpu_addr{memory_manager.Read<u64>(addr)};
         const auto size{memory_manager.Read<u32>(addr + 8)};
         SetupGlobalMemory(binding++, entry, gpu_addr, size);

@@ -854,7 +852,7 @@ void RasterizerOpenGL::SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& e
                                          GPUVAddr gpu_addr, std::size_t size) {
     const auto alignment{device.GetShaderStorageBufferAlignment()};
     const auto [ssbo, buffer_offset] =
-        buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.IsWritten());
+        buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.is_written);
     glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding, ssbo, buffer_offset,
                       static_cast<GLsizeiptr>(size));
 }

@@ -865,7 +863,7 @@ void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, const Shader&
     u32 binding = device.GetBaseBindings(stage_index).sampler;
     for (const auto& entry : shader->GetEntries().samplers) {
         const auto shader_type = static_cast<ShaderType>(stage_index);
-        for (std::size_t i = 0; i < entry.Size(); ++i) {
+        for (std::size_t i = 0; i < entry.size; ++i) {
             const auto texture = GetTextureInfo(maxwell3d, entry, shader_type, i);
             SetupTexture(binding++, texture, entry);
         }

@@ -877,7 +875,7 @@ void RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) {
     const auto& compute = system.GPU().KeplerCompute();
     u32 binding = 0;
     for (const auto& entry : kernel->GetEntries().samplers) {
-        for (std::size_t i = 0; i < entry.Size(); ++i) {
+        for (std::size_t i = 0; i < entry.size; ++i) {
             const auto texture = GetTextureInfo(compute, entry, ShaderType::Compute, i);
             SetupTexture(binding++, texture, entry);
         }

@@ -934,7 +932,7 @@ void RasterizerOpenGL::SetupImage(u32 binding, const Tegra::Texture::TICEntry& t
     if (!tic.IsBuffer()) {
         view->ApplySwizzle(tic.x_source, tic.y_source, tic.z_source, tic.w_source);
     }
-    if (entry.IsWritten()) {
+    if (entry.is_written) {
         view->MarkAsModified(texture_cache.Tick());
     }
     glBindImageTexture(binding, view->GetTexture(), 0, GL_TRUE, 0, GL_READ_WRITE,
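GetTextureInfo is templated on Entry and only touches a handful of fields, so after this change any entry type with public is_bindless, buffer, offset and size members satisfies it, whether it describes a sampler or an image. A small compile-time sketch of that duck typing, with a hypothetical FakeEntry type:

#include <cstddef>
#include <cstdint>

struct FakeEntry {
    std::uint32_t buffer = 0;
    std::uint32_t offset = 0;
    std::size_t size = 1;
    bool is_bindless = false;
};

// Mirrors the bound (non-bindless) path above: the texture handle sits at the entry
// offset plus index * handler size inside the bound const buffer.
template <typename Entry>
constexpr std::uint32_t HandleOffset(const Entry& entry, std::size_t index,
                                     std::uint32_t handler_size) {
    return entry.offset + static_cast<std::uint32_t>(index * handler_size);
}

static_assert(HandleOffset(FakeEntry{0, 0x20, 4, false}, 2, 4) == 0x28);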
@@ -870,13 +870,13 @@ private:
         for (const auto& sampler : ir.GetSamplers()) {
             const std::string name = GetSampler(sampler);
             const std::string description = fmt::format("layout (binding = {}) uniform", binding);
-            binding += sampler.IsIndexed() ? sampler.Size() : 1;
+            binding += sampler.is_indexed ? sampler.size : 1;

             std::string sampler_type = [&]() {
-                if (sampler.IsBuffer()) {
+                if (sampler.is_buffer) {
                     return "samplerBuffer";
                 }
-                switch (sampler.GetType()) {
+                switch (sampler.type) {
                 case Tegra::Shader::TextureType::Texture1D:
                     return "sampler1D";
                 case Tegra::Shader::TextureType::Texture2D:

@@ -890,17 +890,17 @@ private:
                     return "sampler2D";
                 }
             }();
-            if (sampler.IsArray()) {
+            if (sampler.is_array) {
                 sampler_type += "Array";
             }
-            if (sampler.IsShadow()) {
+            if (sampler.is_shadow) {
                 sampler_type += "Shadow";
             }

-            if (!sampler.IsIndexed()) {
+            if (!sampler.is_indexed) {
                 code.AddLine("{} {} {};", description, sampler_type, name);
             } else {
-                code.AddLine("{} {} {}[{}];", description, sampler_type, name, sampler.Size());
+                code.AddLine("{} {} {}[{}];", description, sampler_type, name, sampler.size);
             }
         }
         if (!ir.GetSamplers().empty()) {

@@ -946,14 +946,14 @@ private:
         u32 binding = device.GetBaseBindings(stage).image;
         for (const auto& image : ir.GetImages()) {
             std::string qualifier = "coherent volatile";
-            if (image.IsRead() && !image.IsWritten()) {
+            if (image.is_read && !image.is_written) {
                 qualifier += " readonly";
-            } else if (image.IsWritten() && !image.IsRead()) {
+            } else if (image.is_written && !image.is_read) {
                 qualifier += " writeonly";
             }

-            const char* format = image.IsAtomic() ? "r32ui, " : "";
-            const char* type_declaration = GetImageTypeDeclaration(image.GetType());
+            const char* format = image.is_atomic ? "r32ui, " : "";
+            const char* type_declaration = GetImageTypeDeclaration(image.type);
             code.AddLine("layout ({}binding = {}) {} uniform uimage{} {};", format, binding++,
                          qualifier, type_declaration, GetImage(image));
         }

@@ -1337,8 +1337,8 @@ private:
         ASSERT(meta);

         const std::size_t count = operation.GetOperandsCount();
-        const bool has_array = meta->sampler.IsArray();
-        const bool has_shadow = meta->sampler.IsShadow();
+        const bool has_array = meta->sampler.is_array;
+        const bool has_shadow = meta->sampler.is_shadow;

         std::string expr = "texture" + function_suffix;
         if (!meta->aoffi.empty()) {

@@ -1346,7 +1346,7 @@ private:
         } else if (!meta->ptp.empty()) {
             expr += "Offsets";
         }
-        if (!meta->sampler.IsIndexed()) {
+        if (!meta->sampler.is_indexed) {
             expr += '(' + GetSampler(meta->sampler) + ", ";
         } else {
             expr += '(' + GetSampler(meta->sampler) + '[' + Visit(meta->index).AsUint() + "], ";

@@ -1974,7 +1974,7 @@ private:

         std::string expr = GenerateTexture(
             operation, "", {TextureOffset{}, TextureArgument{Type::Float, meta->bias}});
-        if (meta->sampler.IsShadow()) {
+        if (meta->sampler.is_shadow) {
             expr = "vec4(" + expr + ')';
         }
         return {expr + GetSwizzle(meta->element), Type::Float};

@@ -1986,7 +1986,7 @@ private:

         std::string expr = GenerateTexture(
             operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureOffset{}});
-        if (meta->sampler.IsShadow()) {
+        if (meta->sampler.is_shadow) {
             expr = "vec4(" + expr + ')';
         }
         return {expr + GetSwizzle(meta->element), Type::Float};

@@ -1995,11 +1995,11 @@ private:
     Expression TextureGather(Operation operation) {
         const auto& meta = std::get<MetaTexture>(operation.GetMeta());

-        const auto type = meta.sampler.IsShadow() ? Type::Float : Type::Int;
-        const bool separate_dc = meta.sampler.IsShadow();
+        const auto type = meta.sampler.is_shadow ? Type::Float : Type::Int;
+        const bool separate_dc = meta.sampler.is_shadow;

         std::vector<TextureIR> ir;
-        if (meta.sampler.IsShadow()) {
+        if (meta.sampler.is_shadow) {
             ir = {TextureOffset{}};
         } else {
             ir = {TextureOffset{}, TextureArgument{type, meta.component}};

@@ -2044,7 +2044,7 @@ private:
         constexpr std::array constructors = {"int", "ivec2", "ivec3", "ivec4"};
         const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
         ASSERT(meta);
-        UNIMPLEMENTED_IF(meta->sampler.IsArray());
+        UNIMPLEMENTED_IF(meta->sampler.is_array);
         const std::size_t count = operation.GetOperandsCount();

         std::string expr = "texelFetch(";

@@ -2065,7 +2065,7 @@ private:
         }
         expr += ')';

-        if (meta->lod && !meta->sampler.IsBuffer()) {
+        if (meta->lod && !meta->sampler.is_buffer) {
             expr += ", ";
             expr += Visit(meta->lod).AsInt();
         }

@@ -2076,12 +2076,10 @@ private:
     }

     Expression TextureGradient(Operation operation) {
-        const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
-        ASSERT(meta);
-
+        const auto& meta = std::get<MetaTexture>(operation.GetMeta());
         std::string expr =
             GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureOffset{}});
-        return {std::move(expr) + GetSwizzle(meta->element), Type::Float};
+        return {std::move(expr) + GetSwizzle(meta.element), Type::Float};
     }

     Expression ImageLoad(Operation operation) {

@@ -2598,11 +2596,11 @@ private:
     }

     std::string GetSampler(const Sampler& sampler) const {
-        return AppendSuffix(static_cast<u32>(sampler.GetIndex()), "sampler");
+        return AppendSuffix(sampler.index, "sampler");
     }

     std::string GetImage(const Image& image) const {
-        return AppendSuffix(static_cast<u32>(image.GetIndex()), "image");
+        return AppendSuffix(image.index, "image");
     }

     std::string AppendSuffix(u32 index, std::string_view name) const {
@@ -33,36 +33,19 @@ public:
     }

 private:
-    u32 index{};
+    u32 index = 0;
 };

-class GlobalMemoryEntry {
-public:
-    explicit GlobalMemoryEntry(u32 cbuf_index, u32 cbuf_offset, bool is_read, bool is_written)
+struct GlobalMemoryEntry {
+    constexpr explicit GlobalMemoryEntry(u32 cbuf_index, u32 cbuf_offset, bool is_read,
+                                         bool is_written)
         : cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset}, is_read{is_read}, is_written{
                                                                                   is_written} {}

-    u32 GetCbufIndex() const {
-        return cbuf_index;
-    }
-
-    u32 GetCbufOffset() const {
-        return cbuf_offset;
-    }
-
-    bool IsRead() const {
-        return is_read;
-    }
-
-    bool IsWritten() const {
-        return is_written;
-    }
-
-private:
-    u32 cbuf_index{};
-    u32 cbuf_offset{};
-    bool is_read{};
-    bool is_written{};
+    u32 cbuf_index = 0;
+    u32 cbuf_offset = 0;
+    bool is_read = false;
+    bool is_written = false;
 };

 struct ShaderEntries {
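Because GlobalMemoryEntry above is now a constexpr-constructible struct with public members, an entry can be built and checked at compile time. A small usage sketch (the values are made up for illustration):

constexpr GlobalMemoryEntry entry{/*cbuf_index=*/0, /*cbuf_offset=*/0x40,
                                  /*is_read=*/true, /*is_written=*/false};
static_assert(entry.cbuf_offset == 0x40);
static_assert(entry.is_read && !entry.is_written);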
@@ -133,7 +133,7 @@ void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& bindi
         u32 count = 1;
         if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
             // Combined image samplers can be arrayed.
-            count = container[i].Size();
+            count = container[i].size;
         }
         VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
         entry.binding = binding++;

@@ -393,7 +393,7 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u3

     if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
         for (u32 i = 0; i < count; ++i) {
-            const u32 num_samplers = container[i].Size();
+            const u32 num_samplers = container[i].size;
             VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
             entry.dstBinding = binding;
             entry.dstArrayElement = 0;
@@ -119,14 +119,13 @@ template <typename Engine, typename Entry>
 Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
                                                std::size_t stage, std::size_t index = 0) {
     const auto stage_type = static_cast<Tegra::Engines::ShaderType>(stage);
-    if (entry.IsBindless()) {
-        const Tegra::Texture::TextureHandle tex_handle =
-            engine.AccessConstBuffer32(stage_type, entry.GetBuffer(), entry.GetOffset());
+    if (entry.is_bindless) {
+        const auto tex_handle = engine.AccessConstBuffer32(stage_type, entry.buffer, entry.offset);
         return engine.GetTextureInfo(tex_handle);
     }
     const auto& gpu_profile = engine.AccessGuestDriverProfile();
     const u32 entry_offset = static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
-    const u32 offset = entry.GetOffset() + entry_offset;
+    const u32 offset = entry.offset + entry_offset;
     if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) {
         return engine.GetStageTexture(stage_type, offset);
     } else {

@@ -961,7 +960,7 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::
     MICROPROFILE_SCOPE(Vulkan_Textures);
     const auto& gpu = system.GPU().Maxwell3D();
     for (const auto& entry : entries.samplers) {
-        for (std::size_t i = 0; i < entry.Size(); ++i) {
+        for (std::size_t i = 0; i < entry.size; ++i) {
             const auto texture = GetTextureInfo(gpu, entry, stage, i);
             SetupTexture(texture, entry);
         }

@@ -1013,7 +1012,7 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
     const auto& gpu = system.GPU().KeplerCompute();
     for (const auto& entry : entries.samplers) {
-        for (std::size_t i = 0; i < entry.Size(); ++i) {
+        for (std::size_t i = 0; i < entry.size; ++i) {
             const auto texture = GetTextureInfo(gpu, entry, ComputeShaderIndex, i);
             SetupTexture(texture, entry);
         }

@@ -1094,7 +1093,7 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
 void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) {
     auto view = texture_cache.GetImageSurface(tic, entry);

-    if (entry.IsWritten()) {
+    if (entry.is_written) {
         view->MarkAsModified(texture_cache.Tick());
     }

@@ -103,8 +103,8 @@ struct GenericVaryingDescription {
 };

 spv::Dim GetSamplerDim(const Sampler& sampler) {
-    ASSERT(!sampler.IsBuffer());
-    switch (sampler.GetType()) {
+    ASSERT(!sampler.is_buffer);
+    switch (sampler.type) {
     case Tegra::Shader::TextureType::Texture1D:
         return spv::Dim::Dim1D;
     case Tegra::Shader::TextureType::Texture2D:

@@ -114,13 +114,13 @@ spv::Dim GetSamplerDim(const Sampler& sampler) {
     case Tegra::Shader::TextureType::TextureCube:
         return spv::Dim::Cube;
     default:
-        UNIMPLEMENTED_MSG("Unimplemented sampler type={}", static_cast<u32>(sampler.GetType()));
+        UNIMPLEMENTED_MSG("Unimplemented sampler type={}", static_cast<int>(sampler.type));
         return spv::Dim::Dim2D;
     }
 }

 std::pair<spv::Dim, bool> GetImageDim(const Image& image) {
-    switch (image.GetType()) {
+    switch (image.type) {
     case Tegra::Shader::ImageType::Texture1D:
         return {spv::Dim::Dim1D, false};
     case Tegra::Shader::ImageType::TextureBuffer:

@@ -134,7 +134,7 @@ std::pair<spv::Dim, bool> GetImageDim(const Image& image) {
     case Tegra::Shader::ImageType::Texture3D:
         return {spv::Dim::Dim3D, false};
     default:
-        UNIMPLEMENTED_MSG("Unimplemented image type={}", static_cast<u32>(image.GetType()));
+        UNIMPLEMENTED_MSG("Unimplemented image type={}", static_cast<int>(image.type));
         return {spv::Dim::Dim2D, false};
     }
 }

@@ -879,11 +879,11 @@ private:

     u32 DeclareTexelBuffers(u32 binding) {
         for (const auto& sampler : ir.GetSamplers()) {
-            if (!sampler.IsBuffer()) {
+            if (!sampler.is_buffer) {
                 continue;
             }
-            ASSERT(!sampler.IsArray());
-            ASSERT(!sampler.IsShadow());
+            ASSERT(!sampler.is_array);
+            ASSERT(!sampler.is_shadow);

             constexpr auto dim = spv::Dim::Buffer;
             constexpr int depth = 0;

@@ -894,23 +894,23 @@ private:
             const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format);
             const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
             const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
-            AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex())));
+            AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
             Decorate(id, spv::Decoration::Binding, binding++);
             Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);

-            texel_buffers.emplace(sampler.GetIndex(), TexelBuffer{image_type, id});
+            texel_buffers.emplace(sampler.index, TexelBuffer{image_type, id});
         }
         return binding;
     }

     u32 DeclareSamplers(u32 binding) {
         for (const auto& sampler : ir.GetSamplers()) {
-            if (sampler.IsBuffer()) {
+            if (sampler.is_buffer) {
                 continue;
             }
             const auto dim = GetSamplerDim(sampler);
-            const int depth = sampler.IsShadow() ? 1 : 0;
-            const int arrayed = sampler.IsArray() ? 1 : 0;
+            const int depth = sampler.is_shadow ? 1 : 0;
+            const int arrayed = sampler.is_array ? 1 : 0;
             constexpr bool ms = false;
             constexpr int sampled = 1;
             constexpr auto format = spv::ImageFormat::Unknown;

@@ -918,17 +918,17 @@ private:
             const Id sampler_type = TypeSampledImage(image_type);
             const Id sampler_pointer_type =
                 TypePointer(spv::StorageClass::UniformConstant, sampler_type);
-            const Id type = sampler.IsIndexed()
-                                ? TypeArray(sampler_type, Constant(t_uint, sampler.Size()))
+            const Id type = sampler.is_indexed
+                                ? TypeArray(sampler_type, Constant(t_uint, sampler.size))
                                 : sampler_type;
             const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, type);
             const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
-            AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex())));
+            AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
             Decorate(id, spv::Decoration::Binding, binding++);
             Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);

-            sampled_images.emplace(sampler.GetIndex(), SampledImage{image_type, sampler_type,
-                                                                    sampler_pointer_type, id});
+            sampled_images.emplace(
+                sampler.index, SampledImage{image_type, sampler_type, sampler_pointer_type, id});
         }
         return binding;
     }

@@ -943,17 +943,17 @@ private:
             const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
             const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
             const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
-            AddGlobalVariable(Name(id, fmt::format("image_{}", image.GetIndex())));
+            AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));

             Decorate(id, spv::Decoration::Binding, binding++);
             Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
-            if (image.IsRead() && !image.IsWritten()) {
+            if (image.is_read && !image.is_written) {
                 Decorate(id, spv::Decoration::NonWritable);
-            } else if (image.IsWritten() && !image.IsRead()) {
+            } else if (image.is_written && !image.is_read) {
                 Decorate(id, spv::Decoration::NonReadable);
             }

-            images.emplace(static_cast<u32>(image.GetIndex()), StorageImage{image_type, id});
+            images.emplace(image.index, StorageImage{image_type, id});
         }
         return binding;
     }

@@ -1611,11 +1611,11 @@ private:

     Id GetTextureSampler(Operation operation) {
         const auto& meta = std::get<MetaTexture>(operation.GetMeta());
-        ASSERT(!meta.sampler.IsBuffer());
+        ASSERT(!meta.sampler.is_buffer);

-        const auto& entry = sampled_images.at(meta.sampler.GetIndex());
+        const auto& entry = sampled_images.at(meta.sampler.index);
         Id sampler = entry.variable;
-        if (meta.sampler.IsIndexed()) {
+        if (meta.sampler.is_indexed) {
             const Id index = AsInt(Visit(meta.index));
             sampler = OpAccessChain(entry.sampler_pointer_type, sampler, index);
         }

@@ -1624,8 +1624,8 @@ private:

     Id GetTextureImage(Operation operation) {
         const auto& meta = std::get<MetaTexture>(operation.GetMeta());
-        const u32 index = meta.sampler.GetIndex();
-        if (meta.sampler.IsBuffer()) {
+        const u32 index = meta.sampler.index;
+        if (meta.sampler.is_buffer) {
             const auto& entry = texel_buffers.at(index);
             return OpLoad(entry.image_type, entry.image);
         } else {

@@ -1636,7 +1636,7 @@ private:

     Id GetImage(Operation operation) {
         const auto& meta = std::get<MetaImage>(operation.GetMeta());
-        const auto entry = images.at(meta.image.GetIndex());
+        const auto entry = images.at(meta.image.index);
         return OpLoad(entry.image_type, entry.image);
     }

@@ -1652,7 +1652,7 @@ private:
         }
         if (const auto meta = std::get_if<MetaTexture>(&operation.GetMeta())) {
             // Add array coordinate for textures
-            if (meta->sampler.IsArray()) {
+            if (meta->sampler.is_array) {
                 Id array = AsInt(Visit(meta->array));
                 if (type == Type::Float) {
                     array = OpConvertSToF(t_float, array);

@@ -1758,7 +1758,7 @@ private:
             operands.push_back(GetOffsetCoordinates(operation));
         }

-        if (meta.sampler.IsShadow()) {
+        if (meta.sampler.is_shadow) {
             const Id dref = AsFloat(Visit(meta.depth_compare));
             return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
                     Type::Float};

@@ -1773,7 +1773,7 @@ private:

         const Id coords = GetCoordinates(operation, Type::Float);
         Id texture{};
-        if (meta.sampler.IsShadow()) {
+        if (meta.sampler.is_shadow) {
             texture = OpImageDrefGather(t_float4, GetTextureSampler(operation), coords,
                                         AsFloat(Visit(meta.depth_compare)));
         } else {

@@ -1800,8 +1800,8 @@ private:
         }

         const Id lod = AsUint(Visit(operation[0]));
-        const std::size_t coords_count = [&]() {
-            switch (const auto type = meta.sampler.GetType(); type) {
+        const std::size_t coords_count = [&meta] {
+            switch (const auto type = meta.sampler.type) {
            case Tegra::Shader::TextureType::Texture1D:
                return 1;
            case Tegra::Shader::TextureType::Texture2D:

@@ -1810,7 +1810,7 @@ private:
            case Tegra::Shader::TextureType::Texture3D:
                return 3;
            default:
-                UNREACHABLE_MSG("Invalid texture type={}", static_cast<u32>(type));
+                UNREACHABLE_MSG("Invalid texture type={}", static_cast<int>(type));
                return 2;
            }
        }();

@@ -1853,7 +1853,7 @@ private:
        const Id image = GetTextureImage(operation);
        const Id coords = GetCoordinates(operation, Type::Int);
        Id fetch;
-        if (meta.lod && !meta.sampler.IsBuffer()) {
+        if (meta.lod && !meta.sampler.is_buffer) {
            fetch = OpImageFetch(t_float4, image, coords, spv::ImageOperandsMask::Lod,
                                 AsInt(Visit(meta.lod)));
        } else {

@@ -2969,7 +2969,7 @@ ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) {
        entries.global_buffers.emplace_back(base.cbuf_index, base.cbuf_offset, usage.is_written);
    }
    for (const auto& sampler : ir.GetSamplers()) {
-        if (sampler.IsBuffer()) {
+        if (sampler.is_buffer) {
            entries.texel_buffers.emplace_back(sampler);
        } else {
            entries.samplers.emplace_back(sampler);
@@ -42,11 +42,11 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
     u32 count{};
     std::vector<u32> bound_offsets;
     for (const auto& sampler : used_samplers) {
-        if (sampler.IsBindless()) {
+        if (sampler.is_bindless) {
             continue;
         }
         ++count;
-        bound_offsets.emplace_back(sampler.GetOffset());
+        bound_offsets.emplace_back(sampler.offset);
     }
     if (count > 1) {
         gpu_driver.DeduceTextureHandlerSize(std::move(bound_offsets));

@@ -56,14 +56,14 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
 std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce,
                                         VideoCore::GuestDriverProfile& gpu_driver,
                                         const std::list<Sampler>& used_samplers) {
-    const u32 base_offset = sampler_to_deduce.GetOffset();
+    const u32 base_offset = sampler_to_deduce.offset;
     u32 max_offset{std::numeric_limits<u32>::max()};
     for (const auto& sampler : used_samplers) {
-        if (sampler.IsBindless()) {
+        if (sampler.is_bindless) {
             continue;
         }
-        if (sampler.GetOffset() > base_offset) {
-            max_offset = std::min(sampler.GetOffset(), max_offset);
+        if (sampler.offset > base_offset) {
+            max_offset = std::min(sampler.offset, max_offset);
         }
     }
     if (max_offset == std::numeric_limits<u32>::max()) {

@@ -363,14 +363,14 @@ void ShaderIR::PostDecode() {
         return;
     }
     for (auto& sampler : used_samplers) {
-        if (!sampler.IsIndexed()) {
+        if (!sampler.is_indexed) {
             continue;
         }
         if (const auto size = TryDeduceSamplerSize(sampler, gpu_driver, used_samplers)) {
-            sampler.SetSize(*size);
+            sampler.size = *size;
         } else {
            LOG_CRITICAL(HW_GPU, "Failed to deduce size of indexed sampler");
-            sampler.SetSize(1);
+            sampler.size = 1;
        }
    }
 }
@@ -485,11 +485,10 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
 Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) {
     const auto offset = static_cast<u32>(image.index.Value());

-    const auto it =
-        std::find_if(std::begin(used_images), std::end(used_images),
-                     [offset](const Image& entry) { return entry.GetOffset() == offset; });
+    const auto it = std::find_if(std::begin(used_images), std::end(used_images),
+                                 [offset](const Image& entry) { return entry.offset == offset; });
     if (it != std::end(used_images)) {
-        ASSERT(!it->IsBindless() && it->GetType() == it->GetType());
+        ASSERT(!it->is_bindless && it->type == type);
         return *it;
     }

@@ -505,13 +504,12 @@ Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::Im
     const auto buffer = std::get<1>(result);
     const auto offset = std::get<2>(result);

-    const auto it =
-        std::find_if(std::begin(used_images), std::end(used_images),
-                     [buffer = buffer, offset = offset](const Image& entry) {
-                         return entry.GetBuffer() == buffer && entry.GetOffset() == offset;
-                     });
+    const auto it = std::find_if(std::begin(used_images), std::end(used_images),
+                                 [buffer, offset](const Image& entry) {
+                                     return entry.buffer == buffer && entry.offset == offset;
+                                 });
     if (it != std::end(used_images)) {
-        ASSERT(it->IsBindless() && it->GetType() == it->GetType());
+        ASSERT(it->is_bindless && it->type == type);
         return *it;
     }

@@ -140,14 +140,13 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
        const Node component = Immediate(static_cast<u32>(instr.tld4s.component));

        const SamplerInfo info{TextureType::Texture2D, false, is_depth_compare, false};
-        const Sampler& sampler = *GetSampler(instr.sampler, info);
+        const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);

        Node4 values;
        for (u32 element = 0; element < values.size(); ++element) {
-            auto coords_copy = coords;
-            MetaTexture meta{sampler, {}, depth_compare, aoffi, {}, {},
-                             {}, {}, component, element, {}};
-            values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
+            MetaTexture meta{*sampler, {}, depth_compare, aoffi, {}, {},
+                             {}, {}, component, element, {}};
+            values[element] = Operation(OperationCode::TextureGather, meta, coords);
        }

        if (instr.tld4s.fp16_flag) {

@@ -170,13 +169,15 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
        const auto texture_type = instr.txd.texture_type.Value();
        const auto coord_count = GetCoordCount(texture_type);
        Node index_var{};
-        const Sampler* sampler =
+        const std::optional<Sampler> sampler =
            is_bindless
                ? GetBindlessSampler(base_reg, index_var, {{texture_type, is_array, false, false}})
                : GetSampler(instr.sampler, {{texture_type, is_array, false, false}});
        Node4 values;
-        if (sampler == nullptr) {
-            std::generate(values.begin(), values.end(), [] { return Immediate(0); });
+        if (!sampler) {
+            for (u32 element = 0; element < values.size(); ++element) {
+                values[element] = Immediate(0);
+            }
            WriteTexInstructionFloat(bb, instr, values);
            break;
        }

@@ -218,10 +219,10 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
        // Sadly, not all texture instructions specify the type of texture their sampler
        // uses. This must be fixed at a later instance.
        Node index_var{};
-        const Sampler* sampler =
+        const std::optional<Sampler> sampler =
            is_bindless ? GetBindlessSampler(instr.gpr8, index_var) : GetSampler(instr.sampler);

-        if (sampler == nullptr) {
+        if (!sampler) {
            u32 indexer = 0;
            for (u32 element = 0; element < 4; ++element) {
                if (!instr.txq.IsComponentEnabled(element)) {

@@ -269,10 +270,10 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {

        auto texture_type = instr.tmml.texture_type.Value();
        Node index_var{};
-        const Sampler* sampler =
+        const std::optional<Sampler> sampler =
            is_bindless ? GetBindlessSampler(instr.gpr20, index_var) : GetSampler(instr.sampler);

-        if (sampler == nullptr) {
+        if (!sampler) {
            u32 indexer = 0;
            for (u32 element = 0; element < 2; ++element) {
                if (!instr.tmml.IsComponentEnabled(element)) {

@@ -368,35 +369,34 @@ ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(std::optional<SamplerInfo> sample
                       sampler->is_buffer != 0};
 }

-const Sampler* ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler,
-                                    std::optional<SamplerInfo> sampler_info) {
+std::optional<Sampler> ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler,
+                                            std::optional<SamplerInfo> sampler_info) {
    const auto offset = static_cast<u32>(sampler.index.Value());
    const auto info = GetSamplerInfo(sampler_info, offset);

    // If this sampler has already been used, return the existing mapping.
-    const auto it =
-        std::find_if(used_samplers.begin(), used_samplers.end(),
-                     [offset](const Sampler& entry) { return entry.GetOffset() == offset; });
+    const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
+                                 [offset](const Sampler& entry) { return entry.offset == offset; });
    if (it != used_samplers.end()) {
-        ASSERT(!it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array &&
-               it->IsShadow() == info.is_shadow && it->IsBuffer() == info.is_buffer);
-        return &*it;
+        ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
+               it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer);
+        return *it;
    }

    // Otherwise create a new mapping for this sampler
    const auto next_index = static_cast<u32>(used_samplers.size());
-    return &used_samplers.emplace_back(next_index, offset, info.type, info.is_array, info.is_shadow,
-                                       info.is_buffer, false);
+    return used_samplers.emplace_back(next_index, offset, info.type, info.is_array, info.is_shadow,
+                                      info.is_buffer, false);
 }

-const Sampler* ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var,
-                                            std::optional<SamplerInfo> sampler_info) {
+std::optional<Sampler> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var,
+                                                    std::optional<SamplerInfo> sampler_info) {
    const Node sampler_register = GetRegister(reg);
    const auto [base_node, tracked_sampler_info] =
        TrackBindlessSampler(sampler_register, global_code, static_cast<s64>(global_code.size()));
    ASSERT(base_node != nullptr);
    if (base_node == nullptr) {
-        return nullptr;
+        return std::nullopt;
    }

    if (const auto bindless_sampler_info =

@@ -406,23 +406,22 @@ const Sampler* ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, Node& i
        const auto info = GetSamplerInfo(sampler_info, offset, buffer);

        // If this sampler has already been used, return the existing mapping.
-        const auto it =
-            std::find_if(used_samplers.begin(), used_samplers.end(),
-                         [buffer = buffer, offset = offset](const Sampler& entry) {
-                             return entry.GetBuffer() == buffer && entry.GetOffset() == offset;
-                         });
+        const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
+                                     [buffer = buffer, offset = offset](const Sampler& entry) {
+                                         return entry.buffer == buffer && entry.offset == offset;
+                                     });
        if (it != used_samplers.end()) {
-            ASSERT(it->IsBindless() && it->GetType() == info.type &&
-                   it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow);
-            return &*it;
+            ASSERT(it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
+                   it->is_shadow == info.is_shadow);
+            return *it;
        }

        // Otherwise create a new mapping for this sampler
        const auto next_index = static_cast<u32>(used_samplers.size());
-        return &used_samplers.emplace_back(next_index, offset, buffer, info.type, info.is_array,
-                                           info.is_shadow, info.is_buffer, false);
-    } else if (const auto array_sampler_info =
-                   std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) {
+        return used_samplers.emplace_back(next_index, offset, buffer, info.type, info.is_array,
+                                          info.is_shadow, info.is_buffer, false);
+    }
+    if (const auto array_sampler_info = std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) {
        const u32 base_offset = array_sampler_info->GetBaseOffset() / 4;
        index_var = GetCustomVariable(array_sampler_info->GetIndexVar());
        const auto info = GetSamplerInfo(sampler_info, base_offset);

@@ -430,21 +429,21 @@ const Sampler* ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, Node& i
        // If this sampler has already been used, return the existing mapping.
        const auto it = std::find_if(
            used_samplers.begin(), used_samplers.end(),
-            [base_offset](const Sampler& entry) { return entry.GetOffset() == base_offset; });
+            [base_offset](const Sampler& entry) { return entry.offset == base_offset; });
        if (it != used_samplers.end()) {
-            ASSERT(!it->IsBindless() && it->GetType() == info.type &&
-                   it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow &&
-                   it->IsBuffer() == info.is_buffer && it->IsIndexed());
-            return &*it;
+            ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
+                   it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer &&
+                   it->is_indexed);
+            return *it;
        }

        uses_indexed_samplers = true;
        // Otherwise create a new mapping for this sampler
        const auto next_index = static_cast<u32>(used_samplers.size());
-        return &used_samplers.emplace_back(next_index, base_offset, info.type, info.is_array,
-                                           info.is_shadow, info.is_buffer, true);
+        return used_samplers.emplace_back(next_index, base_offset, info.type, info.is_array,
+                                          info.is_shadow, info.is_buffer, true);
    }
-    return nullptr;
+    return std::nullopt;
 }

 void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) {

@@ -531,7 +530,8 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,

    const SamplerInfo info{texture_type, is_array, is_shadow, false};
    Node index_var;
-    const Sampler* sampler = is_bindless ? GetBindlessSampler(*bindless_reg, index_var, info)
+    std::optional<Sampler> sampler = is_bindless
+                                         ? GetBindlessSampler(*bindless_reg, index_var, info)
                                         : GetSampler(instr.sampler, info);
    if (!sampler) {
        return {Immediate(0), Immediate(0), Immediate(0), Immediate(0)};

@@ -685,10 +685,11 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de

    const SamplerInfo info{texture_type, is_array, depth_compare, false};
    Node index_var{};
-    const Sampler* sampler = is_bindless ? GetBindlessSampler(parameter_register++, index_var, info)
-                                         : GetSampler(instr.sampler, info);
+    const std::optional<Sampler> sampler =
+        is_bindless ? GetBindlessSampler(parameter_register++, index_var, info)
+                    : GetSampler(instr.sampler, info);
    Node4 values;
-    if (sampler == nullptr) {
+    if (!sampler) {
        for (u32 element = 0; element < values.size(); ++element) {
            values[element] = Immediate(0);
        }
@@ -265,76 +265,30 @@ class ArraySamplerNode;
 using TrackSamplerData = std::variant<BindlessSamplerNode, ArraySamplerNode>;
 using TrackSampler = std::shared_ptr<TrackSamplerData>;

-class Sampler {
-public:
-    /// This constructor is for bound samplers
+struct Sampler {
+    /// Bound samplers constructor
     constexpr explicit Sampler(u32 index, u32 offset, Tegra::Shader::TextureType type,
                                bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
         : index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow},
           is_buffer{is_buffer}, is_indexed{is_indexed} {}

-    /// This constructor is for bindless samplers
+    /// Bindless samplers constructor
     constexpr explicit Sampler(u32 index, u32 offset, u32 buffer, Tegra::Shader::TextureType type,
                                bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
         : index{index}, offset{offset}, buffer{buffer}, type{type}, is_array{is_array},
           is_shadow{is_shadow}, is_buffer{is_buffer}, is_bindless{true}, is_indexed{is_indexed} {}

-    constexpr u32 GetIndex() const {
-        return index;
-    }
-
-    constexpr u32 GetOffset() const {
-        return offset;
-    }
-
-    constexpr u32 GetBuffer() const {
-        return buffer;
-    }
-
-    constexpr Tegra::Shader::TextureType GetType() const {
-        return type;
-    }
-
-    constexpr bool IsArray() const {
-        return is_array;
-    }
-
-    constexpr bool IsShadow() const {
-        return is_shadow;
-    }
-
-    constexpr bool IsBuffer() const {
-        return is_buffer;
-    }
-
-    constexpr bool IsBindless() const {
-        return is_bindless;
-    }
-
-    constexpr bool IsIndexed() const {
-        return is_indexed;
-    }
-
-    constexpr u32 Size() const {
-        return size;
-    }
-
-    constexpr void SetSize(u32 new_size) {
-        size = new_size;
-    }
-
-private:
-    u32 index{};  ///< Emulated index given for the this sampler.
-    u32 offset{}; ///< Offset in the const buffer from where the sampler is being read.
-    u32 buffer{}; ///< Buffer where the bindless sampler is being read (unused on bound samplers).
-    u32 size{1};  ///< Size of the sampler.
+    u32 index = 0;  ///< Emulated index given for the this sampler.
+    u32 offset = 0; ///< Offset in the const buffer from where the sampler is being read.
+    u32 buffer = 0; ///< Buffer where the bindless sampler is being read (unused on bound samplers).
+    u32 size = 1;   ///< Size of the sampler.

     Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc)
-    bool is_array{};    ///< Whether the texture is being sampled as an array texture or not.
-    bool is_shadow{};   ///< Whether the texture is being sampled as a depth texture or not.
-    bool is_buffer{};   ///< Whether the texture is a texture buffer without sampler.
-    bool is_bindless{}; ///< Whether this sampler belongs to a bindless texture or not.
-    bool is_indexed{};  ///< Whether this sampler is an indexed array of textures.
+    bool is_array = false;    ///< Whether the texture is being sampled as an array texture or not.
+    bool is_shadow = false;   ///< Whether the texture is being sampled as a depth texture or not.
+    bool is_buffer = false;   ///< Whether the texture is a texture buffer without sampler.
+    bool is_bindless = false; ///< Whether this sampler belongs to a bindless texture or not.
+    bool is_indexed = false;  ///< Whether this sampler is an indexed array of textures.
 };

 /// Represents a tracked bindless sampler into a direct const buffer

@@ -379,13 +333,13 @@ private:
     u32 offset;
 };

-class Image final {
+struct Image {
 public:
-    /// This constructor is for bound images
+    /// Bound images constructor
     constexpr explicit Image(u32 index, u32 offset, Tegra::Shader::ImageType type)
         : index{index}, offset{offset}, type{type} {}

-    /// This constructor is for bindless samplers
+    /// Bindless samplers constructor
     constexpr explicit Image(u32 index, u32 offset, u32 buffer, Tegra::Shader::ImageType type)
         : index{index}, offset{offset}, buffer{buffer}, type{type}, is_bindless{true} {}

@@ -403,53 +357,20 @@ public:
         is_atomic = true;
     }

-    constexpr u32 GetIndex() const {
-        return index;
-    }
-
-    constexpr u32 GetOffset() const {
-        return offset;
-    }
-
-    constexpr u32 GetBuffer() const {
-        return buffer;
-    }
-
-    constexpr Tegra::Shader::ImageType GetType() const {
-        return type;
-    }
-
-    constexpr bool IsBindless() const {
-        return is_bindless;
-    }
-
-    constexpr bool IsWritten() const {
-        return is_written;
-    }
-
-    constexpr bool IsRead() const {
-        return is_read;
-    }
-
-    constexpr bool IsAtomic() const {
-        return is_atomic;
-    }
-
-private:
-    u32 index{};
-    u32 offset{};
-    u32 buffer{};
+    u32 index = 0;
+    u32 offset = 0;
+    u32 buffer = 0;

     Tegra::Shader::ImageType type{};
-    bool is_bindless{};
-    bool is_written{};
-    bool is_read{};
-    bool is_atomic{};
+    bool is_bindless = false;
+    bool is_written = false;
+    bool is_read = false;
+    bool is_atomic = false;
 };

 struct GlobalMemoryBase {
-    u32 cbuf_index{};
-    u32 cbuf_offset{};
+    u32 cbuf_index = 0;
+    u32 cbuf_offset = 0;

     bool operator<(const GlobalMemoryBase& rhs) const {
         return std::tie(cbuf_index, cbuf_offset) < std::tie(rhs.cbuf_index, rhs.cbuf_offset);

@@ -463,7 +384,7 @@ struct MetaArithmetic {

 /// Parameters describing a texture sampler
 struct MetaTexture {
-    const Sampler& sampler;
+    Sampler sampler;
     Node array;
     Node depth_compare;
     std::vector<Node> aoffi;
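MetaTexture now stores the Sampler by value instead of holding a const reference. That matches GetSampler returning std::optional<Sampler> by value elsewhere in this commit: copying the small struct into the metadata is cheap, and a reference member would dangle once the returned optional goes out of scope. A sketch of the difference, with hypothetical stand-in types:

#include <optional>
#include <vector>

struct SamplerLike {
    unsigned index = 0;
    bool is_shadow = false;
};

struct MetaTextureLike {
    SamplerLike sampler; // by value: stays valid after the source optional is destroyed
    unsigned element = 0;
};

std::optional<SamplerLike> FindSampler(const std::vector<SamplerLike>& used, unsigned index) {
    for (const SamplerLike& sampler : used) {
        if (sampler.index == index) {
            return sampler; // returns a copy, not a reference into `used`
        }
    }
    return std::nullopt;
}

std::optional<MetaTextureLike> MakeMeta(const std::vector<SamplerLike>& used, unsigned index) {
    const std::optional<SamplerLike> sampler = FindSampler(used, index);
    if (!sampler) {
        return std::nullopt;
    }
    // Copying into the metadata is safe; a `const SamplerLike&` member initialized from
    // *sampler would dangle as soon as the local optional goes out of scope.
    return MetaTextureLike{*sampler, 0};
}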
@@ -29,12 +29,11 @@ using ProgramCode = std::vector<u64>;

 constexpr u32 MAX_PROGRAM_LENGTH = 0x1000;

-class ConstBuffer {
-public:
-    explicit ConstBuffer(u32 max_offset, bool is_indirect)
+struct ConstBuffer {
+    constexpr explicit ConstBuffer(u32 max_offset, bool is_indirect)
         : max_offset{max_offset}, is_indirect{is_indirect} {}

-    ConstBuffer() = default;
+    constexpr ConstBuffer() = default;

     void MarkAsUsed(u64 offset) {
         max_offset = std::max(max_offset, static_cast<u32>(offset));

@@ -57,8 +56,8 @@ public:
     }

 private:
-    u32 max_offset{};
-    bool is_indirect{};
+    u32 max_offset = 0;
+    bool is_indirect = false;
 };

 struct GlobalMemoryUsage {

@@ -332,12 +331,13 @@ private:
                               std::optional<u32> buffer = std::nullopt);

     /// Accesses a texture sampler
-    const Sampler* GetSampler(const Tegra::Shader::Sampler& sampler,
-                              std::optional<SamplerInfo> sampler_info = std::nullopt);
+    std::optional<Sampler> GetSampler(const Tegra::Shader::Sampler& sampler,
+                                      std::optional<SamplerInfo> sampler_info = std::nullopt);

     /// Accesses a texture sampler for a bindless texture.
-    const Sampler* GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var,
-                                      std::optional<SamplerInfo> sampler_info = std::nullopt);
+    std::optional<Sampler> GetBindlessSampler(
+        Tegra::Shader::Register reg, Node& index_var,
+        std::optional<SamplerInfo> sampler_info = std::nullopt);

     /// Accesses an image.
     Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type);
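With the declarations above, GetSampler and GetBindlessSampler report failure through std::optional<Sampler> instead of a nullable const Sampler*. A sketch of that calling-convention shift, with hypothetical names:

#include <optional>

struct SamplerValue {
    unsigned offset = 0;
};

// Was: return nullptr on failure and a pointer into internal storage on success.
std::optional<SamplerValue> TryGetSampler(bool found) {
    if (!found) {
        return std::nullopt;
    }
    return SamplerValue{0x10};
}

unsigned UseSampler(bool found) {
    const std::optional<SamplerValue> sampler = TryGetSampler(found);
    if (!sampler) {
        return 0; // the decoders in this commit fall back to zero immediates here
    }
    return sampler->offset; // operator-> and operator* replace raw pointer access
}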
@@ -81,7 +81,7 @@ SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_ta
     params.pixel_format = lookup_table.GetPixelFormat(
         tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type);
     params.type = GetFormatType(params.pixel_format);
-    if (entry.IsShadow() && params.type == SurfaceType::ColorTexture) {
+    if (entry.is_shadow && params.type == SurfaceType::ColorTexture) {
         switch (params.pixel_format) {
         case PixelFormat::R16U:
         case PixelFormat::R16F:

@@ -108,7 +108,7 @@ SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_ta
         params.emulated_levels = 1;
         params.is_layered = false;
     } else {
-        params.target = TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray());
+        params.target = TextureTypeToSurfaceTarget(entry.type, entry.is_array);
         params.width = tic.Width();
         params.height = tic.Height();
         params.depth = tic.Depth();

@@ -138,7 +138,7 @@ SurfaceParams SurfaceParams::CreateForImage(const FormatLookupTable& lookup_tabl
         tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type);
     params.type = GetFormatType(params.pixel_format);
     params.type = GetFormatType(params.pixel_format);
-    params.target = ImageTypeToSurfaceTarget(entry.GetType());
+    params.target = ImageTypeToSurfaceTarget(entry.type);
     // TODO: on 1DBuffer we should use the tic info.
     if (tic.IsBuffer()) {
         params.target = SurfaceTarget::TextureBuffer;

@@ -248,12 +248,12 @@ SurfaceParams SurfaceParams::CreateForFermiCopySurface(

 VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget(
     const VideoCommon::Shader::Sampler& entry) {
-    return TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray());
+    return TextureTypeToSurfaceTarget(entry.type, entry.is_array);
 }

 VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget(
     const VideoCommon::Shader::Image& entry) {
-    return ImageTypeToSurfaceTarget(entry.GetType());
+    return ImageTypeToSurfaceTarget(entry.type);
 }

 bool SurfaceParams::IsLayered() const {
@@ -1156,7 +1156,7 @@ private:
     /// Returns true the shader sampler entry is compatible with the TIC texture type.
     static bool IsTypeCompatible(Tegra::Texture::TextureType tic_type,
                                  const VideoCommon::Shader::Sampler& entry) {
-        const auto shader_type = entry.GetType();
+        const auto shader_type = entry.type;
         switch (tic_type) {
         case Tegra::Texture::TextureType::Texture1D:
         case Tegra::Texture::TextureType::Texture1DArray:

@@ -1177,7 +1177,7 @@ private:
             if (shader_type == Tegra::Shader::TextureType::TextureCube) {
                 return true;
             }
-            return shader_type == Tegra::Shader::TextureType::Texture2D && entry.IsArray();
+            return shader_type == Tegra::Shader::TextureType::Texture2D && entry.is_array;
         }
         UNREACHABLE();
         return true;