early-access version 3173

pineappleEA 2022-12-01 17:38:02 +01:00
parent 0b26e6367b
commit 37e45db751
13 changed files with 66 additions and 77 deletions

View File

@@ -474,22 +474,7 @@ if (UNIX AND NOT APPLE)
 endif()
 if (NOT YUZU_USE_BUNDLED_FFMPEG)
     # Use system installed FFmpeg
-    find_package(FFmpeg 4.3 QUIET COMPONENTS ${FFmpeg_COMPONENTS})
-    if (FFmpeg_FOUND)
-        # Overwrite aggregate defines from FFmpeg module to avoid over-linking libraries.
-        # Prevents shipping too many libraries with the AppImage.
-        set(FFmpeg_LIBRARIES "")
-        set(FFmpeg_INCLUDE_DIR "")
-        foreach(COMPONENT ${FFmpeg_COMPONENTS})
-            set(FFmpeg_LIBRARIES ${FFmpeg_LIBRARIES} ${FFmpeg_LIBRARY_${COMPONENT}} CACHE PATH "Paths to FFmpeg libraries" FORCE)
-            set(FFmpeg_INCLUDE_DIR ${FFmpeg_INCLUDE_DIR} ${FFmpeg_INCLUDE_${COMPONENT}} CACHE PATH "Path to FFmpeg headers" FORCE)
-        endforeach()
-    else()
-        message(WARNING "FFmpeg not found or too old, falling back to externals")
-        set(YUZU_USE_BUNDLED_FFMPEG ON)
-    endif()
+    find_package(FFmpeg 4.3 REQUIRED QUIET COMPONENTS ${FFmpeg_COMPONENTS})
 endif()

 # Prefer the -pthread flag on Linux.

View File

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
-This is the source code for early-access 3172.
+This is the source code for early-access 3173.

 ## Legal Notice

View File

@@ -185,3 +185,11 @@ foreach(c ${_FFmpeg_ALL_COMPONENTS})
 endforeach()
 unset(_FFmpeg_ALL_COMPONENTS)
 unset(_FFmpeg_REQUIRED_VARS)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(FFmpeg
+    REQUIRED_VARS
+        FFmpeg_LIBRARIES
+        FFmpeg_INCLUDE_DIR
+    HANDLE_COMPONENTS
+)

View File

@@ -27,8 +27,6 @@ struct Program {
     u32 local_memory_size{};
     u32 shared_memory_size{};
     bool is_geometry_passthrough{};
-    bool requires_layer_emulation{};
-    Attribute emulated_layer{};
 };

 [[nodiscard]] std::string DumpProgram(const Program& program);

View File

@@ -334,7 +334,7 @@ void ConvertLegacyToGeneric(IR::Program& program, const Shader::RuntimeInfo& run
     }
 }

-IR::Program GenerateLayerPassthrough(ObjectPool<IR::Inst>& inst_pool,
+IR::Program GenerateGeometryPassthrough(ObjectPool<IR::Inst>& inst_pool,
                                      ObjectPool<IR::Block>& block_pool,
                                      const HostTranslateInfo& host_info,
                                      IR::Program& source_program,
@@ -357,8 +357,8 @@ IR::Program GenerateLayerPassthrough(ObjectPool<IR::Inst>& inst_pool,
     program.is_geometry_passthrough = false;
     program.info.loads.mask = source_program.info.stores.mask;
     program.info.stores.mask = source_program.info.stores.mask;
-    program.info.stores.Set(IR::Attribute::Layer);
-    program.info.stores.Set(source_program.emulated_layer, false);
+    program.info.stores.Set(IR::Attribute::Layer, true);
+    program.info.stores.Set(source_program.info.emulated_layer, false);

     IR::Block* current_block = block_pool.Create(inst_pool);
     auto& node{program.syntax_list.emplace_back()};
@@ -388,7 +388,7 @@ IR::Program GenerateLayerPassthrough(ObjectPool<IR::Inst>& inst_pool,
         ir.SetAttribute(attr + 3, ir.GetAttribute(attr + 3, ir.Imm32(i)), ir.Imm32(0));

         // Assign layer
-        ir.SetAttribute(IR::Attribute::Layer, ir.GetAttribute(source_program.emulated_layer),
+        ir.SetAttribute(IR::Attribute::Layer, ir.GetAttribute(source_program.info.emulated_layer),
                         ir.Imm32(0));

         // Emit vertex

View File

@@ -28,7 +28,7 @@ void ConvertLegacyToGeneric(IR::Program& program, const RuntimeInfo& runtime_inf
 // Maxwell v1 and older Nvidia cards don't support setting gl_Layer from non-geometry stages.
 // This creates a workaround by setting the layer as a generic output and creating a
 // passthrough geometry shader that reads the generic and sets the layer.
-[[nodiscard]] IR::Program GenerateLayerPassthrough(ObjectPool<IR::Inst>& inst_pool,
+[[nodiscard]] IR::Program GenerateGeometryPassthrough(ObjectPool<IR::Inst>& inst_pool,
                                                    ObjectPool<IR::Block>& block_pool,
                                                    const HostTranslateInfo& host_info,
                                                    IR::Program& source_program,

View File

@@ -14,7 +14,7 @@ struct HostTranslateInfo {
     bool support_int64{};               ///< True when the device supports 64-bit integers
     bool needs_demote_reorder{};        ///< True when the device needs DemoteToHelperInvocation reordered
     bool support_snorm_render_buffer{}; ///< True when the device supports SNORM render buffers
-    bool requires_layer_emulation{};    ///< True when the device doesn't support gl_Layer in VS
+    bool support_viewport_index_layer{}; ///< True when the device supports gl_Layer in VS
 };

 } // namespace Shader

View File

@@ -39,12 +39,12 @@ static bool PermittedProgramStage(Stage stage) {
 }

 void LayerPass(IR::Program& program, const HostTranslateInfo& host_info) {
-    if (!host_info.requires_layer_emulation || !PermittedProgramStage(program.stage)) {
+    if (host_info.support_viewport_index_layer || !PermittedProgramStage(program.stage)) {
         return;
     }

     const auto end{program.post_order_blocks.end()};
-    const auto emulated_layer = EmulatedLayerAttribute(program.info.stores);
+    const auto layer_attribute = EmulatedLayerAttribute(program.info.stores);
     bool requires_layer_emulation = false;

     for (auto block = program.post_order_blocks.begin(); block != end; ++block) {
@@ -52,16 +52,16 @@ void LayerPass(IR::Program& program, const HostTranslateInfo& host_info) {
             if (inst.GetOpcode() == IR::Opcode::SetAttribute &&
                 inst.Arg(0).Attribute() == IR::Attribute::Layer) {
                 requires_layer_emulation = true;
-                inst.SetArg(0, IR::Value{emulated_layer});
+                inst.SetArg(0, IR::Value{layer_attribute});
             }
         }
     }

     if (requires_layer_emulation) {
-        program.requires_layer_emulation = true;
-        program.emulated_layer = emulated_layer;
+        program.info.requires_layer_emulation = true;
+        program.info.emulated_layer = layer_attribute;
         program.info.stores.Set(IR::Attribute::Layer, false);
-        program.info.stores.Set(emulated_layer, true);
+        program.info.stores.Set(layer_attribute, true);
     }
 }
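The host-facing effect of this pass is easier to see in isolation. Below is a minimal, self-contained sketch of the same idea, using simplified stand-in types rather than the real shader_recompiler IR; the EmulatedLayerAttribute selection policy shown here is an assumption, not the project's implementation. When the host cannot write gl_Layer from a vertex-style stage, every store to Layer is redirected into a spare generic attribute and the choice is recorded in the program info so a later passthrough geometry shader can forward it.

// Sketch only: hypothetical simplified types, not the real yuzu IR.
#include <cstdint>
#include <iostream>
#include <vector>

enum class Attribute : std::uint32_t { Layer, Generic0, Generic1, Generic2 };

struct Inst {
    bool is_set_attribute{};
    Attribute target{};
};

struct Info {
    bool requires_layer_emulation{};
    Attribute emulated_layer{};
    std::vector<Attribute> stores; // attributes the shader writes
};

struct Program {
    Info info;
    std::vector<Inst> insts; // flattened; the real pass walks post-order blocks
};

struct HostTranslateInfo {
    bool support_viewport_index_layer{};
};

// Hypothetical policy: pick the first generic slot the shader does not already write.
Attribute EmulatedLayerAttribute(const Info& info) {
    for (Attribute a : {Attribute::Generic0, Attribute::Generic1, Attribute::Generic2}) {
        bool used = false;
        for (Attribute s : info.stores) {
            used |= (s == a);
        }
        if (!used) {
            return a;
        }
    }
    return Attribute::Generic2;
}

void LayerPass(Program& program, const HostTranslateInfo& host_info) {
    if (host_info.support_viewport_index_layer) {
        return; // host can set gl_Layer directly; nothing to rewrite
    }
    const Attribute layer_attribute = EmulatedLayerAttribute(program.info);
    bool requires_layer_emulation = false;
    for (Inst& inst : program.insts) {
        if (inst.is_set_attribute && inst.target == Attribute::Layer) {
            requires_layer_emulation = true;
            inst.target = layer_attribute; // the store now lands in the spare generic
        }
    }
    if (requires_layer_emulation) {
        program.info.requires_layer_emulation = true;
        program.info.emulated_layer = layer_attribute;
        program.info.stores.push_back(layer_attribute);
    }
}

int main() {
    Program vs;
    vs.insts.push_back({true, Attribute::Layer});
    LayerPass(vs, HostTranslateInfo{false});
    std::cout << "emulated: " << vs.info.requires_layer_emulation << '\n'; // prints 1
}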

View File

@@ -204,6 +204,9 @@ struct Info {
     u32 nvn_buffer_base{};
     std::bitset<16> nvn_buffer_used{};

+    bool requires_layer_emulation{};
+    IR::Attribute emulated_layer{};
+
     boost::container::static_vector<ConstantBufferDescriptor, MAX_CBUFS>
         constant_buffer_descriptors;
     boost::container::static_vector<StorageBufferDescriptor, MAX_SSBOS> storage_buffers_descriptors;

View File

@@ -126,7 +126,6 @@ void Maxwell3D::InitializeRegisterDefaults() {
     draw_command[MAXWELL3D_REG_INDEX(draw_inline_index)] = true;
     draw_command[MAXWELL3D_REG_INDEX(inline_index_2x16.even)] = true;
     draw_command[MAXWELL3D_REG_INDEX(inline_index_4x8.index0)] = true;
-    draw_command[MAXWELL3D_REG_INDEX(draw.instance_id)] = true;
 }

 void Maxwell3D::ProcessMacro(u32 method, const u32* base_start, u32 amount, bool is_last_call) {
@@ -218,16 +217,19 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
         regs.index_buffer.count = regs.index_buffer32_first.count;
         regs.index_buffer.first = regs.index_buffer32_first.first;
         dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+        draw_indexed = true;
         return ProcessDraw();
     case MAXWELL3D_REG_INDEX(index_buffer16_first):
         regs.index_buffer.count = regs.index_buffer16_first.count;
         regs.index_buffer.first = regs.index_buffer16_first.first;
         dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+        draw_indexed = true;
         return ProcessDraw();
     case MAXWELL3D_REG_INDEX(index_buffer8_first):
         regs.index_buffer.count = regs.index_buffer8_first.count;
         regs.index_buffer.first = regs.index_buffer8_first.first;
         dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+        draw_indexed = true;
         return ProcessDraw();
     case MAXWELL3D_REG_INDEX(topology_override):
         use_topology_override = true;
@@ -300,21 +302,33 @@ void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
             draw_mode = DrawMode::InlineIndex;
         };
         switch (method) {
+        case MAXWELL3D_REG_INDEX(draw.begin): {
+            draw_mode =
+                (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) ||
+                        (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged)
+                    ? DrawMode::Instance
+                    : DrawMode::General;
+            break;
+        }
        case MAXWELL3D_REG_INDEX(draw.end):
             switch (draw_mode) {
             case DrawMode::General:
-                ProcessDraw(1);
+                ProcessDraw();
                 break;
             case DrawMode::InlineIndex:
                 regs.index_buffer.count = static_cast<u32>(inline_index_draw_indexes.size() / 4);
                 regs.index_buffer.format = Regs::IndexFormat::UnsignedInt;
-                ProcessDraw(1);
+                draw_indexed = true;
+                ProcessDraw();
                 inline_index_draw_indexes.clear();
                 break;
             case DrawMode::Instance:
                 break;
             }
             break;
+        case MAXWELL3D_REG_INDEX(index_buffer.count):
+            draw_indexed = true;
+            break;
        case MAXWELL3D_REG_INDEX(draw_inline_index):
             update_inline_index(method_argument);
             break;
@@ -328,13 +342,6 @@ void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
             update_inline_index(regs.inline_index_4x8.index2);
             update_inline_index(regs.inline_index_4x8.index3);
             break;
-        case MAXWELL3D_REG_INDEX(draw.instance_id):
-            draw_mode =
-                (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) ||
-                        (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged)
-                    ? DrawMode::Instance
-                    : DrawMode::General;
-            break;
         }
     } else {
         ProcessDeferredDraw();
@@ -624,27 +631,16 @@ void Maxwell3D::ProcessClearBuffers(u32 layer_count) {
 void Maxwell3D::ProcessDraw(u32 instance_count) {
     LOG_TRACE(HW_GPU, "called, topology={}, count={}", regs.draw.topology.Value(),
-              regs.vertex_buffer.count);
-
-    ASSERT_MSG(!(regs.index_buffer.count && regs.vertex_buffer.count), "Both indexed and direct?");
-
-    // Both instance configuration registers can not be set at the same time.
-    ASSERT_MSG(regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::First ||
-                   regs.draw.instance_id != Maxwell3D::Regs::Draw::InstanceId::Unchanged,
-               "Illegal combination of instancing parameters");
+              draw_indexed ? regs.index_buffer.count : regs.vertex_buffer.count);

     ProcessTopologyOverride();

-    const bool is_indexed = regs.index_buffer.count && !regs.vertex_buffer.count;
     if (ShouldExecute()) {
-        rasterizer->Draw(is_indexed, instance_count);
+        rasterizer->Draw(draw_indexed, instance_count);
     }

-    if (is_indexed) {
-        regs.index_buffer.count = 0;
-    } else {
-        regs.vertex_buffer.count = 0;
-    }
+    draw_indexed = false;
+    deferred_draw_method.clear();
 }

 void Maxwell3D::ProcessDeferredDraw() {
@@ -667,8 +663,6 @@ void Maxwell3D::ProcessDeferredDraw() {
     ASSERT_MSG(!(vertex_buffer_count && index_buffer_count), "Instance both indexed and direct?");

     ProcessDraw(instance_count);
-
-    deferred_draw_method.clear();
 }

 } // namespace Tegra::Engines
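Taken together, the Maxwell3D changes replace the old "guess from non-zero register counts" heuristic with an explicit draw_indexed flag: it is set when an index_buffer*_first register, index_buffer.count, or the inline-index path is written, consumed by ProcessDraw() when it calls rasterizer->Draw(), and cleared afterwards. A compressed sketch of that lifecycle, with hypothetical stand-in types rather than the real Tegra::Engines::Maxwell3D:

// Sketch only: simplified stand-ins for the engine and rasterizer.
#include <cstdint>
#include <iostream>

struct FakeRasterizer {
    void Draw(bool is_indexed, std::uint32_t instance_count) {
        std::cout << (is_indexed ? "indexed" : "direct")
                  << " draw, instances=" << instance_count << '\n';
    }
};

class FakeMaxwell3D {
public:
    // Models an index_buffer*_first / index_buffer.count style method arriving.
    void OnIndexBufferMethod(std::uint32_t count) {
        index_buffer_count = count;
        draw_indexed = true; // explicit flag instead of inferring from counts
        ProcessDraw(1);
    }

    // Models a non-indexed draw.end.
    void OnDrawEnd() {
        ProcessDraw(1);
    }

private:
    void ProcessDraw(std::uint32_t instance_count) {
        rasterizer.Draw(draw_indexed, instance_count);
        draw_indexed = false; // reset here, not by zeroing the count registers
    }

    FakeRasterizer rasterizer;
    std::uint32_t index_buffer_count = 0;
    bool draw_indexed = false;
};

int main() {
    FakeMaxwell3D maxwell;
    maxwell.OnIndexBufferMethod(36); // prints "indexed draw, instances=1"
    maxwell.OnDrawEnd();             // prints "direct draw, instances=1"
}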

View File

@@ -3159,6 +3159,7 @@ private:
     std::vector<u32> deferred_draw_method;
     enum class DrawMode : u32 { General = 0, Instance, InlineIndex };
     DrawMode draw_mode{DrawMode::General};
+    bool draw_indexed{};
 };

 #define ASSERT_REG_POSITION(field_name, position) \

View File

@@ -39,7 +39,7 @@ using Shader::Backend::GLASM::EmitGLASM;
 using Shader::Backend::GLSL::EmitGLSL;
 using Shader::Backend::SPIRV::EmitSPIRV;
 using Shader::Maxwell::ConvertLegacyToGeneric;
-using Shader::Maxwell::GenerateLayerPassthrough;
+using Shader::Maxwell::GenerateGeometryPassthrough;
 using Shader::Maxwell::MergeDualVertexPrograms;
 using Shader::Maxwell::TranslateProgram;
 using VideoCommon::ComputeEnvironment;
@@ -232,7 +232,7 @@ ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindo
           .support_int64 = device.HasShaderInt64(),
           .needs_demote_reorder = device.IsAmd(),
           .support_snorm_render_buffer = false,
-          .requires_layer_emulation = !device.HasVertexViewportLayer(),
+          .support_viewport_index_layer = device.HasVertexViewportLayer(),
       } {
     if (use_asynchronous_shaders) {
         workers = CreateWorkers();
@@ -435,7 +435,7 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
                                        index == static_cast<u32>(Maxwell::ShaderType::Geometry);
         if (key.unique_hashes[index] == 0 && is_emulated_stage) {
             auto topology = MaxwellToOutputTopology(key.gs_input_topology);
-            programs[index] = GenerateLayerPassthrough(pools.inst, pools.block, host_info,
-                                                       *layer_source_program, topology);
+            programs[index] = GenerateGeometryPassthrough(pools.inst, pools.block, host_info,
+                                                          *layer_source_program, topology);
             continue;
         }
@@ -467,7 +467,7 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
             programs[index] = MergeDualVertexPrograms(program_va, program_vb, env);
         }

-        if (programs[index].requires_layer_emulation) {
+        if (programs[index].info.requires_layer_emulation) {
             layer_source_program = &programs[index];
         }
     }
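In both shader caches the flow is the same: a stage whose translated Info carries requires_layer_emulation is remembered as layer_source_program, and when the geometry slot would otherwise stay empty, GenerateGeometryPassthrough() fills it with a shader that forwards the emulated layer attribute to gl_Layer. A simplified sketch of that selection logic follows; the types and the BuildPipeline helper are stand-ins for illustration, not the real gl_shader_cache.cpp / vk_pipeline_cache.cpp code.

// Sketch only: hypothetical simplified types, compiled as C++20.
#include <array>
#include <cstddef>
#include <iostream>
#include <optional>

struct Info {
    bool requires_layer_emulation{};
};

struct Program {
    Info info;
    bool is_passthrough{};
};

constexpr std::size_t GeometryIndex = 4; // assumed stage order: ..., Geometry at slot 4

// Stand-in for Shader::Maxwell::GenerateGeometryPassthrough().
Program GenerateGeometryPassthrough(const Program& /*layer_source*/) {
    return Program{.info = {}, .is_passthrough = true};
}

void BuildPipeline(std::array<std::optional<Program>, 6>& programs) {
    const Program* layer_source_program = nullptr;
    for (std::size_t index = 0; index < programs.size(); ++index) {
        const bool is_emulated_stage = layer_source_program != nullptr && index == GeometryIndex;
        if (!programs[index] && is_emulated_stage) {
            // No real geometry shader bound: synthesize one that sets gl_Layer.
            programs[index] = GenerateGeometryPassthrough(*layer_source_program);
            continue;
        }
        if (programs[index] && programs[index]->info.requires_layer_emulation) {
            layer_source_program = &*programs[index];
        }
    }
}

int main() {
    std::array<std::optional<Program>, 6> programs{};
    programs[1] = Program{.info = {.requires_layer_emulation = true}}; // vertex stage needed emulation
    BuildPipeline(programs);
    std::cout << "geometry slot synthesized: " << programs[GeometryIndex].has_value() << '\n';
}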

View File

@@ -46,7 +46,7 @@ MICROPROFILE_DECLARE(Vulkan_PipelineCache);
 namespace {
 using Shader::Backend::SPIRV::EmitSPIRV;
 using Shader::Maxwell::ConvertLegacyToGeneric;
-using Shader::Maxwell::GenerateLayerPassthrough;
+using Shader::Maxwell::GenerateGeometryPassthrough;
 using Shader::Maxwell::MergeDualVertexPrograms;
 using Shader::Maxwell::TranslateProgram;
 using VideoCommon::ComputeEnvironment;
@@ -339,7 +339,7 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device
         .needs_demote_reorder = driver_id == VK_DRIVER_ID_AMD_PROPRIETARY_KHR ||
                                 driver_id == VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR,
         .support_snorm_render_buffer = true,
-        .requires_layer_emulation = !device.IsExtShaderViewportIndexLayerSupported(),
+        .support_viewport_index_layer = device.IsExtShaderViewportIndexLayerSupported(),
     };
 }
@@ -531,7 +531,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
                                        index == static_cast<u32>(Maxwell::ShaderType::Geometry);
         if (key.unique_hashes[index] == 0 && is_emulated_stage) {
             auto topology = MaxwellToOutputTopology(key.state.topology);
-            programs[index] = GenerateLayerPassthrough(pools.inst, pools.block, host_info,
-                                                       *layer_source_program, topology);
+            programs[index] = GenerateGeometryPassthrough(pools.inst, pools.block, host_info,
+                                                          *layer_source_program, topology);
             continue;
         }
@@ -556,7 +556,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
             programs[index] = MergeDualVertexPrograms(program_va, program_vb, env);
         }

-        if (programs[index].requires_layer_emulation) {
+        if (programs[index].info.requires_layer_emulation) {
             layer_source_program = &programs[index];
         }
     }