2023-05-01 21:05:12 +02:00
|
|
|
using OpenTK.Graphics.OpenGL;
|
2021-07-16 23:10:20 +02:00
|
|
|
using Ryujinx.Common.Logging;
|
|
|
|
using Ryujinx.Graphics.GAL;
|
|
|
|
using Ryujinx.Graphics.OpenGL.Image;
|
2023-03-04 14:43:08 +01:00
|
|
|
using System;
|
2023-05-01 21:05:12 +02:00
|
|
|
using System.Collections.Generic;
|
2023-03-04 14:43:08 +01:00
|
|
|
using System.Runtime.CompilerServices;
|
|
|
|
using System.Runtime.InteropServices;
|
2021-07-16 23:10:20 +02:00
|
|
|
|
|
|
|
namespace Ryujinx.Graphics.OpenGL
|
|
|
|
{
|
|
|
|
class PersistentBuffers : IDisposable
    {
        // Persistent staging buffer used by the main context thread.
        private readonly PersistentBuffer _main = new();

        // Persistent staging buffer used by background context threads.
        private readonly PersistentBuffer _background = new();

        // Persistent mappings created via Map(), keyed by the GAL buffer handle.
        private readonly Dictionary<BufferHandle, IntPtr> _maps = new();

        /// <summary>
        /// Gets the persistent buffer appropriate for the calling thread
        /// (background worker threads get their own buffer).
        /// </summary>
        public PersistentBuffer Default => BackgroundContextWorker.InBackground ? _background : _main;

        public void Dispose()
        {
            _main?.Dispose();
            _background?.Dispose();
        }

        /// <summary>
        /// Persistently maps the given buffer for reading and records the mapping.
        /// Any previous mapping recorded for the same handle is overwritten.
        /// </summary>
        /// <param name="handle">Handle of the buffer to map</param>
        /// <param name="size">Size in bytes of the range to map, starting at offset 0</param>
        public void Map(BufferHandle handle, int size)
        {
            GL.BindBuffer(BufferTarget.CopyWriteBuffer, handle.ToInt32());
            IntPtr ptr = GL.MapBufferRange(BufferTarget.CopyWriteBuffer, IntPtr.Zero, size, BufferAccessMask.MapReadBit | BufferAccessMask.MapPersistentBit);

            _maps[handle] = ptr;
        }

        /// <summary>
        /// Unmaps a buffer previously mapped with <see cref="Map"/>.
        /// Does nothing if the buffer was not mapped.
        /// </summary>
        /// <param name="handle">Handle of the buffer to unmap</param>
        public void Unmap(BufferHandle handle)
        {
            // Remove returns false when the handle was never mapped, which avoids
            // the ContainsKey + Remove double dictionary lookup.
            if (_maps.Remove(handle))
            {
                GL.BindBuffer(BufferTarget.CopyWriteBuffer, handle.ToInt32());
                GL.UnmapBuffer(BufferTarget.CopyWriteBuffer);
            }
        }

        /// <summary>
        /// Tries to get the host pointer for a persistently mapped buffer.
        /// </summary>
        /// <param name="handle">Handle of the buffer to look up</param>
        /// <param name="ptr">Host pointer of the mapping, if present</param>
        /// <returns>True if the buffer is currently mapped, false otherwise</returns>
        public bool TryGet(BufferHandle handle, out IntPtr ptr)
        {
            return _maps.TryGetValue(handle, out ptr);
        }
    }
|
|
|
|
|
|
|
|
class PersistentBuffer : IDisposable
    {
        // Host pointer to the persistent mapping of the copy buffer.
        private IntPtr _bufferMap;

        // GL name of the persistently mapped copy buffer; 0 when not created.
        private int _copyBufferHandle;
        private int _copyBufferSize;

        // Pinned host array reused between GetHostArray calls, grown on demand.
        private byte[] _data;
        private IntPtr _dataMap;

        /// <summary>
        /// Ensures the persistently mapped copy buffer exists and has at least
        /// <paramref name="requiredSize"/> bytes, recreating it if it is too small.
        /// </summary>
        /// <param name="requiredSize">Minimum size in bytes the buffer must have</param>
        private void EnsureBuffer(int requiredSize)
        {
            if (_copyBufferSize < requiredSize && _copyBufferHandle != 0)
            {
                GL.DeleteBuffer(_copyBufferHandle);

                _copyBufferHandle = 0;
            }

            if (_copyBufferHandle == 0)
            {
                _copyBufferHandle = GL.GenBuffer();
                _copyBufferSize = requiredSize;

                GL.BindBuffer(BufferTarget.CopyWriteBuffer, _copyBufferHandle);
                GL.BufferStorage(BufferTarget.CopyWriteBuffer, requiredSize, IntPtr.Zero, BufferStorageFlags.MapReadBit | BufferStorageFlags.MapPersistentBit);

                _bufferMap = GL.MapBufferRange(BufferTarget.CopyWriteBuffer, IntPtr.Zero, requiredSize, BufferAccessMask.MapReadBit | BufferAccessMask.MapPersistentBit);
            }
        }

        /// <summary>
        /// Gets a pointer to a pinned host array with at least <paramref name="requiredSize"/> bytes.
        /// The same array is reused between calls, so its contents are only valid
        /// until the next call on this instance.
        /// </summary>
        /// <param name="requiredSize">Minimum size in bytes the array must have</param>
        /// <returns>Pointer to the start of the pinned array</returns>
        public unsafe IntPtr GetHostArray(int requiredSize)
        {
            if (_data == null || _data.Length < requiredSize)
            {
                // Pinned allocation: the pointer below stays valid without a GCHandle.
                _data = GC.AllocateUninitializedArray<byte>(requiredSize, true);

                _dataMap = (IntPtr)Unsafe.AsPointer(ref MemoryMarshal.GetArrayDataReference(_data));
            }

            return _dataMap;
        }

        /// <summary>
        /// Waits for pending GPU commands to complete so the persistent mapping
        /// contains up-to-date data. Logs an error and continues if the wait
        /// exceeds 1 second.
        /// </summary>
        private static void Sync()
        {
            GL.MemoryBarrier(MemoryBarrierFlags.ClientMappedBufferBarrierBit);

            IntPtr sync = GL.FenceSync(SyncCondition.SyncGpuCommandsComplete, WaitSyncFlags.None);
            WaitSyncStatus syncResult = GL.ClientWaitSync(sync, ClientWaitSyncFlags.SyncFlushCommandsBit, 1000000000);

            if (syncResult == WaitSyncStatus.TimeoutExpired)
            {
                // Plain string: the message has no interpolation holes, so the $ prefix was unnecessary.
                Logger.Error?.PrintMsg(LogClass.Gpu, "Failed to sync persistent buffer state within 1000ms. Continuing...");
            }

            GL.DeleteSync(sync);
        }

        /// <summary>
        /// Copies the full texture data into the persistent copy buffer and returns
        /// it as a span. The span aliases the persistent mapping and is overwritten
        /// by the next flush on this instance.
        /// </summary>
        /// <param name="view">Texture to read</param>
        /// <param name="size">Size in bytes of the texture data</param>
        /// <returns>Span over the flushed texture data</returns>
        public unsafe ReadOnlySpan<byte> GetTextureData(TextureView view, int size)
        {
            EnsureBuffer(size);

            GL.BindBuffer(BufferTarget.PixelPackBuffer, _copyBufferHandle);

            view.WriteToPbo(0, false);

            GL.BindBuffer(BufferTarget.PixelPackBuffer, 0);

            Sync();

            return new ReadOnlySpan<byte>(_bufferMap.ToPointer(), size);
        }

        /// <summary>
        /// Copies a single layer/level of the texture into the persistent copy buffer
        /// and returns it as a span. The span aliases the persistent mapping and is
        /// overwritten by the next flush on this instance.
        /// </summary>
        /// <param name="view">Texture to read</param>
        /// <param name="size">Size in bytes of the full data written to the buffer</param>
        /// <param name="layer">Layer of the texture to read</param>
        /// <param name="level">Mip level of the texture to read</param>
        /// <returns>Span over the flushed layer/level data</returns>
        public unsafe ReadOnlySpan<byte> GetTextureData(TextureView view, int size, int layer, int level)
        {
            EnsureBuffer(size);

            GL.BindBuffer(BufferTarget.PixelPackBuffer, _copyBufferHandle);

            // WriteToPbo2D returns the offset of the requested layer/level within the buffer.
            int offset = view.WriteToPbo2D(0, layer, level);

            GL.BindBuffer(BufferTarget.PixelPackBuffer, 0);

            Sync();

            return new ReadOnlySpan<byte>(_bufferMap.ToPointer(), size)[offset..];
        }

        /// <summary>
        /// Copies a range of a GPU buffer into the persistent copy buffer and returns
        /// it as a span. The span aliases the persistent mapping and is overwritten
        /// by the next flush on this instance.
        /// </summary>
        /// <param name="buffer">Handle of the buffer to read</param>
        /// <param name="offset">Offset in bytes of the range to read</param>
        /// <param name="size">Size in bytes of the range to read</param>
        /// <returns>Span over the flushed buffer data</returns>
        public unsafe ReadOnlySpan<byte> GetBufferData(BufferHandle buffer, int offset, int size)
        {
            EnsureBuffer(size);

            GL.BindBuffer(BufferTarget.CopyReadBuffer, buffer.ToInt32());
            GL.BindBuffer(BufferTarget.CopyWriteBuffer, _copyBufferHandle);

            GL.CopyBufferSubData(BufferTarget.CopyReadBuffer, BufferTarget.CopyWriteBuffer, (IntPtr)offset, IntPtr.Zero, size);

            GL.BindBuffer(BufferTarget.CopyWriteBuffer, 0);

            Sync();

            return new ReadOnlySpan<byte>(_bufferMap.ToPointer(), size);
        }

        public void Dispose()
        {
            if (_copyBufferHandle != 0)
            {
                GL.DeleteBuffer(_copyBufferHandle);

                // Reset state so Dispose is safe to call more than once, and so a
                // later EnsureBuffer recreates the buffer instead of reusing the
                // deleted GL name via the stale size check.
                _copyBufferHandle = 0;
                _copyBufferSize = 0;
                _bufferMap = IntPtr.Zero;
            }
        }
    }
|
|
|
|
}
|