Ryujinx-ryubing/src/Ryujinx.Memory/SparseMemoryBlock.cs
LotP1 68525ab7f1 JIT Sparse Function Table (#250)
A more up-to-date build of the JIT Sparse PR for continued development.
The JIT Sparse Function Table was originally developed by riperiperi for
the original Ryujinx project. It reduces the number of layers in the
Function Table structure to decrease lookup times, at the cost of
slightly higher RAM usage.
This PR rebalances the JIT Sparse Function Table to be somewhat more RAM
intensive but faster in workloads where the JIT Function Table is a
bottleneck. Systems with faster RAM will see a larger improvement, while
slower RAM (DDR3 and potentially slow DDR4) may see a slight performance
decrease.
This PR also implements the groundwork for a PPTC profile system that
could allow PPTC to be used with ExeFS mods enabled in the future.
This PR also potentially fixes a rare issue where Avalonia would time
out, e.g. when running ExeFS mods with TotK and an unusual controller
configuration.
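
To illustrate the layer trade-off described above, here is a minimal sketch; the class name, level counts, shifts, and masks are assumptions for illustration only and do not reflect the actual AddressTable layout:

static class FunctionTableSketch
{
    // Multi-level: several dependent array loads per lookup; smaller resident footprint.
    public static nint LookupMultiLevel(nint[][][] root, ulong address)
    {
        return root[(address >> 26) & 0x3FF][(address >> 13) & 0x1FFF][(address >> 2) & 0x7FF];
    }

    // Sparser/flatter: fewer dependent loads into one large, mostly-reserved table;
    // faster lookups at the cost of more address space. A table like this can be
    // backed by something like SparseMemoryBlock so only touched pages are committed.
    public static nint LookupSparse(nint[] flat, ulong address)
    {
        return flat[address >> 2];
    }
}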

---------

Co-authored-by: Evan Husted <gr33m11@gmail.com>
2024-12-19 00:26:45 -06:00

using Ryujinx.Common;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;

namespace Ryujinx.Memory
{
    public delegate void PageInitDelegate(Span<byte> page);

    public class SparseMemoryBlock : IDisposable
    {
        private const ulong MapGranularity = 1UL << 17;

        private readonly PageInitDelegate _pageInit;

        private readonly object _lock = new object();
        private readonly ulong _pageSize;
        private readonly MemoryBlock _reservedBlock;
        private readonly List<MemoryBlock> _mappedBlocks;
        private ulong _mappedBlockUsage;

        private readonly ulong[] _mappedPageBitmap;

        public MemoryBlock Block => _reservedBlock;
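
        // Reserves a size-byte virtual address range. When a fill block is
        // provided, its contents are mirrored across the entire range up
        // front; otherwise pages stay unmapped until EnsureMapped commits
        // and initialises them on demand.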
        public SparseMemoryBlock(ulong size, PageInitDelegate pageInit, MemoryBlock fill)
        {
            _pageSize = MemoryBlock.GetPageSize();
            _reservedBlock = new MemoryBlock(size, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible);
            _mappedBlocks = new List<MemoryBlock>();
            _pageInit = pageInit;

            int pages = (int)BitUtils.DivRoundUp(size, _pageSize);
            int bitmapEntries = BitUtils.DivRoundUp(pages, 64);
            _mappedPageBitmap = new ulong[bitmapEntries];

            if (fill != null)
            {
                // Fill the block with mappings from the fill block.

                if (fill.Size % _pageSize != 0)
                {
                    throw new ArgumentException("Fill memory block should be page aligned.", nameof(fill));
                }

                int repeats = (int)BitUtils.DivRoundUp(size, fill.Size);

                ulong offset = 0;
                for (int i = 0; i < repeats; i++)
                {
                    _reservedBlock.MapView(fill, 0, offset, Math.Min(fill.Size, size - offset));
                    offset += fill.Size;
                }
            }

            // If a fill block isn't provided, pages that haven't been EnsureMapped stay unmapped.
            // The caller can rely on a signal handler to populate empty pages instead.
        }
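
        // Backs a single page at pageOffset with real memory, carved out of a
        // shared MapGranularity-sized mirrorable block, running the page init
        // callback on it first. Must be called while holding _lock.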
        private void MapPage(ulong pageOffset)
        {
            // Take a page from the latest mapped block.
            MemoryBlock block = _mappedBlocks.LastOrDefault();

            if (block == null || _mappedBlockUsage == MapGranularity)
            {
                // Need to map some more memory.
                block = new MemoryBlock(MapGranularity, MemoryAllocationFlags.Mirrorable);

                _mappedBlocks.Add(block);
                _mappedBlockUsage = 0;
            }

            _pageInit(block.GetSpan(_mappedBlockUsage, (int)_pageSize));
            _reservedBlock.MapView(block, _mappedBlockUsage, pageOffset, _pageSize);

            _mappedBlockUsage += _pageSize;
        }
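
        // Ensures the page containing the given offset is mapped and
        // initialised. The mapped-page bitmap provides a lock-free fast path
        // for pages that are already mapped; the re-check under the lock
        // guarantees each page is mapped and initialised exactly once.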
        public void EnsureMapped(ulong offset)
        {
            int pageIndex = (int)(offset / _pageSize);
            int bitmapIndex = pageIndex >> 6;

            ref ulong entry = ref _mappedPageBitmap[bitmapIndex];
            ulong bit = 1UL << (pageIndex & 63);

            if ((Volatile.Read(ref entry) & bit) == 0)
            {
                // Not mapped.
                lock (_lock)
                {
                    // Check the bit while locked to make sure that this only happens once.
                    ulong lockedEntry = Volatile.Read(ref entry);

                    if ((lockedEntry & bit) == 0)
                    {
                        MapPage(offset & ~(_pageSize - 1));

                        lockedEntry |= bit;
                        Interlocked.Exchange(ref entry, lockedEntry);
                    }
                }
            }
        }

        public void Dispose()
        {
            _reservedBlock.Dispose();

            foreach (MemoryBlock block in _mappedBlocks)
            {
                block.Dispose();
            }

            GC.SuppressFinalize(this);
        }
    }
}
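
For reference, a minimal usage sketch; the reservation size and the page init callback below are placeholders for illustration, not taken from the callers in this PR:

using Ryujinx.Memory;

// Hypothetical usage; the size and init logic are illustrative only.
const ulong Size = 1UL << 30; // reserve 1 GiB of address space

using SparseMemoryBlock sparse = new SparseMemoryBlock(Size, page =>
{
    // Runs once per page, the first time that page is committed.
    page.Fill(0);
}, null);

// Commit and initialise only the pages that are actually touched.
sparse.EnsureMapped(0x1000);
sparse.EnsureMapped(0x20_0000);

// Reads and writes then go through the reserved block.
MemoryBlock backing = sparse.Block;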