diff --git a/src/ARMeilleure/Instructions/InstEmitMemoryHelper.cs b/src/ARMeilleure/Instructions/InstEmitMemoryHelper.cs
index a807eed51..5610b7749 100644
--- a/src/ARMeilleure/Instructions/InstEmitMemoryHelper.cs
+++ b/src/ARMeilleure/Instructions/InstEmitMemoryHelper.cs
@@ -157,7 +157,7 @@ namespace ARMeilleure.Instructions
context.Copy(temp, value);
- if (!context.Memory.Type.IsHostMapped())
+ if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -198,7 +198,7 @@ namespace ARMeilleure.Instructions
SetInt(context, rt, value);
- if (!context.Memory.Type.IsHostMapped())
+ if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -265,7 +265,7 @@ namespace ARMeilleure.Instructions
context.Copy(GetVec(rt), value);
- if (!context.Memory.Type.IsHostMapped())
+ if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -312,7 +312,7 @@ namespace ARMeilleure.Instructions
break;
}
- if (!context.Memory.Type.IsHostMapped())
+ if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -385,7 +385,7 @@ namespace ARMeilleure.Instructions
break;
}
- if (!context.Memory.Type.IsHostMapped())
+ if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@@ -403,6 +403,21 @@ namespace ARMeilleure.Instructions
{
return EmitHostMappedPointer(context, address);
}
+ else if (context.Memory.Type == MemoryManagerType.HostTracked)
+ {
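+ // Each 8-byte entry of the flat page table stores (host address - guest VA) for its page,
+ // so translation is a single load plus add: host = va + table[va >> PageBits].
+ // For example (hypothetical addresses), if guest page 0x10000 is backed at host
+ // 0x7F0000010000, its entry holds 0x7F0000000000.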
+ Operand ptBase = !context.HasPtc
+ ? Const(context.Memory.PageTablePointer.ToInt64())
+ : Const(context.Memory.PageTablePointer.ToInt64(), Ptc.PageTableSymbol);
+
+ Operand ptOffset = context.ShiftRightUI(address, Const(PageBits));
+
+ if (ptOffset.Type == OperandType.I32)
+ {
+ ptOffset = context.ZeroExtend32(OperandType.I64, ptOffset);
+ }
+
+ return context.Add(address, context.Load(OperandType.I64, context.Add(ptBase, context.ShiftLeft(ptOffset, Const(3)))));
+ }
int ptLevelBits = context.Memory.AddressSpaceBits - PageBits;
int ptLevelSize = 1 << ptLevelBits;
diff --git a/src/ARMeilleure/Memory/MemoryManagerType.cs b/src/ARMeilleure/Memory/MemoryManagerType.cs
index 1e656ba27..757322b4b 100644
--- a/src/ARMeilleure/Memory/MemoryManagerType.cs
+++ b/src/ARMeilleure/Memory/MemoryManagerType.cs
@@ -18,6 +18,12 @@ namespace ARMeilleure.Memory
/// </summary>
SoftwarePageTable,
+ /// <summary>
+ /// High level implementation using a software flat page table for address translation,
+ /// with no support for handling invalid or non-contiguous memory access.
+ /// </summary>
+ HostTracked,
+
/// <summary>
/// High level implementation with mappings managed by the host OS, effectively using hardware
/// page tables. No address translation is performed in software and the memory is just accessed directly.
@@ -37,5 +43,10 @@ namespace ARMeilleure.Memory
{
return type == MemoryManagerType.HostMapped || type == MemoryManagerType.HostMappedUnsafe;
}
+
+ public static bool IsHostMappedOrTracked(this MemoryManagerType type)
+ {
+ return type == MemoryManagerType.HostTracked || type == MemoryManagerType.HostMapped || type == MemoryManagerType.HostMappedUnsafe;
+ }
}
}
diff --git a/src/ARMeilleure/Signal/NativeSignalHandler.cs b/src/ARMeilleure/Signal/NativeSignalHandler.cs
index 31ec16cb1..3f0e9e4bf 100644
--- a/src/ARMeilleure/Signal/NativeSignalHandler.cs
+++ b/src/ARMeilleure/Signal/NativeSignalHandler.cs
@@ -1,4 +1,4 @@
-using ARMeilleure.IntermediateRepresentation;
+using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Memory;
using ARMeilleure.Translation;
using ARMeilleure.Translation.Cache;
diff --git a/src/ARMeilleure/Translation/Cache/JitUnwindWindows.cs b/src/ARMeilleure/Translation/Cache/JitUnwindWindows.cs
index 3957a7559..3c2a60a1a 100644
--- a/src/ARMeilleure/Translation/Cache/JitUnwindWindows.cs
+++ b/src/ARMeilleure/Translation/Cache/JitUnwindWindows.cs
@@ -114,7 +114,7 @@ namespace ARMeilleure.Translation.Cache
{
int stackOffset = entry.StackOffsetOrAllocSize;
- Debug.Assert(stackOffset % 16 == 0);
+ // Debug.Assert(stackOffset % 16 == 0);
if (stackOffset <= 0xFFFF0)
{
@@ -135,7 +135,7 @@ namespace ARMeilleure.Translation.Cache
{
int allocSize = entry.StackOffsetOrAllocSize;
- Debug.Assert(allocSize % 8 == 0);
+ // Debug.Assert(allocSize % 8 == 0);
if (allocSize <= 128)
{
diff --git a/src/ARMeilleure/Translation/Translator.cs b/src/ARMeilleure/Translation/Translator.cs
index dc18038ba..018332080 100644
--- a/src/ARMeilleure/Translation/Translator.cs
+++ b/src/ARMeilleure/Translation/Translator.cs
@@ -80,7 +80,7 @@ namespace ARMeilleure.Translation
FunctionTable.Fill = (ulong)Stubs.SlowDispatchStub;
- if (memory.Type.IsHostMapped())
+ if (memory.Type.IsHostMappedOrTracked())
{
NativeSignalHandler.InitializeSignalHandler(allocator.GetPageSize());
}
diff --git a/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocator.cs b/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocator.cs
index 4e3723d55..86936c592 100644
--- a/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocator.cs
+++ b/src/Ryujinx.Cpu/AppleHv/HvMemoryBlockAllocator.cs
@@ -38,7 +38,7 @@ namespace Ryujinx.Cpu.AppleHv
private readonly HvIpaAllocator _ipaAllocator;
- public HvMemoryBlockAllocator(HvIpaAllocator ipaAllocator, int blockAlignment) : base(blockAlignment, MemoryAllocationFlags.None)
+ public HvMemoryBlockAllocator(HvIpaAllocator ipaAllocator, ulong blockAlignment) : base(blockAlignment, MemoryAllocationFlags.None)
{
_ipaAllocator = ipaAllocator;
}
diff --git a/src/Ryujinx.Cpu/Jit/AddressSpacePartition.cs b/src/Ryujinx.Cpu/Jit/AddressSpacePartition.cs
new file mode 100644
index 000000000..da3b2a3d2
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/AddressSpacePartition.cs
@@ -0,0 +1,612 @@
+using Ryujinx.Common;
+using Ryujinx.Common.Collections;
+using Ryujinx.Memory;
+using System;
+using System.Diagnostics;
+
+namespace Ryujinx.Cpu.Jit
+{
+ readonly struct PrivateRange
+ {
+ public readonly MemoryBlock Memory;
+ public readonly ulong Offset;
+ public readonly ulong Size;
+
+ public static PrivateRange Empty => new(null, 0, 0);
+
+ public PrivateRange(MemoryBlock memory, ulong offset, ulong size)
+ {
+ Memory = memory;
+ Offset = offset;
+ Size = size;
+ }
+ }
+
+ class AddressSpacePartition : IDisposable
+ {
+ private const ulong GuestPageSize = 0x1000;
+
+ private const int DefaultBlockAlignment = 1 << 20;
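+ // 1 << 20 = 1 MiB: allocation granularity of the private memory allocator backing private mappings.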
+
+ private enum MappingType : byte
+ {
+ None,
+ Private,
+ }
+
+ private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>
+ {
+ public ulong Address { get; private set; }
+ public ulong Size { get; private set; }
+ public ulong EndAddress => Address + Size;
+ public MappingType Type { get; private set; }
+
+ public Mapping(ulong address, ulong size, MappingType type)
+ {
+ Address = address;
+ Size = size;
+ Type = type;
+ }
+
+ public Mapping Split(ulong splitAddress)
+ {
+ ulong leftSize = splitAddress - Address;
+ ulong rightSize = EndAddress - splitAddress;
+
+ Mapping left = new(Address, leftSize, Type);
+
+ Address = splitAddress;
+ Size = rightSize;
+
+ return left;
+ }
+
+ public void UpdateState(MappingType newType)
+ {
+ Type = newType;
+ }
+
+ public void Extend(ulong sizeDelta)
+ {
+ Size += sizeDelta;
+ }
+
+ public int CompareTo(Mapping other)
+ {
+ if (Address < other.Address)
+ {
+ return -1;
+ }
+ else if (Address <= other.EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+ }
+
+ private class PrivateMapping : IntrusiveRedBlackTreeNode<PrivateMapping>, IComparable<PrivateMapping>
+ {
+ public ulong Address { get; private set; }
+ public ulong Size { get; private set; }
+ public ulong EndAddress => Address + Size;
+ public PrivateMemoryAllocation PrivateAllocation { get; private set; }
+
+ public PrivateMapping(ulong address, ulong size, PrivateMemoryAllocation privateAllocation)
+ {
+ if (size == 0)
+ {
+ throw new ArgumentOutOfRangeException(nameof(size), "Size cannot be zero.");
+ }
+
+ Address = address;
+ Size = size;
+ PrivateAllocation = privateAllocation;
+ }
+
+ public PrivateMapping Split(ulong splitAddress)
+ {
+ ulong leftSize = splitAddress - Address;
+ ulong rightSize = EndAddress - splitAddress;
+
+ Debug.Assert(leftSize > 0);
+ Debug.Assert(rightSize > 0);
+
+ (var leftAllocation, PrivateAllocation) = PrivateAllocation.Split(leftSize);
+
+ PrivateMapping left = new(Address, leftSize, leftAllocation);
+
+ Address = splitAddress;
+ Size = rightSize;
+
+ return left;
+ }
+
+ public void Map(AddressSpacePartitionAllocation baseBlock, ulong baseAddress, PrivateMemoryAllocation newAllocation)
+ {
+ baseBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address - baseAddress, Size);
+ PrivateAllocation = newAllocation;
+ }
+
+ public void Unmap(AddressSpacePartitionAllocation baseBlock, ulong baseAddress)
+ {
+ if (PrivateAllocation.IsValid)
+ {
+ baseBlock.UnmapView(PrivateAllocation.Memory, Address - baseAddress, Size);
+ PrivateAllocation.Dispose();
+ }
+
+ PrivateAllocation = default;
+ }
+
+ public void Extend(ulong sizeDelta)
+ {
+ Size += sizeDelta;
+ }
+
+ public int CompareTo(PrivateMapping other)
+ {
+ if (Address < other.Address)
+ {
+ return -1;
+ }
+ else if (Address <= other.EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+ }
+
+ private readonly MemoryBlock _backingMemory;
+ private readonly AddressSpacePartitionAllocation _baseMemory;
+ private readonly PrivateMemoryAllocator _privateMemoryAllocator;
+ private readonly IntrusiveRedBlackTree<Mapping> _mappingTree;
+ private readonly IntrusiveRedBlackTree<PrivateMapping> _privateTree;
+
+ private readonly object _treeLock;
+
+ private readonly ulong _hostPageSize;
+
+ private ulong? _firstPagePa;
+ private ulong? _lastPagePa;
+ private ulong _cachedFirstPagePa;
+ private ulong _cachedLastPagePa;
+ private bool _hasBridgeAtEnd;
+ private MemoryPermission _lastPageProtection;
+
+ public ulong Address { get; }
+ public ulong Size { get; }
+ public ulong EndAddress => Address + Size;
+
+ public AddressSpacePartition(AddressSpacePartitionAllocation baseMemory, MemoryBlock backingMemory, ulong address, ulong size)
+ {
+ _privateMemoryAllocator = new PrivateMemoryAllocator(DefaultBlockAlignment, MemoryAllocationFlags.Mirrorable);
+ _mappingTree = new IntrusiveRedBlackTree<Mapping>();
+ _privateTree = new IntrusiveRedBlackTree<PrivateMapping>();
+ _treeLock = new object();
+
+ _mappingTree.Add(new Mapping(address, size, MappingType.None));
+ _privateTree.Add(new PrivateMapping(address, size, default));
+
+ _hostPageSize = MemoryBlock.GetPageSize();
+
+ _backingMemory = backingMemory;
+ _baseMemory = baseMemory;
+
+ _cachedFirstPagePa = ulong.MaxValue;
+ _cachedLastPagePa = ulong.MaxValue;
+ _lastPageProtection = MemoryPermission.ReadAndWrite;
+
+ Address = address;
+ Size = size;
+ }
+
+ public bool IsEmpty()
+ {
+ lock (_treeLock)
+ {
+ Mapping map = _mappingTree.GetNode(new Mapping(Address, Size, MappingType.None));
+
+ return map != null && map.Address == Address && map.Size == Size && map.Type == MappingType.None;
+ }
+ }
+
+ public void Map(ulong va, ulong pa, ulong size)
+ {
+ Debug.Assert(va >= Address);
+ Debug.Assert(va + size <= EndAddress);
+
+ if (va == Address)
+ {
+ _firstPagePa = pa;
+ }
+
+ if (va <= EndAddress - GuestPageSize && va + size > EndAddress - GuestPageSize)
+ {
+ _lastPagePa = pa + ((EndAddress - GuestPageSize) - va);
+ }
+
+ lock (_treeLock)
+ {
+ Update(va, pa, size, MappingType.Private);
+ }
+ }
+
+ public void Unmap(ulong va, ulong size)
+ {
+ Debug.Assert(va >= Address);
+ Debug.Assert(va + size <= EndAddress);
+
+ if (va == Address)
+ {
+ _firstPagePa = null;
+ }
+
+ if (va <= EndAddress - GuestPageSize && va + size > EndAddress - GuestPageSize)
+ {
+ _lastPagePa = null;
+ }
+
+ lock (_treeLock)
+ {
+ Update(va, 0UL, size, MappingType.None);
+ }
+ }
+
+ public void Reprotect(ulong va, ulong size, MemoryPermission protection)
+ {
+ Debug.Assert(va >= Address);
+ Debug.Assert(va + size <= EndAddress);
+
+ _baseMemory.Reprotect(va - Address, size, protection, false);
+
+ if (va == EndAddress - _hostPageSize)
+ {
+ // Protections at the last page also applies to the bridge, if we have one.
+ // (This is because last page access is always done on the bridge, not on our base mapping,
+ // for the cases where access crosses a page boundary and reaches the non-contiguous next mapping).
+
+ if (_hasBridgeAtEnd)
+ {
+ _baseMemory.Reprotect(Size, size, protection, false);
+ }
+
+ _lastPageProtection = protection;
+ }
+ }
+
+ public IntPtr GetPointer(ulong va, ulong size)
+ {
+ Debug.Assert(va >= Address);
+ Debug.Assert(va + size <= EndAddress);
+
+ if (va >= EndAddress - _hostPageSize && _hasBridgeAtEnd)
+ {
+ return _baseMemory.GetPointer(Size + va - (EndAddress - _hostPageSize), size);
+ }
+
+ return _baseMemory.GetPointer(va - Address, size);
+ }
+
+ public void InsertBridgeAtEnd(AddressSpacePartition partitionAfter, Action<ulong, IntPtr, ulong> updatePtCallback)
+ {
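+ // A "bridge" is two host pages mapped back-to-back past this partition's main view
+ // (at offsets Size and Size + hostPageSize): the last page of this partition followed
+ // by the first page of the next one. The page table entry for the last guest page is
+ // redirected to the bridge, so accesses that straddle the partition boundary still see
+ // contiguous host memory even though the partitions are separate host allocations.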
+ ulong firstPagePa = partitionAfter._firstPagePa.HasValue ? partitionAfter._firstPagePa.Value : ulong.MaxValue;
+ ulong lastPagePa = _lastPagePa.HasValue ? _lastPagePa.Value : ulong.MaxValue;
+
+ if (firstPagePa != _cachedFirstPagePa || lastPagePa != _cachedLastPagePa)
+ {
+ if (partitionAfter._firstPagePa.HasValue && _lastPagePa.HasValue)
+ {
+ (MemoryBlock firstPageMemory, ulong firstPageOffset) = partitionAfter.GetFirstPageMemoryAndOffset();
+ (MemoryBlock lastPageMemory, ulong lastPageOffset) = GetLastPageMemoryAndOffset();
+
+ _baseMemory.MapView(lastPageMemory, lastPageOffset, Size, _hostPageSize);
+ _baseMemory.MapView(firstPageMemory, firstPageOffset, Size + _hostPageSize, _hostPageSize);
+
+ _baseMemory.Reprotect(Size, _hostPageSize, _lastPageProtection, false);
+
+ updatePtCallback(EndAddress - _hostPageSize, _baseMemory.GetPointer(Size, _hostPageSize), _hostPageSize);
+
+ _hasBridgeAtEnd = true;
+ }
+ else
+ {
+ if (_lastPagePa.HasValue)
+ {
+ (MemoryBlock lastPageMemory, ulong lastPageOffset) = GetLastPageMemoryAndOffset();
+
+ updatePtCallback(EndAddress - _hostPageSize, lastPageMemory.GetPointer(lastPageOffset, _hostPageSize), _hostPageSize);
+ }
+
+ _hasBridgeAtEnd = false;
+ }
+
+ _cachedFirstPagePa = firstPagePa;
+ _cachedLastPagePa = lastPagePa;
+ }
+ }
+
+ public void RemoveBridgeFromEnd(Action<ulong, IntPtr, ulong> updatePtCallback)
+ {
+ if (_lastPagePa.HasValue)
+ {
+ (MemoryBlock lastPageMemory, ulong lastPageOffset) = GetLastPageMemoryAndOffset();
+
+ updatePtCallback(EndAddress - _hostPageSize, lastPageMemory.GetPointer(lastPageOffset, _hostPageSize), _hostPageSize);
+ }
+
+ _cachedFirstPagePa = ulong.MaxValue;
+ _cachedLastPagePa = ulong.MaxValue;
+
+ _hasBridgeAtEnd = false;
+ }
+
+ private (MemoryBlock, ulong) GetFirstPageMemoryAndOffset()
+ {
+ lock (_treeLock)
+ {
+ PrivateMapping map = _privateTree.GetNode(new PrivateMapping(Address, 1UL, default));
+
+ if (map != null && map.PrivateAllocation.IsValid)
+ {
+ return (map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (Address - map.Address));
+ }
+ }
+
+ return (_backingMemory, _firstPagePa.Value);
+ }
+
+ private (MemoryBlock, ulong) GetLastPageMemoryAndOffset()
+ {
+ lock (_treeLock)
+ {
+ ulong pageAddress = EndAddress - _hostPageSize;
+
+ PrivateMapping map = _privateTree.GetNode(new PrivateMapping(pageAddress, 1UL, default));
+
+ if (map != null && map.PrivateAllocation.IsValid)
+ {
+ return (map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (pageAddress - map.Address));
+ }
+ }
+
+ return (_backingMemory, _lastPagePa.Value & ~(_hostPageSize - 1));
+ }
+
+ private void Update(ulong va, ulong pa, ulong size, MappingType type)
+ {
+ Mapping map = _mappingTree.GetNode(new Mapping(va, 1UL, MappingType.None));
+
+ Update(map, va, pa, size, type);
+ }
+
+ private Mapping Update(Mapping map, ulong va, ulong pa, ulong size, MappingType type)
+ {
+ ulong endAddress = va + size;
+
+ for (; map != null; map = map.Successor)
+ {
+ if (map.Address < va)
+ {
+ _mappingTree.Add(map.Split(va));
+ }
+
+ if (map.EndAddress > endAddress)
+ {
+ Mapping newMap = map.Split(endAddress);
+ _mappingTree.Add(newMap);
+ map = newMap;
+ }
+
+ switch (type)
+ {
+ case MappingType.None:
+ ulong alignment = MemoryBlock.GetPageSize();
+
+ bool unmappedBefore = map.Predecessor == null ||
+ (map.Predecessor.Type == MappingType.None && map.Predecessor.Address <= BitUtils.AlignDown(va, alignment));
+
+ bool unmappedAfter = map.Successor == null ||
+ (map.Successor.Type == MappingType.None && map.Successor.EndAddress >= BitUtils.AlignUp(endAddress, alignment));
+
+ UnmapPrivate(va, size, unmappedBefore, unmappedAfter);
+ break;
+ case MappingType.Private:
+ MapPrivate(va, size);
+ break;
+ }
+
+ map.UpdateState(type);
+ map = TryCoalesce(map);
+
+ if (map.EndAddress >= endAddress)
+ {
+ break;
+ }
+ }
+
+ return map;
+ }
+
+ private Mapping TryCoalesce(Mapping map)
+ {
+ Mapping previousMap = map.Predecessor;
+ Mapping nextMap = map.Successor;
+
+ if (previousMap != null && CanCoalesce(previousMap, map))
+ {
+ previousMap.Extend(map.Size);
+ _mappingTree.Remove(map);
+ map = previousMap;
+ }
+
+ if (nextMap != null && CanCoalesce(map, nextMap))
+ {
+ map.Extend(nextMap.Size);
+ _mappingTree.Remove(nextMap);
+ }
+
+ return map;
+ }
+
+ private static bool CanCoalesce(Mapping left, Mapping right)
+ {
+ return left.Type == right.Type;
+ }
+
+ private void MapPrivate(ulong va, ulong size)
+ {
+ ulong endAddress = va + size;
+
+ ulong alignment = MemoryBlock.GetPageSize();
+
+ // Expand the range outwards based on page size to ensure that at least the requested region is mapped.
+ ulong vaAligned = BitUtils.AlignDown(va, alignment);
+ ulong endAddressAligned = BitUtils.AlignUp(endAddress, alignment);
+
+ PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));
+
+ for (; map != null; map = map.Successor)
+ {
+ if (!map.PrivateAllocation.IsValid)
+ {
+ if (map.Address < vaAligned)
+ {
+ _privateTree.Add(map.Split(vaAligned));
+ }
+
+ if (map.EndAddress > endAddressAligned)
+ {
+ PrivateMapping newMap = map.Split(endAddressAligned);
+ _privateTree.Add(newMap);
+ map = newMap;
+ }
+
+ map.Map(_baseMemory, Address, _privateMemoryAllocator.Allocate(map.Size, MemoryBlock.GetPageSize()));
+ }
+
+ if (map.EndAddress >= endAddressAligned)
+ {
+ break;
+ }
+ }
+ }
+
+ private void UnmapPrivate(ulong va, ulong size, bool unmappedBefore, bool unmappedAfter)
+ {
+ ulong endAddress = va + size;
+
+ ulong alignment = MemoryBlock.GetPageSize();
+
+ // If the adjacent mappings are unmapped, expand the range outwards,
+ // otherwise shrink it inwards. We must ensure we won't unmap pages that might still be in use.
+ ulong vaAligned = unmappedBefore ? BitUtils.AlignDown(va, alignment) : BitUtils.AlignUp(va, alignment);
+ ulong endAddressAligned = unmappedAfter ? BitUtils.AlignUp(endAddress, alignment) : BitUtils.AlignDown(endAddress, alignment);
+
+ if (endAddressAligned <= vaAligned)
+ {
+ return;
+ }
+
+ PrivateMapping map = _privateTree.GetNode(new PrivateMapping(vaAligned, 1UL, default));
+
+ for (; map != null; map = map.Successor)
+ {
+ if (map.PrivateAllocation.IsValid)
+ {
+ if (map.Address < vaAligned)
+ {
+ _privateTree.Add(map.Split(vaAligned));
+ }
+
+ if (map.EndAddress > endAddressAligned)
+ {
+ PrivateMapping newMap = map.Split(endAddressAligned);
+ _privateTree.Add(newMap);
+ map = newMap;
+ }
+
+ map.Unmap(_baseMemory, Address);
+ map = TryCoalesce(map);
+ }
+
+ if (map.EndAddress >= endAddressAligned)
+ {
+ break;
+ }
+ }
+ }
+
+ private PrivateMapping TryCoalesce(PrivateMapping map)
+ {
+ PrivateMapping previousMap = map.Predecessor;
+ PrivateMapping nextMap = map.Successor;
+
+ if (previousMap != null && CanCoalesce(previousMap, map))
+ {
+ previousMap.Extend(map.Size);
+ _privateTree.Remove(map);
+ map = previousMap;
+ }
+
+ if (nextMap != null && CanCoalesce(map, nextMap))
+ {
+ map.Extend(nextMap.Size);
+ _privateTree.Remove(nextMap);
+ }
+
+ return map;
+ }
+
+ private static bool CanCoalesce(PrivateMapping left, PrivateMapping right)
+ {
+ return !left.PrivateAllocation.IsValid && !right.PrivateAllocation.IsValid;
+ }
+
+ public PrivateRange GetFirstPrivateAllocation(ulong va, ulong size, out ulong nextVa)
+ {
+ lock (_treeLock)
+ {
+ PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));
+
+ // The private tree covers the whole partition, so a covering node is always expected;
+ // fall back to the end of the requested range if it is somehow missing.
+ nextVa = map?.EndAddress ?? va + size;
+
+ if (map != null && map.PrivateAllocation.IsValid)
+ {
+ ulong startOffset = va - map.Address;
+
+ return new(
+ map.PrivateAllocation.Memory,
+ map.PrivateAllocation.Offset + startOffset,
+ Math.Min(map.PrivateAllocation.Size - startOffset, size));
+ }
+ }
+
+ return PrivateRange.Empty;
+ }
+
+ public bool HasPrivateAllocation(ulong va, ulong size)
+ {
+ lock (_treeLock)
+ {
+ PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, size, default));
+
+ return map != null && map.PrivateAllocation.IsValid;
+ }
+ }
+
+ public void Dispose()
+ {
+ GC.SuppressFinalize(this);
+
+ _privateMemoryAllocator?.Dispose();
+ _baseMemory.Dispose();
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/Jit/AddressSpacePartitionAllocator.cs b/src/Ryujinx.Cpu/Jit/AddressSpacePartitionAllocator.cs
new file mode 100644
index 000000000..f09f4e744
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/AddressSpacePartitionAllocator.cs
@@ -0,0 +1,174 @@
+using Ryujinx.Common.Collections;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Tracking;
+using System;
+
+namespace Ryujinx.Cpu.Jit
+{
+ readonly struct AddressSpacePartitionAllocation : IDisposable
+ {
+ private readonly AddressSpacePartitionAllocator _owner;
+ private readonly PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>.Allocation _allocation;
+
+ public IntPtr Pointer => (IntPtr)((ulong)_allocation.Block.Memory.Pointer + _allocation.Offset);
+
+ public AddressSpacePartitionAllocation(
+ AddressSpacePartitionAllocator owner,
+ PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>.Allocation allocation)
+ {
+ _owner = owner;
+ _allocation = allocation;
+ }
+
+ public void RegisterMapping(ulong va, ulong endVa, int bridgeSize)
+ {
+ _allocation.Block.AddMapping(_allocation.Offset, _allocation.Size, va, endVa, bridgeSize);
+ }
+
+ public void MapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
+ {
+ _allocation.Block.Memory.MapView(srcBlock, srcOffset, _allocation.Offset + dstOffset, size);
+ }
+
+ public void UnmapView(MemoryBlock srcBlock, ulong offset, ulong size)
+ {
+ _allocation.Block.Memory.UnmapView(srcBlock, _allocation.Offset + offset, size);
+ }
+
+ public void Reprotect(ulong offset, ulong size, MemoryPermission permission, bool throwOnFail)
+ {
+ _allocation.Block.Memory.Reprotect(_allocation.Offset + offset, size, permission, throwOnFail);
+ }
+
+ public IntPtr GetPointer(ulong offset, ulong size)
+ {
+ return _allocation.Block.Memory.GetPointer(_allocation.Offset + offset, size);
+ }
+
+ public void Dispose()
+ {
+ lock (_owner.Lock)
+ {
+ _allocation.Block.RemoveMapping(_allocation.Offset, _allocation.Size);
+ _owner.Free(_allocation.Block, _allocation.Offset, _allocation.Size);
+ }
+ }
+ }
+
+ class AddressSpacePartitionAllocator : PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>
+ {
+ private const ulong DefaultBlockAlignment = 1UL << 32; // 4GB
+
+ public class Block : PrivateMemoryAllocator.Block
+ {
+ private readonly MemoryTracking _tracking;
+ private readonly MemoryEhMeilleure _memoryEh;
+
+ private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>
+ {
+ public ulong Address { get; }
+ public ulong Size { get; }
+ public ulong EndAddress => Address + Size;
+ public ulong Va { get; }
+ public ulong EndVa { get; }
+ public int BridgeSize { get; }
+
+ public Mapping(ulong address, ulong size, ulong va, ulong endVa, int bridgeSize)
+ {
+ Address = address;
+ Size = size;
+ Va = va;
+ EndVa = endVa;
+ BridgeSize = bridgeSize;
+ }
+
+ public int CompareTo(Mapping other)
+ {
+ if (Address < other.Address)
+ {
+ return -1;
+ }
+ else if (Address <= other.EndAddress - 1UL)
+ {
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+ }
+ }
+
+ private readonly IntrusiveRedBlackTree<Mapping> _mappingTree;
+
+ public Block(MemoryTracking tracking, MemoryBlock memory, ulong size) : base(memory, size)
+ {
+ _tracking = tracking;
+ _memoryEh = new(memory, null, tracking, VirtualMemoryEvent);
+ _mappingTree = new();
+ }
+
+ public void AddMapping(ulong offset, ulong size, ulong va, ulong endVa, int bridgeSize)
+ {
+ _mappingTree.Add(new(offset, size, va, endVa, bridgeSize));
+ }
+
+ public void RemoveMapping(ulong offset, ulong size)
+ {
+ _mappingTree.Remove(_mappingTree.GetNode(new Mapping(offset, size, 0, 0, 0)));
+ }
+
+ private bool VirtualMemoryEvent(ulong address, ulong size, bool write)
+ {
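+ // Translates a fault at a host offset inside this block back to the guest VA it mirrors.
+ // Offsets past a partition's main view fall on its bridge; shifting them back by half
+ // the bridge size lines them up with the guest pages around the partition boundary.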
+ Mapping map = _mappingTree.GetNode(new Mapping(address, size, 0, 0, 0));
+
+ if (map == null)
+ {
+ return false;
+ }
+
+ address -= map.Address;
+
+ if (address >= (map.EndVa - map.Va))
+ {
+ address -= (ulong)(map.BridgeSize / 2);
+ }
+
+ return _tracking.VirtualMemoryEvent(map.Va + address, size, write);
+ }
+
+ public override void Destroy()
+ {
+ _memoryEh.Dispose();
+
+ base.Destroy();
+ }
+ }
+
+ private readonly MemoryTracking _tracking;
+
+ public object Lock { get; }
+
+ public AddressSpacePartitionAllocator(MemoryTracking tracking) : base(DefaultBlockAlignment, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible)
+ {
+ _tracking = tracking;
+ Lock = new();
+ }
+
+ public AddressSpacePartitionAllocation Allocate(ulong va, ulong size, int bridgeSize)
+ {
+ lock (Lock)
+ {
+ AddressSpacePartitionAllocation allocation = new(this, Allocate(size + (ulong)bridgeSize, MemoryBlock.GetPageSize(), CreateBlock));
+ allocation.RegisterMapping(va, va + size, bridgeSize);
+
+ return allocation;
+ }
+ }
+
+ private Block CreateBlock(MemoryBlock memory, ulong size)
+ {
+ return new Block(_tracking, memory, size);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/Ryujinx.Cpu/Jit/AddressSpacePartitioned.cs b/src/Ryujinx.Cpu/Jit/AddressSpacePartitioned.cs
new file mode 100644
index 000000000..7ef008690
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/AddressSpacePartitioned.cs
@@ -0,0 +1,362 @@
+using Ryujinx.Common;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Tracking;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.Cpu.Jit
+{
+ class AddressSpacePartitioned : IDisposable
+ {
+ private const int PartitionBits = 25;
+ private const ulong PartitionSize = 1UL << PartitionBits;
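+ // With PartitionBits = 25, each partition covers 32 MiB of guest address space.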
+
+ private readonly MemoryBlock _backingMemory;
+ private readonly List<AddressSpacePartition> _partitions;
+ private readonly AddressSpacePartitionAllocator _asAllocator;
+ private readonly Action<ulong, IntPtr, ulong> _updatePtCallback;
+
+ public AddressSpacePartitioned(MemoryTracking tracking, MemoryBlock backingMemory, Action<ulong, IntPtr, ulong> updatePtCallback)
+ {
+ _backingMemory = backingMemory;
+ _partitions = new();
+ _asAllocator = new(tracking);
+ _updatePtCallback = updatePtCallback;
+ }
+
+ public void Map(ulong va, ulong pa, ulong size)
+ {
+ EnsurePartitions(va, size);
+
+ ulong endVa = va + size;
+
+ while (va < endVa)
+ {
+ int partitionIndex = FindPartitionIndex(va);
+ AddressSpacePartition partition = _partitions[partitionIndex];
+
+ (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
+
+ partition.Map(clampedVa, pa, clampedEndVa - clampedVa);
+
+ ulong currentSize = clampedEndVa - clampedVa;
+
+ va += currentSize;
+ pa += currentSize;
+
+ InsertBridgeIfNeeded(partitionIndex);
+ }
+ }
+
+ public void Unmap(ulong va, ulong size)
+ {
+ ulong endVa = va + size;
+
+ while (va < endVa)
+ {
+ int partitionIndex = FindPartitionIndex(va);
+
+ if (partitionIndex < 0)
+ {
+ va += PartitionSize - (va & (PartitionSize - 1));
+
+ continue;
+ }
+
+ AddressSpacePartition partition = _partitions[partitionIndex];
+
+ (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
+
+ partition.Unmap(clampedVa, clampedEndVa - clampedVa);
+
+ va += clampedEndVa - clampedVa;
+
+ RemoveBridgeIfNeeded(partitionIndex);
+
+ if (partition.IsEmpty())
+ {
+ lock (_partitions)
+ {
+ _partitions.Remove(partition);
+ partition.Dispose();
+ }
+ }
+ }
+ }
+
+ public void Reprotect(ulong va, ulong size, MemoryPermission protection, MemoryTracking tracking)
+ {
+ ulong endVa = va + size;
+
+ while (va < endVa)
+ {
+ AddressSpacePartition partition = FindPartition(va);
+
+ if (partition == null)
+ {
+ va += PartitionSize - (va & (PartitionSize - 1));
+
+ continue;
+ }
+
+ (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
+
+ partition.Reprotect(clampedVa, clampedEndVa - clampedVa, protection);
+
+ va += clampedEndVa - clampedVa;
+ }
+ }
+
+ public PrivateRange GetFirstPrivateAllocation(ulong va, ulong size, out ulong nextVa)
+ {
+ AddressSpacePartition partition = FindPartition(va);
+
+ if (partition == null)
+ {
+ nextVa = (va & ~(PartitionSize - 1)) + PartitionSize;
+
+ return PrivateRange.Empty;
+ }
+
+ return partition.GetFirstPrivateAllocation(va, size, out nextVa);
+ }
+
+ public bool HasAnyPrivateAllocation(ulong va, ulong size)
+ {
+ ulong endVa = va + size;
+
+ while (va < endVa)
+ {
+ AddressSpacePartition partition = FindPartition(va);
+
+ if (partition == null)
+ {
+ va += PartitionSize - (va & (PartitionSize - 1));
+
+ continue;
+ }
+
+ (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
+
+ if (partition.HasPrivateAllocation(clampedVa, clampedEndVa - clampedVa))
+ {
+ return true;
+ }
+
+ va += clampedEndVa - clampedVa;
+ }
+
+ return false;
+ }
+
+ private void InsertBridgeIfNeeded(int partitionIndex)
+ {
+ if (partitionIndex > 0 && _partitions[partitionIndex - 1].EndAddress == _partitions[partitionIndex].Address)
+ {
+ _partitions[partitionIndex - 1].InsertBridgeAtEnd(_partitions[partitionIndex], _updatePtCallback);
+ }
+
+ if (partitionIndex + 1 < _partitions.Count && _partitions[partitionIndex].EndAddress == _partitions[partitionIndex + 1].Address)
+ {
+ _partitions[partitionIndex].InsertBridgeAtEnd(_partitions[partitionIndex + 1], _updatePtCallback);
+ }
+ }
+
+ private void RemoveBridgeIfNeeded(int partitionIndex)
+ {
+ if (partitionIndex > 0 && _partitions[partitionIndex - 1].EndAddress == _partitions[partitionIndex].Address)
+ {
+ _partitions[partitionIndex - 1].InsertBridgeAtEnd(_partitions[partitionIndex], _updatePtCallback);
+ }
+
+ if (partitionIndex + 1 < _partitions.Count && _partitions[partitionIndex].EndAddress == _partitions[partitionIndex + 1].Address)
+ {
+ _partitions[partitionIndex].InsertBridgeAtEnd(_partitions[partitionIndex + 1], _updatePtCallback);
+ }
+ else
+ {
+ _partitions[partitionIndex].RemoveBridgeFromEnd(_updatePtCallback);
+ }
+ }
+
+ public IntPtr GetPointer(ulong va, ulong size)
+ {
+ AddressSpacePartition partition = FindPartition(va);
+
+ return partition.GetPointer(va, size);
+ }
+
+ private static (ulong, ulong) ClampRange(AddressSpacePartition partition, ulong va, ulong endVa)
+ {
+ if (va < partition.Address)
+ {
+ va = partition.Address;
+ }
+
+ if (endVa > partition.EndAddress)
+ {
+ endVa = partition.EndAddress;
+ }
+
+ return (va, endVa);
+ }
+
+ private void EnsurePartitions(ulong va, ulong size)
+ {
+ lock (_partitions)
+ {
+ EnsurePartitionsForRange(va, size);
+ }
+ }
+
+ private AddressSpacePartition FindPartition(ulong va)
+ {
+ lock (_partitions)
+ {
+ int index = FindPartitionIndex(va);
+ if (index >= 0)
+ {
+ return _partitions[index];
+ }
+ }
+
+ return null;
+ }
+
+ private int FindPartitionIndex(ulong va)
+ {
+ lock (_partitions)
+ {
+ int left = 0;
+ int middle = 0;
+ int right = _partitions.Count - 1;
+
+ while (left <= right)
+ {
+ middle = left + ((right - left) >> 1);
+
+ AddressSpacePartition partition = _partitions[middle];
+
+ if (partition.Address <= va && partition.EndAddress > va)
+ {
+ return middle;
+ }
+
+ if (partition.Address >= va)
+ {
+ right = middle - 1;
+ }
+ else
+ {
+ left = middle + 1;
+ }
+ }
+ }
+
+ return -1;
+ }
+
+ private void EnsurePartitionsForRange(ulong va, ulong size)
+ {
+ ulong endVa = BitUtils.AlignUp(va + size, PartitionSize);
+ va = BitUtils.AlignDown(va, PartitionSize);
+
+ for (int i = 0; i < _partitions.Count && va < endVa; i++)
+ {
+ AddressSpacePartition partition = _partitions[i];
+
+ if (partition.Address <= va && partition.EndAddress > va)
+ {
+ if (partition.EndAddress >= endVa)
+ {
+ // Fully mapped already.
+ va = endVa;
+
+ break;
+ }
+
+ ulong gapSize;
+
+ if (i + 1 < _partitions.Count)
+ {
+ AddressSpacePartition nextPartition = _partitions[i + 1];
+
+ if (partition.EndAddress == nextPartition.Address)
+ {
+ va = partition.EndAddress;
+
+ continue;
+ }
+
+ gapSize = Math.Min(endVa, nextPartition.Address) - partition.EndAddress;
+ }
+ else
+ {
+ gapSize = endVa - partition.EndAddress;
+ }
+
+ _partitions.Insert(i + 1, new(CreateAsPartitionAllocation(partition.EndAddress, gapSize), _backingMemory, partition.EndAddress, gapSize));
+ va = partition.EndAddress + gapSize;
+ i++;
+ }
+ else if (partition.EndAddress > va)
+ {
+ Debug.Assert(partition.Address > va);
+
+ ulong gapSize;
+
+ if (partition.Address < endVa)
+ {
+ gapSize = partition.Address - va;
+ }
+ else
+ {
+ gapSize = endVa - va;
+ }
+
+ _partitions.Insert(i, new(CreateAsPartitionAllocation(va, gapSize), _backingMemory, va, gapSize));
+ va = Math.Min(partition.EndAddress, endVa);
+ i++;
+ }
+ }
+
+ if (va < endVa)
+ {
+ _partitions.Add(new(CreateAsPartitionAllocation(va, endVa - va), _backingMemory, va, endVa - va));
+ }
+
+ for (int i = 1; i < _partitions.Count; i++)
+ {
+ Debug.Assert(_partitions[i].Address > _partitions[i - 1].Address);
+ Debug.Assert(_partitions[i].EndAddress > _partitions[i - 1].EndAddress);
+ }
+ }
+
+ private AddressSpacePartitionAllocation CreateAsPartitionAllocation(ulong va, ulong size)
+ {
+ ulong bridgeSize = MemoryBlock.GetPageSize() * 2;
+
+ return _asAllocator.Allocate(va, size, (int)bridgeSize);
+ }
+
+ protected virtual void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ foreach (AddressSpacePartition partition in _partitions)
+ {
+ partition.Dispose();
+ }
+
+ _partitions.Clear();
+ _asAllocator.Dispose();
+ }
+ }
+
+ public void Dispose()
+ {
+ Dispose(disposing: true);
+ GC.SuppressFinalize(this);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs b/src/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs
index 2b315e841..6d32787ac 100644
--- a/src/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs
+++ b/src/Ryujinx.Cpu/Jit/MemoryManagerHostMapped.cs
@@ -1,4 +1,4 @@
-using ARMeilleure.Memory;
+using ARMeilleure.Memory;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using Ryujinx.Memory.Tracking;
diff --git a/src/Ryujinx.Cpu/Jit/MemoryManagerHostTracked.cs b/src/Ryujinx.Cpu/Jit/MemoryManagerHostTracked.cs
new file mode 100644
index 000000000..70175ce72
--- /dev/null
+++ b/src/Ryujinx.Cpu/Jit/MemoryManagerHostTracked.cs
@@ -0,0 +1,1009 @@
+using ARMeilleure.Memory;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using Ryujinx.Memory.Tracking;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Threading;
+
+namespace Ryujinx.Cpu.Jit
+{
+ /// <summary>
+ /// Represents a CPU memory manager which maps guest virtual memory directly onto a host virtual region.
+ /// </summary>
+ public sealed class MemoryManagerHostTracked : MemoryManagerBase, IWritableBlock, IMemoryManager, IVirtualMemoryManagerTracked
+ {
+ public const int PageBits = 12;
+ public const int PageSize = 1 << PageBits;
+ public const int PageMask = PageSize - 1;
+
+ public const int PageToPteShift = 5; // 32 pages (2 bits each) in one ulong page table entry.
+ public const ulong BlockMappedMask = 0x5555555555555555; // First bit of each table entry set.
+
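+ // 2-bit state per guest page, 32 pages packed into each ulong of _pageBitmap.
+ // The "Replicated" values are the same tag copied into all 32 slots, used as masks
+ // when testing or updating a whole block of pages at once.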
+ private enum HostMappedPtBits : ulong
+ {
+ Unmapped = 0,
+ Mapped,
+ WriteTracked,
+ ReadWriteTracked,
+
+ MappedReplicated = 0x5555555555555555,
+ WriteTrackedReplicated = 0xaaaaaaaaaaaaaaaa,
+ ReadWriteTrackedReplicated = ulong.MaxValue
+ }
+
+ private readonly InvalidAccessHandler _invalidAccessHandler;
+
+ private readonly MemoryBlock _backingMemory;
+ private readonly PageTable<ulong> _pageTable;
+
+ private readonly ulong[] _pageBitmap;
+
+ public int AddressSpaceBits { get; }
+
+ public MemoryTracking Tracking { get; private set; }
+
+ private const int PteSize = 8;
+
+ private readonly AddressSpacePartitioned _addressSpace;
+
+ public ulong AddressSpaceSize { get; }
+
+ private readonly MemoryBlock _flatPageTable;
+
+ /// <inheritdoc/>
+ public bool Supports4KBPages => false;
+
+ public IntPtr PageTablePointer => _flatPageTable.Pointer;
+
+ public MemoryManagerType Type => MemoryManagerType.HostTracked;
+
+ public event Action<ulong, ulong> UnmapEvent;
+
+ /// <summary>
+ /// Creates a new instance of the host tracked memory manager.
+ /// </summary>
+ /// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
+ /// <param name="addressSpaceSize">Size of the address space</param>
+ /// <param name="invalidAccessHandler">Optional function to handle invalid memory accesses</param>
+ public MemoryManagerHostTracked(MemoryBlock backingMemory, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler)
+ {
+ Tracking = new MemoryTracking(this, (int)MemoryBlock.GetPageSize(), invalidAccessHandler);
+
+ _backingMemory = backingMemory;
+ _pageTable = new PageTable<ulong>();
+ _invalidAccessHandler = invalidAccessHandler;
+ _addressSpace = new(Tracking, backingMemory, UpdatePt);
+ AddressSpaceSize = addressSpaceSize;
+
+ ulong asSize = PageSize;
+ int asBits = PageBits;
+
+ while (asSize < AddressSpaceSize)
+ {
+ asSize <<= 1;
+ asBits++;
+ }
+
+ AddressSpaceBits = asBits;
+
+ _pageBitmap = new ulong[1 << (AddressSpaceBits - (PageBits + PageToPteShift))];
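+ // One 8-byte entry per 4 KiB guest page: a 1 GiB address space, for example, needs a 2 MiB flat table.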
+ _flatPageTable = new MemoryBlock((asSize / PageSize) * PteSize);
+ }
+
+ /// <inheritdoc/>
+ public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ if (flags.HasFlag(MemoryMapFlags.Private))
+ {
+ _addressSpace.Map(va, pa, size);
+ }
+
+ AddMapping(va, size);
+ PtMap(va, pa, size, flags.HasFlag(MemoryMapFlags.Private));
+
+ Tracking.Map(va, size);
+ }
+
+ private void PtMap(ulong va, ulong pa, ulong size, bool privateMap)
+ {
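+ // Each flat page table entry stores (host pointer - guest VA), pointing either into the
+ // partitioned (private) mapping or directly into the shared backing memory.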
+ while (size != 0)
+ {
+ _pageTable.Map(va, pa);
+
+ if (privateMap)
+ {
+ _flatPageTable.Write((va / PageSize) * PteSize, (ulong)_addressSpace.GetPointer(va, PageSize) - va);
+ }
+ else
+ {
+ _flatPageTable.Write((va / PageSize) * PteSize, (ulong)_backingMemory.GetPointer(pa, PageSize) - va);
+ }
+
+ va += PageSize;
+ pa += PageSize;
+ size -= PageSize;
+ }
+ }
+
+ private void UpdatePt(ulong va, IntPtr ptr, ulong size)
+ {
+ ulong remainingSize = size;
+ while (remainingSize != 0)
+ {
+ _flatPageTable.Write((va / PageSize) * PteSize, (ulong)ptr - va);
+
+ va += PageSize;
+ ptr += PageSize;
+ remainingSize -= PageSize;
+ }
+ }
+
+ /// <inheritdoc/>
+ public void MapForeign(ulong va, nuint hostPointer, ulong size)
+ {
+ throw new NotSupportedException();
+ }
+
+ /// <inheritdoc/>
+ public void Unmap(ulong va, ulong size)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ _addressSpace.Unmap(va, size);
+
+ UnmapEvent?.Invoke(va, size);
+ Tracking.Unmap(va, size);
+
+ RemoveMapping(va, size);
+ PtUnmap(va, size);
+ }
+
+ private void PtUnmap(ulong va, ulong size)
+ {
+ while (size != 0)
+ {
+ _pageTable.Unmap(va);
+ _flatPageTable.Write((va / PageSize) * PteSize, 0UL);
+
+ va += PageSize;
+ size -= PageSize;
+ }
+ }
+
+ /// <summary>
+ /// Checks if the virtual address is part of the addressable space.
+ /// </summary>
+ /// <param name="va">Virtual address</param>
+ /// <returns>True if the virtual address is part of the addressable space</returns>
+ private bool ValidateAddress(ulong va)
+ {
+ return va < AddressSpaceSize;
+ }
+
+ /// <summary>
+ /// Checks if the combination of virtual address and size is part of the addressable space.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+ /// <returns>True if the combination of virtual address and size is part of the addressable space</returns>
+ private bool ValidateAddressAndSize(ulong va, ulong size)
+ {
+ ulong endVa = va + size;
+ return endVa >= va && endVa >= size && endVa <= AddressSpaceSize;
+ }
+
+ /// <summary>
+ /// Ensures the combination of virtual address and size is part of the addressable space.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range in bytes</param>
+ /// <exception cref="InvalidMemoryRegionException">Thrown when the memory region specified is outside the addressable space</exception>
+ private void AssertValidAddressAndSize(ulong va, ulong size)
+ {
+ if (!ValidateAddressAndSize(va, size))
+ {
+ throw new InvalidMemoryRegionException($"va=0x{va:X16}, size=0x{size:X16}");
+ }
+ }
+
+ public T Read<T>(ulong va) where T : unmanaged
+ {
+ return MemoryMarshal.Cast<byte, T>(GetSpan(va, Unsafe.SizeOf<T>()))[0];
+ }
+
+ public T ReadTracked<T>(ulong va) where T : unmanaged
+ {
+ try
+ {
+ SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), false);
+
+ return Read<T>(va);
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+
+ return default;
+ }
+ }
+
+ public void Read(ulong va, Span<byte> data)
+ {
+ ReadImpl(va, data);
+ }
+
+ public void Write<T>(ulong va, T value) where T : unmanaged
+ {
+ Write(va, MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref value, 1)));
+ }
+
+ public void Write(ulong va, ReadOnlySpan<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return;
+ }
+
+ SignalMemoryTracking(va, (ulong)data.Length, true);
+
+ WriteImpl(va, data);
+ }
+
+ public void WriteUntracked(ulong va, ReadOnlySpan<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return;
+ }
+
+ WriteImpl(va, data);
+ }
+
+ public bool WriteWithRedundancyCheck(ulong va, ReadOnlySpan<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return false;
+ }
+
+ SignalMemoryTracking(va, (ulong)data.Length, false);
+
+ if (TryGetVirtualContiguous(va, data.Length, out MemoryBlock memoryBlock, out ulong offset))
+ {
+ var target = memoryBlock.GetSpan(offset, data.Length);
+
+ bool changed = !data.SequenceEqual(target);
+
+ if (changed)
+ {
+ data.CopyTo(target);
+ }
+
+ return changed;
+ }
+ else
+ {
+ WriteImpl(va, data);
+
+ return true;
+ }
+ }
+
+ private void WriteImpl(ulong va, ReadOnlySpan<byte> data)
+ {
+ try
+ {
+ AssertValidAddressAndSize(va, (ulong)data.Length);
+
+ ulong endVa = va + (ulong)data.Length;
+ int offset = 0;
+
+ while (va < endVa)
+ {
+ (MemoryBlock memory, ulong rangeOffset, ulong copySize) = GetMemoryOffsetAndSize(va, (ulong)(data.Length - offset));
+
+ data.Slice(offset, (int)copySize).CopyTo(memory.GetSpan(rangeOffset, (int)copySize));
+
+ va += copySize;
+ offset += (int)copySize;
+ }
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+ }
+ }
+
+ public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false)
+ {
+ if (size == 0)
+ {
+ return ReadOnlySpan<byte>.Empty;
+ }
+
+ if (tracked)
+ {
+ SignalMemoryTracking(va, (ulong)size, false);
+ }
+
+ if (TryGetVirtualContiguous(va, size, out MemoryBlock memoryBlock, out ulong offset))
+ {
+ return memoryBlock.GetSpan(offset, size);
+ }
+ else
+ {
+ Span<byte> data = new byte[size];
+
+ ReadImpl(va, data);
+
+ return data;
+ }
+ }
+
+ public WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false)
+ {
+ if (size == 0)
+ {
+ return new WritableRegion(null, va, Memory<byte>.Empty);
+ }
+
+ if (tracked)
+ {
+ SignalMemoryTracking(va, (ulong)size, true);
+ }
+
+ if (TryGetVirtualContiguous(va, size, out MemoryBlock memoryBlock, out ulong offset))
+ {
+ return new WritableRegion(null, va, memoryBlock.GetMemory(offset, size));
+ }
+ else
+ {
+ Memory<byte> memory = new byte[size];
+
+ ReadImpl(va, memory.Span);
+
+ return new WritableRegion(this, va, memory);
+ }
+ }
+
+ public ref T GetRef<T>(ulong va) where T : unmanaged
+ {
+ if (!TryGetVirtualContiguous(va, Unsafe.SizeOf<T>(), out MemoryBlock memory, out ulong offset))
+ {
+ ThrowMemoryNotContiguous();
+ }
+
+ SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), true);
+
+ return ref memory.GetRef<T>(offset);
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public bool IsMapped(ulong va)
+ {
+ return ValidateAddress(va) && IsMappedImpl(va);
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool IsMappedImpl(ulong va)
+ {
+ ulong page = va >> PageBits;
+
+ int bit = (int)((page & 31) << 1);
+
+ int pageIndex = (int)(page >> PageToPteShift);
+ ref ulong pageRef = ref _pageBitmap[pageIndex];
+
+ ulong pte = Volatile.Read(ref pageRef);
+
+ return ((pte >> bit) & 3) != 0;
+ }
+
+ public bool IsRangeMapped(ulong va, ulong size)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ return IsRangeMappedImpl(va, size);
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private void GetPageBlockRange(ulong pageStart, ulong pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex)
+ {
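+ // Example: pages 3..39 (exclusive end 40) give pageIndex = 0, pageEndIndex = 1,
+ // a startMask keeping entries 3..31 of the first ulong and an endMask keeping entries 0..7 of the last.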
+ startMask = ulong.MaxValue << ((int)(pageStart & 31) << 1);
+ endMask = ulong.MaxValue >> (64 - ((int)(pageEnd & 31) << 1));
+
+ pageIndex = (int)(pageStart >> PageToPteShift);
+ pageEndIndex = (int)((pageEnd - 1) >> PageToPteShift);
+ }
+
+ private bool IsRangeMappedImpl(ulong va, ulong size)
+ {
+ int pages = GetPagesCount(va, size, out _);
+
+ if (pages == 1)
+ {
+ return IsMappedImpl(va);
+ }
+
+ ulong pageStart = va >> PageBits;
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ // Check if either bit in each 2 bit page entry is set.
+ // OR the block with itself shifted down by 1, and check the first bit of each entry.
+
+ ulong mask = BlockMappedMask & startMask;
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask &= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+ ulong pte = Volatile.Read(ref pageRef);
+
+ pte |= pte >> 1;
+ if ((pte & mask) != mask)
+ {
+ return false;
+ }
+
+ mask = BlockMappedMask;
+ }
+
+ return true;
+ }
+
+ private static void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
+
+ private bool TryGetVirtualContiguous(ulong va, int size, out MemoryBlock memory, out ulong offset)
+ {
+ if (_addressSpace.HasAnyPrivateAllocation(va, (ulong)size))
+ {
+ // If we have a private allocation overlapping the range,
+ // the access is only considered contiguous if it covers the entire range.
+
+ PrivateRange range = _addressSpace.GetFirstPrivateAllocation(va, (ulong)size, out _);
+
+ if (range.Memory != null && range.Size == (ulong)size)
+ {
+ memory = range.Memory;
+ offset = range.Offset;
+
+ return true;
+ }
+
+ memory = null;
+ offset = 0;
+
+ return false;
+ }
+
+ memory = _backingMemory;
+ offset = GetPhysicalAddressInternal(va);
+
+ return IsPhysicalContiguous(va, size);
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool IsPhysicalContiguousAndMapped(ulong va, int size) => IsPhysicalContiguous(va, size) && IsMapped(va);
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private bool IsPhysicalContiguous(ulong va, int size)
+ {
+ if (!ValidateAddress(va) || !ValidateAddressAndSize(va, (ulong)size))
+ {
+ return false;
+ }
+
+ int pages = GetPagesCount(va, (uint)size, out va);
+
+ for (int page = 0; page < pages - 1; page++)
+ {
+ if (!ValidateAddress(va + PageSize))
+ {
+ return false;
+ }
+
+ if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
+ {
+ return false;
+ }
+
+ va += PageSize;
+ }
+
+ return true;
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private ulong GetContiguousSize(ulong va, ulong size)
+ {
+ ulong contiguousSize = PageSize - (va & PageMask);
+
+ if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
+ {
+ return contiguousSize;
+ }
+
+ int pages = GetPagesCount(va, size, out va);
+
+ for (int page = 0; page < pages - 1; page++)
+ {
+ if (!ValidateAddress(va + PageSize))
+ {
+ return contiguousSize;
+ }
+
+ if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
+ {
+ return contiguousSize;
+ }
+
+ va += PageSize;
+ contiguousSize += PageSize;
+ }
+
+ return Math.Min(contiguousSize, size);
+ }
+
+ private (MemoryBlock, ulong, ulong) GetMemoryOffsetAndSize(ulong va, ulong size)
+ {
+ ulong endVa = va + size;
+
+ PrivateRange privateRange = _addressSpace.GetFirstPrivateAllocation(va, size, out ulong nextVa);
+
+ if (privateRange.Memory != null)
+ {
+ return (privateRange.Memory, privateRange.Offset, privateRange.Size);
+ }
+
+ ulong physSize = GetContiguousSize(va, Math.Min(size, nextVa - va));
+
+ return new(_backingMemory, GetPhysicalAddressChecked(va), physSize);
+ }
+
+ public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
+ {
+ if (!ValidateAddressAndSize(va, size))
+ {
+ return null;
+ }
+
+ var regions = new List<HostMemoryRange>();
+ ulong endVa = va + size;
+
+ try
+ {
+ while (va < endVa)
+ {
+ (MemoryBlock memory, ulong rangeOffset, ulong rangeSize) = GetMemoryOffsetAndSize(va, endVa - va);
+
+ regions.Add(new((UIntPtr)memory.GetPointer(rangeOffset, rangeSize), rangeSize));
+
+ va += rangeSize;
+ }
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ return null;
+ }
+
+ return regions;
+ }
+
+ public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
+ {
+ if (size == 0)
+ {
+ return Enumerable.Empty<MemoryRange>();
+ }
+
+ return GetPhysicalRegionsImpl(va, size);
+ }
+
+ private List<MemoryRange> GetPhysicalRegionsImpl(ulong va, ulong size)
+ {
+ if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
+ {
+ return null;
+ }
+
+ int pages = GetPagesCount(va, (uint)size, out va);
+
+ var regions = new List<MemoryRange>();
+
+ ulong regionStart = GetPhysicalAddressInternal(va);
+ ulong regionSize = PageSize;
+
+ for (int page = 0; page < pages - 1; page++)
+ {
+ if (!ValidateAddress(va + PageSize))
+ {
+ return null;
+ }
+
+ ulong newPa = GetPhysicalAddressInternal(va + PageSize);
+
+ if (GetPhysicalAddressInternal(va) + PageSize != newPa)
+ {
+ regions.Add(new MemoryRange(regionStart, regionSize));
+ regionStart = newPa;
+ regionSize = 0;
+ }
+
+ va += PageSize;
+ regionSize += PageSize;
+ }
+
+ regions.Add(new MemoryRange(regionStart, regionSize));
+
+ return regions;
+ }
+
+ private void ReadImpl(ulong va, Span<byte> data)
+ {
+ if (data.Length == 0)
+ {
+ return;
+ }
+
+ try
+ {
+ AssertValidAddressAndSize(va, (ulong)data.Length);
+
+ ulong endVa = va + (ulong)data.Length;
+ int offset = 0;
+
+ while (va < endVa)
+ {
+ (MemoryBlock memory, ulong rangeOffset, ulong copySize) = GetMemoryOffsetAndSize(va, (ulong)(data.Length - offset));
+
+ memory.GetSpan(rangeOffset, (int)copySize).CopyTo(data.Slice(offset, (int)copySize));
+
+ va += copySize;
+ offset += (int)copySize;
+ }
+ }
+ catch (InvalidMemoryRegionException)
+ {
+ if (_invalidAccessHandler == null || !_invalidAccessHandler(va))
+ {
+ throw;
+ }
+ }
+ }
+
+ /// <inheritdoc/>
+ /// <remarks>
+ /// This function also validates that the given range is both valid and mapped, and will throw if it is not.
+ /// </remarks>
+ public void SignalMemoryTracking(ulong va, ulong size, bool write, bool precise = false, int? exemptId = null)
+ {
+ AssertValidAddressAndSize(va, size);
+
+ if (precise)
+ {
+ Tracking.VirtualMemoryEvent(va, size, write, precise: true, exemptId);
+ return;
+ }
+
+ // Software table, used for managed memory tracking.
+
+ int pages = GetPagesCount(va, size, out _);
+ ulong pageStart = va >> PageBits;
+
+ if (pages == 1)
+ {
+ ulong tag = (ulong)(write ? HostMappedPtBits.WriteTracked : HostMappedPtBits.ReadWriteTracked);
+
+ int bit = (int)((pageStart & 31) << 1);
+
+ int pageIndex = (int)(pageStart >> PageToPteShift);
+ ref ulong pageRef = ref _pageBitmap[pageIndex];
+
+ ulong pte = Volatile.Read(ref pageRef);
+ ulong state = ((pte >> bit) & 3);
+
+ if (state >= tag)
+ {
+ Tracking.VirtualMemoryEvent(va, size, write, precise: false, exemptId);
+ return;
+ }
+ else if (state == 0)
+ {
+ ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}");
+ }
+ }
+ else
+ {
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ ulong mask = startMask;
+
+ ulong anyTrackingTag = (ulong)HostMappedPtBits.WriteTrackedReplicated;
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask &= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+
+ ulong pte = Volatile.Read(ref pageRef);
+ ulong mappedMask = mask & BlockMappedMask;
+
+ ulong mappedPte = pte | (pte >> 1);
+ if ((mappedPte & mappedMask) != mappedMask)
+ {
+ ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}");
+ }
+
+ pte &= mask;
+ if ((pte & anyTrackingTag) != 0) // Search for any tracking.
+ {
+ // Writes trigger any tracking.
+ // Only trigger tracking from reads if both bits are set on any page.
+ if (write || (pte & (pte >> 1) & BlockMappedMask) != 0)
+ {
+ Tracking.VirtualMemoryEvent(va, size, write, precise: false, exemptId);
+ break;
+ }
+ }
+
+ mask = ulong.MaxValue;
+ }
+ }
+ }
+
+ /// <summary>
+ /// Computes the number of pages in a virtual address range.
+ /// </summary>
+ /// <param name="va">Virtual address of the range</param>
+ /// <param name="size">Size of the range</param>
+ /// <param name="startVa">The virtual address of the beginning of the first page</param>
+ /// <remarks>This function does not differentiate between allocated and unallocated pages.</remarks>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private int GetPagesCount(ulong va, ulong size, out ulong startVa)
+ {
+ // WARNING: Always check if ulong does not overflow during the operations.
+ startVa = va & ~(ulong)PageMask;
+ ulong vaSpan = (va - startVa + size + PageMask) & ~(ulong)PageMask;
+
+ return (int)(vaSpan / PageSize);
+ }
+
+ public RegionHandle BeginTracking(ulong address, ulong size, int id)
+ {
+ return Tracking.BeginTracking(address, size, id);
+ }
+
+ public MultiRegionHandle BeginGranularTracking(ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity, int id)
+ {
+ return Tracking.BeginGranularTracking(address, size, handles, granularity, id);
+ }
+
+ public SmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity, int id)
+ {
+ return Tracking.BeginSmartGranularTracking(address, size, granularity, id);
+ }
+
+ /// <summary>
+ /// Adds the given address mapping to the page table.
+ /// </summary>
+ /// <param name="va">Virtual memory address</param>
+ /// <param name="size">Size to be mapped</param>
+ private void AddMapping(ulong va, ulong size)
+ {
+ int pages = GetPagesCount(va, size, out _);
+ ulong pageStart = va >> PageBits;
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ ulong mask = startMask;
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask &= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+
+ ulong pte;
+ ulong mappedMask;
+
+ // Map all 2-bit entries that are unmapped.
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+
+ mappedMask = pte | (pte >> 1);
+ mappedMask |= (mappedMask & BlockMappedMask) << 1;
+ mappedMask |= ~mask; // Treat everything outside the range as mapped, thus unchanged.
+ }
+ while (Interlocked.CompareExchange(ref pageRef, (pte & mappedMask) | (BlockMappedMask & (~mappedMask)), pte) != pte);
+
+ mask = ulong.MaxValue;
+ }
+ }
+
+ /// <summary>
+ /// Removes the given address mapping from the page table.
+ /// </summary>
+ /// <param name="va">Virtual memory address</param>
+ /// <param name="size">Size to be unmapped</param>
+ private void RemoveMapping(ulong va, ulong size)
+ {
+ int pages = GetPagesCount(va, size, out _);
+ ulong pageStart = va >> PageBits;
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ startMask = ~startMask;
+ endMask = ~endMask;
+
+ ulong mask = startMask;
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask |= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+ ulong pte;
+
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+ }
+ while (Interlocked.CompareExchange(ref pageRef, pte & mask, pte) != pte);
+
+ mask = 0;
+ }
+ }
+
+ private ulong GetPhysicalAddressChecked(ulong va)
+ {
+ if (!IsMapped(va))
+ {
+ ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}");
+ }
+
+ return GetPhysicalAddressInternal(va);
+ }
+
+ private ulong GetPhysicalAddressInternal(ulong va)
+ {
+ return _pageTable.Read(va) + (va & PageMask);
+ }
+
+ private static void ThrowInvalidMemoryRegionException(string message) => throw new InvalidMemoryRegionException(message);
+
+ /// <inheritdoc/>
+ public void Reprotect(ulong va, ulong size, MemoryPermission protection)
+ {
+ // TODO
+ }
+
+ /// <inheritdoc/>
+ public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection)
+ {
+ // Protection is inverted on software pages, since the default value is 0.
+ protection = (~protection) & MemoryPermission.ReadAndWrite;
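+ // For example, a request for ReadAndWrite (nothing tracked) becomes None here and maps to
+ // the plain Mapped tag below, while a request for None (all accesses tracked) becomes
+ // ReadAndWrite and maps to ReadWriteTracked.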
+
+ int pages = GetPagesCount(va, size, out va);
+ ulong pageStart = va >> PageBits;
+
+ if (pages == 1)
+ {
+ ulong protTag = protection switch
+ {
+ MemoryPermission.None => (ulong)HostMappedPtBits.Mapped,
+ MemoryPermission.Write => (ulong)HostMappedPtBits.WriteTracked,
+ _ => (ulong)HostMappedPtBits.ReadWriteTracked,
+ };
+
+ int bit = (int)((pageStart & 31) << 1);
+
+ ulong tagMask = 3UL << bit;
+ ulong invTagMask = ~tagMask;
+
+ ulong tag = protTag << bit;
+
+ int pageIndex = (int)(pageStart >> PageToPteShift);
+ ref ulong pageRef = ref _pageBitmap[pageIndex];
+
+ ulong pte;
+
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+ }
+ while ((pte & tagMask) != 0 && Interlocked.CompareExchange(ref pageRef, (pte & invTagMask) | tag, pte) != pte);
+ }
+ else
+ {
+ ulong pageEnd = pageStart + (ulong)pages;
+
+ GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex);
+
+ ulong mask = startMask;
+
+ ulong protTag = protection switch
+ {
+ MemoryPermission.None => (ulong)HostMappedPtBits.MappedReplicated,
+ MemoryPermission.Write => (ulong)HostMappedPtBits.WriteTrackedReplicated,
+ _ => (ulong)HostMappedPtBits.ReadWriteTrackedReplicated,
+ };
+
+ while (pageIndex <= pageEndIndex)
+ {
+ if (pageIndex == pageEndIndex)
+ {
+ mask &= endMask;
+ }
+
+ ref ulong pageRef = ref _pageBitmap[pageIndex++];
+
+ ulong pte;
+ ulong mappedMask;
+
+ // Change the protection of all 2-bit entries that are mapped.
+ do
+ {
+ pte = Volatile.Read(ref pageRef);
+
+ mappedMask = pte | (pte >> 1);
+ mappedMask |= (mappedMask & BlockMappedMask) << 1;
+ mappedMask &= mask; // Only update mapped pages within the given range.
+ }
+ while (Interlocked.CompareExchange(ref pageRef, (pte & (~mappedMask)) | (protTag & mappedMask), pte) != pte);
+
+ mask = ulong.MaxValue;
+ }
+ }
+
+ protection = protection switch
+ {
+ MemoryPermission.None => MemoryPermission.ReadAndWrite,
+ MemoryPermission.Write => MemoryPermission.Read,
+ _ => MemoryPermission.None,
+ };
+
+ _addressSpace.Reprotect(va, size, protection, Tracking);
+ }
+
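Because the software page states default to zero, the requested permission is inverted before being encoded, and the host protection applied at the end is derived from that inverted value. A worked summary of the switch arms above for the permissions tracking normally requests (no new behaviour, just the three paths spelled out):

// requested       inverted        bitmap tag          host protection applied
// ReadAndWrite -> None         -> Mapped           -> ReadAndWrite (direct access, no tracking)
// Read         -> Write        -> WriteTracked     -> Read         (writes fault and are tracked)
// None         -> ReadAndWrite -> ReadWriteTracked -> None         (any access faults and is tracked)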
+ /// <summary>
+ /// Disposes of resources used by the memory manager.
+ /// </summary>
+ protected override void Destroy()
+ {
+ _addressSpace.Dispose();
+ }
+ }
+}
diff --git a/src/Ryujinx.Cpu/MemoryEhMeilleure.cs b/src/Ryujinx.Cpu/MemoryEhMeilleure.cs
index 54e232d9c..d3763c777 100644
--- a/src/Ryujinx.Cpu/MemoryEhMeilleure.cs
+++ b/src/Ryujinx.Cpu/MemoryEhMeilleure.cs
@@ -1,4 +1,4 @@
-using ARMeilleure.Signal;
+using ARMeilleure.Signal;
using Ryujinx.Memory;
using Ryujinx.Memory.Tracking;
using System;
@@ -8,19 +8,21 @@ namespace Ryujinx.Cpu
{
public class MemoryEhMeilleure : IDisposable
{
- private delegate bool TrackingEventDelegate(ulong address, ulong size, bool write);
+ public delegate bool TrackingEventDelegate(ulong address, ulong size, bool write);
private readonly TrackingEventDelegate _trackingEvent;
private readonly ulong _baseAddress;
private readonly ulong _mirrorAddress;
- public MemoryEhMeilleure(MemoryBlock addressSpace, MemoryBlock addressSpaceMirror, MemoryTracking tracking)
+ public MemoryEhMeilleure(MemoryBlock addressSpace, MemoryBlock addressSpaceMirror, MemoryTracking tracking, TrackingEventDelegate trackingEvent = null)
{
_baseAddress = (ulong)addressSpace.Pointer;
+
ulong endAddress = _baseAddress + addressSpace.Size;
- _trackingEvent = tracking.VirtualMemoryEvent;
+ _trackingEvent = trackingEvent ?? tracking.VirtualMemoryEvent;
+
bool added = NativeSignalHandler.AddTrackedRegion((nuint)_baseAddress, (nuint)endAddress, Marshal.GetFunctionPointerForDelegate(_trackingEvent));
if (!added)
@@ -28,7 +30,7 @@ namespace Ryujinx.Cpu
throw new InvalidOperationException("Number of allowed tracked regions exceeded.");
}
- if (OperatingSystem.IsWindows())
+ if (OperatingSystem.IsWindows() && addressSpaceMirror != null)
{
// Add a tracking event with no signal handler for the mirror on Windows.
// The native handler has its own code to check for the partial overlap race when regions are protected by accident,
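With the constructor change above, a memory manager that wants to handle faults itself can now supply its own delegate and omit the mirror block. A minimal, hypothetical usage sketch (manager, addressSpace, tracking and HandleFault are illustrative names, not part of this diff):

var exceptionHandler = new MemoryEhMeilleure(addressSpace, null, tracking,
    (address, size, write) => manager.HandleFault(address, size, write));

When no delegate is supplied, behaviour is unchanged: the null-coalescing fallback keeps routing faults to tracking.VirtualMemoryEvent, and the Windows mirror registration still runs whenever a mirror block is provided.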
diff --git a/src/Ryujinx.Cpu/PrivateMemoryAllocator.cs b/src/Ryujinx.Cpu/PrivateMemoryAllocator.cs
index ce8e83419..8db74f1e9 100644
--- a/src/Ryujinx.Cpu/PrivateMemoryAllocator.cs
+++ b/src/Ryujinx.Cpu/PrivateMemoryAllocator.cs
@@ -143,7 +143,7 @@ namespace Ryujinx.Cpu
}
}
- public PrivateMemoryAllocator(int blockAlignment, MemoryAllocationFlags allocationFlags) : base(blockAlignment, allocationFlags)
+ public PrivateMemoryAllocator(ulong blockAlignment, MemoryAllocationFlags allocationFlags) : base(blockAlignment, allocationFlags)
{
}
@@ -180,10 +180,10 @@ namespace Ryujinx.Cpu
private readonly List<T> _blocks;
- private readonly int _blockAlignment;
+ private readonly ulong _blockAlignment;
private readonly MemoryAllocationFlags _allocationFlags;
- public PrivateMemoryAllocatorImpl(int blockAlignment, MemoryAllocationFlags allocationFlags)
+ public PrivateMemoryAllocatorImpl(ulong blockAlignment, MemoryAllocationFlags allocationFlags)
{
_blocks = new List<T>();
_blockAlignment = blockAlignment;
@@ -212,7 +212,7 @@ namespace Ryujinx.Cpu
}
}
- ulong blockAlignedSize = BitUtils.AlignUp(size, (ulong)_blockAlignment);
+ ulong blockAlignedSize = BitUtils.AlignUp(size, _blockAlignment);
var memory = new MemoryBlock(blockAlignedSize, _allocationFlags);
var newBlock = createBlock(memory, blockAlignedSize);
diff --git a/src/Ryujinx.HLE/HOS/ArmProcessContextFactory.cs b/src/Ryujinx.HLE/HOS/ArmProcessContextFactory.cs
index bec2722ef..533347690 100644
--- a/src/Ryujinx.HLE/HOS/ArmProcessContextFactory.cs
+++ b/src/Ryujinx.HLE/HOS/ArmProcessContextFactory.cs
@@ -1,4 +1,4 @@
-using Ryujinx.Common.Configuration;
+using Ryujinx.Common.Configuration;
using Ryujinx.Common.Logging;
using Ryujinx.Cpu;
using Ryujinx.Cpu.AppleHv;
@@ -67,7 +67,7 @@ namespace Ryujinx.HLE.HOS
AddressSpace addressSpace = null;
- if (mode == MemoryManagerMode.HostMapped || mode == MemoryManagerMode.HostMappedUnsafe)
+ if ((mode == MemoryManagerMode.HostMapped || mode == MemoryManagerMode.HostMappedUnsafe) && MemoryBlock.GetPageSize() <= 0x1000)
{
if (!AddressSpace.TryCreate(context.Memory, addressSpaceSize, MemoryBlock.GetPageSize() == MemoryManagerHostMapped.PageSize, out addressSpace))
{
@@ -86,13 +86,21 @@ namespace Ryujinx.HLE.HOS
case MemoryManagerMode.HostMapped:
case MemoryManagerMode.HostMappedUnsafe:
- if (addressSpaceSize != addressSpace.AddressSpaceSize)
+ if (addressSpace == null)
{
- Logger.Warning?.Print(LogClass.Emulation, $"Allocated address space (0x{addressSpace.AddressSpaceSize:X}) is smaller than guest application requirements (0x{addressSpaceSize:X})");
+ var memoryManagerHostTracked = new MemoryManagerHostTracked(context.Memory, addressSpaceSize, invalidAccessHandler);
+ processContext = new ArmProcessContext(pid, cpuEngine, _gpu, memoryManagerHostTracked, addressSpaceSize, for64Bit);
}
+ else
+ {
+ if (addressSpaceSize != addressSpace.AddressSpaceSize)
+ {
+ Logger.Warning?.Print(LogClass.Emulation, $"Allocated address space (0x{addressSpace.AddressSpaceSize:X}) is smaller than guest application requirements (0x{addressSpaceSize:X})");
+ }
- var memoryManagerHostMapped = new MemoryManagerHostMapped(addressSpace, mode == MemoryManagerMode.HostMappedUnsafe, invalidAccessHandler);
- processContext = new ArmProcessContext(pid, cpuEngine, _gpu, memoryManagerHostMapped, addressSpace.AddressSpaceSize, for64Bit);
+ var memoryManagerHostMapped = new MemoryManagerHostMapped(addressSpace, mode == MemoryManagerMode.HostMappedUnsafe, invalidAccessHandler);
+ processContext = new ArmProcessContext(pid, cpuEngine, _gpu, memoryManagerHostMapped, addressSpace.AddressSpaceSize, for64Bit);
+ }
break;
default:
diff --git a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
index 543acb7a0..d7b601d1c 100644
--- a/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
+++ b/src/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
@@ -165,6 +165,29 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// <inheritdoc/>
protected override Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size)
{
+ ulong backingStart = (ulong)Context.Memory.Pointer;
+ ulong backingEnd = backingStart + Context.Memory.Size;
+
+ KPageList pageList = new();
+
+ foreach (HostMemoryRange region in regions)
+ {
+ // If the range is inside the physical memory, it is shared and we should increment the page count,
+ // otherwise it is private and we don't need to increment the page count.
+
+ if (region.Address >= backingStart && region.Address < backingEnd)
+ {
+ pageList.AddRange(region.Address - backingStart + DramMemoryMap.DramBase, region.Size / PageSize);
+ }
+ }
+
+ using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList);
+
+ foreach (var pageNode in pageList)
+ {
+ Context.CommitMemory(pageNode.Address - DramMemoryMap.DramBase, pageNode.PagesCount * PageSize);
+ }
+
ulong offset = 0;
foreach (var region in regions)
@@ -174,6 +197,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
offset += region.Size;
}
+ scopedPageList.SignalSuccess();
+
return Result.Success;
}
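The loop above only increments the page count for, and commits, ranges that actually live inside the guest RAM backing block; anything else is a private host allocation and is skipped. A worked example of the address conversion, with made-up addresses and DramMemoryMap.DramBase taken as 0x8000_0000 for illustration:

// backingStart    = 0x7F00_0000_0000   (host address of the backing block)
// region.Address  = 0x7F00_0010_0000, region.Size = 0x4000
// DRAM address    = region.Address - backingStart + DramBase = 0x10_0000 + 0x8000_0000 = 0x8010_0000
// pages added     = 0x4000 / PageSize = 4 (assuming 4 KiB pages)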
diff --git a/src/Ryujinx.Memory/AddressSpaceManager.cs b/src/Ryujinx.Memory/AddressSpaceManager.cs
index 021d33663..05447ae39 100644
--- a/src/Ryujinx.Memory/AddressSpaceManager.cs
+++ b/src/Ryujinx.Memory/AddressSpaceManager.cs
@@ -293,9 +293,9 @@ namespace Ryujinx.Memory
{
var hostRegion = hostRegions[i];
- if ((ulong)hostRegion.Address >= backingStart && (ulong)hostRegion.Address < backingEnd)
+ if (hostRegion.Address >= backingStart && hostRegion.Address < backingEnd)
{
- regions[count++] = new MemoryRange((ulong)hostRegion.Address - backingStart, hostRegion.Size);
+ regions[count++] = new MemoryRange(hostRegion.Address - backingStart, hostRegion.Size);
}
}