Merge branch 'host-tracked-clean' into new-jit-ht

This commit is contained in:
Gabriel A 2024-01-04 18:00:35 -03:00
commit 133f7d10de
16 changed files with 2246 additions and 28 deletions

View File

@ -157,7 +157,7 @@ namespace ARMeilleure.Instructions
context.Copy(temp, value);
if (!context.Memory.Type.IsHostMapped())
if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@ -198,7 +198,7 @@ namespace ARMeilleure.Instructions
SetInt(context, rt, value);
if (!context.Memory.Type.IsHostMapped())
if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@ -265,7 +265,7 @@ namespace ARMeilleure.Instructions
context.Copy(GetVec(rt), value);
if (!context.Memory.Type.IsHostMapped())
if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@ -312,7 +312,7 @@ namespace ARMeilleure.Instructions
break;
}
if (!context.Memory.Type.IsHostMapped())
if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@ -385,7 +385,7 @@ namespace ARMeilleure.Instructions
break;
}
if (!context.Memory.Type.IsHostMapped())
if (!context.Memory.Type.IsHostMappedOrTracked())
{
context.Branch(lblEnd);
@ -403,6 +403,21 @@ namespace ARMeilleure.Instructions
{
return EmitHostMappedPointer(context, address);
}
else if (context.Memory.Type == MemoryManagerType.HostTracked)
{
Operand ptBase = !context.HasPtc
? Const(context.Memory.PageTablePointer.ToInt64())
: Const(context.Memory.PageTablePointer.ToInt64(), Ptc.PageTableSymbol);
Operand ptOffset = context.ShiftRightUI(address, Const(PageBits));
if (ptOffset.Type == OperandType.I32)
{
ptOffset = context.ZeroExtend32(OperandType.I64, ptOffset);
}
return context.Add(address, context.Load(OperandType.I64, context.Add(ptBase, context.ShiftLeft(ptOffset, Const(3)))));
}
int ptLevelBits = context.Memory.AddressSpaceBits - PageBits;
int ptLevelSize = 1 << ptLevelBits;

View File

@ -18,6 +18,12 @@ namespace ARMeilleure.Memory
/// </summary>
SoftwarePageTable,
/// <summary>
/// High level implementation using a software flat page table for address translation,
/// no support for handling invalid or non-contiguous memory access.
/// </summary>
HostTracked,
/// <summary>
/// High level implementation with mappings managed by the host OS, effectively using hardware
/// page tables. No address translation is performed in software and the memory is just accessed directly.
@ -37,5 +43,10 @@ namespace ARMeilleure.Memory
{
return type == MemoryManagerType.HostMapped || type == MemoryManagerType.HostMappedUnsafe;
}
/// <summary>
/// Checks if the memory manager type uses host mappings (with or without
/// software tracking) as opposed to a pure software page table.
/// </summary>
/// <param name="type">Memory manager type to check</param>
/// <returns>True for HostTracked, HostMapped or HostMappedUnsafe, false otherwise</returns>
public static bool IsHostMappedOrTracked(this MemoryManagerType type)
{
return type == MemoryManagerType.HostTracked || type == MemoryManagerType.HostMapped || type == MemoryManagerType.HostMappedUnsafe;
}
}
}

View File

@ -1,4 +1,4 @@
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.Memory;
using ARMeilleure.Translation;
using ARMeilleure.Translation.Cache;

View File

@ -114,7 +114,7 @@ namespace ARMeilleure.Translation.Cache
{
int stackOffset = entry.StackOffsetOrAllocSize;
Debug.Assert(stackOffset % 16 == 0);
// Debug.Assert(stackOffset % 16 == 0);
if (stackOffset <= 0xFFFF0)
{
@ -135,7 +135,7 @@ namespace ARMeilleure.Translation.Cache
{
int allocSize = entry.StackOffsetOrAllocSize;
Debug.Assert(allocSize % 8 == 0);
// Debug.Assert(allocSize % 8 == 0);
if (allocSize <= 128)
{

View File

@ -80,7 +80,7 @@ namespace ARMeilleure.Translation
FunctionTable.Fill = (ulong)Stubs.SlowDispatchStub;
if (memory.Type.IsHostMapped())
if (memory.Type.IsHostMappedOrTracked())
{
NativeSignalHandler.InitializeSignalHandler(allocator.GetPageSize());
}

View File

@ -38,7 +38,7 @@ namespace Ryujinx.Cpu.AppleHv
private readonly HvIpaAllocator _ipaAllocator;
public HvMemoryBlockAllocator(HvIpaAllocator ipaAllocator, int blockAlignment) : base(blockAlignment, MemoryAllocationFlags.None)
public HvMemoryBlockAllocator(HvIpaAllocator ipaAllocator, ulong blockAlignment) : base(blockAlignment, MemoryAllocationFlags.None)
{
_ipaAllocator = ipaAllocator;
}

View File

@ -0,0 +1,612 @@
using Ryujinx.Common;
using Ryujinx.Common.Collections;
using Ryujinx.Memory;
using System;
using System.Diagnostics;
namespace Ryujinx.Cpu.Jit
{
/// <summary>
/// Describes a slice of a private (host-allocated) memory block: the backing
/// block, the byte offset of the slice inside it, and the slice length.
/// </summary>
readonly struct PrivateRange
{
    public readonly MemoryBlock Memory;
    public readonly ulong Offset;
    public readonly ulong Size;

    /// <summary>
    /// Sentinel value meaning "no private allocation here".
    /// </summary>
    public static PrivateRange Empty => new(null, 0, 0);

    /// <summary>
    /// Creates a new private range descriptor.
    /// </summary>
    /// <param name="memory">Backing memory block (may be null for <see cref="Empty"/>)</param>
    /// <param name="offset">Byte offset of the range inside <paramref name="memory"/></param>
    /// <param name="size">Length of the range in bytes</param>
    public PrivateRange(MemoryBlock memory, ulong offset, ulong size)
    {
        (Memory, Offset, Size) = (memory, offset, size);
    }
}
/// <summary>
/// One fixed-size slice of the guest address space, backed by a host memory view.
/// Tracks which sub-ranges are mapped and which are served by private allocations.
/// </summary>
class AddressSpacePartition : IDisposable
{
// Guest page size (4KB); the host page size may be larger and is queried separately.
private const ulong GuestPageSize = 0x1000;
// Alignment (1MB) used for the private memory allocator blocks.
private const int DefaultBlockAlignment = 1 << 20;
// State of a guest range inside this partition.
private enum MappingType : byte
{
None,
Private,
}
// Interval-tree node describing a contiguous guest range with a single mapping state.
// Nodes tile the whole partition; Split/Extend keep the tiling exact.
private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>
{
public ulong Address { get; private set; }
public ulong Size { get; private set; }
public ulong EndAddress => Address + Size;
public MappingType Type { get; private set; }
public Mapping(ulong address, ulong size, MappingType type)
{
Address = address;
Size = size;
Type = type;
}
// Splits this node at splitAddress, shrinking it to the right part and
// returning the new left part (which the caller must insert into the tree).
public Mapping Split(ulong splitAddress)
{
ulong leftSize = splitAddress - Address;
ulong rightSize = EndAddress - splitAddress;
Mapping left = new(Address, leftSize, Type);
Address = splitAddress;
Size = rightSize;
return left;
}
public void UpdateState(MappingType newType)
{
Type = newType;
}
// Grows the node to absorb an adjacent (coalesced) neighbor.
public void Extend(ulong sizeDelta)
{
Size += sizeDelta;
}
// Range comparison: returns 0 on overlap, so tree lookups find the node
// containing a given address. Note EndAddress - 1UL assumes Size != 0.
public int CompareTo(Mapping other)
{
if (Address < other.Address)
{
return -1;
}
else if (Address <= other.EndAddress - 1UL)
{
return 0;
}
else
{
return 1;
}
}
}
// Interval-tree node describing a contiguous guest range and the private host
// allocation (if any) currently backing it. Nodes tile the whole partition.
private class PrivateMapping : IntrusiveRedBlackTreeNode<PrivateMapping>, IComparable<PrivateMapping>
{
    public ulong Address { get; private set; }
    public ulong Size { get; private set; }
    public ulong EndAddress => Address + Size;
    // Invalid (default) when the range has no private backing.
    public PrivateMemoryAllocation PrivateAllocation { get; private set; }

    /// <summary>
    /// Creates a new private mapping node.
    /// </summary>
    /// <param name="address">Guest start address of the range</param>
    /// <param name="size">Size of the range in bytes; must not be zero</param>
    /// <param name="privateAllocation">Backing allocation, or default for none</param>
    /// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="size"/> is zero</exception>
    public PrivateMapping(ulong address, ulong size, PrivateMemoryAllocation privateAllocation)
    {
        // A zero-sized node would break the overlap comparison below
        // (EndAddress - 1UL underflows). The original threw a bare
        // Exception("huh? size is 0") here; use the idiomatic argument exception.
        if (size == 0)
        {
            throw new ArgumentOutOfRangeException(nameof(size), "Private mapping size cannot be zero.");
        }

        Address = address;
        Size = size;
        PrivateAllocation = privateAllocation;
    }

    // Splits this node at splitAddress, also splitting the backing allocation,
    // shrinking this node to the right part and returning the new left part
    // (which the caller must insert into the tree).
    public PrivateMapping Split(ulong splitAddress)
    {
        ulong leftSize = splitAddress - Address;
        ulong rightSize = EndAddress - splitAddress;

        Debug.Assert(leftSize > 0);
        Debug.Assert(rightSize > 0);

        (var leftAllocation, PrivateAllocation) = PrivateAllocation.Split(leftSize);

        PrivateMapping left = new(Address, leftSize, leftAllocation);

        Address = splitAddress;
        Size = rightSize;

        return left;
    }

    // Maps newAllocation into the partition's base block at this node's range
    // and takes ownership of it.
    public void Map(AddressSpacePartitionAllocation baseBlock, ulong baseAddress, PrivateMemoryAllocation newAllocation)
    {
        baseBlock.MapView(newAllocation.Memory, newAllocation.Offset, Address - baseAddress, Size);
        PrivateAllocation = newAllocation;
    }

    // Unmaps and frees the backing allocation, if any, leaving the node unbacked.
    public void Unmap(AddressSpacePartitionAllocation baseBlock, ulong baseAddress)
    {
        if (PrivateAllocation.IsValid)
        {
            baseBlock.UnmapView(PrivateAllocation.Memory, Address - baseAddress, Size);
            PrivateAllocation.Dispose();
        }

        PrivateAllocation = default;
    }

    // Grows the node to absorb an adjacent (coalesced) neighbor.
    public void Extend(ulong sizeDelta)
    {
        Size += sizeDelta;
    }

    // Range comparison: returns 0 on overlap, so tree lookups find the node
    // containing a given address.
    public int CompareTo(PrivateMapping other)
    {
        if (Address < other.Address)
        {
            return -1;
        }
        else if (Address <= other.EndAddress - 1UL)
        {
            return 0;
        }
        else
        {
            return 1;
        }
    }
}
// Guest RAM backing block shared by all partitions.
private readonly MemoryBlock _backingMemory;
// Host view for this partition (plus trailing bridge pages).
private readonly AddressSpacePartitionAllocation _baseMemory;
// Allocator used for ranges that need private (non-backing) host memory.
private readonly PrivateMemoryAllocator _privateMemoryAllocator;
// Mapping-state tiling of the partition range.
private readonly IntrusiveRedBlackTree<Mapping> _mappingTree;
// Private-allocation tiling of the partition range.
private readonly IntrusiveRedBlackTree<PrivateMapping> _privateTree;
// Guards both trees.
private readonly object _treeLock;
private readonly ulong _hostPageSize;
// Physical addresses of the first/last guest pages, null while unmapped.
private ulong? _firstPagePa;
private ulong? _lastPagePa;
// Last values used to build the end bridge; ulong.MaxValue means "not built".
private ulong _cachedFirstPagePa;
private ulong _cachedLastPagePa;
// True when the two bridge pages after the partition are currently mapped.
private bool _hasBridgeAtEnd;
// Protection to re-apply to the bridge's first page when it is (re)created.
private MemoryPermission _lastPageProtection;
public ulong Address { get; }
public ulong Size { get; }
public ulong EndAddress => Address + Size;
/// <summary>
/// Creates a partition covering [address, address + size) of the guest address space.
/// Both trees start with a single node spanning the whole range (unmapped/unbacked).
/// </summary>
public AddressSpacePartition(AddressSpacePartitionAllocation baseMemory, MemoryBlock backingMemory, ulong address, ulong size)
{
_privateMemoryAllocator = new PrivateMemoryAllocator(DefaultBlockAlignment, MemoryAllocationFlags.Mirrorable);
_mappingTree = new IntrusiveRedBlackTree<Mapping>();
_privateTree = new IntrusiveRedBlackTree<PrivateMapping>();
_treeLock = new object();
_mappingTree.Add(new Mapping(address, size, MappingType.None));
_privateTree.Add(new PrivateMapping(address, size, default));
_hostPageSize = MemoryBlock.GetPageSize();
_backingMemory = backingMemory;
_baseMemory = baseMemory;
_cachedFirstPagePa = ulong.MaxValue;
_cachedLastPagePa = ulong.MaxValue;
_lastPageProtection = MemoryPermission.ReadAndWrite;
Address = address;
Size = size;
}
/// <summary>
/// Returns true when nothing is mapped on this partition, i.e. the mapping
/// tree has collapsed back to a single unmapped node spanning the whole range.
/// </summary>
public bool IsEmpty()
{
    lock (_treeLock)
    {
        Mapping node = _mappingTree.GetNode(new Mapping(Address, Size, MappingType.None));

        return node is { Type: MappingType.None } && node.Address == Address && node.Size == Size;
    }
}
/// <summary>
/// Maps a guest range inside this partition to backing memory at the given physical address.
/// </summary>
public void Map(ulong va, ulong pa, ulong size)
{
Debug.Assert(va >= Address);
Debug.Assert(va + size <= EndAddress);
// Remember the PA of the partition's first page; the previous partition's
// bridge needs it.
if (va == Address)
{
_firstPagePa = pa;
}
// Remember the PA of the partition's last guest page; our own end bridge needs it.
if (va <= EndAddress - GuestPageSize && va + size > EndAddress - GuestPageSize)
{
_lastPagePa = pa + ((EndAddress - GuestPageSize) - va);
}
lock (_treeLock)
{
Update(va, pa, size, MappingType.Private);
}
}
/// <summary>
/// Unmaps a guest range inside this partition, freeing any private backing it had.
/// </summary>
public void Unmap(ulong va, ulong size)
{
Debug.Assert(va >= Address);
Debug.Assert(va + size <= EndAddress);
// Invalidate the cached first/last page PAs if they fall in the unmapped range.
if (va == Address)
{
_firstPagePa = null;
}
if (va <= EndAddress - GuestPageSize && va + size > EndAddress - GuestPageSize)
{
_lastPagePa = null;
}
lock (_treeLock)
{
Update(va, 0UL, size, MappingType.None);
}
}
/// <summary>
/// Changes the host protection of a guest range inside this partition.
/// </summary>
public void Reprotect(ulong va, ulong size, MemoryPermission protection)
{
Debug.Assert(va >= Address);
Debug.Assert(va + size <= EndAddress);
_baseMemory.Reprotect(va - Address, size, protection, false);
if (va == EndAddress - _hostPageSize)
{
// Protections at the last page also applies to the bridge, if we have one.
// (This is because last page access is always done on the bridge, not on our base mapping,
// for the cases where access crosses a page boundary and reaches the non-contiguous next mapping).
if (_hasBridgeAtEnd)
{
_baseMemory.Reprotect(Size, size, protection, false);
}
// Remembered so a later bridge rebuild re-applies the same protection.
_lastPageProtection = protection;
}
}
/// <summary>
/// Gets a host pointer for a guest range inside this partition.
/// Accesses touching the last host page go through the bridge copy when one exists.
/// </summary>
public IntPtr GetPointer(ulong va, ulong size)
{
Debug.Assert(va >= Address);
Debug.Assert(va + size <= EndAddress);
if (va >= EndAddress - _hostPageSize && _hasBridgeAtEnd)
{
return _baseMemory.GetPointer(Size + va - (EndAddress - _hostPageSize), size);
}
return _baseMemory.GetPointer(va - Address, size);
}
/// <summary>
/// (Re)builds the two "bridge" pages placed after this partition's view so that
/// accesses crossing the partition boundary into <paramref name="partitionAfter"/>
/// stay contiguous on the host. No-op if the relevant page PAs did not change.
/// </summary>
public void InsertBridgeAtEnd(AddressSpacePartition partitionAfter, Action<ulong, IntPtr, ulong> updatePtCallback)
{
// ulong.MaxValue stands in for "page not mapped" in the cache comparison.
ulong firstPagePa = partitionAfter._firstPagePa.HasValue ? partitionAfter._firstPagePa.Value : ulong.MaxValue;
ulong lastPagePa = _lastPagePa.HasValue ? _lastPagePa.Value : ulong.MaxValue;
if (firstPagePa != _cachedFirstPagePa || lastPagePa != _cachedLastPagePa)
{
if (partitionAfter._firstPagePa.HasValue && _lastPagePa.HasValue)
{
// Both sides are mapped: map our last page and the next partition's
// first page side by side right after our view, at offsets Size and
// Size + hostPageSize, then point the page table at the bridge copy.
(MemoryBlock firstPageMemory, ulong firstPageOffset) = partitionAfter.GetFirstPageMemoryAndOffset();
(MemoryBlock lastPageMemory, ulong lastPageOffset) = GetLastPageMemoryAndOffset();
_baseMemory.MapView(lastPageMemory, lastPageOffset, Size, _hostPageSize);
_baseMemory.MapView(firstPageMemory, firstPageOffset, Size + _hostPageSize, _hostPageSize);
// Re-apply the protection that was last set on the final page.
_baseMemory.Reprotect(Size, _hostPageSize, _lastPageProtection, false);
updatePtCallback(EndAddress - _hostPageSize, _baseMemory.GetPointer(Size, _hostPageSize), _hostPageSize);
_hasBridgeAtEnd = true;
}
else
{
// One side is unmapped: tear the bridge down and point the page table
// back at the direct mapping of our last page, if we still have one.
if (_lastPagePa.HasValue)
{
(MemoryBlock lastPageMemory, ulong lastPageOffset) = GetLastPageMemoryAndOffset();
updatePtCallback(EndAddress - _hostPageSize, lastPageMemory.GetPointer(lastPageOffset, _hostPageSize), _hostPageSize);
}
_hasBridgeAtEnd = false;
}
_cachedFirstPagePa = firstPagePa;
_cachedLastPagePa = lastPagePa;
}
}
/// <summary>
/// Removes the end bridge (if any), restoring the page table entry of the last
/// page to the direct mapping, and invalidates the bridge cache.
/// </summary>
public void RemoveBridgeFromEnd(Action<ulong, IntPtr, ulong> updatePtCallback)
{
if (_lastPagePa.HasValue)
{
(MemoryBlock lastPageMemory, ulong lastPageOffset) = GetLastPageMemoryAndOffset();
updatePtCallback(EndAddress - _hostPageSize, lastPageMemory.GetPointer(lastPageOffset, _hostPageSize), _hostPageSize);
}
_cachedFirstPagePa = ulong.MaxValue;
_cachedLastPagePa = ulong.MaxValue;
_hasBridgeAtEnd = false;
}
// Returns the memory block and offset backing this partition's first guest page:
// the private allocation covering it when one exists, otherwise the shared
// backing memory at _firstPagePa (which must be set by a prior Map call).
private (MemoryBlock, ulong) GetFirstPageMemoryAndOffset()
{
lock (_treeLock)
{
PrivateMapping map = _privateTree.GetNode(new PrivateMapping(Address, 1UL, default));
if (map != null && map.PrivateAllocation.IsValid)
{
return (map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (Address - map.Address));
}
}
return (_backingMemory, _firstPagePa.Value);
}
// Same as above, but for the last HOST page of the partition; the backing
// offset is aligned down to the host page size.
private (MemoryBlock, ulong) GetLastPageMemoryAndOffset()
{
lock (_treeLock)
{
ulong pageAddress = EndAddress - _hostPageSize;
PrivateMapping map = _privateTree.GetNode(new PrivateMapping(pageAddress, 1UL, default));
if (map != null && map.PrivateAllocation.IsValid)
{
return (map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (pageAddress - map.Address));
}
}
return (_backingMemory, _lastPagePa.Value & ~(_hostPageSize - 1));
}
// Entry point for mapping-state changes: finds the node containing va and
// walks forward. Caller must hold _treeLock.
private void Update(ulong va, ulong pa, ulong size, MappingType type)
{
Mapping map = _mappingTree.GetNode(new Mapping(va, 1UL, MappingType.None));
Update(map, va, pa, size, type);
}
// Sets [va, va + size) to the given mapping type, splitting boundary nodes,
// mapping/unmapping private memory as needed and re-coalescing equal neighbors.
// NOTE: pa is currently unused here; the backing offset is not stored per node.
private Mapping Update(Mapping map, ulong va, ulong pa, ulong size, MappingType type)
{
ulong endAddress = va + size;
for (; map != null; map = map.Successor)
{
// Trim the node so it starts exactly at va.
if (map.Address < va)
{
_mappingTree.Add(map.Split(va));
}
// Trim the node so it ends exactly at endAddress.
if (map.EndAddress > endAddress)
{
Mapping newMap = map.Split(endAddress);
_mappingTree.Add(newMap);
map = newMap;
}
switch (type)
{
case MappingType.None:
// Only release private pages whose neighbors (within host page
// alignment) are also unmapped, so shared host pages stay alive.
ulong alignment = MemoryBlock.GetPageSize();
bool unmappedBefore = map.Predecessor == null ||
(map.Predecessor.Type == MappingType.None && map.Predecessor.Address <= BitUtils.AlignDown(va, alignment));
bool unmappedAfter = map.Successor == null ||
(map.Successor.Type == MappingType.None && map.Successor.EndAddress >= BitUtils.AlignUp(endAddress, alignment));
UnmapPrivate(va, size, unmappedBefore, unmappedAfter);
break;
case MappingType.Private:
MapPrivate(va, size);
break;
}
map.UpdateState(type);
map = TryCoalesce(map);
// Stop once the node reaches (or passes) the end of the updated range.
if (map.EndAddress >= endAddress)
{
break;
}
}
return map;
}
// Merges map with equal-typed neighbors, removing absorbed nodes from the tree.
private Mapping TryCoalesce(Mapping map)
{
Mapping previousMap = map.Predecessor;
Mapping nextMap = map.Successor;
if (previousMap != null && CanCoalesce(previousMap, map))
{
previousMap.Extend(map.Size);
_mappingTree.Remove(map);
map = previousMap;
}
if (nextMap != null && CanCoalesce(map, nextMap))
{
map.Extend(nextMap.Size);
_mappingTree.Remove(nextMap);
}
return map;
}
// Two mapping nodes can merge iff they have the same state.
private static bool CanCoalesce(Mapping left, Mapping right)
{
return left.Type == right.Type;
}
// Allocates private host memory for every unbacked node overlapping [va, va + size),
// expanding to host-page alignment. Caller must hold _treeLock.
private void MapPrivate(ulong va, ulong size)
{
ulong endAddress = va + size;
ulong alignment = MemoryBlock.GetPageSize();
// Expand the range outwards based on page size to ensure that at least the requested region is mapped.
ulong vaAligned = BitUtils.AlignDown(va, alignment);
ulong endAddressAligned = BitUtils.AlignUp(endAddress, alignment);
PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));
for (; map != null; map = map.Successor)
{
if (!map.PrivateAllocation.IsValid)
{
// Split so only the aligned target range gets a fresh allocation.
if (map.Address < vaAligned)
{
_privateTree.Add(map.Split(vaAligned));
}
if (map.EndAddress > endAddressAligned)
{
PrivateMapping newMap = map.Split(endAddressAligned);
_privateTree.Add(newMap);
map = newMap;
}
map.Map(_baseMemory, Address, _privateMemoryAllocator.Allocate(map.Size, MemoryBlock.GetPageSize()));
}
if (map.EndAddress >= endAddressAligned)
{
break;
}
}
}
// Frees private host memory over [va, va + size). Caller must hold _treeLock.
private void UnmapPrivate(ulong va, ulong size, bool unmappedBefore, bool unmappedAfter)
{
ulong endAddress = va + size;
ulong alignment = MemoryBlock.GetPageSize();
// If the adjacent mappings are unmapped, expand the range outwards,
// otherwise shrink it inwards. We must ensure we won't unmap pages that might still be in use.
ulong vaAligned = unmappedBefore ? BitUtils.AlignDown(va, alignment) : BitUtils.AlignUp(va, alignment);
ulong endAddressAligned = unmappedAfter ? BitUtils.AlignUp(endAddress, alignment) : BitUtils.AlignDown(endAddress, alignment);
// After shrinking, the range may be empty; nothing can be freed safely then.
if (endAddressAligned <= vaAligned)
{
return;
}
PrivateMapping map = _privateTree.GetNode(new PrivateMapping(vaAligned, 1UL, default));
for (; map != null; map = map.Successor)
{
if (map.PrivateAllocation.IsValid)
{
if (map.Address < vaAligned)
{
_privateTree.Add(map.Split(vaAligned));
}
if (map.EndAddress > endAddressAligned)
{
PrivateMapping newMap = map.Split(endAddressAligned);
_privateTree.Add(newMap);
map = newMap;
}
map.Unmap(_baseMemory, Address);
map = TryCoalesce(map);
}
if (map.EndAddress >= endAddressAligned)
{
break;
}
}
}
// Merges map with unbacked neighbors, removing absorbed nodes from the tree.
private PrivateMapping TryCoalesce(PrivateMapping map)
{
PrivateMapping previousMap = map.Predecessor;
PrivateMapping nextMap = map.Successor;
if (previousMap != null && CanCoalesce(previousMap, map))
{
previousMap.Extend(map.Size);
_privateTree.Remove(map);
map = previousMap;
}
if (nextMap != null && CanCoalesce(map, nextMap))
{
map.Extend(nextMap.Size);
_privateTree.Remove(nextMap);
}
return map;
}
// Only unbacked nodes can merge; merging backed nodes would need to merge
// their separate allocations.
private static bool CanCoalesce(PrivateMapping left, PrivateMapping right)
{
return !left.PrivateAllocation.IsValid && !right.PrivateAllocation.IsValid;
}
/// <summary>
/// Returns the private allocation (if any) covering <paramref name="va"/>,
/// clamped to at most <paramref name="size"/> bytes.
/// </summary>
/// <param name="va">Guest address to look up</param>
/// <param name="size">Maximum number of bytes the caller is interested in</param>
/// <param name="nextVa">Address where the next lookup should continue</param>
/// <returns>The covering private range, or <see cref="PrivateRange.Empty"/></returns>
public PrivateRange GetFirstPrivateAllocation(ulong va, ulong size, out ulong nextVa)
{
    lock (_treeLock)
    {
        PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));

        // Guard before dereferencing: the original assigned map.EndAddress to
        // nextVa before the null check, throwing NRE whenever the lookup missed.
        if (map == null)
        {
            nextVa = EndAddress;
            return PrivateRange.Empty;
        }

        nextVa = map.EndAddress;

        if (map.PrivateAllocation.IsValid)
        {
            ulong startOffset = va - map.Address;

            return new(
                map.PrivateAllocation.Memory,
                map.PrivateAllocation.Offset + startOffset,
                Math.Min(map.PrivateAllocation.Size - startOffset, size));
        }
    }

    return PrivateRange.Empty;
}
/// <summary>
/// Returns true if the node overlapping [va, va + size) has a private allocation.
/// </summary>
public bool HasPrivateAllocation(ulong va, ulong size)
{
lock (_treeLock)
{
PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, size, default));
return map != null && map.PrivateAllocation.IsValid;
}
}
/// <summary>
/// Releases the private allocator and the partition's base memory view.
/// </summary>
public void Dispose()
{
GC.SuppressFinalize(this);
_privateMemoryAllocator?.Dispose();
_baseMemory.Dispose();
}
}
}

View File

@ -0,0 +1,174 @@
using Ryujinx.Common.Collections;
using Ryujinx.Memory;
using Ryujinx.Memory.Tracking;
using System;
namespace Ryujinx.Cpu.Jit
{
/// <summary>
/// Handle to a sub-allocation inside an <see cref="AddressSpacePartitionAllocator"/>
/// block, forwarding view/protection operations with the allocation offset applied.
/// </summary>
readonly struct AddressSpacePartitionAllocation : IDisposable
{
private readonly AddressSpacePartitionAllocator _owner;
private readonly PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>.Allocation _allocation;
// Host pointer to the start of this allocation inside the owning block.
public IntPtr Pointer => (IntPtr)((ulong)_allocation.Block.Memory.Pointer + _allocation.Offset);
public AddressSpacePartitionAllocation(
AddressSpacePartitionAllocator owner,
PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>.Allocation allocation)
{
_owner = owner;
_allocation = allocation;
}
// Records the guest VA range this allocation serves, so fault addresses inside
// the block can be translated back to guest addresses for tracking.
public void RegisterMapping(ulong va, ulong endVa, int bridgeSize)
{
_allocation.Block.AddMapping(_allocation.Offset, _allocation.Size, va, endVa, bridgeSize);
}
// Maps a view of srcBlock at dstOffset relative to this allocation.
public void MapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
{
_allocation.Block.Memory.MapView(srcBlock, srcOffset, _allocation.Offset + dstOffset, size);
}
public void UnmapView(MemoryBlock srcBlock, ulong offset, ulong size)
{
_allocation.Block.Memory.UnmapView(srcBlock, _allocation.Offset + offset, size);
}
public void Reprotect(ulong offset, ulong size, MemoryPermission permission, bool throwOnFail)
{
_allocation.Block.Memory.Reprotect(_allocation.Offset + offset, size, permission, throwOnFail);
}
public IntPtr GetPointer(ulong offset, ulong size)
{
return _allocation.Block.Memory.GetPointer(_allocation.Offset + offset, size);
}
/// <summary>
/// Unregisters the guest mapping and returns the memory to the owner allocator.
/// Takes the owner's lock to serialize with concurrent Allocate calls.
/// </summary>
public void Dispose()
{
lock (_owner.Lock)
{
_allocation.Block.RemoveMapping(_allocation.Offset, _allocation.Size);
_owner.Free(_allocation.Block, _allocation.Offset, _allocation.Size);
}
}
}
/// <summary>
/// Allocator for partition views. Each backing block is registered with memory
/// tracking so access faults inside it can be routed back to the guest tracking.
/// </summary>
class AddressSpacePartitionAllocator : PrivateMemoryAllocatorImpl<AddressSpacePartitionAllocator.Block>
{
private const ulong DefaultBlockAlignment = 1UL << 32; // 4GB
public class Block : PrivateMemoryAllocator.Block
{
private readonly MemoryTracking _tracking;
private readonly MemoryEhMeilleure _memoryEh;
// Maps a [block offset, size] range back to the guest VA range it serves,
// plus the bridge size appended after the partition view.
private class Mapping : IntrusiveRedBlackTreeNode<Mapping>, IComparable<Mapping>
{
public ulong Address { get; }
public ulong Size { get; }
public ulong EndAddress => Address + Size;
public ulong Va { get; }
public ulong EndVa { get; }
public int BridgeSize { get; }
public Mapping(ulong address, ulong size, ulong va, ulong endVa, int bridgeSize)
{
Address = address;
Size = size;
Va = va;
EndVa = endVa;
BridgeSize = bridgeSize;
}
// Range comparison: returns 0 on overlap so lookups by fault address work.
public int CompareTo(Mapping other)
{
if (Address < other.Address)
{
return -1;
}
else if (Address <= other.EndAddress - 1UL)
{
return 0;
}
else
{
return 1;
}
}
}
private readonly IntrusiveRedBlackTree<Mapping> _mappingTree;
public Block(MemoryTracking tracking, MemoryBlock memory, ulong size) : base(memory, size)
{
_tracking = tracking;
// Install the fault handler that redirects block faults to VirtualMemoryEvent.
_memoryEh = new(memory, null, tracking, VirtualMemoryEvent);
_mappingTree = new();
}
public void AddMapping(ulong offset, ulong size, ulong va, ulong endVa, int bridgeSize)
{
_mappingTree.Add(new(offset, size, va, endVa, bridgeSize));
}
public void RemoveMapping(ulong offset, ulong size)
{
_mappingTree.Remove(_mappingTree.GetNode(new Mapping(offset, size, 0, 0, 0)));
}
// Translates a fault at a block-relative address into a guest VA and forwards
// it to memory tracking. Returns false if no registered mapping covers it.
private bool VirtualMemoryEvent(ulong address, ulong size, bool write)
{
Mapping map = _mappingTree.GetNode(new Mapping(address, size, 0, 0, 0));
if (map == null)
{
return false;
}
address -= map.Address;
// Faults past the partition size landed on the bridge pages; shift them
// back so they resolve to the guest address of the last page.
if (address >= (map.EndVa - map.Va))
{
address -= (ulong)(map.BridgeSize / 2);
}
return _tracking.VirtualMemoryEvent(map.Va + address, size, write);
}
public override void Destroy()
{
_memoryEh.Dispose();
base.Destroy();
}
}
private readonly MemoryTracking _tracking;
// Serializes Allocate/Free across threads; also taken by allocation handles on Dispose.
public object Lock { get; }
public AddressSpacePartitionAllocator(MemoryTracking tracking) : base(DefaultBlockAlignment, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible)
{
_tracking = tracking;
Lock = new();
}
/// <summary>
/// Allocates a view for a partition serving [va, va + size), reserving extra
/// space for the end bridge pages, and registers the guest range on the block.
/// </summary>
public AddressSpacePartitionAllocation Allocate(ulong va, ulong size, int bridgeSize)
{
lock (Lock)
{
AddressSpacePartitionAllocation allocation = new(this, Allocate(size + (ulong)bridgeSize, MemoryBlock.GetPageSize(), CreateBlock));
allocation.RegisterMapping(va, va + size, bridgeSize);
return allocation;
}
}
// Factory passed to the base allocator for creating tracked blocks.
private Block CreateBlock(MemoryBlock memory, ulong size)
{
return new Block(_tracking, memory, size);
}
}
}

View File

@ -0,0 +1,362 @@
using Ryujinx.Common;
using Ryujinx.Memory;
using Ryujinx.Memory.Tracking;
using System;
using System.Collections.Generic;
using System.Diagnostics;
namespace Ryujinx.Cpu.Jit
{
/// <summary>
/// Guest address space split into fixed-size partitions that are created on
/// demand, with "bridge" pages stitching contiguous partitions together.
/// </summary>
class AddressSpacePartitioned : IDisposable
{
// Each partition covers 32MB of guest address space.
private const int PartitionBits = 25;
private const ulong PartitionSize = 1UL << PartitionBits;
private readonly MemoryBlock _backingMemory;
// Sorted, non-overlapping list of live partitions; guarded by lock(_partitions).
private readonly List<AddressSpacePartition> _partitions;
private readonly AddressSpacePartitionAllocator _asAllocator;
// Callback used to update the software page table when host pointers change.
private readonly Action<ulong, IntPtr, ulong> _updatePtCallback;
public AddressSpacePartitioned(MemoryTracking tracking, MemoryBlock backingMemory, Action<ulong, IntPtr, ulong> updatePtCallback)
{
_backingMemory = backingMemory;
_partitions = new();
_asAllocator = new(tracking);
_updatePtCallback = updatePtCallback;
}
/// <summary>
/// Maps a guest range, creating any missing partitions first, then forwarding
/// the (clamped) range to each partition it spans and refreshing bridges.
/// </summary>
public void Map(ulong va, ulong pa, ulong size)
{
EnsurePartitions(va, size);
ulong endVa = va + size;
while (va < endVa)
{
// Partitions exist for the whole range after EnsurePartitions, so the
// index lookup cannot miss here.
int partitionIndex = FindPartitionIndex(va);
AddressSpacePartition partition = _partitions[partitionIndex];
(ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
partition.Map(clampedVa, pa, clampedEndVa - clampedVa);
ulong currentSize = clampedEndVa - clampedVa;
va += currentSize;
pa += currentSize;
InsertBridgeIfNeeded(partitionIndex);
}
}
/// <summary>
/// Unmaps a guest range, forwarding the (clamped) range to each partition it
/// spans, tearing down bridges and disposing partitions that become empty.
/// </summary>
/// <param name="va">Guest virtual address of the range</param>
/// <param name="size">Size of the range in bytes</param>
public void Unmap(ulong va, ulong size)
{
    ulong endVa = va + size;

    while (va < endVa)
    {
        int partitionIndex = FindPartitionIndex(va);

        // FindPartitionIndex returns -1 when no partition covers the address.
        // The original indexed _partitions[partitionIndex] first and then
        // null-checked the element — list elements are never null, and the
        // -1 index would throw before the check. Guard the index instead.
        if (partitionIndex < 0)
        {
            // Skip ahead to the next partition boundary.
            va += PartitionSize - (va & (PartitionSize - 1));
            continue;
        }

        AddressSpacePartition partition = _partitions[partitionIndex];

        (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);

        partition.Unmap(clampedVa, clampedEndVa - clampedVa);

        va += clampedEndVa - clampedVa;

        RemoveBridgeIfNeeded(partitionIndex);

        // Fully empty partitions are released back to the allocator.
        if (partition.IsEmpty())
        {
            lock (_partitions)
            {
                _partitions.Remove(partition);
                partition.Dispose();
            }
        }
    }
}
/// <summary>
/// Reprotects a guest range, skipping gaps where no partition exists.
/// </summary>
public void Reprotect(ulong va, ulong size, MemoryPermission protection, MemoryTracking tracking)
{
ulong endVa = va + size;
while (va < endVa)
{
AddressSpacePartition partition = FindPartition(va);
if (partition == null)
{
// No partition here; jump to the next partition boundary.
va += PartitionSize - (va & (PartitionSize - 1));
continue;
}
(ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
partition.Reprotect(clampedVa, clampedEndVa - clampedVa, protection);
va += clampedEndVa - clampedVa;
}
}
/// <summary>
/// Looks up the private allocation covering va, if any. When no partition
/// covers va, nextVa is set to the next partition boundary.
/// </summary>
public PrivateRange GetFirstPrivateAllocation(ulong va, ulong size, out ulong nextVa)
{
AddressSpacePartition partition = FindPartition(va);
if (partition == null)
{
nextVa = (va & ~(PartitionSize - 1)) + PartitionSize;
return PrivateRange.Empty;
}
return partition.GetFirstPrivateAllocation(va, size, out nextVa);
}
/// <summary>
/// Returns true if any part of [va, va + size) is backed by a private allocation.
/// </summary>
public bool HasAnyPrivateAllocation(ulong va, ulong size)
{
ulong endVa = va + size;
while (va < endVa)
{
AddressSpacePartition partition = FindPartition(va);
if (partition == null)
{
va += PartitionSize - (va & (PartitionSize - 1));
continue;
}
(ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
if (partition.HasPrivateAllocation(clampedVa, clampedEndVa - clampedVa))
{
return true;
}
va += clampedEndVa - clampedVa;
}
return false;
}
// Rebuilds the bridges on both sides of the partition at partitionIndex when
// the neighboring partition is directly contiguous in guest address space.
private void InsertBridgeIfNeeded(int partitionIndex)
{
if (partitionIndex > 0 && _partitions[partitionIndex - 1].EndAddress == _partitions[partitionIndex].Address)
{
_partitions[partitionIndex - 1].InsertBridgeAtEnd(_partitions[partitionIndex], _updatePtCallback);
}
if (partitionIndex + 1 < _partitions.Count && _partitions[partitionIndex].EndAddress == _partitions[partitionIndex + 1].Address)
{
_partitions[partitionIndex].InsertBridgeAtEnd(_partitions[partitionIndex + 1], _updatePtCallback);
}
}
// Called after an unmap: refreshes the bridge of the previous contiguous
// partition (its view of our first page may have changed), and either
// refreshes or removes our own end bridge depending on whether the next
// partition is still contiguous.
private void RemoveBridgeIfNeeded(int partitionIndex)
{
if (partitionIndex > 0 && _partitions[partitionIndex - 1].EndAddress == _partitions[partitionIndex].Address)
{
_partitions[partitionIndex - 1].InsertBridgeAtEnd(_partitions[partitionIndex], _updatePtCallback);
}
if (partitionIndex + 1 < _partitions.Count && _partitions[partitionIndex].EndAddress == _partitions[partitionIndex + 1].Address)
{
_partitions[partitionIndex].InsertBridgeAtEnd(_partitions[partitionIndex + 1], _updatePtCallback);
}
else
{
_partitions[partitionIndex].RemoveBridgeFromEnd(_updatePtCallback);
}
}
/// <summary>
/// Gets a host pointer for a mapped guest range. The range must be covered by
/// an existing partition (FindPartition returning null would throw NRE here).
/// </summary>
public IntPtr GetPointer(ulong va, ulong size)
{
AddressSpacePartition partition = FindPartition(va);
return partition.GetPointer(va, size);
}
// Clamps [va, endVa) to the bounds of the given partition.
private static (ulong, ulong) ClampRange(AddressSpacePartition partition, ulong va, ulong endVa)
{
if (va < partition.Address)
{
va = partition.Address;
}
if (endVa > partition.EndAddress)
{
endVa = partition.EndAddress;
}
return (va, endVa);
}
// Lock-taking wrapper around EnsurePartitionsForRange.
private void EnsurePartitions(ulong va, ulong size)
{
lock (_partitions)
{
EnsurePartitionsForRange(va, size);
}
}
// Returns the partition covering va, or null if none exists.
private AddressSpacePartition FindPartition(ulong va)
{
lock (_partitions)
{
int index = FindPartitionIndex(va);
if (index >= 0)
{
return _partitions[index];
}
}
return null;
}
// Binary search over the sorted partition list; returns the index of the
// partition whose [Address, EndAddress) contains va, or -1 if none does.
private int FindPartitionIndex(ulong va)
{
lock (_partitions)
{
int left = 0;
int middle = 0;
int right = _partitions.Count - 1;
while (left <= right)
{
middle = left + ((right - left) >> 1);
AddressSpacePartition partition = _partitions[middle];
if (partition.Address <= va && partition.EndAddress > va)
{
return middle;
}
if (partition.Address >= va)
{
right = middle - 1;
}
else
{
left = middle + 1;
}
}
}
return -1;
}
// Creates partitions so the partition-aligned expansion of [va, va + size) is
// fully covered, filling gaps between and around existing partitions.
// Caller must hold lock(_partitions).
private void EnsurePartitionsForRange(ulong va, ulong size)
{
ulong endVa = BitUtils.AlignUp(va + size, PartitionSize);
va = BitUtils.AlignDown(va, PartitionSize);
for (int i = 0; i < _partitions.Count && va < endVa; i++)
{
AddressSpacePartition partition = _partitions[i];
if (partition.Address <= va && partition.EndAddress > va)
{
// va falls inside this partition.
if (partition.EndAddress >= endVa)
{
// Fully mapped already.
va = endVa;
break;
}
// Fill the gap between this partition's end and the next one (or endVa).
ulong gapSize;
if (i + 1 < _partitions.Count)
{
AddressSpacePartition nextPartition = _partitions[i + 1];
if (partition.EndAddress == nextPartition.Address)
{
// No gap; keep scanning from the next partition.
va = partition.EndAddress;
continue;
}
gapSize = Math.Min(endVa, nextPartition.Address) - partition.EndAddress;
}
else
{
gapSize = endVa - partition.EndAddress;
}
_partitions.Insert(i + 1, new(CreateAsPartitionAllocation(partition.EndAddress, gapSize), _backingMemory, partition.EndAddress, gapSize));
va = partition.EndAddress + gapSize;
// Skip the partition we just inserted.
i++;
}
else if (partition.EndAddress > va)
{
// va lies in the gap before this partition; fill up to its start or endVa.
Debug.Assert(partition.Address > va);
ulong gapSize;
if (partition.Address < endVa)
{
gapSize = partition.Address - va;
}
else
{
gapSize = endVa - va;
}
_partitions.Insert(i, new(CreateAsPartitionAllocation(va, gapSize), _backingMemory, va, gapSize));
va = Math.Min(partition.EndAddress, endVa);
i++;
}
}
// Tail gap past the last existing partition.
if (va < endVa)
{
_partitions.Add(new(CreateAsPartitionAllocation(va, endVa - va), _backingMemory, va, endVa - va));
}
// Sanity check: the list must remain sorted and non-overlapping.
for (int i = 1; i < _partitions.Count; i++)
{
Debug.Assert(_partitions[i].Address > _partitions[i - 1].Address);
Debug.Assert(_partitions[i].EndAddress > _partitions[i - 1].EndAddress);
}
}
// Allocates a partition view with two extra host pages reserved for the end bridge.
private AddressSpacePartitionAllocation CreateAsPartitionAllocation(ulong va, ulong size)
{
ulong bridgeSize = MemoryBlock.GetPageSize() * 2;
return _asAllocator.Allocate(va, size, (int)bridgeSize);
}
/// <summary>
/// Disposes all partitions and the partition allocator.
/// </summary>
protected virtual void Dispose(bool disposing)
{
if (disposing)
{
foreach (AddressSpacePartition partition in _partitions)
{
partition.Dispose();
}
_partitions.Clear();
_asAllocator.Dispose();
}
}
public void Dispose()
{
Dispose(disposing: true);
GC.SuppressFinalize(this);
}
}
}

View File

@ -1,4 +1,4 @@
using ARMeilleure.Memory;
using ARMeilleure.Memory;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using Ryujinx.Memory.Tracking;

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,4 @@
using ARMeilleure.Signal;
using ARMeilleure.Signal;
using Ryujinx.Memory;
using Ryujinx.Memory.Tracking;
using System;
@ -8,19 +8,21 @@ namespace Ryujinx.Cpu
{
public class MemoryEhMeilleure : IDisposable
{
private delegate bool TrackingEventDelegate(ulong address, ulong size, bool write);
public delegate bool TrackingEventDelegate(ulong address, ulong size, bool write);
private readonly TrackingEventDelegate _trackingEvent;
private readonly ulong _baseAddress;
private readonly ulong _mirrorAddress;
public MemoryEhMeilleure(MemoryBlock addressSpace, MemoryBlock addressSpaceMirror, MemoryTracking tracking)
public MemoryEhMeilleure(MemoryBlock addressSpace, MemoryBlock addressSpaceMirror, MemoryTracking tracking, TrackingEventDelegate trackingEvent = null)
{
_baseAddress = (ulong)addressSpace.Pointer;
ulong endAddress = _baseAddress + addressSpace.Size;
_trackingEvent = tracking.VirtualMemoryEvent;
_trackingEvent = trackingEvent ?? tracking.VirtualMemoryEvent;
bool added = NativeSignalHandler.AddTrackedRegion((nuint)_baseAddress, (nuint)endAddress, Marshal.GetFunctionPointerForDelegate(_trackingEvent));
if (!added)
@ -28,7 +30,7 @@ namespace Ryujinx.Cpu
throw new InvalidOperationException("Number of allowed tracked regions exceeded.");
}
if (OperatingSystem.IsWindows())
if (OperatingSystem.IsWindows() && addressSpaceMirror != null)
{
// Add a tracking event with no signal handler for the mirror on Windows.
// The native handler has its own code to check for the partial overlap race when regions are protected by accident,

View File

@ -143,7 +143,7 @@ namespace Ryujinx.Cpu
}
}
public PrivateMemoryAllocator(int blockAlignment, MemoryAllocationFlags allocationFlags) : base(blockAlignment, allocationFlags)
public PrivateMemoryAllocator(ulong blockAlignment, MemoryAllocationFlags allocationFlags) : base(blockAlignment, allocationFlags)
{
}
@ -180,10 +180,10 @@ namespace Ryujinx.Cpu
private readonly List<T> _blocks;
private readonly int _blockAlignment;
private readonly ulong _blockAlignment;
private readonly MemoryAllocationFlags _allocationFlags;
public PrivateMemoryAllocatorImpl(int blockAlignment, MemoryAllocationFlags allocationFlags)
public PrivateMemoryAllocatorImpl(ulong blockAlignment, MemoryAllocationFlags allocationFlags)
{
_blocks = new List<T>();
_blockAlignment = blockAlignment;
@ -212,7 +212,7 @@ namespace Ryujinx.Cpu
}
}
ulong blockAlignedSize = BitUtils.AlignUp(size, (ulong)_blockAlignment);
ulong blockAlignedSize = BitUtils.AlignUp(size, _blockAlignment);
var memory = new MemoryBlock(blockAlignedSize, _allocationFlags);
var newBlock = createBlock(memory, blockAlignedSize);

View File

@ -1,4 +1,4 @@
using Ryujinx.Common.Configuration;
using Ryujinx.Common.Configuration;
using Ryujinx.Common.Logging;
using Ryujinx.Cpu;
using Ryujinx.Cpu.AppleHv;
@ -72,7 +72,7 @@ namespace Ryujinx.HLE.HOS
AddressSpace addressSpace = null;
if (mode == MemoryManagerMode.HostMapped || mode == MemoryManagerMode.HostMappedUnsafe)
if ((mode == MemoryManagerMode.HostMapped || mode == MemoryManagerMode.HostMappedUnsafe) && MemoryBlock.GetPageSize() <= 0x1000)
{
if (!AddressSpace.TryCreate(context.Memory, addressSpaceSize, MemoryBlock.GetPageSize() == MemoryManagerHostMapped.PageSize, out addressSpace))
{
@ -91,13 +91,21 @@ namespace Ryujinx.HLE.HOS
case MemoryManagerMode.HostMapped:
case MemoryManagerMode.HostMappedUnsafe:
if (addressSpaceSize != addressSpace.AddressSpaceSize)
if (addressSpace == null)
{
Logger.Warning?.Print(LogClass.Emulation, $"Allocated address space (0x{addressSpace.AddressSpaceSize:X}) is smaller than guest application requirements (0x{addressSpaceSize:X})");
var memoryManagerHostTracked = new MemoryManagerHostTracked(context.Memory, addressSpaceSize, invalidAccessHandler);
processContext = new ArmProcessContext<MemoryManagerHostTracked>(pid, cpuEngine, _gpu, memoryManagerHostTracked, addressSpaceSize, for64Bit);
}
else
{
if (addressSpaceSize != addressSpace.AddressSpaceSize)
{
Logger.Warning?.Print(LogClass.Emulation, $"Allocated address space (0x{addressSpace.AddressSpaceSize:X}) is smaller than guest application requirements (0x{addressSpaceSize:X})");
}
var memoryManagerHostMapped = new MemoryManagerHostMapped(addressSpace, mode == MemoryManagerMode.HostMappedUnsafe, invalidAccessHandler);
processContext = new ArmProcessContext<MemoryManagerHostMapped>(pid, cpuEngine, _gpu, memoryManagerHostMapped, addressSpace.AddressSpaceSize, for64Bit);
var memoryManagerHostMapped = new MemoryManagerHostMapped(addressSpace, mode == MemoryManagerMode.HostMappedUnsafe, invalidAccessHandler);
processContext = new ArmProcessContext<MemoryManagerHostMapped>(pid, cpuEngine, _gpu, memoryManagerHostMapped, addressSpace.AddressSpaceSize, for64Bit);
}
break;
default:

View File

@ -165,6 +165,29 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// <inheritdoc/>
protected override Result MapForeign(IEnumerable<HostMemoryRange> regions, ulong va, ulong size)
{
ulong backingStart = (ulong)Context.Memory.Pointer;
ulong backingEnd = backingStart + Context.Memory.Size;
KPageList pageList = new();
foreach (HostMemoryRange region in regions)
{
// If the range is inside the physical memory, it is shared and we should increment the page count,
// otherwise it is private and we don't need to increment the page count.
if (region.Address >= backingStart && region.Address < backingEnd)
{
pageList.AddRange(region.Address - backingStart + DramMemoryMap.DramBase, region.Size / PageSize);
}
}
using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList);
foreach (var pageNode in pageList)
{
Context.CommitMemory(pageNode.Address - DramMemoryMap.DramBase, pageNode.PagesCount * PageSize);
}
ulong offset = 0;
foreach (var region in regions)
@ -174,6 +197,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
offset += region.Size;
}
scopedPageList.SignalSuccess();
return Result.Success;
}

View File

@ -293,9 +293,9 @@ namespace Ryujinx.Memory
{
var hostRegion = hostRegions[i];
if ((ulong)hostRegion.Address >= backingStart && (ulong)hostRegion.Address < backingEnd)
if (hostRegion.Address >= backingStart && hostRegion.Address < backingEnd)
{
regions[count++] = new MemoryRange((ulong)hostRegion.Address - backingStart, hostRegion.Size);
regions[count++] = new MemoryRange(hostRegion.Address - backingStart, hostRegion.Size);
}
}