forked from MeloNX/MeloNX
Avoid double lookup on TryGetVirtualContiguous, fix/change some lock usage
parent 942efbb6d6
commit 87ba82691c
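The diff below swaps the plain `lock (_treeLock)` critical sections for a `ReaderWriterLockSlim`, so concurrent readers of the mapping trees no longer serialize against each other and only tree updates take the exclusive write lock, with every Enter paired with an Exit in a finally block. A minimal standalone sketch of that pattern (the `RangeStore`, `Add`, and `Find` names here are hypothetical, not the types touched by this commit):

using System.Collections.Generic;
using System.Threading;

class RangeStore
{
    private readonly SortedDictionary<ulong, ulong> _ranges = new();
    private readonly ReaderWriterLockSlim _lock = new();

    // Writers take the exclusive lock, mirroring how Update/AddMapping are restructured below.
    public void Add(ulong address, ulong size)
    {
        _lock.EnterWriteLock();

        try
        {
            _ranges[address] = size;
        }
        finally
        {
            _lock.ExitWriteLock();
        }
    }

    // Readers can run concurrently, mirroring IsEmpty/GetFirstPrivateAllocation below.
    public bool Find(ulong address, out ulong size)
    {
        _lock.EnterReadLock();

        try
        {
            return _ranges.TryGetValue(address, out size);
        }
        finally
        {
            _lock.ExitReadLock();
        }
    }
}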
@@ -3,6 +3,7 @@ using Ryujinx.Common.Collections;
 using Ryujinx.Memory;
 using System;
 using System.Diagnostics;
+using System.Threading;
 
 namespace Ryujinx.Cpu.Jit
 {
@@ -169,7 +170,7 @@ namespace Ryujinx.Cpu.Jit
         private readonly IntrusiveRedBlackTree<Mapping> _mappingTree;
         private readonly IntrusiveRedBlackTree<PrivateMapping> _privateTree;
 
-        private readonly object _treeLock;
+        private readonly ReaderWriterLockSlim _treeLock;
 
         private readonly ulong _hostPageSize;
 
@@ -189,7 +190,7 @@ namespace Ryujinx.Cpu.Jit
             _privateMemoryAllocator = new PrivateMemoryAllocator(DefaultBlockAlignment, MemoryAllocationFlags.Mirrorable);
             _mappingTree = new IntrusiveRedBlackTree<Mapping>();
             _privateTree = new IntrusiveRedBlackTree<PrivateMapping>();
-            _treeLock = new object();
+            _treeLock = new ReaderWriterLockSlim();
 
             _mappingTree.Add(new Mapping(address, size, MappingType.None));
             _privateTree.Add(new PrivateMapping(address, size, default));
@@ -209,12 +210,18 @@ namespace Ryujinx.Cpu.Jit
 
         public bool IsEmpty()
         {
-            lock (_treeLock)
+            _treeLock.EnterReadLock();
+
+            try
             {
                 Mapping map = _mappingTree.GetNode(new Mapping(Address, Size, MappingType.None));
 
                 return map != null && map.Address == Address && map.Size == Size && map.Type == MappingType.None;
             }
+            finally
+            {
+                _treeLock.ExitReadLock();
+            }
         }
 
         public void Map(ulong va, ulong pa, ulong size)
@@ -232,10 +239,7 @@ namespace Ryujinx.Cpu.Jit
                 _lastPagePa = pa + ((EndAddress - GuestPageSize) - va);
             }
 
-            lock (_treeLock)
-            {
-                Update(va, pa, size, MappingType.Private);
-            }
+            Update(va, pa, size, MappingType.Private);
         }
 
         public void Unmap(ulong va, ulong size)
@@ -253,10 +257,7 @@ namespace Ryujinx.Cpu.Jit
                 _lastPagePa = null;
             }
 
-            lock (_treeLock)
-            {
-                Update(va, 0UL, size, MappingType.None);
-            }
+            Update(va, 0UL, size, MappingType.None);
         }
 
         public void Reprotect(ulong va, ulong size, MemoryPermission protection)
@@ -349,7 +350,9 @@ namespace Ryujinx.Cpu.Jit
 
         private (MemoryBlock, ulong) GetFirstPageMemoryAndOffset()
        {
-            lock (_treeLock)
+            _treeLock.EnterReadLock();
+
+            try
             {
                 PrivateMapping map = _privateTree.GetNode(new PrivateMapping(Address, 1UL, default));
 
@@ -358,13 +361,19 @@ namespace Ryujinx.Cpu.Jit
                     return (map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (Address - map.Address));
                 }
             }
+            finally
+            {
+                _treeLock.ExitReadLock();
+            }
 
             return (_backingMemory, _firstPagePa.Value);
         }
 
         private (MemoryBlock, ulong) GetLastPageMemoryAndOffset()
         {
-            lock (_treeLock)
+            _treeLock.EnterReadLock();
+
+            try
             {
                 ulong pageAddress = EndAddress - _hostPageSize;
 
@@ -375,15 +384,28 @@ namespace Ryujinx.Cpu.Jit
                     return (map.PrivateAllocation.Memory, map.PrivateAllocation.Offset + (pageAddress - map.Address));
                 }
             }
+            finally
+            {
+                _treeLock.ExitReadLock();
+            }
 
             return (_backingMemory, _lastPagePa.Value & ~(_hostPageSize - 1));
         }
 
         private void Update(ulong va, ulong pa, ulong size, MappingType type)
         {
-            Mapping map = _mappingTree.GetNode(new Mapping(va, 1UL, MappingType.None));
+            _treeLock.EnterWriteLock();
 
-            Update(map, va, pa, size, type);
+            try
+            {
+                Mapping map = _mappingTree.GetNode(new Mapping(va, 1UL, MappingType.None));
+
+                Update(map, va, pa, size, type);
+            }
+            finally
+            {
+                _treeLock.ExitWriteLock();
+            }
         }
 
         private Mapping Update(Mapping map, ulong va, ulong pa, ulong size, MappingType type)
@@ -571,7 +593,9 @@ namespace Ryujinx.Cpu.Jit
 
         public PrivateRange GetFirstPrivateAllocation(ulong va, ulong size, out ulong nextVa)
         {
-            lock (_treeLock)
+            _treeLock.EnterReadLock();
+
+            try
             {
                 PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, 1UL, default));
 
@@ -587,18 +611,43 @@ namespace Ryujinx.Cpu.Jit
                         Math.Min(map.PrivateAllocation.Size - startOffset, size));
                 }
             }
+            finally
+            {
+                _treeLock.ExitReadLock();
+            }
 
             return PrivateRange.Empty;
         }
 
-        public bool HasPrivateAllocation(ulong va, ulong size)
+        public bool HasPrivateAllocation(ulong va, ulong size, ulong startVa, ulong startSize, ref PrivateRange range)
         {
-            lock (_treeLock)
+            _treeLock.EnterReadLock();
+
+            try
             {
                 PrivateMapping map = _privateTree.GetNode(new PrivateMapping(va, size, default));
 
-                return map != null && map.PrivateAllocation.IsValid;
+                if (map != null && map.PrivateAllocation.IsValid)
+                {
+                    if (map.Address <= startVa && map.EndAddress >= startVa + startSize)
+                    {
+                        ulong startOffset = startVa - map.Address;
+
+                        range = new(
+                            map.PrivateAllocation.Memory,
+                            map.PrivateAllocation.Offset + startOffset,
+                            Math.Min(map.PrivateAllocation.Size - startOffset, startSize));
+                    }
+
+                    return true;
+                }
             }
+            finally
+            {
+                _treeLock.ExitReadLock();
+            }
+
+            return false;
         }
 
         public void Dispose()
@@ -2,6 +2,7 @@ using Ryujinx.Common.Collections;
 using Ryujinx.Memory;
 using Ryujinx.Memory.Tracking;
 using System;
+using System.Threading;
 
 namespace Ryujinx.Cpu.Jit
 {
@@ -100,41 +101,70 @@ namespace Ryujinx.Cpu.Jit
             }
 
             private readonly IntrusiveRedBlackTree<Mapping> _mappingTree;
+            private readonly ReaderWriterLockSlim _treeLock;
 
             public Block(MemoryTracking tracking, MemoryBlock memory, ulong size) : base(memory, size)
             {
                 _tracking = tracking;
                 _memoryEh = new(memory, null, tracking, VirtualMemoryEvent);
                 _mappingTree = new();
+                _treeLock = new();
             }
 
             public void AddMapping(ulong offset, ulong size, ulong va, ulong endVa, int bridgeSize)
             {
-                _mappingTree.Add(new(offset, size, va, endVa, bridgeSize));
+                _treeLock.EnterWriteLock();
+
+                try
+                {
+                    _mappingTree.Add(new(offset, size, va, endVa, bridgeSize));
+                }
+                finally
+                {
+                    _treeLock.ExitWriteLock();
+                }
             }
 
             public void RemoveMapping(ulong offset, ulong size)
             {
-                _mappingTree.Remove(_mappingTree.GetNode(new Mapping(offset, size, 0, 0, 0)));
+                _treeLock.EnterWriteLock();
+
+                try
+                {
+                    _mappingTree.Remove(_mappingTree.GetNode(new Mapping(offset, size, 0, 0, 0)));
+                }
+                finally
+                {
+                    _treeLock.ExitWriteLock();
+                }
             }
 
             private bool VirtualMemoryEvent(ulong address, ulong size, bool write)
             {
-                Mapping map = _mappingTree.GetNode(new Mapping(address, size, 0, 0, 0));
+                _treeLock.EnterReadLock();
 
-                if (map == null)
+                try
                 {
-                    return false;
+                    Mapping map = _mappingTree.GetNode(new Mapping(address, size, 0, 0, 0));
+
+                    if (map == null)
+                    {
+                        return false;
+                    }
+
+                    address -= map.Address;
+
+                    if (address >= (map.EndVa - map.Va))
+                    {
+                        address -= (ulong)(map.BridgeSize / 2);
+                    }
+
+                    return _tracking.VirtualMemoryEvent(map.Va + address, size, write);
                 }
-
-                address -= map.Address;
-
-                if (address >= (map.EndVa - map.Va))
+                finally
                 {
-                    address -= (ulong)(map.BridgeSize / 2);
+                    _treeLock.ExitReadLock();
                 }
-
-                return _tracking.VirtualMemoryEvent(map.Va + address, size, write);
             }
 
             public override void Destroy()
@@ -27,15 +27,15 @@ namespace Ryujinx.Cpu.Jit
 
         public void Map(ulong va, ulong pa, ulong size)
         {
-            EnsurePartitions(va, size);
-
             ulong endVa = va + size;
 
-            while (va < endVa)
+            lock (_partitions)
             {
-                lock (_partitions)
+                EnsurePartitionsLocked(va, size);
+
+                while (va < endVa)
                 {
-                    int partitionIndex = FindPartitionIndex(va);
+                    int partitionIndex = FindPartitionIndexLocked(va);
                     AddressSpacePartition partition = _partitions[partitionIndex];
 
                     (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
@@ -62,7 +62,7 @@ namespace Ryujinx.Cpu.Jit
 
             lock (_partitions)
             {
-                int partitionIndex = FindPartitionIndex(va);
+                int partitionIndex = FindPartitionIndexLocked(va);
                 if (partitionIndex < 0)
                 {
                     va += PartitionSize - (va & (PartitionSize - 1));
@@ -126,8 +126,11 @@ namespace Ryujinx.Cpu.Jit
             return partition.GetFirstPrivateAllocation(va, size, out nextVa);
         }
 
-        public bool HasAnyPrivateAllocation(ulong va, ulong size)
+        public bool HasAnyPrivateAllocation(ulong va, ulong size, out PrivateRange range)
         {
+            range = PrivateRange.Empty;
+
+            ulong startVa = va;
             ulong endVa = va + size;
 
             while (va < endVa)
@@ -143,7 +146,7 @@ namespace Ryujinx.Cpu.Jit
 
                 (ulong clampedVa, ulong clampedEndVa) = ClampRange(partition, va, endVa);
 
-                if (partition.HasPrivateAllocation(clampedVa, clampedEndVa - clampedVa))
+                if (partition.HasPrivateAllocation(clampedVa, clampedEndVa - clampedVa, startVa, size, ref range))
                 {
                     return true;
                 }
@@ -206,19 +209,11 @@ namespace Ryujinx.Cpu.Jit
             return (va, endVa);
         }
 
-        private void EnsurePartitions(ulong va, ulong size)
-        {
-            lock (_partitions)
-            {
-                EnsurePartitionsForRange(va, size);
-            }
-        }
-
         private AddressSpacePartition FindPartition(ulong va)
         {
             lock (_partitions)
             {
-                int index = FindPartitionIndex(va);
+                int index = FindPartitionIndexLocked(va);
                 if (index >= 0)
                 {
                     return _partitions[index];
@@ -228,40 +223,37 @@ namespace Ryujinx.Cpu.Jit
             return null;
         }
 
-        private int FindPartitionIndex(ulong va)
+        private int FindPartitionIndexLocked(ulong va)
        {
-            lock (_partitions)
+            int left = 0;
+            int middle = 0;
+            int right = _partitions.Count - 1;
+
+            while (left <= right)
             {
-                int left = 0;
-                int middle = 0;
-                int right = _partitions.Count - 1;
+                middle = left + ((right - left) >> 1);
 
-                while (left <= right)
+                AddressSpacePartition partition = _partitions[middle];
+
+                if (partition.Address <= va && partition.EndAddress > va)
                 {
-                    middle = left + ((right - left) >> 1);
+                    return middle;
+                }
 
-                    AddressSpacePartition partition = _partitions[middle];
-
-                    if (partition.Address <= va && partition.EndAddress > va)
-                    {
-                        return middle;
-                    }
-
-                    if (partition.Address >= va)
-                    {
-                        right = middle - 1;
-                    }
-                    else
-                    {
-                        left = middle + 1;
-                    }
+                if (partition.Address >= va)
+                {
+                    right = middle - 1;
+                }
+                else
+                {
+                    left = middle + 1;
                 }
             }
 
             return -1;
         }
 
-        private void EnsurePartitionsForRange(ulong va, ulong size)
+        private void EnsurePartitionsLocked(ulong va, ulong size)
         {
             ulong endVa = BitUtils.AlignUp(va + size, PartitionSize);
             va = BitUtils.AlignDown(va, PartitionSize);
@@ -473,14 +473,12 @@ namespace Ryujinx.Cpu.Jit
 
         private bool TryGetVirtualContiguous(ulong va, int size, out MemoryBlock memory, out ulong offset)
         {
-            if (_addressSpace.HasAnyPrivateAllocation(va, (ulong)size))
+            if (_addressSpace.HasAnyPrivateAllocation(va, (ulong)size, out PrivateRange range))
             {
                 // If we have a private allocation overlapping the range,
                 // then the access is only considered contiguous if it covers the entire range.
 
-                PrivateRange range = _addressSpace.GetFirstPrivateAllocation(va, (ulong)size, out _);
-
-                if (range.Memory != null && range.Size == (ulong)size)
+                if (range.Memory != null)
                 {
                     memory = range.Memory;
                     offset = range.Offset;
@@ -500,9 +498,6 @@ namespace Ryujinx.Cpu.Jit
             return IsPhysicalContiguous(va, size);
         }
 
-        [MethodImpl(MethodImplOptions.AggressiveInlining)]
-        private bool IsPhysicalContiguousAndMapped(ulong va, int size) => IsPhysicalContiguous(va, size) && IsMapped(va);
-
         [MethodImpl(MethodImplOptions.AggressiveInlining)]
         private bool IsPhysicalContiguous(ulong va, int size)
         {
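The TryGetVirtualContiguous hunks above also remove the double lookup named in the commit title: instead of calling HasAnyPrivateAllocation and then GetFirstPrivateAllocation over the same range, the existence check now hands back the PrivateRange it already found through an out parameter. A rough sketch of that shape under assumed names (Lookup, Range, and TryGetRange are illustrative, not this codebase's API):

using System.Collections.Generic;

readonly record struct Range(ulong Offset, ulong Size);

class Lookup
{
    private readonly Dictionary<ulong, Range> _map = new();

    // Before: callers asked "is there a range?" and then searched again to fetch it.
    public bool HasRange(ulong key) => _map.ContainsKey(key);
    public Range GetRange(ulong key) => _map[key];

    // After: a single lookup answers the question and returns the value it found.
    public bool TryGetRange(ulong key, out Range range) => _map.TryGetValue(key, out range);
}

Applied to the partition trees here, one traversal both answers the overlap question and, when a single mapping covers the whole requested window, fills in the PrivateRange the caller needs.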