Browse Source

Update Svelto.ECS to 3.2.5

pull/72/head
sebas77 2 years ago
parent
commit
09a0189779
32 changed files with 277 additions and 501 deletions
  1. +1    -1    com.sebaslab.svelto.common
  2. +4    -0    com.sebaslab.svelto.ecs/CHANGELOG.md
  3. +2    -2    com.sebaslab.svelto.ecs/Core/CheckEntityUtilities.cs
  4. +3    -2    com.sebaslab.svelto.ecs/Core/EnginesRoot.Engines.cs
  5. +1    -1    com.sebaslab.svelto.ecs/Core/EnginesRoot.Entities.cs
  6. +1    -1    com.sebaslab.svelto.ecs/Core/EnginesRoot.Submission.cs
  7. +1    -0    com.sebaslab.svelto.ecs/Core/EntitiesDB.cs
  8. +1    -0    com.sebaslab.svelto.ecs/Core/EntityCollection.cs
  9. +6    -9    com.sebaslab.svelto.ecs/Core/EntityFactory.cs
  10. +1   -0    com.sebaslab.svelto.ecs/Core/EntityReference/EnginesRoot.LocatorMap.cs
  11. +1   -0    com.sebaslab.svelto.ecs/Core/Filters/EntitiesDB.GroupFilters.cs
  12. +4   -3    com.sebaslab.svelto.ecs/Core/Filters/FilterGroup.cs
  13. +1   -0    com.sebaslab.svelto.ecs/Core/Filters/GroupFilters.cs
  14. +10  -10   com.sebaslab.svelto.ecs/Core/SimpleEntitiesSubmissionScheduler.cs
  15. +0   -74   com.sebaslab.svelto.ecs/DataStructures/ThreadSafeNativeBagTest.cs
  16. +1   -0    com.sebaslab.svelto.ecs/DataStructures/TypeSafeDictionary.cs
  17. +9   -5    com.sebaslab.svelto.ecs/DataStructures/Unmanaged/AtomicNativeBags.cs
  18. +34  -27   com.sebaslab.svelto.ecs/DataStructures/Unmanaged/NativeBag.cs
  19. +5   -5    com.sebaslab.svelto.ecs/DataStructures/Unmanaged/NativeDynamicArray.cs
  20. +0   -194  com.sebaslab.svelto.ecs/DataStructures/Unmanaged/ThreadSafeNativeBag.cs
  21. +5   -2    com.sebaslab.svelto.ecs/DataStructures/Unmanaged/UnsafeArray.cs
  22. +170 -148  com.sebaslab.svelto.ecs/DataStructures/Unmanaged/UnsafeBlob.cs
  23. +2   -1    com.sebaslab.svelto.ecs/Extensions/Svelto/EGIDMultiMapper.cs
  24. +1   -0    com.sebaslab.svelto.ecs/Extensions/Svelto/EntityCollectionExtension.cs
  25. +1   -0    com.sebaslab.svelto.ecs/Extensions/Svelto/EntityNativeDBExtensions.cs
  26. +1   -1    com.sebaslab.svelto.ecs/Extensions/Unity/DOTS/Native/EnginesRoot.NativeOperation.cs
  27. +1   -0    com.sebaslab.svelto.ecs/Extensions/Unity/DOTS/Native/NativeEGIDMapper.cs
  28. +1   -0    com.sebaslab.svelto.ecs/Extensions/Unity/DOTS/Native/NativeEGIDMultiMapper.cs
  29. +5   -12   com.sebaslab.svelto.ecs/Extensions/Unity/DOTS/Native/NativeEntityFactory.cs
  30. +1   -0    com.sebaslab.svelto.ecs/Extensions/Unity/DOTS/Native/UnityNativeEntityDBExtensions.cs
  31. +2   -2    com.sebaslab.svelto.ecs/package.json
  32. +1   -1    com.sebaslab.svelto.ecs/version.json

+1 -1 com.sebaslab.svelto.common

@@ -1 +1 @@
Subproject commit 8a1924de0f6017ccc03396b45a3ee78ae9195dac
Subproject commit e33d7ea39d1775cc518272079f12154fbe8f9eb1

+4 -0 com.sebaslab.svelto.ecs/CHANGELOG.md

@@ -1,6 +1,10 @@
# Changelog
All notable changes to this project will be documented in this file. Changes are listed in random order of importance.

## [3.2.5]

* refactor and improved NativeBag and UnsafeBlob. This fix a previously known crash with Unity IL2CPP

## [3.2.0]

* Improved checks on Svelto rules for the declaration of components and view components. This set of rules is not final yet (ideally one day they should be moved to static analyzers)


+2 -2 com.sebaslab.svelto.ecs/Core/CheckEntityUtilities.cs

@@ -79,7 +79,7 @@ namespace Svelto.ECS
#endif
void ClearChecks() { _multipleOperationOnSameEGIDChecker.FastClear(); }

readonly FasterDictionary<EGID, uint> _multipleOperationOnSameEGIDChecker = new FasterDictionary<EGID, uint>();
readonly FasterDictionary<ExclusiveGroupStruct, HashSet<uint>> _idChecker = new FasterDictionary<ExclusiveGroupStruct, HashSet<uint>>();
readonly FasterDictionary<EGID, uint> _multipleOperationOnSameEGIDChecker;
readonly FasterDictionary<ExclusiveGroupStruct, HashSet<uint>> _idChecker;
}
}

+3 -2 com.sebaslab.svelto.ecs/Core/EnginesRoot.Engines.cs

@@ -24,7 +24,9 @@ namespace Svelto.ECS
/// </summary>
public EnginesRoot(EntitiesSubmissionScheduler entitiesComponentScheduler)
{
_entitiesOperations = new FasterDictionary<ulong, EntitySubmitOperation>();
_entitiesOperations = new FasterDictionary<ulong, EntitySubmitOperation>();
_idChecker = new FasterDictionary<ExclusiveGroupStruct, HashSet<uint>>();
_multipleOperationOnSameEGIDChecker = new FasterDictionary<EGID, uint>();
#if UNITY_NATIVE //because of the thread count, ATM this is only for unity
_nativeSwapOperationQueue = new DataStructures.AtomicNativeBags(Allocator.Persistent);
_nativeRemoveOperationQueue = new DataStructures.AtomicNativeBags(Allocator.Persistent);
@@ -282,7 +284,6 @@ namespace Svelto.ECS
#if UNITY_NATIVE
enginesRootTarget.FlushNativeOperations(profiler);
#endif

//todo: proper unit test structural changes made as result of add/remove callbacks
while (enginesRootTarget.HasMadeNewStructuralChangesInThisIteration() && iterations++ < 5)
{


+1 -1 com.sebaslab.svelto.ecs/Core/EnginesRoot.Entities.cs

@@ -89,7 +89,7 @@ namespace Svelto.ECS
//Check if there is an EntityInfo linked to this entity, if so it's a DynamicEntityDescriptor!
if (fromGroup.TryGetValue(new RefWrapperType(ComponentBuilderUtilities.ENTITY_INFO_COMPONENT)
, out var entityInfoDic)
&& (entityInfoDic as ITypeSafeDictionary<EntityInfoComponent>).TryGetValue(
&& ((ITypeSafeDictionary<EntityInfoComponent>) entityInfoDic).TryGetValue(
fromEntityGID.entityID, out var entityInfo))
SwapOrRemoveEntityComponents(fromEntityGID, toEntityGID, entityInfo.componentsToBuild, fromGroup
, sampler);


+1 -1 com.sebaslab.svelto.ecs/Core/EnginesRoot.Submission.cs

@@ -76,7 +76,7 @@ namespace Svelto.ECS
if ((uint) numberOfOperations >= (uint) _maxNumberOfOperationsPerFrame)
{
using (sample.Yield())
yield return true;
yield return true;

numberOfOperations = 0;
}


+1 -0 com.sebaslab.svelto.ecs/Core/EntitiesDB.cs

@@ -6,6 +6,7 @@ using System;
using System.Runtime.CompilerServices;
using Svelto.Common;
using Svelto.DataStructures;
using Svelto.DataStructures.Native;
using Svelto.ECS.Internal;

namespace Svelto.ECS


+1 -0 com.sebaslab.svelto.ecs/Core/EntityCollection.cs

@@ -1,5 +1,6 @@
using System.Runtime.CompilerServices;
using Svelto.DataStructures;
using Svelto.DataStructures.Native;
using Svelto.ECS.Internal;

namespace Svelto.ECS


+6 -9 com.sebaslab.svelto.ecs/Core/EntityFactory.cs

@@ -28,12 +28,8 @@ namespace Svelto.ECS.Internal
static FasterDictionary<RefWrapperType, ITypeSafeDictionary> FetchEntityGroup
(ExclusiveGroupStruct groupID, EnginesRoot.DoubleBufferedEntitiesToAdd groupEntityComponentsByType)
{
if (groupEntityComponentsByType.current.TryGetValue(groupID, out var group) == false)
{
group = new FasterDictionary<RefWrapperType, ITypeSafeDictionary>();

groupEntityComponentsByType.current.Add(groupID, group);
}
var group = groupEntityComponentsByType.current.GetOrCreate(
groupID, () => new FasterDictionary<RefWrapperType, ITypeSafeDictionary>());

//track the number of entities created so far in the group.
groupEntityComponentsByType.IncrementEntityCount(groupID);
@@ -77,15 +73,16 @@ namespace Svelto.ECS.Internal
}
}

static void BuildEntity(EGID entityID, FasterDictionary<RefWrapperType, ITypeSafeDictionary> group
, IComponentBuilder componentBuilder, IEnumerable<object> implementors)
static void BuildEntity
(EGID entityID, FasterDictionary<RefWrapperType, ITypeSafeDictionary> group
, IComponentBuilder componentBuilder, IEnumerable<object> implementors)
{
var entityComponentType = componentBuilder.GetEntityComponentType();
var safeDictionary = group.GetOrCreate(new RefWrapperType(entityComponentType)
, (ref IComponentBuilder cb) => cb.CreateDictionary(1)
, ref componentBuilder);

//if the safeDictionary hasn't been created yet, it will be created inside this method.
// if the safeDictionary hasn't been created yet, it will be created inside this method.
componentBuilder.BuildEntityAndAddToList(safeDictionary, entityID, implementors);
}
}
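
The FetchEntityGroup change above replaces the explicit TryGetValue/Add pair with FasterDictionary.GetOrCreate. As a minimal sketch of the get-or-create semantics being relied on (illustrative only, written against a plain System.Collections.Generic.Dictionary rather than Svelto's FasterDictionary; the diff also shows a ref-state overload used in BuildEntity to avoid a closure allocation):

// Hedged sketch, not part of the commit. Requires: using System; using System.Collections.Generic;
static TValue GetOrCreate<TKey, TValue>(
    Dictionary<TKey, TValue> dictionary, TKey key, Func<TValue> factory)
{
    // return the cached value for the key, or build it once with the factory and cache it
    if (dictionary.TryGetValue(key, out var value) == false)
    {
        value = factory();
        dictionary.Add(key, value);
    }

    return value;
}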

+1 -0 com.sebaslab.svelto.ecs/Core/EntityReference/EnginesRoot.LocatorMap.cs

@@ -1,6 +1,7 @@
using System.Runtime.CompilerServices;
using Svelto.Common;
using Svelto.DataStructures;
using Svelto.DataStructures.Native;
using Svelto.ECS.DataStructures;
using Svelto.ECS.Reference;



+1 -0 com.sebaslab.svelto.ecs/Core/Filters/EntitiesDB.GroupFilters.cs

@@ -1,4 +1,5 @@
using Svelto.DataStructures;
using Svelto.DataStructures.Native;

namespace Svelto.ECS
{


+4 -3 com.sebaslab.svelto.ecs/Core/Filters/FilterGroup.cs

@@ -1,6 +1,7 @@
using System.Runtime.CompilerServices;
using Svelto.Common;
using Svelto.DataStructures;
using Svelto.DataStructures.Native;
using Svelto.ECS.DataStructures;

namespace Svelto.ECS
@@ -24,9 +25,9 @@ namespace Svelto.ECS
//from the index, find the entityID
_reverseEIDs = new NativeDynamicArrayCast<uint>(NativeDynamicArray.Alloc<uint>(Allocator.Persistent));
//from the entityID, find the index
_indexOfEntityInDenseList = new SharedSveltoDictionaryNative<uint, uint>(0);
_exclusiveGroupStruct = exclusiveGroupStruct;
_ID = ID;
_indexOfEntityInDenseList = new SharedSveltoDictionaryNative<uint, uint>(0);
_exclusiveGroupStruct = exclusiveGroupStruct;
_ID = ID;
}

/// <summary>


+1 -0 com.sebaslab.svelto.ecs/Core/Filters/GroupFilters.cs

@@ -1,4 +1,5 @@
using Svelto.DataStructures;
using Svelto.DataStructures.Native;

namespace Svelto.ECS
{


+10 -10 com.sebaslab.svelto.ecs/Core/SimpleEntitiesSubmissionScheduler.cs

@@ -10,7 +10,10 @@ namespace Svelto.ECS.Schedulers
_enumerator = SubmitEntitiesAsync(maxNumberOfOperationsPerFrame);
}

public IEnumerator<bool> SubmitEntitiesAsync() { return _enumerator; }
public IEnumerator<bool> SubmitEntitiesAsync()
{
return _enumerator;
}

public IEnumerator<bool> SubmitEntitiesAsync(uint maxNumberOfOperations)
{
@@ -22,23 +25,20 @@ namespace Svelto.ECS.Schedulers
if (paused == false)
{
var entitiesSubmitterSubmitEntities = entitiesSubmitter.submitEntities;
entitiesSubmitterSubmitEntities.MoveNext();

if (entitiesSubmitterSubmitEntities.Current == true)
yield return true;
else
yield return false;
yield return entitiesSubmitterSubmitEntities.Current == true;
}
}
}

public void SubmitEntities()
{
_enumerator.MoveNext();

while (_enumerator.Current == true)
do
{
_enumerator.MoveNext();
} while (_enumerator.Current == true);
}

public override bool paused { get; set; }


+0 -74 com.sebaslab.svelto.ecs/DataStructures/ThreadSafeNativeBagTest.cs

@@ -1,74 +0,0 @@
// using System.Threading.Tasks;
// using NUnit.Framework;
// using Svelto.Common;
// using Svelto.ECS.DataStructures;
//
// namespace Svelto.ECS.Tests.Common.DataStructures
// {
// [TestFixture]
// public class ThreadSafeNativeBagTest
// {
// [Test]
// public void TestByteReallocWorks()
// {
// var threadNativeBag = new ThreadSafeNativeBag(Allocator.Persistent);
//
// Parallel.Invoke(() =>
// {
// for (int i = 0; i < 100; i++)
// {
// threadNativeBag.Enqueue((int)1);
// }
// }
// , // close first Action
// () =>
// {
// for (int i = 0; i < 100; i++)
// {
// threadNativeBag.Enqueue((int)2);
// }
// }
// , //close second Action
//
// () =>
// {
// for (int i = 0; i < 100; i++)
// {
// threadNativeBag.Enqueue(3);
// }
// } //close third Action
// ); //close parallel.invoke
//
// // for (int i = 0; i < 100; i++)
// // {
// // threadNativeBag.Enqueue(1);
// // }
//
// int oneCount = 0, twoCount = 0, threeCount = 0;
//
// while (threadNativeBag.count > 0)
// {
// var value = threadNativeBag.Dequeue<int>();
//
// switch (value)
// {
// case 1:
// oneCount++;
// break;
// case 2:
// twoCount++;
// break;
// case 3:
// threeCount++;
// break;
// }
// }
//
// Assert.That(oneCount, Is.EqualTo(100));
// Assert.That(twoCount, Is.EqualTo(100));
// Assert.That(threeCount, Is.EqualTo(100));
//
// threadNativeBag.Dispose();
// }
// }
// }

+1 -0 com.sebaslab.svelto.ecs/DataStructures/TypeSafeDictionary.cs

@@ -2,6 +2,7 @@
using System.Runtime.CompilerServices;
using Svelto.Common;
using Svelto.DataStructures;
using Svelto.DataStructures.Native;
using Svelto.ECS.Hybrid;

namespace Svelto.ECS.Internal


+9 -5 com.sebaslab.svelto.ecs/DataStructures/Unmanaged/AtomicNativeBags.cs

@@ -34,7 +34,7 @@ namespace Svelto.ECS.DataStructures
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
public ref NativeBag GetBuffer(int index)
public readonly ref NativeBag GetBuffer(int index)
{
#if DEBUG
if (_data == null)
@@ -72,12 +72,16 @@ namespace Svelto.ECS.DataStructures
}
}
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST
[global::Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction]
#endif
NativeBag* _data;
readonly Allocator _allocator;
readonly uint _threadsCount;
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST
#if UNITY_BURST
[Unity.Burst.NoAlias]
#endif
[Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction]
#endif
NativeBag* _data;
}
}
#endif

+34 -27 com.sebaslab.svelto.ecs/DataStructures/Unmanaged/NativeBag.cs

@@ -15,12 +15,12 @@ namespace Svelto.ECS.DataStructures
{
/// <summary>
/// Burst friendly RingBuffer on steroid:
/// it can: Enqueue/Dequeue, it wraps if there is enough space after dequeuing
/// it can: Enqueue/Dequeue, it wraps around if there is enough space after dequeuing
/// It resizes if there isn't enough space left.
/// It's a "bag", you can queue and dequeue any T. Just be sure that you dequeue what you queue! No check on type
/// It's a "bag", you can queue and dequeue any type and mix them. Just be sure that you dequeue what you queue! No check on type
/// is done.
/// You can reserve a position in the queue to update it later.
/// The datastructure is a struct and it's "copyable"
/// The datastructure is a struct and it's "copiable"
/// I eventually decided to call it NativeBag and not NativeBag because it can also be used as
/// a preallocated memory pool where any kind of T can be stored as long as T is unmanaged
/// </summary>
@@ -34,11 +34,11 @@ namespace Svelto.ECS.DataStructures
unsafe
{
BasicTests();
#if ENABLE_THREAD_SAFE_CHECKS
#if ENABLE_THREAD_SAFE_CHECKS
try
{
#endif
return _queue->size;
return _queue->size;
#if ENABLE_THREAD_SAFE_CHECKS
}
finally
@@ -62,7 +62,7 @@ namespace Svelto.ECS.DataStructures
try
{
#endif
return _queue->capacity;
return _queue->capacity;
#if ENABLE_THREAD_SAFE_CHECKS
}
finally
@@ -84,9 +84,9 @@ namespace Svelto.ECS.DataStructures
//MemoryUtilities.MemClear((IntPtr) listData, (uint) sizeOf);
listData->allocator = allocator;
_queue = listData;
#if ENABLE_THREAD_SAFE_CHECKS
_threadSentinel = 0;
#endif
#if ENABLE_THREAD_SAFE_CHECKS
_threadSentinel = 0;
#endif
}
}

@@ -100,8 +100,8 @@ namespace Svelto.ECS.DataStructures
try
{
#endif
if (_queue == null || _queue->ptr == null)
return true;
if (_queue == null || _queue->ptr == null)
return true;
#if ENABLE_THREAD_SAFE_CHECKS
}
finally
@@ -127,7 +127,7 @@ namespace Svelto.ECS.DataStructures

try
{
#endif
#endif
_queue->Dispose();
MemoryUtilities.Free((IntPtr) _queue, _queue->allocator);
_queue = null;
@@ -137,7 +137,7 @@ namespace Svelto.ECS.DataStructures
{
Volatile.Write(ref _threadSentinel, 0);
}
#endif
#endif
}
}

@@ -149,16 +149,17 @@ namespace Svelto.ECS.DataStructures
BasicTests();

var sizeOf = MemoryUtilities.SizeOf<T>();
if (_queue->space - sizeOf < 0)
//Todo: NativeBag is very complicated. At the time of writing of this comment I don't remember if the sizeof really needs to be aligned by 4. To check and change this comment
_queue->Realloc((uint) ((_queue->capacity + MemoryUtilities.Align4((uint) sizeOf)) * 2.0f));
if (_queue->availableSpace - sizeOf < 0)
{
_queue->Realloc((_queue->capacity + (uint)sizeOf) << 1);
}

#if ENABLE_THREAD_SAFE_CHECKS
try
{
#endif

return ref _queue->Reserve<T>(out index);
return ref _queue->Reserve<T>(out index);
#if ENABLE_THREAD_SAFE_CHECKS
}
finally
@@ -181,11 +182,14 @@ namespace Svelto.ECS.DataStructures
{
#endif
var sizeOf = MemoryUtilities.SizeOf<T>();
if (_queue->space - sizeOf < 0)
//Todo: NativeBag is very complicated. At the time of writing of this comment I don't remember if the sizeof really needs to be aligned by 4. To check and change this comment
_queue->Realloc((uint) ((_queue->capacity + MemoryUtilities.Align4((uint) sizeOf)) * 2.0f));
if (_queue->availableSpace - sizeOf < 0)
{
var capacityInBytes = (_queue->capacity + (uint)sizeOf);

_queue->Realloc(capacityInBytes << 1);
}

_queue->Write(item);
_queue->Enqueue(item);
#if ENABLE_THREAD_SAFE_CHECKS
}
finally
@@ -226,7 +230,7 @@ namespace Svelto.ECS.DataStructures
try
{
#endif
return _queue->Read<T>();
return _queue->Dequeue<T>();
#if ENABLE_THREAD_SAFE_CHECKS
}
finally
@@ -237,16 +241,16 @@ namespace Svelto.ECS.DataStructures
}
}

internal ref T AccessReserved<T>(UnsafeArrayIndex reserverIndex) where T : struct
public ref T AccessReserved<T>(UnsafeArrayIndex reservedIndex) where T : struct
{
unsafe
{
BasicTests();
#if ENABLE_THREAD_SAFE_CHECKS
#if ENABLE_THREAD_SAFE_CHECKS
try
{
#endif
return ref _queue->AccessReserved<T>(reserverIndex);
return ref _queue->AccessReserved<T>(reservedIndex);
#if ENABLE_THREAD_SAFE_CHECKS
}
finally
@@ -267,14 +271,17 @@ namespace Svelto.ECS.DataStructures
if (Interlocked.CompareExchange(ref _threadSentinel, 1, 0) != 0)
throw new Exception("NativeBag is not thread safe, reading and writing operations can happen"
+ "on different threads, but not simultaneously");
#endif
#endif
}

#if ENABLE_THREAD_SAFE_CHECKS
int _threadSentinel;
#endif
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST
[global::Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction]
#if UNITY_BURST
[Unity.Burst.NoAlias]
#endif
[Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction]
#endif
unsafe UnsafeBlob* _queue;
}
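
Since the NativeBag surface touched above (Enqueue, Dequeue, AccessReserved) is what the rest of this commit builds on, here is a minimal usage sketch. It is not part of the commit; the "dequeue exactly what you enqueue, no type check" contract comes from the docstring in this diff, and the allocator-taking constructor is assumed from how AtomicNativeBags allocates its bags:

// Hedged sketch, not part of the commit.
var bag = new NativeBag(Allocator.Persistent);

bag.Enqueue(42u);               // a uint
bag.Enqueue(3.14f);             // a float -- any unmanaged struct can share the same bag

uint  a = bag.Dequeue<uint>();  // no type check is done: dequeue in the same order,
float b = bag.Dequeue<float>(); // with the same types, as the enqueues

bag.Dispose();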

+5 -5 com.sebaslab.svelto.ecs/DataStructures/Unmanaged/NativeDynamicArray.cs

@@ -377,11 +377,11 @@ namespace Svelto.ECS.DataStructures
}
}
#if UNITY_COLLECTIONS || UNITY_JOBS
#if UNITY_BURST
[global::Unity.Burst.NoAlias]
#endif
[global::Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction]
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST
#if UNITY_BURST
[Unity.Burst.NoAlias]
#endif
[Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction]
#endif
unsafe UnsafeArray* _list;
#if DEBUG && !PROFILE_SVELTO


+0 -194 com.sebaslab.svelto.ecs/DataStructures/Unmanaged/ThreadSafeNativeBag.cs

@@ -1,194 +0,0 @@
#if later
using System;
using System.Runtime.CompilerServices;
using System.Threading;
using Svelto.Common;
using Svelto.Utilities;

namespace Svelto.ECS.DataStructures
{
/// <summary>
/// Burst friendly Ring Buffer on steroid:
/// it can: Enqueue/Dequeue, it wraps if there is enough space after dequeuing
/// It resizes if there isn't enough space left.
/// It's a "bag", you can queue and dequeue any T. Just be sure that you dequeue what you queue! No check on type
/// is done.
/// You can reserve a position in the queue to update it later.
/// The datastructure is a struct and it's "copyable"
/// I eventually decided to call it NativeBag and not NativeBag because it can also be used as
/// a preallocated memory pool where any kind of T can be stored as long as T is unmanaged
/// </summary>
public struct ThreadSafeNativeBag : IDisposable
{
public uint count
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
get
{
unsafe
{
#if DEBUG && !PROFILE_SVELTO
if (_queue == null)
throw new Exception("SimpleNativeArray: null-access");
#endif

return _queue->size;
}
}
}

public uint capacity
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
get
{
unsafe
{
#if DEBUG && !PROFILE_SVELTO
if (_queue == null)
throw new Exception("SimpleNativeArray: null-access");
#endif

return _queue->capacity;
}
}
}

public ThreadSafeNativeBag(Allocator allocator)
{
unsafe
{
var sizeOf = MemoryUtilities.SizeOf<UnsafeBlob>();
var listData = (UnsafeBlob*) MemoryUtilities.Alloc((uint) sizeOf, allocator);

//clear to nullify the pointers
//MemoryUtilities.MemClear((IntPtr) listData, (uint) sizeOf);
listData->allocator = allocator;
_queue = listData;
}

_writingGuard = 0;
}
public ThreadSafeNativeBag(Allocator allocator, uint capacity)
{
unsafe
{
var sizeOf = MemoryUtilities.SizeOf<UnsafeBlob>();
var listData = (UnsafeBlob*) MemoryUtilities.Alloc((uint) sizeOf, allocator);

//clear to nullify the pointers
//MemoryUtilities.MemClear((IntPtr) listData, (uint) sizeOf);
listData->allocator = allocator;
_queue = listData;
_queue->Realloc(capacity);
}

_writingGuard = 0;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
public bool IsEmpty()
{
unsafe
{
if (_queue == null || _queue->ptr == null)
return true;
}

return count == 0;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
public unsafe void Dispose()
{
if (_queue != null)
{
_queue->Dispose();
MemoryUtilities.Free((IntPtr) _queue, _queue->allocator);
_queue = null;
}
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Enqueue<T>(in T item) where T : struct
{
unsafe
{
#if DEBUG && !PROFILE_SVELTO
if (_queue == null)
throw new Exception("SimpleNativeArray: null-access");
#endif
var sizeOf = MemoryUtilities.SizeOf<T>();
var alignedSize = (uint) MemoryUtilities.SizeOfAligned<T>();

Interlocked.MemoryBarrier();
Reset:
var oldCapacity = _queue->capacity;
var spaceleft = oldCapacity - (_queue->_writeIndex - _queue->_readIndex) - sizeOf;

while (spaceleft < 0)
{
//if _writingGuard is not equal to 0, it means that another thread increased the
//value so it's possible the reallocing is already happening OR it means that
//writing are still in progress and we must be sure that are all flushed first
if (Interlocked.CompareExchange(ref _writingGuard, 1, 0) != 0)
{
ThreadUtility.Yield();
goto Reset;
}
var newCapacity = (uint) ((oldCapacity + alignedSize) * 2.0f);
Svelto.Console.Log($"realloc {newCapacity}");
_queue->Realloc(newCapacity);
Volatile.Write(ref _writingGuard, 0);
}
int writeIndex;
//look for the first available slot to write in
writeIndex = _queue->_writeIndex;
if (Interlocked.CompareExchange(ref _queue->_writeIndex, (int) (writeIndex + alignedSize)
, writeIndex) != writeIndex)
{
ThreadUtility.Yield();
goto Reset;
}

Interlocked.Increment(ref _writingGuard);
_queue->Write(item, (uint) writeIndex);
Interlocked.Decrement(ref _writingGuard);
}
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Clear()
{
unsafe
{
#if DEBUG && !PROFILE_SVELTO
if (_queue == null)
throw new Exception("SimpleNativeArray: null-access");
#endif
_queue->Clear();
}
}

public T Dequeue<T>() where T : struct
{
unsafe
{
return _queue->Read<T>();
}
}
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST
[global::Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction]
#endif
unsafe UnsafeBlob* _queue;

int _writingGuard;
}
}
#endif

+5 -2 com.sebaslab.svelto.ecs/DataStructures/Unmanaged/UnsafeArray.cs

@@ -133,8 +133,11 @@ namespace Svelto.ECS.DataStructures
_writeIndex = count;
}
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST
[global::Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction]
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST
#if UNITY_BURST
[Unity.Burst.NoAlias]
#endif
[Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction]
#endif
unsafe byte* _ptr;


+170 -148 com.sebaslab.svelto.ecs/DataStructures/Unmanaged/UnsafeBlob.cs

@@ -4,16 +4,19 @@ using Svelto.Common;

namespace Svelto.ECS.DataStructures
{
//ToDO to complete in future version of svelto, maybe removed
//Necessary to be sure that the user won't pass random values
public struct UnsafeArrayIndex
{
internal uint index;
internal uint capacity;
}

/// <summary>
/// Note: this must work inside burst, so it must follow burst restrictions
/// Note: All the svelto native structures
/// It's a typeless native queue based on a ring-buffer model. This means that the writing head and the
/// reading head always advance independently. IF there is enough space left by dequeued elements,
/// the writing head will wrap around. The writing head cannot ever surpass the reading head.
///
/// </summary>
struct UnsafeBlob : IDisposable
{
@@ -23,139 +26,132 @@ namespace Svelto.ECS.DataStructures
internal uint capacity { get; private set; }

//expressed in bytes
internal uint size => (uint)_writeIndex - _readIndex;
internal uint size
{
get
{
var currentSize = (uint) _writeIndex - _readIndex;
#if DEBUG && !PROFILE_SVELTO
if ((currentSize & (4 - 1)) != 0)
throw new Exception("size is expected to be a multiple of 4");
#endif

return currentSize;
}
}

//expressed in bytes
internal uint space => capacity - size;
internal uint availableSpace => capacity - size;

/// <summary>
/// </summary>
internal Allocator allocator;
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal void Write<T>(in T item) where T : struct
internal void Enqueue<T>(in T item) where T : struct
{
unsafe
{
var structSize = (uint) MemoryUtilities.SizeOf<T>();
var writeHead = _writeIndex % capacity;

//the idea is, considering the wrap, a read pointer must always be behind a writer pointer
#if DEBUG && !PROFILE_SVELTO
if (space - (int) structSize < 0)
var size = _writeIndex - _readIndex;
var spaceAvailable = capacity - size;
if (spaceAvailable - (int) structSize < 0)
throw new Exception("no writing authorized");
#endif
var writeHead = _writeIndex % capacity;

if ((writeHead & (4 - 1)) != 0)
throw new Exception("write head is expected to be a multiple of 4");
#endif
if (writeHead + structSize <= capacity)
{
Unsafe.Write(ptr + writeHead, item);
}
else
//copy with wrap, will start to copy and wrap for the reminder
else //copy with wrap, will start to copy and wrap for the reminder
{
var byteCountToEnd = capacity - writeHead;

var localCopyToAvoidGcIssues = item;
//read and copy the first portion of Item until the end of the stream
Unsafe.CopyBlock(ptr + writeHead, Unsafe.AsPointer(ref localCopyToAvoidGcIssues), (uint)byteCountToEnd);
Unsafe.CopyBlock(ptr + writeHead, Unsafe.AsPointer(ref localCopyToAvoidGcIssues)
, (uint) byteCountToEnd);

var restCount = structSize - byteCountToEnd;

//read and copy the remainder
Unsafe.CopyBlock(ptr, (byte*) Unsafe.AsPointer(ref localCopyToAvoidGcIssues) + byteCountToEnd
, (uint)restCount);
, (uint) restCount);
}

//this is may seems a waste if you are going to use an unsafeBlob just for bytes, but it's necessary for mixed types.
//it's still possible to use WriteUnaligned though
int paddedStructSize = (int) MemoryUtilities.Align4(structSize);
uint paddedStructSize = (uint) (structSize + (int) MemoryUtilities.Pad4(structSize));

_writeIndex += paddedStructSize;
_writeIndex += paddedStructSize; //we want _writeIndex to be always aligned by 4
}
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal void Write<T>(in T item, uint writeIndex) where T : struct
//The index returned is the index of the unwrapped ring. It must be wrapped again before to be used
internal ref T Reserve<T>(out UnsafeArrayIndex index) where T : struct
{
unsafe
{
var structSize = (uint) MemoryUtilities.SizeOf<T>();

//the idea is, considering the wrap, a read pointer must always be behind a writer pointer
var writeHead = writeIndex % capacity;
var structSize = (uint) MemoryUtilities.SizeOf<T>();
var wrappedIndex = _writeIndex % capacity;
#if DEBUG && !PROFILE_SVELTO
var size = _writeIndex - _readIndex;
var spaceAvailable = capacity - size;
if (spaceAvailable - (int) structSize < 0)
throw new Exception("no writing authorized");

if (writeHead + structSize <= capacity)
{
Unsafe.Write(ptr + writeHead, item);
}
else //copy with wrap, will start to copy and wrap for the reminder
{
var byteCountToEnd = capacity - writeHead;
if ((wrappedIndex & (4 - 1)) != 0)
throw new Exception("write head is expected to be a multiple of 4");
#endif
ref var buffer = ref Unsafe.AsRef<T>(ptr + wrappedIndex);

var localCopyToAvoidGcIssues = item;
//read and copy the first portion of Item until the end of the stream
Unsafe.CopyBlock(ptr + writeHead, Unsafe.AsPointer(ref localCopyToAvoidGcIssues), byteCountToEnd);
index.index = _writeIndex;

var restCount = structSize - byteCountToEnd;
_writeIndex += structSize + MemoryUtilities.Pad4(structSize);

//read and copy the remainder
Unsafe.CopyBlock(ptr, (byte*) Unsafe.AsPointer(ref localCopyToAvoidGcIssues) + byteCountToEnd
, restCount);
}
return ref buffer;
}
}

// [MethodImpl(MethodImplOptions.AggressiveInlining)]
// //ToDo: remove this and create an UnsafeBlobUnaligned, used on NativeRingBuffer where T cannot change
// internal void WriteUnaligned<T>(in T item) where T : struct
// {
// unsafe
// {
// var structSize = (uint) MemoryUtilities.SizeOf<T>();
//
// //the idea is, considering the wrap, a read pointer must always be behind a writer pointer
// #if DEBUG && !PROFILE_SVELTO
// if (space - (int) structSize < 0)
// throw new Exception("no writing authorized");
// #endif
// var pointer = _writeIndex % capacity;
//
// if (pointer + structSize <= capacity)
// {
// Unsafe.Write(ptr + pointer, item);
// }
// else
// {
// var byteCount = capacity - pointer;
//
// var localCopyToAvoidGCIssues = item;
//
// Unsafe.CopyBlockUnaligned(ptr + pointer, Unsafe.AsPointer(ref localCopyToAvoidGCIssues), byteCount);
//
// var restCount = structSize - byteCount;
// Unsafe.CopyBlockUnaligned(ptr, (byte*) Unsafe.AsPointer(ref localCopyToAvoidGCIssues) + byteCount
// , restCount);
// }
//
// _writeIndex += structSize;
// }
// }
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal ref T AccessReserved<T>(UnsafeArrayIndex index) where T : struct
{
unsafe
{
var wrappedIndex = index.index % capacity;
#if DEBUG && !PROFILE_SVELTO
if ((index.index & 3) != 0)
throw new Exception($"invalid index detected");
#endif
return ref Unsafe.AsRef<T>(ptr + wrappedIndex);
}
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal T Read<T>() where T : struct
internal T Dequeue<T>() where T : struct
{
unsafe
{
var structSize = (uint) MemoryUtilities.SizeOf<T>();
var readHead = _readIndex % capacity;

#if DEBUG && !PROFILE_SVELTO
var size = _writeIndex - _readIndex;
if (size < structSize) //are there enough bytes to read?
throw new Exception("dequeuing empty queue or unexpected type dequeued");
if (_readIndex > _writeIndex)
throw new Exception("unexpected read");
if ((readHead & (4 - 1)) != 0)
throw new Exception("read head is expected to be a multiple of 4");
#endif
var head = _readIndex % capacity;
var paddedStructSize = MemoryUtilities.Align4(structSize);
var paddedStructSize = structSize + MemoryUtilities.Pad4(structSize);
_readIndex += paddedStructSize;

if (_readIndex == _writeIndex)
@@ -167,12 +163,14 @@ namespace Svelto.ECS.DataStructures
_readIndex = 0;
}

if (head + paddedStructSize <= capacity)
return Unsafe.Read<T>(ptr + head);
if (readHead + paddedStructSize <= capacity)
return Unsafe.Read<T>(ptr + readHead);

//handle the case the structure wraps around so it must be reconstructed from the part at the
//end of the stream and the part starting from the begin.
T item = default;
var byteCountToEnd = capacity - head;
Unsafe.CopyBlock(Unsafe.AsPointer(ref item), ptr + head, byteCountToEnd);
var byteCountToEnd = capacity - readHead;
Unsafe.CopyBlock(Unsafe.AsPointer(ref item), ptr + readHead, byteCountToEnd);

var restCount = structSize - byteCountToEnd;
Unsafe.CopyBlock((byte*) Unsafe.AsPointer(ref item) + byteCountToEnd, ptr, restCount);
@@ -181,86 +179,110 @@ namespace Svelto.ECS.DataStructures
}
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal ref T Reserve<T>(out UnsafeArrayIndex index) where T : struct
{
unsafe
{
var sizeOf = (uint) MemoryUtilities.SizeOf<T>();

ref var buffer = ref Unsafe.AsRef<T>(ptr + _writeIndex);

#if DEBUG && !PROFILE_SVELTO
if (_writeIndex > capacity)
throw new Exception(
$"can't reserve if the writeIndex wrapped around the capacity, writeIndex {_writeIndex} capacity {capacity}");
if (_writeIndex + sizeOf > capacity)
throw new Exception("out of bound reserving");
#endif
index = new UnsafeArrayIndex
{
capacity = capacity
, index = (uint)_writeIndex
};

int align4 = (int) MemoryUtilities.Align4(sizeOf);
_writeIndex += align4;

return ref buffer;
}
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal ref T AccessReserved<T>(UnsafeArrayIndex index) where T : struct
{
unsafe
{
#if DEBUG && !PROFILE_SVELTO
var size = MemoryUtilities.SizeOf<T>();
if (index.index + size > capacity)
throw new Exception($"out of bound access, index {index.index} size {size} capacity {capacity}");
#endif
return ref Unsafe.AsRef<T>(ptr + index.index);
}
}

// /// <summary>
// /// Note when a realloc happens it doesn't just unwrap the data, but also reset the readIndex to 0 so
// /// if readIndex is greater than 0 the index of elements of an unwrapped queue will be shifted back
// /// </summary>
// [MethodImpl(MethodImplOptions.AggressiveInlining)]
// internal void ReallocOld(uint newCapacity)
// {
// unsafe
// {
// //be sure it's multiple of 4. Assuming that what we write is aligned to 4, then we will always have aligned wrapped heads
// //the reading and writing head always increment in multiple of 4
// newCapacity += MemoryUtilities.Pad4(newCapacity);
//
// byte* newPointer = null;
// #if DEBUG && !PROFILE_SVELTO
// if (newCapacity <= capacity)
// throw new Exception("new capacity must be bigger than current");
// #endif
// newPointer = (byte*) MemoryUtilities.Alloc(newCapacity, allocator);
//
// //copy wrapped content if there is any
// var currentSize = _writeIndex - _readIndex;
// if (currentSize > 0)
// {
// var readerHead = _readIndex % capacity;
// var writerHead = _writeIndex % capacity;
//
// //there was no wrapping
// if (readerHead < writerHead)
// {
// //copy to the new pointer, starting from the first byte still to be read, so that readIndex
// //position can be reset
// Unsafe.CopyBlock(newPointer, ptr + readerHead, (uint) currentSize);
// }
// //the goal of the following code is to unwrap the queue into a linear array.
// //the assumption is that if the wrapped writeHead is smaller than the wrapped readHead
// //the writerHead wrapped and restart from the being of the array.
// //so I have to copy the data from readerHead to the end of the array and then
// //from the start of the array to writerHead (which is the same position of readerHead)
// else
// {
// var byteCountToEnd = capacity - readerHead;
//
// Unsafe.CopyBlock(newPointer, ptr + readerHead, byteCountToEnd);
// Unsafe.CopyBlock(newPointer + byteCountToEnd, ptr, (uint) writerHead);
// }
// }
//
// if (ptr != null)
// MemoryUtilities.Free((IntPtr) ptr, allocator);
//
// ptr = newPointer;
// capacity = newCapacity;
//
// _readIndex = 0;
// _writeIndex = currentSize;
// }
// }
/// <summary>
/// This version of Realloc unwrap a queue, but doesn't change the unwrapped index of existing elements.
/// In this way the previously index will remain valid
/// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal void Realloc(uint newCapacity)
{
unsafe
{
//be sure it's multiple of 4. Assuming that what we write is aligned to 4, then we will always have aligned wrapped heads
newCapacity = MemoryUtilities.Align4(newCapacity);
//the reading and writing head always increment in multiple of 4
newCapacity += MemoryUtilities.Pad4(newCapacity);

byte* newPointer = null;
#if DEBUG && !PROFILE_SVELTO
if (newCapacity <= capacity)
throw new Exception("new capacity must be bigger than current");
#endif
if (newCapacity > 0)
newPointer = (byte*) MemoryUtilities.Alloc(newCapacity, allocator);

//copy wrapped content if there is any
var currentSize = _writeIndex - _readIndex;
if (currentSize > 0)
{
newPointer = (byte*) MemoryUtilities.Alloc(newCapacity, allocator);
if (size > 0)
var oldReaderHead = _readIndex % capacity;
var writerHead = _writeIndex % capacity;

//there was no wrapping
if (oldReaderHead < writerHead)
{
var newReaderHead = _readIndex % newCapacity;
Unsafe.CopyBlock(newPointer + newReaderHead, ptr + oldReaderHead, (uint) currentSize);
}
else
{
var readerHead = _readIndex % capacity;
var writerHead = _writeIndex % capacity;

if (readerHead < writerHead)
{
//copy to the new pointer, from th reader position
var currentSize = _writeIndex - _readIndex;
Unsafe.CopyBlock(newPointer, ptr + readerHead, (uint)currentSize);
}
//the assumption is that if size > 0 (so readerPointer and writerPointer are not the same)
//writerHead wrapped and reached readerHead. so I have to copy from readerHead to the end
//and from the start to writerHead (which is the same position of readerHead)
else
{
var byteCountToEnd = capacity - readerHead;

Unsafe.CopyBlock(newPointer, ptr + readerHead, byteCountToEnd);
Unsafe.CopyBlock(newPointer + byteCountToEnd, ptr, (uint)writerHead);
}
var byteCountToEnd = capacity - oldReaderHead;
var newReaderHead = _readIndex % newCapacity;
#if DEBUG && !PROFILE_SVELTO
if (newReaderHead + byteCountToEnd + writerHead > newCapacity)
throw new Exception("something is wrong with my previous assumptions");
#endif
Unsafe.CopyBlock(newPointer + newReaderHead, ptr + oldReaderHead, byteCountToEnd); //from the old reader head to the end of the old array
Unsafe.CopyBlock(newPointer + newReaderHead + byteCountToEnd, ptr + 0, (uint) writerHead); //from the begin of the old array to the old writer head (rember the writerHead wrapped)
}
}

@@ -269,9 +291,9 @@ namespace Svelto.ECS.DataStructures

ptr = newPointer;
capacity = newCapacity;
_readIndex = 0;
_writeIndex = (int)size;
//_readIndex = 0;
_writeIndex = _readIndex + currentSize;
}
}

@@ -296,7 +318,7 @@ namespace Svelto.ECS.DataStructures
_readIndex = 0;
}

internal int _writeIndex;
internal uint _readIndex;
uint _writeIndex;
uint _readIndex;
}
}
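
Two details of the rewritten UnsafeBlob are worth spelling out. Every enqueue advances the unwrapped write head by the struct size rounded up to a multiple of 4, so both heads (and therefore every wrapped offset) stay 4-byte aligned, which is what the new debug checks assert. And the rewritten Realloc deliberately leaves _readIndex untouched, so unwrapped indices previously handed out by Reserve remain valid after a resize. A small arithmetic sketch of the padding rule (Pad4 below is a stand-in for MemoryUtilities.Pad4, assumed to return the bytes needed to round a size up to a multiple of 4):

// Hedged sketch of the head arithmetic, not part of the commit.
static uint Pad4(uint size) => (4u - (size & 3u)) & 3u; // 0 for multiples of 4, else 4 - remainder

uint writeIndex  = 0;                           // unwrapped write head, wrapped with % capacity only at use
uint structSize  = 6;                           // e.g. a 6-byte struct
writeIndex      += structSize + Pad4(structSize); // advances by 8, keeping the head 4-byte aligned
uint wrappedOffset = writeIndex % 64;           // 64 = hypothetical capacity, itself a multiple of 4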

+2 -1 com.sebaslab.svelto.ecs/Extensions/Svelto/EGIDMultiMapper.cs

@@ -2,6 +2,7 @@ using System;
using System.Runtime.CompilerServices;
using Svelto.Common;
using Svelto.DataStructures;
using Svelto.DataStructures.Native;
using Svelto.ECS.Hybrid;

namespace Svelto.ECS
@@ -13,7 +14,7 @@ namespace Svelto.ECS
public EGIDMultiMapper
(SveltoDictionary<ExclusiveGroupStruct,
SveltoDictionary<uint, T, NativeStrategy<
SveltoDictionaryNode<uint>>, NativeStrategy<T>, NativeStrategy<int>>,
SveltoDictionaryNode<uint>>, NativeStrategy<T>, NativeStrategy<int>>,
ManagedStrategy<SveltoDictionaryNode<ExclusiveGroupStruct>>,
ManagedStrategy<SveltoDictionary<uint, T, NativeStrategy<SveltoDictionaryNode<uint>>, NativeStrategy<T>
, NativeStrategy<int>>>, NativeStrategy<int>> dictionary)


+1 -0 com.sebaslab.svelto.ecs/Extensions/Svelto/EntityCollectionExtension.cs

@@ -1,5 +1,6 @@
using System.Runtime.CompilerServices;
using Svelto.DataStructures;
using Svelto.DataStructures.Native;
using Svelto.ECS.Hybrid;

namespace Svelto.ECS


+1 -0 com.sebaslab.svelto.ecs/Extensions/Svelto/EntityNativeDBExtensions.cs

@@ -1,6 +1,7 @@
using System.Runtime.CompilerServices;
using Svelto.Common;
using Svelto.DataStructures;
using Svelto.DataStructures.Native;
using Svelto.ECS.Internal;

//todo: once using native memory for unmanaged struct will be optional, this will need to be moved under the Native namespace


+1 -1 com.sebaslab.svelto.ecs/Extensions/Unity/DOTS/Native/EnginesRoot.NativeOperation.cs

@@ -105,7 +105,7 @@ namespace Svelto.ECS
var reference = buffer.Dequeue<EntityReference>();
var componentCounts = buffer.Dequeue<uint>();

Check.Require(egid.groupID.isInvalid == false, "invalid group detected, are you using new ExclusiveGroupStruct() instead of new ExclusiveGroup()?");
Check.Assert(egid.groupID.isInvalid == false, "invalid group detected, are you using new ExclusiveGroupStruct() instead of new ExclusiveGroup()?");

var componentBuilders = _nativeAddOperations[componentsIndex].components;
#if DEBUG && !PROFILE_SVELTO


+1 -0 com.sebaslab.svelto.ecs/Extensions/Unity/DOTS/Native/NativeEGIDMapper.cs

@@ -3,6 +3,7 @@ using System;
using System.Runtime.CompilerServices;
using Svelto.Common;
using Svelto.DataStructures;
using Svelto.DataStructures.Native;

namespace Svelto.ECS.Native
{


+1 -0 com.sebaslab.svelto.ecs/Extensions/Unity/DOTS/Native/NativeEGIDMultiMapper.cs

@@ -1,6 +1,7 @@
#if UNITY_NATIVE
using System;
using Svelto.DataStructures;
using Svelto.DataStructures.Native;

namespace Svelto.ECS.Native
{


+5 -12 com.sebaslab.svelto.ecs/Extensions/Unity/DOTS/Native/NativeEntityFactory.cs

@@ -16,12 +16,14 @@ namespace Svelto.ECS.Native
(uint eindex, ExclusiveBuildGroup exclusiveBuildGroup, int threadIndex)
{
EntityReference reference = _entityLocator.ClaimReference();

NativeBag unsafeBuffer = _addOperationQueue.GetBuffer(threadIndex + 1);

unsafeBuffer.Enqueue(_index);
unsafeBuffer.Enqueue(_index); //each native ECS native operation is stored in an array, each request to perform a native operation in a queue. _index is the index of the operation in the array that will be dequeued later
unsafeBuffer.Enqueue(new EGID(eindex, exclusiveBuildGroup));
unsafeBuffer.Enqueue(reference);
//NativeEntityInitializer is quite a complex beast. It holds the starting values of the component set by the user. These components must be later dequeued and in order to know how many components
//must be dequeued, a count must be used. The space to hold the count is then reserved in the queue and index will be used access the count later on through NativeEntityInitializer so it can increment it.
unsafeBuffer.ReserveEnqueue<uint>(out var index) = 0;

return new NativeEntityInitializer(unsafeBuffer, index, reference);
@@ -29,16 +31,7 @@ namespace Svelto.ECS.Native

public NativeEntityInitializer BuildEntity(EGID egid, int threadIndex)
{
EntityReference reference = _entityLocator.ClaimReference();

NativeBag unsafeBuffer = _addOperationQueue.GetBuffer(threadIndex + 1);

unsafeBuffer.Enqueue(_index);
unsafeBuffer.Enqueue(new EGID(egid.entityID, egid.groupID));
unsafeBuffer.Enqueue(reference);
unsafeBuffer.ReserveEnqueue<uint>(out var index) = 0;

return new NativeEntityInitializer(unsafeBuffer, index, reference);
return BuildEntity(egid.entityID, egid.groupID, threadIndex);
}

readonly EnginesRoot.LocatorMap _entityLocator;
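
The comment added above describes how each native build request reserves space for a component count that NativeEntityInitializer fills in afterwards. Below is a self-contained sketch of that reserve-then-increment pattern, using only the NativeBag calls visible in this diff; payload types and values are illustrative, the allocator-taking constructor is assumed, and the real code enqueues the operation index, an EGID and an EntityReference ahead of the reserved count:

// Hedged sketch, not part of the commit.
var buffer = new NativeBag(Allocator.Persistent);

buffer.Enqueue(0u);                                  // stand-in for the operation index (_index)
buffer.ReserveEnqueue<uint>(out var countIndex) = 0; // reserve the count slot, zeroed for now

buffer.Enqueue(1.0f);                                // one component payload (illustrative)
buffer.AccessReserved<uint>(countIndex)++;           // the initializer bumps the count per component it sets

// Submission side: dequeue in the exact order the producer wrote.
uint operationIndex = buffer.Dequeue<uint>();
uint componentCount = buffer.Dequeue<uint>();        // read back through the reserved slot
for (uint i = 0; i < componentCount; ++i)
    _ = buffer.Dequeue<float>();                     // consume each payload

buffer.Dispose();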


+1 -0 com.sebaslab.svelto.ecs/Extensions/Unity/DOTS/Native/UnityNativeEntityDBExtensions.cs

@@ -2,6 +2,7 @@
using System.Runtime.CompilerServices;
using Svelto.Common;
using Svelto.DataStructures;
using Svelto.DataStructures.Native;
using Svelto.ECS.Internal;

namespace Svelto.ECS.Native


+2 -2 com.sebaslab.svelto.ecs/package.json

@@ -3,13 +3,13 @@
"category": "Svelto",
"description": "Svelto ECS C# Lightweight Data Oriented Entity Component System Framework",
"dependencies": {
"com.sebaslab.svelto.common": "3.2.2"
"com.sebaslab.svelto.common": "3.2.3"
},
"keywords": [
"svelto"
],
"name": "com.sebaslab.svelto.ecs",
"version": "3.2.4",
"version": "3.2.5",
"type": "library",
"unity": "2019.3"
}

+1 -1 com.sebaslab.svelto.ecs/version.json

@@ -1,3 +1,3 @@
{
"version": "3.2.4"
"version": "3.2.5"
}
