@@ -1,6 +1,10 @@ | |||
# Changelog | |||
All notable changes to this project will be documented in this file. Changes are listed in no particular order of importance. | |||
## [3.2.5] | |||
* Refactored and improved NativeBag and UnsafeBlob. This fixes a previously known crash with Unity IL2CPP | |||
## [3.2.0] | |||
* Improved checks on Svelto rules for the declaration of components and view components. This set of rules is not final yet (ideally one day they should be moved to static analyzers) | |||
@@ -79,7 +79,7 @@ namespace Svelto.ECS | |||
#endif | |||
void ClearChecks() { _multipleOperationOnSameEGIDChecker.FastClear(); } | |||
readonly FasterDictionary<EGID, uint> _multipleOperationOnSameEGIDChecker = new FasterDictionary<EGID, uint>(); | |||
readonly FasterDictionary<ExclusiveGroupStruct, HashSet<uint>> _idChecker = new FasterDictionary<ExclusiveGroupStruct, HashSet<uint>>(); | |||
readonly FasterDictionary<EGID, uint> _multipleOperationOnSameEGIDChecker; | |||
readonly FasterDictionary<ExclusiveGroupStruct, HashSet<uint>> _idChecker; | |||
} | |||
} |
@@ -24,7 +24,9 @@ namespace Svelto.ECS | |||
/// </summary> | |||
public EnginesRoot(EntitiesSubmissionScheduler entitiesComponentScheduler) | |||
{ | |||
_entitiesOperations = new FasterDictionary<ulong, EntitySubmitOperation>(); | |||
_entitiesOperations = new FasterDictionary<ulong, EntitySubmitOperation>(); | |||
_idChecker = new FasterDictionary<ExclusiveGroupStruct, HashSet<uint>>(); | |||
_multipleOperationOnSameEGIDChecker = new FasterDictionary<EGID, uint>(); | |||
#if UNITY_NATIVE //because of the thread count, ATM this is only for unity | |||
_nativeSwapOperationQueue = new DataStructures.AtomicNativeBags(Allocator.Persistent); | |||
_nativeRemoveOperationQueue = new DataStructures.AtomicNativeBags(Allocator.Persistent); | |||
@@ -282,7 +284,6 @@ namespace Svelto.ECS | |||
#if UNITY_NATIVE | |||
enginesRootTarget.FlushNativeOperations(profiler); | |||
#endif | |||
//todo: properly unit test structural changes made as a result of add/remove callbacks | |||
while (enginesRootTarget.HasMadeNewStructuralChangesInThisIteration() && iterations++ < 5) | |||
{ | |||
@@ -89,7 +89,7 @@ namespace Svelto.ECS | |||
//Check if there is an EntityInfo linked to this entity, if so it's a DynamicEntityDescriptor! | |||
if (fromGroup.TryGetValue(new RefWrapperType(ComponentBuilderUtilities.ENTITY_INFO_COMPONENT) | |||
, out var entityInfoDic) | |||
&& (entityInfoDic as ITypeSafeDictionary<EntityInfoComponent>).TryGetValue( | |||
&& ((ITypeSafeDictionary<EntityInfoComponent>) entityInfoDic).TryGetValue( | |||
fromEntityGID.entityID, out var entityInfo)) | |||
SwapOrRemoveEntityComponents(fromEntityGID, toEntityGID, entityInfo.componentsToBuild, fromGroup | |||
, sampler); | |||
@@ -76,7 +76,7 @@ namespace Svelto.ECS | |||
if ((uint) numberOfOperations >= (uint) _maxNumberOfOperationsPerFrame) | |||
{ | |||
using (sample.Yield()) | |||
yield return true; | |||
yield return true; | |||
numberOfOperations = 0; | |||
} | |||
@@ -6,6 +6,7 @@ using System; | |||
using System.Runtime.CompilerServices; | |||
using Svelto.Common; | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
using Svelto.ECS.Internal; | |||
namespace Svelto.ECS | |||
@@ -1,5 +1,6 @@ | |||
using System.Runtime.CompilerServices; | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
using Svelto.ECS.Internal; | |||
namespace Svelto.ECS | |||
@@ -28,12 +28,8 @@ namespace Svelto.ECS.Internal | |||
static FasterDictionary<RefWrapperType, ITypeSafeDictionary> FetchEntityGroup | |||
(ExclusiveGroupStruct groupID, EnginesRoot.DoubleBufferedEntitiesToAdd groupEntityComponentsByType) | |||
{ | |||
if (groupEntityComponentsByType.current.TryGetValue(groupID, out var group) == false) | |||
{ | |||
group = new FasterDictionary<RefWrapperType, ITypeSafeDictionary>(); | |||
groupEntityComponentsByType.current.Add(groupID, group); | |||
} | |||
var group = groupEntityComponentsByType.current.GetOrCreate( | |||
groupID, () => new FasterDictionary<RefWrapperType, ITypeSafeDictionary>()); | |||
//track the number of entities created so far in the group. | |||
groupEntityComponentsByType.IncrementEntityCount(groupID); | |||
@@ -77,15 +73,16 @@ namespace Svelto.ECS.Internal | |||
} | |||
} | |||
static void BuildEntity(EGID entityID, FasterDictionary<RefWrapperType, ITypeSafeDictionary> group | |||
, IComponentBuilder componentBuilder, IEnumerable<object> implementors) | |||
static void BuildEntity | |||
(EGID entityID, FasterDictionary<RefWrapperType, ITypeSafeDictionary> group | |||
, IComponentBuilder componentBuilder, IEnumerable<object> implementors) | |||
{ | |||
var entityComponentType = componentBuilder.GetEntityComponentType(); | |||
var safeDictionary = group.GetOrCreate(new RefWrapperType(entityComponentType) | |||
, (ref IComponentBuilder cb) => cb.CreateDictionary(1) | |||
, ref componentBuilder); | |||
//if the safeDictionary hasn't been created yet, it will be created inside this method. | |||
// if the safeDictionary hasn't been created yet, it will be created inside this method. | |||
componentBuilder.BuildEntityAndAddToList(safeDictionary, entityID, implementors); | |||
} | |||
} |
@@ -1,6 +1,7 @@ | |||
using System.Runtime.CompilerServices; | |||
using Svelto.Common; | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
using Svelto.ECS.DataStructures; | |||
using Svelto.ECS.Reference; | |||
@@ -1,4 +1,5 @@ | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
namespace Svelto.ECS | |||
{ | |||
@@ -1,6 +1,7 @@ | |||
using System.Runtime.CompilerServices; | |||
using Svelto.Common; | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
using Svelto.ECS.DataStructures; | |||
namespace Svelto.ECS | |||
@@ -24,9 +25,9 @@ namespace Svelto.ECS | |||
//from the index, find the entityID | |||
_reverseEIDs = new NativeDynamicArrayCast<uint>(NativeDynamicArray.Alloc<uint>(Allocator.Persistent)); | |||
//from the entityID, find the index | |||
_indexOfEntityInDenseList = new SharedSveltoDictionaryNative<uint, uint>(0); | |||
_exclusiveGroupStruct = exclusiveGroupStruct; | |||
_ID = ID; | |||
_indexOfEntityInDenseList = new SharedSveltoDictionaryNative<uint, uint>(0); | |||
_exclusiveGroupStruct = exclusiveGroupStruct; | |||
_ID = ID; | |||
} | |||
/// <summary> | |||
@@ -1,4 +1,5 @@ | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
namespace Svelto.ECS | |||
{ | |||
@@ -10,7 +10,10 @@ namespace Svelto.ECS.Schedulers | |||
_enumerator = SubmitEntitiesAsync(maxNumberOfOperationsPerFrame); | |||
} | |||
public IEnumerator<bool> SubmitEntitiesAsync() { return _enumerator; } | |||
public IEnumerator<bool> SubmitEntitiesAsync() | |||
{ | |||
return _enumerator; | |||
} | |||
public IEnumerator<bool> SubmitEntitiesAsync(uint maxNumberOfOperations) | |||
{ | |||
@@ -22,23 +25,20 @@ namespace Svelto.ECS.Schedulers | |||
if (paused == false) | |||
{ | |||
var entitiesSubmitterSubmitEntities = entitiesSubmitter.submitEntities; | |||
entitiesSubmitterSubmitEntities.MoveNext(); | |||
if (entitiesSubmitterSubmitEntities.Current == true) | |||
yield return true; | |||
else | |||
yield return false; | |||
yield return entitiesSubmitterSubmitEntities.Current == true; | |||
} | |||
} | |||
} | |||
public void SubmitEntities() | |||
{ | |||
_enumerator.MoveNext(); | |||
while (_enumerator.Current == true) | |||
do | |||
{ | |||
_enumerator.MoveNext(); | |||
} while (_enumerator.Current == true); | |||
} | |||
public override bool paused { get; set; } | |||
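
The scheduler refactor above relies on SubmitEntitiesAsync yielding true whenever the per-frame operation budget is exhausted (see the earlier numberOfOperations hunk), while the blocking SubmitEntities keeps pumping the enumerator until it stops asking for more iterations. A standalone sketch of that pattern, using a placeholder work queue that is an assumption for illustration and not Svelto code:

```csharp
using System.Collections.Generic;

class BatchedSubmitterSketch
{
    // Yields true every time the per-call budget is spent, so an async caller can stop for
    // this frame while a blocking caller keeps pumping until false is reported.
    public IEnumerator<bool> SubmitAsync(Queue<int> pendingOperations, uint maxOperationsPerFrame)
    {
        var numberOfOperations = 0u;

        while (pendingOperations.Count > 0)
        {
            pendingOperations.Dequeue(); // process one queued operation (placeholder work)

            if (++numberOfOperations >= maxOperationsPerFrame)
            {
                yield return true;       // budget spent: hand control back to the caller
                numberOfOperations = 0;
            }
        }

        yield return false;              // nothing left in this batch
    }

    // Blocking variant: keep pumping until the enumerator reports there is nothing left.
    public void Submit(IEnumerator<bool> enumerator)
    {
        do
        {
            enumerator.MoveNext();
        } while (enumerator.Current == true);
    }
}
```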
@@ -1,74 +0,0 @@ | |||
// using System.Threading.Tasks; | |||
// using NUnit.Framework; | |||
// using Svelto.Common; | |||
// using Svelto.ECS.DataStructures; | |||
// | |||
// namespace Svelto.ECS.Tests.Common.DataStructures | |||
// { | |||
// [TestFixture] | |||
// public class ThreadSafeNativeBagTest | |||
// { | |||
// [Test] | |||
// public void TestByteReallocWorks() | |||
// { | |||
// var threadNativeBag = new ThreadSafeNativeBag(Allocator.Persistent); | |||
// | |||
// Parallel.Invoke(() => | |||
// { | |||
// for (int i = 0; i < 100; i++) | |||
// { | |||
// threadNativeBag.Enqueue((int)1); | |||
// } | |||
// } | |||
// , // close first Action | |||
// () => | |||
// { | |||
// for (int i = 0; i < 100; i++) | |||
// { | |||
// threadNativeBag.Enqueue((int)2); | |||
// } | |||
// } | |||
// , //close second Action | |||
// | |||
// () => | |||
// { | |||
// for (int i = 0; i < 100; i++) | |||
// { | |||
// threadNativeBag.Enqueue(3); | |||
// } | |||
// } //close third Action | |||
// ); //close parallel.invoke | |||
// | |||
// // for (int i = 0; i < 100; i++) | |||
// // { | |||
// // threadNativeBag.Enqueue(1); | |||
// // } | |||
// | |||
// int oneCount = 0, twoCount = 0, threeCount = 0; | |||
// | |||
// while (threadNativeBag.count > 0) | |||
// { | |||
// var value = threadNativeBag.Dequeue<int>(); | |||
// | |||
// switch (value) | |||
// { | |||
// case 1: | |||
// oneCount++; | |||
// break; | |||
// case 2: | |||
// twoCount++; | |||
// break; | |||
// case 3: | |||
// threeCount++; | |||
// break; | |||
// } | |||
// } | |||
// | |||
// Assert.That(oneCount, Is.EqualTo(100)); | |||
// Assert.That(twoCount, Is.EqualTo(100)); | |||
// Assert.That(threeCount, Is.EqualTo(100)); | |||
// | |||
// threadNativeBag.Dispose(); | |||
// } | |||
// } | |||
// } |
@@ -1,11 +0,0 @@ | |||
fileFormatVersion: 2 | |||
guid: 6783b8d49c5935fd8f863f6d78e003ef | |||
MonoImporter: | |||
externalObjects: {} | |||
serializedVersion: 2 | |||
defaultReferences: [] | |||
executionOrder: 0 | |||
icon: {instanceID: 0} | |||
userData: | |||
assetBundleName: | |||
assetBundleVariant: |
@@ -2,6 +2,7 @@ | |||
using System.Runtime.CompilerServices; | |||
using Svelto.Common; | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
using Svelto.ECS.Hybrid; | |||
namespace Svelto.ECS.Internal | |||
@@ -34,7 +34,7 @@ namespace Svelto.ECS.DataStructures | |||
} | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
public ref NativeBag GetBuffer(int index) | |||
public readonly ref NativeBag GetBuffer(int index) | |||
{ | |||
#if DEBUG | |||
if (_data == null) | |||
@@ -72,12 +72,16 @@ namespace Svelto.ECS.DataStructures | |||
} | |||
} | |||
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST | |||
[global::Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction] | |||
#endif | |||
NativeBag* _data; | |||
readonly Allocator _allocator; | |||
readonly uint _threadsCount; | |||
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST | |||
#if UNITY_BURST | |||
[Unity.Burst.NoAlias] | |||
#endif | |||
[Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction] | |||
#endif | |||
NativeBag* _data; | |||
} | |||
} | |||
#endif |
@@ -15,12 +15,12 @@ namespace Svelto.ECS.DataStructures | |||
{ | |||
/// <summary> | |||
/// Burst friendly RingBuffer on steroids: | |||
/// it can: Enqueue/Dequeue, it wraps if there is enough space after dequeuing | |||
/// it can: Enqueue/Dequeue, it wraps around if there is enough space after dequeuing | |||
/// It resizes if there isn't enough space left. | |||
/// It's a "bag", you can queue and dequeue any T. Just be sure that you dequeue what you queue! No check on type | |||
/// It's a "bag", you can queue and dequeue any type and mix them. Just be sure that you dequeue what you queue! No check on type | |||
/// is done. | |||
/// You can reserve a position in the queue to update it later. | |||
/// The datastructure is a struct and it's "copyable" | |||
/// The datastructure is a struct and it's "copiable" | |||
/// I eventually decided to call it NativeBag and not NativeQueue because it can also be used as | |||
/// a preallocated memory pool where any kind of T can be stored as long as T is unmanaged | |||
/// </summary> | |||
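
Putting the summary above together with the API visible in the rest of this diff (Enqueue, Dequeue, ReserveEnqueue, AccessReserved, Dispose), a hedged usage sketch could look like the following; the constructor and exact signatures are assumed from the surrounding hunks rather than checked against the full sources:

```csharp
using Svelto.Common;
using Svelto.ECS.DataStructures;

static class NativeBagUsageSketch
{
    static void Example()
    {
        var bag = new NativeBag(Allocator.Persistent);

        bag.Enqueue(42);     // any unmanaged T can be queued...
        bag.Enqueue(1.5f);   // ...and different types can be mixed in the same bag

        bag.ReserveEnqueue<uint>(out var index) = 0; // reserve a slot now, patch it later
        bag.AccessReserved<uint>(index)++;           // update the reserved slot through its index

        var first  = bag.Dequeue<int>();   // dequeue in the same order and with the same types
        var second = bag.Dequeue<float>(); // as enqueued: no type check is performed
        var count  = bag.Dequeue<uint>();  // the reserved slot comes out in queue order too

        bag.Dispose();
    }
}
```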
@@ -34,11 +34,11 @@ namespace Svelto.ECS.DataStructures | |||
unsafe | |||
{ | |||
BasicTests(); | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
try | |||
{ | |||
#endif | |||
return _queue->size; | |||
return _queue->size; | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
} | |||
finally | |||
@@ -62,7 +62,7 @@ namespace Svelto.ECS.DataStructures | |||
try | |||
{ | |||
#endif | |||
return _queue->capacity; | |||
return _queue->capacity; | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
} | |||
finally | |||
@@ -84,9 +84,9 @@ namespace Svelto.ECS.DataStructures | |||
//MemoryUtilities.MemClear((IntPtr) listData, (uint) sizeOf); | |||
listData->allocator = allocator; | |||
_queue = listData; | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
_threadSentinel = 0; | |||
#endif | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
_threadSentinel = 0; | |||
#endif | |||
} | |||
} | |||
@@ -100,8 +100,8 @@ namespace Svelto.ECS.DataStructures | |||
try | |||
{ | |||
#endif | |||
if (_queue == null || _queue->ptr == null) | |||
return true; | |||
if (_queue == null || _queue->ptr == null) | |||
return true; | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
} | |||
finally | |||
@@ -127,7 +127,7 @@ namespace Svelto.ECS.DataStructures | |||
try | |||
{ | |||
#endif | |||
#endif | |||
_queue->Dispose(); | |||
MemoryUtilities.Free((IntPtr) _queue, _queue->allocator); | |||
_queue = null; | |||
@@ -137,7 +137,7 @@ namespace Svelto.ECS.DataStructures | |||
{ | |||
Volatile.Write(ref _threadSentinel, 0); | |||
} | |||
#endif | |||
#endif | |||
} | |||
} | |||
@@ -149,16 +149,17 @@ namespace Svelto.ECS.DataStructures | |||
BasicTests(); | |||
var sizeOf = MemoryUtilities.SizeOf<T>(); | |||
if (_queue->space - sizeOf < 0) | |||
//Todo: NativeBag is very complicated. At the time of writing of this comment I don't remember if the sizeof really needs to be aligned by 4. To check and change this comment | |||
_queue->Realloc((uint) ((_queue->capacity + MemoryUtilities.Align4((uint) sizeOf)) * 2.0f)); | |||
if (_queue->availableSpace - sizeOf < 0) | |||
{ | |||
_queue->Realloc((_queue->capacity + (uint)sizeOf) << 1); | |||
} | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
try | |||
{ | |||
#endif | |||
return ref _queue->Reserve<T>(out index); | |||
return ref _queue->Reserve<T>(out index); | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
} | |||
finally | |||
@@ -181,11 +182,14 @@ namespace Svelto.ECS.DataStructures | |||
{ | |||
#endif | |||
var sizeOf = MemoryUtilities.SizeOf<T>(); | |||
if (_queue->space - sizeOf < 0) | |||
//Todo: NativeBag is very complicated. At the time of writing of this comment I don't remember if the sizeof really needs to be aligned by 4. To check and change this comment | |||
_queue->Realloc((uint) ((_queue->capacity + MemoryUtilities.Align4((uint) sizeOf)) * 2.0f)); | |||
if (_queue->availableSpace - sizeOf < 0) | |||
{ | |||
var capacityInBytes = (_queue->capacity + (uint)sizeOf); | |||
_queue->Realloc(capacityInBytes << 1); | |||
} | |||
_queue->Write(item); | |||
_queue->Enqueue(item); | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
} | |||
finally | |||
@@ -226,7 +230,7 @@ namespace Svelto.ECS.DataStructures | |||
try | |||
{ | |||
#endif | |||
return _queue->Read<T>(); | |||
return _queue->Dequeue<T>(); | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
} | |||
finally | |||
@@ -237,16 +241,16 @@ namespace Svelto.ECS.DataStructures | |||
} | |||
} | |||
internal ref T AccessReserved<T>(UnsafeArrayIndex reserverIndex) where T : struct | |||
public ref T AccessReserved<T>(UnsafeArrayIndex reservedIndex) where T : struct | |||
{ | |||
unsafe | |||
{ | |||
BasicTests(); | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
try | |||
{ | |||
#endif | |||
return ref _queue->AccessReserved<T>(reserverIndex); | |||
return ref _queue->AccessReserved<T>(reservedIndex); | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
} | |||
finally | |||
@@ -267,14 +271,17 @@ namespace Svelto.ECS.DataStructures | |||
if (Interlocked.CompareExchange(ref _threadSentinel, 1, 0) != 0) | |||
throw new Exception("NativeBag is not thread safe, reading and writing operations can happen" | |||
+ "on different threads, but not simultaneously"); | |||
#endif | |||
#endif | |||
} | |||
#if ENABLE_THREAD_SAFE_CHECKS | |||
int _threadSentinel; | |||
#endif | |||
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST | |||
[global::Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction] | |||
#if UNITY_BURST | |||
[Unity.Burst.NoAlias] | |||
#endif | |||
[Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction] | |||
#endif | |||
unsafe UnsafeBlob* _queue; | |||
} |
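
The ENABLE_THREAD_SAFE_CHECKS blocks above implement a cheap guard rather than real synchronization: operations may come from different threads, but never at the same time. A minimal sketch of that sentinel pattern, independent of the NativeBag internals:

```csharp
using System;
using System.Threading;

class SentinelGuardSketch
{
    int _threadSentinel;

    // The first caller flips the sentinel from 0 to 1 and may proceed; any overlapping
    // caller observes a non-zero value and throws instead of silently corrupting state.
    public void Enter()
    {
        if (Interlocked.CompareExchange(ref _threadSentinel, 1, 0) != 0)
            throw new Exception("concurrent access detected: operations must not overlap");
    }

    public void Exit() => Volatile.Write(ref _threadSentinel, 0);
}
```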
@@ -377,11 +377,11 @@ namespace Svelto.ECS.DataStructures | |||
} | |||
} | |||
#if UNITY_COLLECTIONS || UNITY_JOBS | |||
#if UNITY_BURST | |||
[global::Unity.Burst.NoAlias] | |||
#endif | |||
[global::Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction] | |||
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST | |||
#if UNITY_BURST | |||
[Unity.Burst.NoAlias] | |||
#endif | |||
[Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction] | |||
#endif | |||
unsafe UnsafeArray* _list; | |||
#if DEBUG && !PROFILE_SVELTO | |||
@@ -1,194 +0,0 @@ | |||
#if later | |||
using System; | |||
using System.Runtime.CompilerServices; | |||
using System.Threading; | |||
using Svelto.Common; | |||
using Svelto.Utilities; | |||
namespace Svelto.ECS.DataStructures | |||
{ | |||
/// <summary> | |||
/// Burst friendly Ring Buffer on steroid: | |||
/// it can: Enqueue/Dequeue, it wraps if there is enough space after dequeuing | |||
/// It resizes if there isn't enough space left. | |||
/// It's a "bag", you can queue and dequeue any T. Just be sure that you dequeue what you queue! No check on type | |||
/// is done. | |||
/// You can reserve a position in the queue to update it later. | |||
/// The datastructure is a struct and it's "copyable" | |||
/// I eventually decided to call it NativeBag and not NativeBag because it can also be used as | |||
/// a preallocated memory pool where any kind of T can be stored as long as T is unmanaged | |||
/// </summary> | |||
public struct ThreadSafeNativeBag : IDisposable | |||
{ | |||
public uint count | |||
{ | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
get | |||
{ | |||
unsafe | |||
{ | |||
#if DEBUG && !PROFILE_SVELTO | |||
if (_queue == null) | |||
throw new Exception("SimpleNativeArray: null-access"); | |||
#endif | |||
return _queue->size; | |||
} | |||
} | |||
} | |||
public uint capacity | |||
{ | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
get | |||
{ | |||
unsafe | |||
{ | |||
#if DEBUG && !PROFILE_SVELTO | |||
if (_queue == null) | |||
throw new Exception("SimpleNativeArray: null-access"); | |||
#endif | |||
return _queue->capacity; | |||
} | |||
} | |||
} | |||
public ThreadSafeNativeBag(Allocator allocator) | |||
{ | |||
unsafe | |||
{ | |||
var sizeOf = MemoryUtilities.SizeOf<UnsafeBlob>(); | |||
var listData = (UnsafeBlob*) MemoryUtilities.Alloc((uint) sizeOf, allocator); | |||
//clear to nullify the pointers | |||
//MemoryUtilities.MemClear((IntPtr) listData, (uint) sizeOf); | |||
listData->allocator = allocator; | |||
_queue = listData; | |||
} | |||
_writingGuard = 0; | |||
} | |||
public ThreadSafeNativeBag(Allocator allocator, uint capacity) | |||
{ | |||
unsafe | |||
{ | |||
var sizeOf = MemoryUtilities.SizeOf<UnsafeBlob>(); | |||
var listData = (UnsafeBlob*) MemoryUtilities.Alloc((uint) sizeOf, allocator); | |||
//clear to nullify the pointers | |||
//MemoryUtilities.MemClear((IntPtr) listData, (uint) sizeOf); | |||
listData->allocator = allocator; | |||
_queue = listData; | |||
_queue->Realloc(capacity); | |||
} | |||
_writingGuard = 0; | |||
} | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
public bool IsEmpty() | |||
{ | |||
unsafe | |||
{ | |||
if (_queue == null || _queue->ptr == null) | |||
return true; | |||
} | |||
return count == 0; | |||
} | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
public unsafe void Dispose() | |||
{ | |||
if (_queue != null) | |||
{ | |||
_queue->Dispose(); | |||
MemoryUtilities.Free((IntPtr) _queue, _queue->allocator); | |||
_queue = null; | |||
} | |||
} | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
public void Enqueue<T>(in T item) where T : struct | |||
{ | |||
unsafe | |||
{ | |||
#if DEBUG && !PROFILE_SVELTO | |||
if (_queue == null) | |||
throw new Exception("SimpleNativeArray: null-access"); | |||
#endif | |||
var sizeOf = MemoryUtilities.SizeOf<T>(); | |||
var alignedSize = (uint) MemoryUtilities.SizeOfAligned<T>(); | |||
Interlocked.MemoryBarrier(); | |||
Reset: | |||
var oldCapacity = _queue->capacity; | |||
var spaceleft = oldCapacity - (_queue->_writeIndex - _queue->_readIndex) - sizeOf; | |||
while (spaceleft < 0) | |||
{ | |||
//if _writingGuard is not equal to 0, it means that another thread increased the | |||
//value so it's possible the reallocing is already happening OR it means that | |||
//writing are still in progress and we must be sure that are all flushed first | |||
if (Interlocked.CompareExchange(ref _writingGuard, 1, 0) != 0) | |||
{ | |||
ThreadUtility.Yield(); | |||
goto Reset; | |||
} | |||
var newCapacity = (uint) ((oldCapacity + alignedSize) * 2.0f); | |||
Svelto.Console.Log($"realloc {newCapacity}"); | |||
_queue->Realloc(newCapacity); | |||
Volatile.Write(ref _writingGuard, 0); | |||
} | |||
int writeIndex; | |||
//look for the first available slot to write in | |||
writeIndex = _queue->_writeIndex; | |||
if (Interlocked.CompareExchange(ref _queue->_writeIndex, (int) (writeIndex + alignedSize) | |||
, writeIndex) != writeIndex) | |||
{ | |||
ThreadUtility.Yield(); | |||
goto Reset; | |||
} | |||
Interlocked.Increment(ref _writingGuard); | |||
_queue->Write(item, (uint) writeIndex); | |||
Interlocked.Decrement(ref _writingGuard); | |||
} | |||
} | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
public void Clear() | |||
{ | |||
unsafe | |||
{ | |||
#if DEBUG && !PROFILE_SVELTO | |||
if (_queue == null) | |||
throw new Exception("SimpleNativeArray: null-access"); | |||
#endif | |||
_queue->Clear(); | |||
} | |||
} | |||
public T Dequeue<T>() where T : struct | |||
{ | |||
unsafe | |||
{ | |||
return _queue->Read<T>(); | |||
} | |||
} | |||
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST | |||
[global::Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction] | |||
#endif | |||
unsafe UnsafeBlob* _queue; | |||
int _writingGuard; | |||
} | |||
} | |||
#endif |
@@ -1,11 +0,0 @@ | |||
fileFormatVersion: 2 | |||
guid: ecdfbc4967aa30eebd81d08e327f9857 | |||
MonoImporter: | |||
externalObjects: {} | |||
serializedVersion: 2 | |||
defaultReferences: [] | |||
executionOrder: 0 | |||
icon: {instanceID: 0} | |||
userData: | |||
assetBundleName: | |||
assetBundleVariant: |
@@ -133,8 +133,11 @@ namespace Svelto.ECS.DataStructures | |||
_writeIndex = count; | |||
} | |||
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST | |||
[global::Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction] | |||
#if UNITY_COLLECTIONS || UNITY_JOBS || UNITY_BURST | |||
#if UNITY_BURST | |||
[Unity.Burst.NoAlias] | |||
#endif | |||
[Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestriction] | |||
#endif | |||
unsafe byte* _ptr; | |||
@@ -4,16 +4,19 @@ using Svelto.Common; | |||
namespace Svelto.ECS.DataStructures | |||
{ | |||
//ToDO to complete in future version of svelto, maybe removed | |||
//Necessary to be sure that the user won't pass random values | |||
public struct UnsafeArrayIndex | |||
{ | |||
internal uint index; | |||
internal uint capacity; | |||
} | |||
/// <summary> | |||
/// Note: this must work inside Burst, so it must follow Burst restrictions, like all the Svelto native structures. | |||
/// It's a typeless native queue based on a ring-buffer model. This means that the writing head and the | |||
/// reading head always advance independently. If there is enough space left by dequeued elements, | |||
/// the writing head will wrap around. The writing head can never overtake the reading head. | |||
/// | |||
/// </summary> | |||
struct UnsafeBlob : IDisposable | |||
{ | |||
@@ -23,139 +26,132 @@ namespace Svelto.ECS.DataStructures | |||
internal uint capacity { get; private set; } | |||
//expressed in bytes | |||
internal uint size => (uint)_writeIndex - _readIndex; | |||
internal uint size | |||
{ | |||
get | |||
{ | |||
var currentSize = (uint) _writeIndex - _readIndex; | |||
#if DEBUG && !PROFILE_SVELTO | |||
if ((currentSize & (4 - 1)) != 0) | |||
throw new Exception("size is expected to be a multiple of 4"); | |||
#endif | |||
return currentSize; | |||
} | |||
} | |||
//expressed in bytes | |||
internal uint space => capacity - size; | |||
internal uint availableSpace => capacity - size; | |||
/// <summary> | |||
/// </summary> | |||
internal Allocator allocator; | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
internal void Write<T>(in T item) where T : struct | |||
internal void Enqueue<T>(in T item) where T : struct | |||
{ | |||
unsafe | |||
{ | |||
var structSize = (uint) MemoryUtilities.SizeOf<T>(); | |||
var writeHead = _writeIndex % capacity; | |||
//the idea is that, considering the wrap, the read pointer must always stay behind the write pointer | |||
#if DEBUG && !PROFILE_SVELTO | |||
if (space - (int) structSize < 0) | |||
var size = _writeIndex - _readIndex; | |||
var spaceAvailable = capacity - size; | |||
if (spaceAvailable - (int) structSize < 0) | |||
throw new Exception("no writing authorized"); | |||
#endif | |||
var writeHead = _writeIndex % capacity; | |||
if ((writeHead & (4 - 1)) != 0) | |||
throw new Exception("write head is expected to be a multiple of 4"); | |||
#endif | |||
if (writeHead + structSize <= capacity) | |||
{ | |||
Unsafe.Write(ptr + writeHead, item); | |||
} | |||
else | |||
//copy with wrap, will start to copy and wrap for the reminder | |||
else //copy with wrap: copy up to the end of the buffer, then wrap for the remainder | |||
{ | |||
var byteCountToEnd = capacity - writeHead; | |||
var localCopyToAvoidGcIssues = item; | |||
//read and copy the first portion of Item until the end of the stream | |||
Unsafe.CopyBlock(ptr + writeHead, Unsafe.AsPointer(ref localCopyToAvoidGcIssues), (uint)byteCountToEnd); | |||
Unsafe.CopyBlock(ptr + writeHead, Unsafe.AsPointer(ref localCopyToAvoidGcIssues) | |||
, (uint) byteCountToEnd); | |||
var restCount = structSize - byteCountToEnd; | |||
//read and copy the remainder | |||
Unsafe.CopyBlock(ptr, (byte*) Unsafe.AsPointer(ref localCopyToAvoidGcIssues) + byteCountToEnd | |||
, (uint)restCount); | |||
, (uint) restCount); | |||
} | |||
//this may seem a waste if you are going to use an UnsafeBlob just for bytes, but it's necessary for mixed types. | |||
//it's still possible to use WriteUnaligned though | |||
int paddedStructSize = (int) MemoryUtilities.Align4(structSize); | |||
uint paddedStructSize = (uint) (structSize + (int) MemoryUtilities.Pad4(structSize)); | |||
_writeIndex += paddedStructSize; | |||
_writeIndex += paddedStructSize; //we want _writeIndex to be always aligned by 4 | |||
} | |||
} | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
internal void Write<T>(in T item, uint writeIndex) where T : struct | |||
//The index returned is the index of the unwrapped ring. It must be wrapped again before it is used | |||
internal ref T Reserve<T>(out UnsafeArrayIndex index) where T : struct | |||
{ | |||
unsafe | |||
{ | |||
var structSize = (uint) MemoryUtilities.SizeOf<T>(); | |||
//the idea is, considering the wrap, a read pointer must always be behind a writer pointer | |||
var writeHead = writeIndex % capacity; | |||
var structSize = (uint) MemoryUtilities.SizeOf<T>(); | |||
var wrappedIndex = _writeIndex % capacity; | |||
#if DEBUG && !PROFILE_SVELTO | |||
var size = _writeIndex - _readIndex; | |||
var spaceAvailable = capacity - size; | |||
if (spaceAvailable - (int) structSize < 0) | |||
throw new Exception("no writing authorized"); | |||
if (writeHead + structSize <= capacity) | |||
{ | |||
Unsafe.Write(ptr + writeHead, item); | |||
} | |||
else //copy with wrap, will start to copy and wrap for the reminder | |||
{ | |||
var byteCountToEnd = capacity - writeHead; | |||
if ((wrappedIndex & (4 - 1)) != 0) | |||
throw new Exception("write head is expected to be a multiple of 4"); | |||
#endif | |||
ref var buffer = ref Unsafe.AsRef<T>(ptr + wrappedIndex); | |||
var localCopyToAvoidGcIssues = item; | |||
//read and copy the first portion of Item until the end of the stream | |||
Unsafe.CopyBlock(ptr + writeHead, Unsafe.AsPointer(ref localCopyToAvoidGcIssues), byteCountToEnd); | |||
index.index = _writeIndex; | |||
var restCount = structSize - byteCountToEnd; | |||
_writeIndex += structSize + MemoryUtilities.Pad4(structSize); | |||
//read and copy the remainder | |||
Unsafe.CopyBlock(ptr, (byte*) Unsafe.AsPointer(ref localCopyToAvoidGcIssues) + byteCountToEnd | |||
, restCount); | |||
} | |||
return ref buffer; | |||
} | |||
} | |||
// [MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
// //ToDo: remove this and create an UnsafeBlobUnaligned, used on NativeRingBuffer where T cannot change | |||
// internal void WriteUnaligned<T>(in T item) where T : struct | |||
// { | |||
// unsafe | |||
// { | |||
// var structSize = (uint) MemoryUtilities.SizeOf<T>(); | |||
// | |||
// //the idea is, considering the wrap, a read pointer must always be behind a writer pointer | |||
// #if DEBUG && !PROFILE_SVELTO | |||
// if (space - (int) structSize < 0) | |||
// throw new Exception("no writing authorized"); | |||
// #endif | |||
// var pointer = _writeIndex % capacity; | |||
// | |||
// if (pointer + structSize <= capacity) | |||
// { | |||
// Unsafe.Write(ptr + pointer, item); | |||
// } | |||
// else | |||
// { | |||
// var byteCount = capacity - pointer; | |||
// | |||
// var localCopyToAvoidGCIssues = item; | |||
// | |||
// Unsafe.CopyBlockUnaligned(ptr + pointer, Unsafe.AsPointer(ref localCopyToAvoidGCIssues), byteCount); | |||
// | |||
// var restCount = structSize - byteCount; | |||
// Unsafe.CopyBlockUnaligned(ptr, (byte*) Unsafe.AsPointer(ref localCopyToAvoidGCIssues) + byteCount | |||
// , restCount); | |||
// } | |||
// | |||
// _writeIndex += structSize; | |||
// } | |||
// } | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
internal ref T AccessReserved<T>(UnsafeArrayIndex index) where T : struct | |||
{ | |||
unsafe | |||
{ | |||
var wrappedIndex = index.index % capacity; | |||
#if DEBUG && !PROFILE_SVELTO | |||
if ((index.index & 3) != 0) | |||
throw new Exception($"invalid index detected"); | |||
#endif | |||
return ref Unsafe.AsRef<T>(ptr + wrappedIndex); | |||
} | |||
} | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
internal T Read<T>() where T : struct | |||
internal T Dequeue<T>() where T : struct | |||
{ | |||
unsafe | |||
{ | |||
var structSize = (uint) MemoryUtilities.SizeOf<T>(); | |||
var readHead = _readIndex % capacity; | |||
#if DEBUG && !PROFILE_SVELTO | |||
var size = _writeIndex - _readIndex; | |||
if (size < structSize) //are there enough bytes to read? | |||
throw new Exception("dequeuing empty queue or unexpected type dequeued"); | |||
if (_readIndex > _writeIndex) | |||
throw new Exception("unexpected read"); | |||
if ((readHead & (4 - 1)) != 0) | |||
throw new Exception("read head is expected to be a multiple of 4"); | |||
#endif | |||
var head = _readIndex % capacity; | |||
var paddedStructSize = MemoryUtilities.Align4(structSize); | |||
var paddedStructSize = structSize + MemoryUtilities.Pad4(structSize); | |||
_readIndex += paddedStructSize; | |||
if (_readIndex == _writeIndex) | |||
@@ -167,12 +163,14 @@ namespace Svelto.ECS.DataStructures | |||
_readIndex = 0; | |||
} | |||
if (head + paddedStructSize <= capacity) | |||
return Unsafe.Read<T>(ptr + head); | |||
if (readHead + paddedStructSize <= capacity) | |||
return Unsafe.Read<T>(ptr + readHead); | |||
//handle the case where the structure wraps around, so it must be reconstructed from the part at the | |||
//end of the stream and the part starting from the beginning. | |||
T item = default; | |||
var byteCountToEnd = capacity - head; | |||
Unsafe.CopyBlock(Unsafe.AsPointer(ref item), ptr + head, byteCountToEnd); | |||
var byteCountToEnd = capacity - readHead; | |||
Unsafe.CopyBlock(Unsafe.AsPointer(ref item), ptr + readHead, byteCountToEnd); | |||
var restCount = structSize - byteCountToEnd; | |||
Unsafe.CopyBlock((byte*) Unsafe.AsPointer(ref item) + byteCountToEnd, ptr, restCount); | |||
@@ -181,86 +179,110 @@ namespace Svelto.ECS.DataStructures | |||
} | |||
} | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
internal ref T Reserve<T>(out UnsafeArrayIndex index) where T : struct | |||
{ | |||
unsafe | |||
{ | |||
var sizeOf = (uint) MemoryUtilities.SizeOf<T>(); | |||
ref var buffer = ref Unsafe.AsRef<T>(ptr + _writeIndex); | |||
#if DEBUG && !PROFILE_SVELTO | |||
if (_writeIndex > capacity) | |||
throw new Exception( | |||
$"can't reserve if the writeIndex wrapped around the capacity, writeIndex {_writeIndex} capacity {capacity}"); | |||
if (_writeIndex + sizeOf > capacity) | |||
throw new Exception("out of bound reserving"); | |||
#endif | |||
index = new UnsafeArrayIndex | |||
{ | |||
capacity = capacity | |||
, index = (uint)_writeIndex | |||
}; | |||
int align4 = (int) MemoryUtilities.Align4(sizeOf); | |||
_writeIndex += align4; | |||
return ref buffer; | |||
} | |||
} | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
internal ref T AccessReserved<T>(UnsafeArrayIndex index) where T : struct | |||
{ | |||
unsafe | |||
{ | |||
#if DEBUG && !PROFILE_SVELTO | |||
var size = MemoryUtilities.SizeOf<T>(); | |||
if (index.index + size > capacity) | |||
throw new Exception($"out of bound access, index {index.index} size {size} capacity {capacity}"); | |||
#endif | |||
return ref Unsafe.AsRef<T>(ptr + index.index); | |||
} | |||
} | |||
// /// <summary> | |||
// /// Note when a realloc happens it doesn't just unwrap the data, but also reset the readIndex to 0 so | |||
// /// if readIndex is greater than 0 the index of elements of an unwrapped queue will be shifted back | |||
// /// </summary> | |||
// [MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
// internal void ReallocOld(uint newCapacity) | |||
// { | |||
// unsafe | |||
// { | |||
// //be sure it's multiple of 4. Assuming that what we write is aligned to 4, then we will always have aligned wrapped heads | |||
// //the reading and writing head always increment in multiple of 4 | |||
// newCapacity += MemoryUtilities.Pad4(newCapacity); | |||
// | |||
// byte* newPointer = null; | |||
// #if DEBUG && !PROFILE_SVELTO | |||
// if (newCapacity <= capacity) | |||
// throw new Exception("new capacity must be bigger than current"); | |||
// #endif | |||
// newPointer = (byte*) MemoryUtilities.Alloc(newCapacity, allocator); | |||
// | |||
// //copy wrapped content if there is any | |||
// var currentSize = _writeIndex - _readIndex; | |||
// if (currentSize > 0) | |||
// { | |||
// var readerHead = _readIndex % capacity; | |||
// var writerHead = _writeIndex % capacity; | |||
// | |||
// //there was no wrapping | |||
// if (readerHead < writerHead) | |||
// { | |||
// //copy to the new pointer, starting from the first byte still to be read, so that readIndex | |||
// //position can be reset | |||
// Unsafe.CopyBlock(newPointer, ptr + readerHead, (uint) currentSize); | |||
// } | |||
// //the goal of the following code is to unwrap the queue into a linear array. | |||
// //the assumption is that if the wrapped writeHead is smaller than the wrapped readHead | |||
// //the writerHead wrapped and restart from the being of the array. | |||
// //so I have to copy the data from readerHead to the end of the array and then | |||
// //from the start of the array to writerHead (which is the same position of readerHead) | |||
// else | |||
// { | |||
// var byteCountToEnd = capacity - readerHead; | |||
// | |||
// Unsafe.CopyBlock(newPointer, ptr + readerHead, byteCountToEnd); | |||
// Unsafe.CopyBlock(newPointer + byteCountToEnd, ptr, (uint) writerHead); | |||
// } | |||
// } | |||
// | |||
// if (ptr != null) | |||
// MemoryUtilities.Free((IntPtr) ptr, allocator); | |||
// | |||
// ptr = newPointer; | |||
// capacity = newCapacity; | |||
// | |||
// _readIndex = 0; | |||
// _writeIndex = currentSize; | |||
// } | |||
// } | |||
/// <summary> | |||
/// This version of Realloc unwraps the queue, but doesn't change the unwrapped index of existing elements. | |||
/// In this way the previously returned indices remain valid | |||
/// </summary> | |||
[MethodImpl(MethodImplOptions.AggressiveInlining)] | |||
internal void Realloc(uint newCapacity) | |||
{ | |||
unsafe | |||
{ | |||
//be sure it's a multiple of 4. Assuming that what we write is aligned to 4, we will always have aligned wrapped heads | |||
newCapacity = MemoryUtilities.Align4(newCapacity); | |||
//the reading and writing head always increment in multiple of 4 | |||
newCapacity += MemoryUtilities.Pad4(newCapacity); | |||
byte* newPointer = null; | |||
#if DEBUG && !PROFILE_SVELTO | |||
if (newCapacity <= capacity) | |||
throw new Exception("new capacity must be bigger than current"); | |||
#endif | |||
if (newCapacity > 0) | |||
newPointer = (byte*) MemoryUtilities.Alloc(newCapacity, allocator); | |||
//copy wrapped content if there is any | |||
var currentSize = _writeIndex - _readIndex; | |||
if (currentSize > 0) | |||
{ | |||
newPointer = (byte*) MemoryUtilities.Alloc(newCapacity, allocator); | |||
if (size > 0) | |||
var oldReaderHead = _readIndex % capacity; | |||
var writerHead = _writeIndex % capacity; | |||
//there was no wrapping | |||
if (oldReaderHead < writerHead) | |||
{ | |||
var newReaderHead = _readIndex % newCapacity; | |||
Unsafe.CopyBlock(newPointer + newReaderHead, ptr + oldReaderHead, (uint) currentSize); | |||
} | |||
else | |||
{ | |||
var readerHead = _readIndex % capacity; | |||
var writerHead = _writeIndex % capacity; | |||
if (readerHead < writerHead) | |||
{ | |||
//copy to the new pointer, from th reader position | |||
var currentSize = _writeIndex - _readIndex; | |||
Unsafe.CopyBlock(newPointer, ptr + readerHead, (uint)currentSize); | |||
} | |||
//the assumption is that if size > 0 (so readerPointer and writerPointer are not the same) | |||
//writerHead wrapped and reached readerHead. so I have to copy from readerHead to the end | |||
//and from the start to writerHead (which is the same position of readerHead) | |||
else | |||
{ | |||
var byteCountToEnd = capacity - readerHead; | |||
Unsafe.CopyBlock(newPointer, ptr + readerHead, byteCountToEnd); | |||
Unsafe.CopyBlock(newPointer + byteCountToEnd, ptr, (uint)writerHead); | |||
} | |||
var byteCountToEnd = capacity - oldReaderHead; | |||
var newReaderHead = _readIndex % newCapacity; | |||
#if DEBUG && !PROFILE_SVELTO | |||
if (newReaderHead + byteCountToEnd + writerHead > newCapacity) | |||
throw new Exception("something is wrong with my previous assumptions"); | |||
#endif | |||
Unsafe.CopyBlock(newPointer + newReaderHead, ptr + oldReaderHead, byteCountToEnd); //from the old reader head to the end of the old array | |||
Unsafe.CopyBlock(newPointer + newReaderHead + byteCountToEnd, ptr + 0, (uint) writerHead); //from the beginning of the old array to the old writer head (remember the writerHead wrapped) | |||
} | |||
} | |||
@@ -269,9 +291,9 @@ namespace Svelto.ECS.DataStructures | |||
ptr = newPointer; | |||
capacity = newCapacity; | |||
_readIndex = 0; | |||
_writeIndex = (int)size; | |||
//_readIndex = 0; | |||
_writeIndex = _readIndex + currentSize; | |||
} | |||
} | |||
@@ -296,7 +318,7 @@ namespace Svelto.ECS.DataStructures | |||
_readIndex = 0; | |||
} | |||
internal int _writeIndex; | |||
internal uint _readIndex; | |||
uint _writeIndex; | |||
uint _readIndex; | |||
} | |||
} |
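
Several of the new debug checks above assert that sizes and heads stay multiples of 4, which only holds if Pad4/Align4 behave as described in the comments. Assumed semantics of those MemoryUtilities helpers, shown as a sketch of the arithmetic rather than the actual implementation:

```csharp
static class AlignmentSketch
{
    // Align4 rounds a byte count up to the next multiple of 4;
    // Pad4 returns how many padding bytes are needed to reach that multiple.
    public static uint Align4(uint value) => (value + 3u) & ~3u;
    public static uint Pad4(uint value)   => (4u - (value & 3u)) & 3u;

    static void Example()
    {
        uint structSize = 6;
        uint padded = structSize + Pad4(structSize); // 6 + 2 = 8, same result as Align4(6)
        System.Console.WriteLine($"padded: {padded}, aligned: {Align4(structSize)}");
    }
}
```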
@@ -2,6 +2,7 @@ using System; | |||
using System.Runtime.CompilerServices; | |||
using Svelto.Common; | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
using Svelto.ECS.Hybrid; | |||
namespace Svelto.ECS | |||
@@ -13,7 +14,7 @@ namespace Svelto.ECS | |||
public EGIDMultiMapper | |||
(SveltoDictionary<ExclusiveGroupStruct, | |||
SveltoDictionary<uint, T, NativeStrategy< | |||
SveltoDictionaryNode<uint>>, NativeStrategy<T>, NativeStrategy<int>>, | |||
SveltoDictionaryNode<uint>>, NativeStrategy<T>, NativeStrategy<int>>, | |||
ManagedStrategy<SveltoDictionaryNode<ExclusiveGroupStruct>>, | |||
ManagedStrategy<SveltoDictionary<uint, T, NativeStrategy<SveltoDictionaryNode<uint>>, NativeStrategy<T> | |||
, NativeStrategy<int>>>, NativeStrategy<int>> dictionary) | |||
@@ -1,5 +1,6 @@ | |||
using System.Runtime.CompilerServices; | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
using Svelto.ECS.Hybrid; | |||
namespace Svelto.ECS | |||
@@ -1,6 +1,7 @@ | |||
using System.Runtime.CompilerServices; | |||
using Svelto.Common; | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
using Svelto.ECS.Internal; | |||
//todo: once using native memory for unmanaged structs becomes optional, this will need to be moved under the Native namespace | |||
@@ -105,7 +105,7 @@ namespace Svelto.ECS | |||
var reference = buffer.Dequeue<EntityReference>(); | |||
var componentCounts = buffer.Dequeue<uint>(); | |||
Check.Require(egid.groupID.isInvalid == false, "invalid group detected, are you using new ExclusiveGroupStruct() instead of new ExclusiveGroup()?"); | |||
Check.Assert(egid.groupID.isInvalid == false, "invalid group detected, are you using new ExclusiveGroupStruct() instead of new ExclusiveGroup()?"); | |||
var componentBuilders = _nativeAddOperations[componentsIndex].components; | |||
#if DEBUG && !PROFILE_SVELTO | |||
@@ -3,6 +3,7 @@ using System; | |||
using System.Runtime.CompilerServices; | |||
using Svelto.Common; | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
namespace Svelto.ECS.Native | |||
{ | |||
@@ -1,6 +1,7 @@ | |||
#if UNITY_NATIVE | |||
using System; | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
namespace Svelto.ECS.Native | |||
{ | |||
@@ -16,12 +16,14 @@ namespace Svelto.ECS.Native | |||
(uint eindex, ExclusiveBuildGroup exclusiveBuildGroup, int threadIndex) | |||
{ | |||
EntityReference reference = _entityLocator.ClaimReference(); | |||
NativeBag unsafeBuffer = _addOperationQueue.GetBuffer(threadIndex + 1); | |||
unsafeBuffer.Enqueue(_index); | |||
unsafeBuffer.Enqueue(_index); //each ECS native operation is stored in an array, and each request to perform one is stored in a queue. _index is the index of the operation in the array and will be dequeued later | |||
unsafeBuffer.Enqueue(new EGID(eindex, exclusiveBuildGroup)); | |||
unsafeBuffer.Enqueue(reference); | |||
//NativeEntityInitializer is quite a complex beast. It holds the starting values of the components set by the user. These components must be dequeued later, and in order to know how many components | |||
//must be dequeued, a count is needed. The space to hold the count is therefore reserved in the queue, and the index is used to access the count later on through NativeEntityInitializer so it can increment it. | |||
unsafeBuffer.ReserveEnqueue<uint>(out var index) = 0; | |||
return new NativeEntityInitializer(unsafeBuffer, index, reference); | |||
@@ -29,16 +31,7 @@ namespace Svelto.ECS.Native | |||
public NativeEntityInitializer BuildEntity(EGID egid, int threadIndex) | |||
{ | |||
EntityReference reference = _entityLocator.ClaimReference(); | |||
NativeBag unsafeBuffer = _addOperationQueue.GetBuffer(threadIndex + 1); | |||
unsafeBuffer.Enqueue(_index); | |||
unsafeBuffer.Enqueue(new EGID(egid.entityID, egid.groupID)); | |||
unsafeBuffer.Enqueue(reference); | |||
unsafeBuffer.ReserveEnqueue<uint>(out var index) = 0; | |||
return new NativeEntityInitializer(unsafeBuffer, index, reference); | |||
return BuildEntity(egid.entityID, egid.groupID, threadIndex); | |||
} | |||
readonly EnginesRoot.LocatorMap _entityLocator; | |||
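
The comments above describe a reserve-then-patch scheme: a count slot is reserved in the queue before the components are known, and the initializer increments it through the reserved index every time a component is enqueued. A hedged sketch of that scheme using a hypothetical helper, not the actual NativeEntityInitializer:

```csharp
using Svelto.ECS.DataStructures;

struct InitializerSketch
{
    NativeBag _buffer;
    UnsafeArrayIndex _countIndex;

    public InitializerSketch(NativeBag buffer, UnsafeArrayIndex countIndex)
    {
        _buffer = buffer;
        _countIndex = countIndex;
    }

    public void Init<T>(in T component) where T : unmanaged
    {
        _buffer.AccessReserved<uint>(_countIndex)++; // one more component to dequeue later
        _buffer.Enqueue(component);                  // the component payload itself
    }
}
```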
@@ -2,6 +2,7 @@ | |||
using System.Runtime.CompilerServices; | |||
using Svelto.Common; | |||
using Svelto.DataStructures; | |||
using Svelto.DataStructures.Native; | |||
using Svelto.ECS.Internal; | |||
namespace Svelto.ECS.Native | |||
@@ -6,6 +6,13 @@ Real ECS framework for c\#. Enables to write encapsulated, decoupled, maintainab | |||
## Why using Svelto.ECS with Unity? | |||
Svelto.ECS wasn't born just from the needs of a large team, but also as a result of years of reasoning about software engineering applied to game development. Svelto.ECS hasn't been written just to develop faster code; it has been designed to help develop better code. Performance gain is just one of the benefits of using Svelto.ECS, as ECS is a great way to write cache-friendly code. Svelto.ECS has been developed with the idea of ECS being a paradigm and not just a pattern, letting the user shift completely away from Object Oriented Programming, with consequent improvements in code design and code maintainability. Svelto.ECS is the result of years of iteration of the ECS paradigm applied to real game development, with the intent to be as foolproof as possible. Svelto.ECS has been designed to be used by medium-size/large teams working on long-term projects where the cost of maintainability is relevant. Svelto.ECS can be educational for all kinds of developers, but small teams must take into consideration the learning curve/cost of ECS in general and Svelto.ECS in particular. | |||
**Svelto.ECS is also fully compatible with DOTS, and Svelto.ECS code can be fully Burst-compiled and jobified.** | |||
Svelto.ECS is compatible with the Unity 2019.3.x cycle as long as it's not used with DOTS ECS. If DOTS ECS integration is necessary, Svelto.ECS will always target the latest stable Unity version that supports DOTS. | |||
## Why using Svelto.ECS without Unity? | |||
The question is just for fun! There are so many C# game engines out there (Stride, MonoGame, FlatRedBall, WaveEngine, UnrealCLR, UniEngine, just to mention some) and Svelto.ECS is compatible with all of them! | |||
## How to clone the repository: | |||
The folders Svelto.ECS and Svelto.Common, where present, are submodules pointing to their respective repositories. If you find them empty, you need to update them through the git submodule command. You can find some instructions here: https://github.com/sebas77/Svelto.ECS.Vanilla.Example/wiki | |||
@@ -3,13 +3,13 @@ | |||
"category": "Svelto", | |||
"description": "Svelto ECS C# Lightweight Data Oriented Entity Component System Framework", | |||
"dependencies": { | |||
"com.sebaslab.svelto.common": "3.2.2" | |||
"com.sebaslab.svelto.common": "3.2.3" | |||
}, | |||
"keywords": [ | |||
"svelto" | |||
], | |||
"name": "com.sebaslab.svelto.ecs", | |||
"version": "3.2.4", | |||
"version": "3.2.5", | |||
"type": "library", | |||
"unity": "2019.3" | |||
} |