Mirror of Svelto.ECS because we're a fan of it

using System;
using System.Runtime.CompilerServices;
using Svelto.Common;

namespace Svelto.ECS.DataStructures
{
    //Necessary to be sure that the user won't pass random values
    public struct UnsafeArrayIndex
    {
        internal uint index;
    }

    /// <summary>
    /// Note: this must work inside Burst, so it must follow Burst restrictions.
    /// It's a typeless native queue based on a ring-buffer model. This means that the writing head and the
    /// reading head always advance independently. If there is enough space left by dequeued elements,
    /// the writing head will wrap around. Once wrapped, the writing head can never overtake the reading
    /// head, as that would overwrite data that hasn't been read yet.
    /// </summary>
    struct UnsafeBlob : IDisposable
    {
        internal unsafe byte* ptr { get; set; }

        //expressed in bytes
        internal uint capacity { get; private set; }

        //expressed in bytes
        internal uint size
        {
            get
            {
                var currentSize = _writeIndex - _readIndex;
#if DEBUG && !PROFILE_SVELTO
                if ((currentSize & (4 - 1)) != 0)
                    throw new Exception("size is expected to be a multiple of 4");
#endif
                return currentSize;
            }
        }

        //expressed in bytes
        internal uint availableSpace => capacity - size;

        internal Allocator allocator;

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal void Enqueue<T>(in T item) where T : struct //should be unmanaged, but it can't be due to Svelto.ECS constraints
        {
            unsafe
            {
                var structSize = (uint) MemoryUtilities.SizeOf<T>();
                var writeHead  = _writeIndex % capacity;
#if DEBUG && !PROFILE_SVELTO
                var size           = _writeIndex - _readIndex;
                var spaceAvailable = capacity - size;
                if (spaceAvailable - (int) structSize < 0)
                    throw new Exception("no writing authorized");
                if ((writeHead & (4 - 1)) != 0)
                    throw new Exception("write head is expected to be a multiple of 4");
#endif
                if (writeHead + structSize <= capacity)
                {
                    Unsafe.Write(ptr + writeHead, item);
                }
                else //copy with wrap: copy the first part up to the end of the buffer, then wrap for the remainder
                {
                    var byteCountToEnd           = capacity - writeHead;
                    var localCopyToAvoidGcIssues = item;
                    //copy the first portion of the item up to the end of the stream
                    Unsafe.CopyBlock(ptr + writeHead, Unsafe.AsPointer(ref localCopyToAvoidGcIssues)
                                   , (uint) byteCountToEnd);
                    var restCount = structSize - byteCountToEnd;
                    //copy the remainder at the beginning of the stream
                    Unsafe.CopyBlock(ptr, (byte*) Unsafe.AsPointer(ref localCopyToAvoidGcIssues) + byteCountToEnd
                                   , (uint) restCount);
                }

                //this may seem a waste if the UnsafeBlob is used just for bytes, but it's necessary for mixed types.
                //it's still possible to use WriteUnaligned though
                uint paddedStructSize = structSize + MemoryUtilities.Pad4(structSize);
                _writeIndex += paddedStructSize; //we want _writeIndex to always be aligned by 4
            }
        }
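
        //Padding worked example (illustrative): for a 6-byte struct, MemoryUtilities.Pad4(6) is expected
        //to return 2, so Enqueue advances _writeIndex by 8 and the next write head stays 4-byte aligned.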
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        //The returned index is into the unwrapped ring. It must be wrapped again before being used
        internal ref T Reserve<T>(out UnsafeArrayIndex index) where T : struct //should be unmanaged, but it can't be due to Svelto.ECS constraints
        {
            unsafe
            {
                var structSize   = (uint) MemoryUtilities.SizeOf<T>();
                var wrappedIndex = _writeIndex % capacity;
#if DEBUG && !PROFILE_SVELTO
                var size           = _writeIndex - _readIndex;
                var spaceAvailable = capacity - size;
                if (spaceAvailable - (int) structSize < 0)
                    throw new Exception("no writing authorized");
                if ((wrappedIndex & (4 - 1)) != 0)
                    throw new Exception("write head is expected to be a multiple of 4");
#endif
                ref var buffer = ref Unsafe.AsRef<T>(ptr + wrappedIndex);

                index.index = _writeIndex;
                _writeIndex += structSize + MemoryUtilities.Pad4(structSize);

                return ref buffer;
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal ref T AccessReserved<T>(UnsafeArrayIndex index) where T : struct //should be unmanaged, but it can't be due to Svelto.ECS constraints
        {
            unsafe
            {
                var wrappedIndex = index.index % capacity;
#if DEBUG && !PROFILE_SVELTO
                if ((index.index & 3) != 0)
                    throw new Exception("invalid index detected");
#endif
                return ref Unsafe.AsRef<T>(ptr + wrappedIndex);
            }
        }
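
        //Usage sketch for the Reserve/AccessReserved pair (illustrative, not part of this file):
        //  ref var slot = ref blob.Reserve<int>(out var ticket); //reserves 4 padded bytes
        //  slot = 42;                                            //write immediately through the ref...
        //  blob.AccessReserved<int>(ticket) = 43;                //...or later through the unwrapped ticket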
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal T Dequeue<T>() where T : struct //should be unmanaged, but it can't be due to Svelto.ECS constraints
        {
            unsafe
            {
                var structSize = (uint) MemoryUtilities.SizeOf<T>();
                var readHead   = _readIndex % capacity;
#if DEBUG && !PROFILE_SVELTO
                var size = _writeIndex - _readIndex;
                if (size < structSize) //are there enough bytes to read?
                    throw new Exception("dequeuing empty queue or unexpected type dequeued");
                if (_readIndex > _writeIndex)
                    throw new Exception("unexpected read");
                if ((readHead & (4 - 1)) != 0)
                    throw new Exception("read head is expected to be a multiple of 4");
#endif
                var paddedStructSize = structSize + MemoryUtilities.Pad4(structSize);
                _readIndex += paddedStructSize;

                if (_readIndex == _writeIndex)
                {
                    //resetting the indices lets Reserve succeed in more occasions and makes the wrapping
                    //happen less often. If _readIndex reached _writeIndex, there is no data left to read,
                    //so writing can restart from the beginning of the memory
                    _writeIndex = 0;
                    _readIndex  = 0;
                }

                if (readHead + paddedStructSize <= capacity)
                    return Unsafe.Read<T>(ptr + readHead);

                //handle the case where the structure wraps around: it must be reconstructed from the part at the
                //end of the stream and the part starting from the beginning
                T item = default;
                var byteCountToEnd = capacity - readHead;
                Unsafe.CopyBlock(Unsafe.AsPointer(ref item), ptr + readHead, byteCountToEnd);
                var restCount = structSize - byteCountToEnd;
                Unsafe.CopyBlock((byte*) Unsafe.AsPointer(ref item) + byteCountToEnd, ptr, restCount);
                return item;
            }
        }
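
        //Dequeue wrap worked example (illustrative): capacity == 16, readHead == 12, 8-byte struct:
        //byteCountToEnd == 4 bytes come from the tail of the buffer, restCount == 4 from the beginning.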
        /// <summary>
        /// This code unwraps the queue and resizes the array, but doesn't change the unwrapped index of existing elements.
        /// In this way the previously reserved indices will remain valid
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal void Grow<T>() where T : struct //should be unmanaged, but it can't be due to Svelto.ECS constraints
        {
            unsafe
            {
                var  sizeOf      = MemoryUtilities.SizeOf<T>();
                var  oldCapacity = capacity;
                uint newCapacity = (uint) ((oldCapacity + sizeOf) << 1);
                //be sure it's a multiple of 4. Assuming that what we write is aligned to 4, the wrapped heads will
                //always be aligned too: the reading and writing heads always increment in multiples of 4
                newCapacity += MemoryUtilities.Pad4(newCapacity);

                byte* newPointer = (byte*) MemoryUtilities.Alloc(newCapacity, allocator);

                //copy wrapped content if there is any
                var currentSize = _writeIndex - _readIndex;
                if (currentSize > 0)
                {
                    var oldReaderHead = _readIndex % oldCapacity;
                    var oldWriterHead = _writeIndex % oldCapacity;

                    //Remembering that the unwrapped reader can never surpass the unwrapped writer: if the wrapped
                    //reader is behind the wrapped writer, the writer didn't wrap. That is the natural position,
                    //so the data can be copied with a single memcpy
                    if (oldReaderHead < oldWriterHead)
                    {
                        var newReaderHead = _readIndex % newCapacity;
                        Unsafe.CopyBlock(newPointer + newReaderHead, ptr + oldReaderHead, (uint) currentSize);
                    }
                    else
                    {
                        //if the wrapped writer is behind the wrapped reader, the writer wrapped. Therefore
                        //the data must be copied from the current wrapped reader to the end and then from the
                        //beginning of the array to the current wrapped writer
                        var byteCountToEnd = oldCapacity - oldReaderHead; //bytes to copy from the reader to the end
                        var newReaderHead  = _readIndex % newCapacity;
#if DEBUG && !PROFILE_SVELTO
                        if (newReaderHead + byteCountToEnd + oldWriterHead > newCapacity) //basically: the old size must be less than the new capacity
                            throw new Exception("something is wrong with my previous assumptions");
#endif
                        //a gap is left on purpose at the beginning of the new array, if there is any; it will be
                        //used anyway once it's time to wrap
                        Unsafe.CopyBlock(newPointer + newReaderHead, ptr + oldReaderHead, byteCountToEnd); //from the old reader head to the end of the old array
                        Unsafe.CopyBlock(newPointer + newReaderHead + byteCountToEnd, ptr, (uint) oldWriterHead); //from the beginning of the old array to the old writer head (remember the writer head wrapped)
                    }
                }

                if (ptr != null)
                    MemoryUtilities.Free((IntPtr) ptr, allocator);

                ptr      = newPointer;
                capacity = newCapacity;
                //_readIndex is left unchanged on purpose: remember it is the unwrapped index, so previously
                //reserved indices stay valid
                _writeIndex = _readIndex + currentSize;
            }
        }
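
        //Grow worked example (illustrative): oldCapacity == 8, _readIndex == 4, _writeIndex == 12.
        //The writer wrapped, so old bytes [4..8) land at new offset 4 and old bytes [0..4) right after them.
        //A ticket holding unwrapped index 8 still resolves correctly: 8 % newCapacity addresses the moved element.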
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void Dispose()
        {
            unsafe
            {
                if (ptr != null)
                    MemoryUtilities.Free((IntPtr) ptr, allocator);

                ptr         = null;
                _writeIndex = 0;
                capacity    = 0;
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void Clear()
        {
            _writeIndex = 0;
            _readIndex  = 0;
        }

        uint _writeIndex;
        uint _readIndex;
    }
}
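
For orientation, here is a minimal driving sketch (illustrative only, not part of this file: UnsafeBlob and its members are internal, so real callers live inside the library; Allocator.Persistent is an assumed member of Svelto.Common's Allocator enum):

    using Svelto.Common;

    namespace Svelto.ECS.DataStructures
    {
        static class UnsafeBlobExample
        {
            internal static void Run()
            {
                //the allocator must be set before the first Grow allocates the buffer
                var blob = new UnsafeBlob { allocator = Allocator.Persistent };
                blob.Grow<int>(); //with ptr == null and capacity == 0, this performs the initial allocation

                blob.Enqueue(42);                //advances _writeIndex by the padded size of int
                var value = blob.Dequeue<int>(); //returns 42; both indices reset to 0 as the queue is now empty

                blob.Dispose(); //frees the native buffer
            }
        }
    }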