Mirror of Svelto.ECS because we're a fan of it
using System;
using System.Runtime.CompilerServices;
using Svelto.Common;

namespace Svelto.ECS.DataStructures
{
    //Necessary to be sure that the user won't pass random values
    public struct UnsafeArrayIndex
    {
        internal uint index;
    }
    /// <summary>
    /// Note: this must work inside burst, so it must follow burst restrictions
    /// Note: All the svelto native structures
    /// It's a typeless native queue based on a ring-buffer model. This means that the writing head and the
    /// reading head always advance independently. If there is enough space left by dequeued elements,
    /// the writing head will wrap around. The writing head can never surpass the reading head.
    /// </summary>
    struct UnsafeBlob : IDisposable
    {
        internal unsafe byte* ptr { get; set; }

        //expressed in bytes
        internal uint capacity { get; private set; }

        //expressed in bytes
        internal uint size
        {
            get
            {
                var currentSize = (uint) _writeIndex - _readIndex;
#if DEBUG && !PROFILE_SVELTO
                if ((currentSize & (4 - 1)) != 0)
                    throw new Exception("size is expected to be a multiple of 4");
#endif
                return currentSize;
            }
        }

        //expressed in bytes
        internal uint availableSpace => capacity - size;

        /// <summary>
        /// </summary>
        internal Allocator allocator;
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal void Enqueue<T>(in T item) where T : struct
        {
            unsafe
            {
                var structSize = (uint) MemoryUtilities.SizeOf<T>();
                var writeHead  = _writeIndex % capacity;

                //the idea is, considering the wrap, a read pointer must always be behind a write pointer
#if DEBUG && !PROFILE_SVELTO
                var size           = _writeIndex - _readIndex;
                var spaceAvailable = capacity - size;
                if (spaceAvailable - (int) structSize < 0)
                    throw new Exception("no writing authorized");

                if ((writeHead & (4 - 1)) != 0)
                    throw new Exception("write head is expected to be a multiple of 4");
#endif
                if (writeHead + structSize <= capacity)
                {
                    Unsafe.Write(ptr + writeHead, item);
                }
                else //copy with wrap: copy up to the end of the buffer, then wrap for the remainder
                {
                    var byteCountToEnd = capacity - writeHead;

                    var localCopyToAvoidGcIssues = item;
                    //read and copy the first portion of the item until the end of the stream
                    Unsafe.CopyBlock(ptr + writeHead, Unsafe.AsPointer(ref localCopyToAvoidGcIssues)
                                   , (uint) byteCountToEnd);

                    var restCount = structSize - byteCountToEnd;
                    //read and copy the remainder at the beginning of the stream
                    Unsafe.CopyBlock(ptr, (byte*) Unsafe.AsPointer(ref localCopyToAvoidGcIssues) + byteCountToEnd
                                   , (uint) restCount);
                }

                //this may seem a waste if the unsafeBlob is used just for bytes, but it's necessary for mixed types.
                //it's still possible to use WriteUnaligned though
                uint paddedStructSize = (uint) (structSize + (int) MemoryUtilities.Pad4(structSize));

                _writeIndex += paddedStructSize; //we want _writeIndex to always be aligned to 4
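                //illustrative note (not part of the original file): assuming Pad4 returns the number of bytes
                //needed to reach the next multiple of 4, a 6-byte struct advances _writeIndex by 8
                //(6 + Pad4(6) == 6 + 2), while a 4-byte int advances it by exactly 4 (Pad4(4) == 0).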
            }
        }
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        //The index returned is the index into the unwrapped ring. It must be wrapped again before being used
        internal ref T Reserve<T>(out UnsafeArrayIndex index) where T : struct
        {
            unsafe
            {
                var structSize   = (uint) MemoryUtilities.SizeOf<T>();
                var wrappedIndex = _writeIndex % capacity;
#if DEBUG && !PROFILE_SVELTO
                var size           = _writeIndex - _readIndex;
                var spaceAvailable = capacity - size;
                if (spaceAvailable - (int) structSize < 0)
                    throw new Exception("no writing authorized");

                if ((wrappedIndex & (4 - 1)) != 0)
                    throw new Exception("write head is expected to be a multiple of 4");
#endif
                ref var buffer = ref Unsafe.AsRef<T>(ptr + wrappedIndex);

                index.index = _writeIndex;

                _writeIndex += structSize + MemoryUtilities.Pad4(structSize);

                return ref buffer;
            }
        }
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal ref T AccessReserved<T>(UnsafeArrayIndex index) where T : struct
        {
            unsafe
            {
                var wrappedIndex = index.index % capacity;
#if DEBUG && !PROFILE_SVELTO
                if ((index.index & 3) != 0)
                    throw new Exception("invalid index detected");
#endif
                return ref Unsafe.AsRef<T>(ptr + wrappedIndex);
            }
        }
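        //illustrative note (not part of the original file): Reserve and AccessReserved are meant to be used as a pair.
        //A sketch, where MyStruct is a hypothetical blittable struct with a field named value:
        //    ref var slot = ref blob.Reserve<MyStruct>(out var idx);   // reserve space and get back a handle
        //    slot.value = 42;                                          // write through the returned ref
        //    ref var same = ref blob.AccessReserved<MyStruct>(idx);    // later, reach the same slot through the handle
        //The UnsafeArrayIndex stores the unwrapped write index, which AccessReserved wraps again with % capacity.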
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal T Dequeue<T>() where T : struct
        {
            unsafe
            {
                var structSize = (uint) MemoryUtilities.SizeOf<T>();
                var readHead   = _readIndex % capacity;
#if DEBUG && !PROFILE_SVELTO
                var size = _writeIndex - _readIndex;
                if (size < structSize) //are there enough bytes to read?
                    throw new Exception("dequeuing empty queue or unexpected type dequeued");
                if (_readIndex > _writeIndex)
                    throw new Exception("unexpected read");
                if ((readHead & (4 - 1)) != 0)
                    throw new Exception("read head is expected to be a multiple of 4");
#endif
                var paddedStructSize = structSize + MemoryUtilities.Pad4(structSize);
                _readIndex += paddedStructSize;

                if (_readIndex == _writeIndex)
                {
                    //resetting the indices has the benefit of letting Reserve work in more occasions and
                    //of making the wrapping happen less often. If _readIndex reached _writeIndex, it means
                    //that there is no data left to read, so we can start to write again from the beginning of the memory
                    _writeIndex = 0;
                    _readIndex  = 0;
                }

                if (readHead + paddedStructSize <= capacity)
                    return Unsafe.Read<T>(ptr + readHead);

                //handle the case where the structure wraps around, so it must be reconstructed from the part at the
                //end of the stream and the part starting from the beginning.
                T item = default;

                var byteCountToEnd = capacity - readHead;
                Unsafe.CopyBlock(Unsafe.AsPointer(ref item), ptr + readHead, byteCountToEnd);

                var restCount = structSize - byteCountToEnd;
                Unsafe.CopyBlock((byte*) Unsafe.AsPointer(ref item) + byteCountToEnd, ptr, restCount);

                return item;
            }
        }
        // /// <summary>
        // /// Note: when a realloc happens it doesn't just unwrap the data, it also resets the readIndex to 0, so
        // /// if readIndex was greater than 0 the indices of the elements of the unwrapped queue will be shifted back
        // /// </summary>
        // [MethodImpl(MethodImplOptions.AggressiveInlining)]
        // internal void ReallocOld(uint newCapacity)
        // {
        //     unsafe
        //     {
        //         //be sure it's a multiple of 4. Assuming that what we write is aligned to 4, we will always have aligned wrapped heads
        //         //the reading and writing head always increment in multiples of 4
        //         newCapacity += MemoryUtilities.Pad4(newCapacity);
        //
        //         byte* newPointer = null;
        // #if DEBUG && !PROFILE_SVELTO
        //         if (newCapacity <= capacity)
        //             throw new Exception("new capacity must be bigger than current");
        // #endif
        //         newPointer = (byte*) MemoryUtilities.Alloc(newCapacity, allocator);
        //
        //         //copy wrapped content if there is any
        //         var currentSize = _writeIndex - _readIndex;
        //         if (currentSize > 0)
        //         {
        //             var readerHead = _readIndex % capacity;
        //             var writerHead = _writeIndex % capacity;
        //
        //             //there was no wrapping
        //             if (readerHead < writerHead)
        //             {
        //                 //copy to the new pointer, starting from the first byte still to be read, so that readIndex
        //                 //position can be reset
        //                 Unsafe.CopyBlock(newPointer, ptr + readerHead, (uint) currentSize);
        //             }
        //             //the goal of the following code is to unwrap the queue into a linear array.
        //             //the assumption is that if the wrapped writeHead is smaller than the wrapped readHead,
        //             //the writerHead wrapped and restarted from the beginning of the array.
        //             //so I have to copy the data from readerHead to the end of the array and then
        //             //from the start of the array to writerHead (which is the same position as readerHead)
        //             else
        //             {
        //                 var byteCountToEnd = capacity - readerHead;
        //
        //                 Unsafe.CopyBlock(newPointer, ptr + readerHead, byteCountToEnd);
        //                 Unsafe.CopyBlock(newPointer + byteCountToEnd, ptr, (uint) writerHead);
        //             }
        //         }
        //
        //         if (ptr != null)
        //             MemoryUtilities.Free((IntPtr) ptr, allocator);
        //
        //         ptr      = newPointer;
        //         capacity = newCapacity;
        //
        //         _readIndex  = 0;
        //         _writeIndex = currentSize;
        //     }
        // }
        /// <summary>
        /// This version of Realloc unwraps the queue, but doesn't change the unwrapped index of the existing elements.
        /// In this way the previously returned indices remain valid.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal void Realloc(uint newCapacity)
        {
            unsafe
            {
                //be sure it's a multiple of 4. Assuming that what we write is aligned to 4, we will always have aligned wrapped heads
                //the reading and writing head always increment in multiples of 4
                newCapacity += MemoryUtilities.Pad4(newCapacity);

                byte* newPointer = null;
#if DEBUG && !PROFILE_SVELTO
                if (newCapacity <= capacity)
                    throw new Exception("new capacity must be bigger than current");
#endif
                newPointer = (byte*) MemoryUtilities.Alloc(newCapacity, allocator);

                //copy wrapped content if there is any
                var currentSize = _writeIndex - _readIndex;
                if (currentSize > 0)
                {
                    var oldReaderHead = _readIndex % capacity;
                    var writerHead    = _writeIndex % capacity;

                    //there was no wrapping
                    if (oldReaderHead < writerHead)
                    {
                        var newReaderHead = _readIndex % newCapacity;

                        Unsafe.CopyBlock(newPointer + newReaderHead, ptr + oldReaderHead, (uint) currentSize);
                    }
                    else
                    {
                        var byteCountToEnd = capacity - oldReaderHead;
                        var newReaderHead  = _readIndex % newCapacity;
#if DEBUG && !PROFILE_SVELTO
                        if (newReaderHead + byteCountToEnd + writerHead > newCapacity)
                            throw new Exception("something is wrong with my previous assumptions");
#endif
                        //from the old reader head to the end of the old array
                        Unsafe.CopyBlock(newPointer + newReaderHead, ptr + oldReaderHead, byteCountToEnd);
                        //from the beginning of the old array to the old writer head (remember the writerHead wrapped)
                        Unsafe.CopyBlock(newPointer + newReaderHead + byteCountToEnd, ptr + 0, (uint) writerHead);
                    }
                }

                if (ptr != null)
                    MemoryUtilities.Free((IntPtr) ptr, allocator);

                ptr      = newPointer;
                capacity = newCapacity;

                //_readIndex = 0;
                _writeIndex = _readIndex + currentSize;
            }
        }
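        //illustrative note on the Realloc above (not part of the original file): with capacity == 16, _readIndex == 8
        //and _writeIndex == 20 (wrapped write head at 4), Realloc(32) copies the bytes at [8..16) and then [0..4)
        //starting at newPointer + 8, so an UnsafeArrayIndex of 12 previously returned by Reserve still resolves to
        //the same data after the reallocation.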
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void Dispose()
        {
            unsafe
            {
                if (ptr != null)
                    MemoryUtilities.Free((IntPtr) ptr, allocator);

                ptr         = null;
                _writeIndex = 0;
                capacity    = 0;
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void Clear()
        {
            _writeIndex = 0;
            _readIndex  = 0;
        }

        uint _writeIndex;
        uint _readIndex;
    }
}
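
To make the ring-buffer model described in the class comment concrete, here is a minimal usage sketch. It is not part of the original file: it assumes internal access to the type (e.g. code compiled inside Svelto.ECS.DataStructures) and that Allocator.Persistent is available in Svelto.Common; treat it as an illustration of the API above, not as official sample code.

    using Svelto.Common;
    using Svelto.ECS.DataStructures;

    static class UnsafeBlobExample //hypothetical helper, for illustration only
    {
        static void Main()
        {
            //start empty and let Realloc allocate the backing memory (capacity is padded to a multiple of 4)
            var blob = new UnsafeBlob { allocator = Allocator.Persistent };
            blob.Realloc(64);

            //Enqueue advances the write head by the 4-byte padded size of each struct
            blob.Enqueue(42);      //int, 4 bytes
            blob.Enqueue(3.14f);   //float, 4 bytes

            //Dequeue reads back in FIFO order and advances the read head by the same padded size
            var first  = blob.Dequeue<int>();    //42
            var second = blob.Dequeue<float>();  //3.14f

            blob.Dispose();
        }
    }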