Mirror of Svelto.ECS because we're a fan of it

UnsafeBlob.cs 13KB

using System;
using System.Runtime.CompilerServices;
using Svelto.Common;

namespace Svelto.ECS.DataStructures
{
    //Necessary to be sure that the user won't pass random values
    public struct UnsafeArrayIndex
    {
        internal uint index;
    }

    /// <summary>
    /// Note: this must work inside burst, so it must follow burst restrictions
    /// It's a typeless native queue based on a ring-buffer model. This means that the writing head and the
    /// reading head always advance independently. If there is enough space left by dequeued elements,
    /// the writing head will wrap around when it reaches the end of the array. The writing head can never
    /// surpass the reading head.
    /// </summary>
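    //A minimal usage sketch (hypothetical, not part of the original file; it assumes Allocator.Persistent
    //exists in Svelto.Common and that Realloc is how the blob gets its initial capacity):
    //
    //    var blob = new UnsafeBlob { allocator = Allocator.Persistent };
    //    blob.Realloc(64);                  //capacity is padded to a multiple of 4
    //    blob.Enqueue(42);                  //writes 4 bytes (int), write index advances by 4
    //    int value = blob.Dequeue<int>();   //value == 42; indices reset since the queue is now empty
    //    blob.Dispose();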
    struct UnsafeBlob : IDisposable
    {
        internal unsafe byte* ptr { get; set; }

        //expressed in bytes
        internal uint capacity { get; private set; }

        //expressed in bytes
        internal uint size
        {
            get
            {
                var currentSize = _writeIndex - _readIndex;
#if DEBUG && !PROFILE_SVELTO
                if ((currentSize & (4 - 1)) != 0)
                    throw new Exception("size is expected to be a multiple of 4");
#endif
                return currentSize;
            }
        }

        //expressed in bytes
        internal uint availableSpace => capacity - size;

        /// <summary>
        /// The allocator used for the native memory backing this blob
        /// </summary>
        internal Allocator allocator;
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal void Enqueue<T>(in T item) where T : struct
        {
            unsafe
            {
                var structSize = (uint) MemoryUtilities.SizeOf<T>();
                var writeHead = _writeIndex % capacity;
#if DEBUG && !PROFILE_SVELTO
                var size = _writeIndex - _readIndex;
                var spaceAvailable = capacity - size;
                if (spaceAvailable - (int) structSize < 0)
                    throw new Exception("no writing authorized");
                if ((writeHead & (4 - 1)) != 0)
                    throw new Exception("write head is expected to be a multiple of 4");
#endif
                if (writeHead + structSize <= capacity)
                {
                    Unsafe.Write(ptr + writeHead, item);
                }
                else //copy with wrap: fill the tail of the buffer, then wrap around for the remainder
                {
                    var byteCountToEnd = capacity - writeHead;
                    var localCopyToAvoidGcIssues = item;
                    //copy the first portion of the item up to the end of the buffer
                    Unsafe.CopyBlock(ptr + writeHead, Unsafe.AsPointer(ref localCopyToAvoidGcIssues)
                                   , byteCountToEnd);
                    var restCount = structSize - byteCountToEnd;
                    //copy the remainder at the beginning of the buffer
                    Unsafe.CopyBlock(ptr, (byte*) Unsafe.AsPointer(ref localCopyToAvoidGcIssues) + byteCountToEnd
                                   , restCount);
                }

                //this may seem a waste if the blob is used just for bytes, but it's necessary for mixed types.
                //it's still possible to use WriteUnaligned though
                var paddedStructSize = structSize + MemoryUtilities.Pad4(structSize);
                _writeIndex += paddedStructSize; //we want _writeIndex to be always aligned by 4
            }
        }
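        //A worked example of the padding arithmetic above (assuming Pad4 returns the byte count needed
        //to reach the next multiple of 4): for a 6-byte struct, Pad4(6) == 2, so _writeIndex advances
        //by 8 and the next element still starts on a 4-byte boundary.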
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        //The index returned is the index of the unwrapped ring. It must be wrapped again before being used
        internal ref T Reserve<T>(out UnsafeArrayIndex index) where T : struct
        {
            unsafe
            {
                var structSize = (uint) MemoryUtilities.SizeOf<T>();
                var wrappedIndex = _writeIndex % capacity;
#if DEBUG && !PROFILE_SVELTO
                var size = _writeIndex - _readIndex;
                var spaceAvailable = capacity - size;
                if (spaceAvailable - (int) structSize < 0)
                    throw new Exception("no writing authorized");
                if ((wrappedIndex & (4 - 1)) != 0)
                    throw new Exception("write head is expected to be a multiple of 4");
#endif
                ref var buffer = ref Unsafe.AsRef<T>(ptr + wrappedIndex);

                index.index = _writeIndex;
                _writeIndex += structSize + MemoryUtilities.Pad4(structSize);

                return ref buffer;
            }
        }
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal ref T AccessReserved<T>(UnsafeArrayIndex index) where T : struct
        {
            unsafe
            {
                var wrappedIndex = index.index % capacity;
#if DEBUG && !PROFILE_SVELTO
                if ((index.index & 3) != 0)
                    throw new Exception("invalid index detected");
#endif
                return ref Unsafe.AsRef<T>(ptr + wrappedIndex);
            }
        }
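        //A minimal sketch of the reserve pattern (hypothetical usage, not part of the original file).
        //The unwrapped index stored in UnsafeArrayIndex stays valid even after a Realloc, because
        //Realloc preserves the unwrapped positions of existing elements (see Realloc below):
        //
        //    ref var slot = ref blob.Reserve<int>(out var reservedIndex);
        //    slot = 7;                                        //write through the returned ref
        //    ref var again = ref blob.AccessReserved<int>(reservedIndex);
        //    //again == 7; AccessReserved wraps the stored unwrapped index against the current capacity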
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal T Dequeue<T>() where T : struct
        {
            unsafe
            {
                var structSize = (uint) MemoryUtilities.SizeOf<T>();
                var readHead = _readIndex % capacity;
#if DEBUG && !PROFILE_SVELTO
                var size = _writeIndex - _readIndex;
                if (size < structSize) //are there enough bytes to read?
                    throw new Exception("dequeuing empty queue or unexpected type dequeued");
                if (_readIndex > _writeIndex)
                    throw new Exception("unexpected read");
                if ((readHead & (4 - 1)) != 0)
                    throw new Exception("read head is expected to be a multiple of 4");
#endif
                var paddedStructSize = structSize + MemoryUtilities.Pad4(structSize);
                _readIndex += paddedStructSize;

                if (_readIndex == _writeIndex)
                {
                    //resetting the indices lets Reserve succeed more often and makes the wrapping
                    //happen less frequently. If _readIndex reached _writeIndex, there is no data
                    //left to read, so we can start writing again from the beginning of the memory
                    _writeIndex = 0;
                    _readIndex = 0;
                }

                if (readHead + paddedStructSize <= capacity)
                    return Unsafe.Read<T>(ptr + readHead);

                //handle the case where the structure wraps around, so it must be reconstructed from
                //the part at the end of the buffer and the part starting from the beginning
                T item = default;
                var byteCountToEnd = capacity - readHead;
                Unsafe.CopyBlock(Unsafe.AsPointer(ref item), ptr + readHead, byteCountToEnd);
                var restCount = structSize - byteCountToEnd;
                Unsafe.CopyBlock((byte*) Unsafe.AsPointer(ref item) + byteCountToEnd, ptr, restCount);

                return item;
            }
        }
        // /// <summary>
        // /// Note: when a realloc happens it doesn't just unwrap the data, it also resets the readIndex to 0,
        // /// so if readIndex is greater than 0 the indices of the elements of the unwrapped queue will be shifted back
        // /// </summary>
        // [MethodImpl(MethodImplOptions.AggressiveInlining)]
        // internal void ReallocOld(uint newCapacity)
        // {
        //     unsafe
        //     {
        //         //be sure it's a multiple of 4. Assuming that what we write is aligned to 4, then we will always have aligned wrapped heads
        //         //the reading and writing head always increment in multiples of 4
        //         newCapacity += MemoryUtilities.Pad4(newCapacity);
        //
        //         byte* newPointer = null;
        // #if DEBUG && !PROFILE_SVELTO
        //         if (newCapacity <= capacity)
        //             throw new Exception("new capacity must be bigger than current");
        // #endif
        //         newPointer = (byte*) MemoryUtilities.Alloc(newCapacity, allocator);
        //
        //         //copy wrapped content if there is any
        //         var currentSize = _writeIndex - _readIndex;
        //         if (currentSize > 0)
        //         {
        //             var readerHead = _readIndex % capacity;
        //             var writerHead = _writeIndex % capacity;
        //
        //             //there was no wrapping
        //             if (readerHead < writerHead)
        //             {
        //                 //copy to the new pointer, starting from the first byte still to be read, so that
        //                 //the readIndex position can be reset
        //                 Unsafe.CopyBlock(newPointer, ptr + readerHead, currentSize);
        //             }
        //             //the goal of the following code is to unwrap the queue into a linear array.
        //             //the assumption is that if the wrapped writeHead is smaller than the wrapped readHead,
        //             //the writerHead wrapped and restarted from the beginning of the array.
        //             //so the data must be copied from readerHead to the end of the array and then
        //             //from the start of the array to writerHead
        //             else
        //             {
        //                 var byteCountToEnd = capacity - readerHead;
        //
        //                 Unsafe.CopyBlock(newPointer, ptr + readerHead, byteCountToEnd);
        //                 Unsafe.CopyBlock(newPointer + byteCountToEnd, ptr, writerHead);
        //             }
        //         }
        //
        //         if (ptr != null)
        //             MemoryUtilities.Free((IntPtr) ptr, allocator);
        //
        //         ptr = newPointer;
        //         capacity = newCapacity;
        //
        //         _readIndex = 0;
        //         _writeIndex = currentSize;
        //     }
        // }
        /// <summary>
        /// This version of Realloc unwraps the queue, but doesn't change the unwrapped index of the
        /// existing elements. In this way previously returned indices remain valid
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        internal void Realloc(uint newCapacity)
        {
            unsafe
            {
                //be sure it's a multiple of 4. Assuming that what we write is aligned to 4, then we will always have aligned wrapped heads
                //the reading and writing head always increment in multiples of 4
                newCapacity += MemoryUtilities.Pad4(newCapacity);

                byte* newPointer = null;
#if DEBUG && !PROFILE_SVELTO
                if (newCapacity <= capacity)
                    throw new Exception("new capacity must be bigger than current");
#endif
                newPointer = (byte*) MemoryUtilities.Alloc(newCapacity, allocator);

                //copy wrapped content if there is any
                var currentSize = _writeIndex - _readIndex;
                if (currentSize > 0)
                {
                    var oldReaderHead = _readIndex % capacity;
                    var writerHead = _writeIndex % capacity;

                    //there was no wrapping
                    if (oldReaderHead < writerHead)
                    {
                        var newReaderHead = _readIndex % newCapacity;

                        Unsafe.CopyBlock(newPointer + newReaderHead, ptr + oldReaderHead, currentSize);
                    }
                    else
                    {
                        var byteCountToEnd = capacity - oldReaderHead;
                        var newReaderHead = _readIndex % newCapacity;
#if DEBUG && !PROFILE_SVELTO
                        if (newReaderHead + byteCountToEnd + writerHead > newCapacity)
                            throw new Exception("something is wrong with my previous assumptions");
#endif
                        //from the old reader head to the end of the old buffer
                        Unsafe.CopyBlock(newPointer + newReaderHead, ptr + oldReaderHead, byteCountToEnd);
                        //from the beginning of the old buffer to the old writer head (remember: the writerHead wrapped)
                        Unsafe.CopyBlock(newPointer + newReaderHead + byteCountToEnd, ptr, writerHead);
                    }
                }

                if (ptr != null)
                    MemoryUtilities.Free((IntPtr) ptr, allocator);

                ptr = newPointer;
                capacity = newCapacity;
                //_readIndex = 0;
                _writeIndex = _readIndex + currentSize;
            }
        }
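        //A worked example of the index-preserving copy above (hypothetical numbers, not part of the
        //original file): with capacity == 16, _readIndex == 24 and _writeIndex == 36, the 12 live bytes
        //sit at wrapped offsets 8..15 and 0..3. After Realloc(64) they are copied to offsets 24..35,
        //so an UnsafeArrayIndex holding the unwrapped value 24 still resolves via 24 % 64 == 24.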
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void Dispose()
        {
            unsafe
            {
                if (ptr != null)
                    MemoryUtilities.Free((IntPtr) ptr, allocator);

                ptr = null;
                _writeIndex = 0;
                _readIndex = 0; //also reset the read index so a disposed blob reports size 0
                capacity = 0;
            }
        }
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void Clear()
        {
            _writeIndex = 0;
            _readIndex = 0;
        }

        uint _writeIndex;
        uint _readIndex;
    }
}