Mirror of Svelto.ECS because we're a fan of it
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

UnsafeBlob.cs (11 KB)
  1. using System;
  2. using System.Runtime.CompilerServices;
  3. using Svelto.Common;
  4. namespace Svelto.ECS.DataStructures
  5. {
    //ToDO to complete in future version of svelto, maybe removed
    /// <summary>
    /// Ticket returned by UnsafeBlob.Reserve and consumed by UnsafeBlob.AccessReserved:
    /// it records the byte offset of a reserved slot and the blob capacity at reservation time.
    /// </summary>
    public struct UnsafeArrayIndex
    {
        internal uint index;    //byte offset of the reserved slot inside the blob
        internal uint capacity; //blob capacity (in bytes) when the slot was reserved
    }
  12. /// <summary>
  13. /// Note: this must work inside burst, so it must follow burst restrictions
  14. /// Note: All the svelto native structures
  15. /// </summary>
  16. struct UnsafeBlob : IDisposable
  17. {
  18. internal unsafe byte* ptr { get; set; }
  19. //expressed in bytes
  20. internal uint capacity { get; private set; }
  21. //expressed in bytes
  22. internal uint size => (uint)_writeIndex - _readIndex;
  23. //expressed in bytes
  24. internal uint space => capacity - size;
  25. /// <summary>
  26. /// </summary>
  27. internal Allocator allocator;
  28. [MethodImpl(MethodImplOptions.AggressiveInlining)]
  29. internal void Write<T>(in T item) where T : struct
  30. {
  31. unsafe
  32. {
  33. var structSize = (uint) MemoryUtilities.SizeOf<T>();
  34. //the idea is, considering the wrap, a read pointer must always be behind a writer pointer
  35. #if DEBUG && !PROFILE_SVELTO
  36. if (space - (int) structSize < 0)
  37. throw new Exception("no writing authorized");
  38. #endif
  39. var writeHead = _writeIndex % capacity;
  40. if (writeHead + structSize <= capacity)
  41. {
  42. Unsafe.Write(ptr + writeHead, item);
  43. }
  44. else
  45. //copy with wrap, will start to copy and wrap for the reminder
  46. {
  47. var byteCountToEnd = capacity - writeHead;
  48. var localCopyToAvoidGcIssues = item;
  49. //read and copy the first portion of Item until the end of the stream
  50. Unsafe.CopyBlock(ptr + writeHead, Unsafe.AsPointer(ref localCopyToAvoidGcIssues), (uint)byteCountToEnd);
  51. var restCount = structSize - byteCountToEnd;
  52. //read and copy the remainder
  53. Unsafe.CopyBlock(ptr, (byte*) Unsafe.AsPointer(ref localCopyToAvoidGcIssues) + byteCountToEnd
  54. , (uint)restCount);
  55. }
  56. //this is may seems a waste if you are going to use an unsafeBlob just for bytes, but it's necessary for mixed types.
  57. //it's still possible to use WriteUnaligned though
  58. int paddedStructSize = (int) MemoryUtilities.Align4(structSize);
  59. _writeIndex += paddedStructSize;
  60. }
  61. }
  62. [MethodImpl(MethodImplOptions.AggressiveInlining)]
  63. internal void Write<T>(in T item, uint writeIndex) where T : struct
  64. {
  65. unsafe
  66. {
  67. var structSize = (uint) MemoryUtilities.SizeOf<T>();
  68. //the idea is, considering the wrap, a read pointer must always be behind a writer pointer
  69. var writeHead = writeIndex % capacity;
  70. if (writeHead + structSize <= capacity)
  71. {
  72. Unsafe.Write(ptr + writeHead, item);
  73. }
  74. else //copy with wrap, will start to copy and wrap for the reminder
  75. {
  76. var byteCountToEnd = capacity - writeHead;
  77. var localCopyToAvoidGcIssues = item;
  78. //read and copy the first portion of Item until the end of the stream
  79. Unsafe.CopyBlock(ptr + writeHead, Unsafe.AsPointer(ref localCopyToAvoidGcIssues), byteCountToEnd);
  80. var restCount = structSize - byteCountToEnd;
  81. //read and copy the remainder
  82. Unsafe.CopyBlock(ptr, (byte*) Unsafe.AsPointer(ref localCopyToAvoidGcIssues) + byteCountToEnd
  83. , restCount);
  84. }
  85. }
  86. }
  87. // [MethodImpl(MethodImplOptions.AggressiveInlining)]
  88. // //ToDo: remove this and create an UnsafeBlobUnaligned, used on NativeRingBuffer where T cannot change
  89. // internal void WriteUnaligned<T>(in T item) where T : struct
  90. // {
  91. // unsafe
  92. // {
  93. // var structSize = (uint) MemoryUtilities.SizeOf<T>();
  94. //
  95. // //the idea is, considering the wrap, a read pointer must always be behind a writer pointer
  96. // #if DEBUG && !PROFILE_SVELTO
  97. // if (space - (int) structSize < 0)
  98. // throw new Exception("no writing authorized");
  99. // #endif
  100. // var pointer = _writeIndex % capacity;
  101. //
  102. // if (pointer + structSize <= capacity)
  103. // {
  104. // Unsafe.Write(ptr + pointer, item);
  105. // }
  106. // else
  107. // {
  108. // var byteCount = capacity - pointer;
  109. //
  110. // var localCopyToAvoidGCIssues = item;
  111. //
  112. // Unsafe.CopyBlockUnaligned(ptr + pointer, Unsafe.AsPointer(ref localCopyToAvoidGCIssues), byteCount);
  113. //
  114. // var restCount = structSize - byteCount;
  115. // Unsafe.CopyBlockUnaligned(ptr, (byte*) Unsafe.AsPointer(ref localCopyToAvoidGCIssues) + byteCount
  116. // , restCount);
  117. // }
  118. //
  119. // _writeIndex += structSize;
  120. // }
  121. // }
  122. [MethodImpl(MethodImplOptions.AggressiveInlining)]
  123. internal T Read<T>() where T : struct
  124. {
  125. unsafe
  126. {
  127. var structSize = (uint) MemoryUtilities.SizeOf<T>();
  128. #if DEBUG && !PROFILE_SVELTO
  129. if (size < structSize) //are there enough bytes to read?
  130. throw new Exception("dequeuing empty queue or unexpected type dequeued");
  131. if (_readIndex > _writeIndex)
  132. throw new Exception("unexpected read");
  133. #endif
  134. var head = _readIndex % capacity;
  135. var paddedStructSize = MemoryUtilities.Align4(structSize);
  136. _readIndex += paddedStructSize;
  137. if (_readIndex == _writeIndex)
  138. {
  139. //resetting the Indices has the benefit to let the Reserve work in more occasions and
  140. //the rapping happening less often. If the _readIndex reached the _writeIndex, it means
  141. //that there is no data left to read, so we can start to write again from the begin of the memory
  142. _writeIndex = 0;
  143. _readIndex = 0;
  144. }
  145. if (head + paddedStructSize <= capacity)
  146. return Unsafe.Read<T>(ptr + head);
  147. T item = default;
  148. var byteCountToEnd = capacity - head;
  149. Unsafe.CopyBlock(Unsafe.AsPointer(ref item), ptr + head, byteCountToEnd);
  150. var restCount = structSize - byteCountToEnd;
  151. Unsafe.CopyBlock((byte*) Unsafe.AsPointer(ref item) + byteCountToEnd, ptr, restCount);
  152. return item;
  153. }
  154. }
  155. [MethodImpl(MethodImplOptions.AggressiveInlining)]
  156. internal ref T Reserve<T>(out UnsafeArrayIndex index) where T : struct
  157. {
  158. unsafe
  159. {
  160. var sizeOf = (uint) MemoryUtilities.SizeOf<T>();
  161. ref var buffer = ref Unsafe.AsRef<T>(ptr + _writeIndex);
  162. #if DEBUG && !PROFILE_SVELTO
  163. if (_writeIndex > capacity)
  164. throw new Exception(
  165. $"can't reserve if the writeIndex wrapped around the capacity, writeIndex {_writeIndex} capacity {capacity}");
  166. if (_writeIndex + sizeOf > capacity)
  167. throw new Exception("out of bound reserving");
  168. #endif
  169. index = new UnsafeArrayIndex
  170. {
  171. capacity = capacity
  172. , index = (uint)_writeIndex
  173. };
  174. int align4 = (int) MemoryUtilities.Align4(sizeOf);
  175. _writeIndex += align4;
  176. return ref buffer;
  177. }
  178. }
  179. [MethodImpl(MethodImplOptions.AggressiveInlining)]
  180. internal ref T AccessReserved<T>(UnsafeArrayIndex index) where T : struct
  181. {
  182. unsafe
  183. {
  184. #if DEBUG && !PROFILE_SVELTO
  185. var size = MemoryUtilities.SizeOf<T>();
  186. if (index.index + size > capacity)
  187. throw new Exception($"out of bound access, index {index.index} size {size} capacity {capacity}");
  188. #endif
  189. return ref Unsafe.AsRef<T>(ptr + index.index);
  190. }
  191. }
  192. [MethodImpl(MethodImplOptions.AggressiveInlining)]
  193. internal void Realloc(uint newCapacity)
  194. {
  195. unsafe
  196. {
  197. //be sure it's multiple of 4. Assuming that what we write is aligned to 4, then we will always have aligned wrapped heads
  198. newCapacity = MemoryUtilities.Align4(newCapacity);
  199. byte* newPointer = null;
  200. #if DEBUG && !PROFILE_SVELTO
  201. if (newCapacity <= capacity)
  202. throw new Exception("new capacity must be bigger than current");
  203. #endif
  204. if (newCapacity > 0)
  205. {
  206. newPointer = (byte*) MemoryUtilities.Alloc(newCapacity, allocator);
  207. if (size > 0)
  208. {
  209. var readerHead = _readIndex % capacity;
  210. var writerHead = _writeIndex % capacity;
  211. if (readerHead < writerHead)
  212. {
  213. //copy to the new pointer, from th reader position
  214. var currentSize = _writeIndex - _readIndex;
  215. Unsafe.CopyBlock(newPointer, ptr + readerHead, (uint)currentSize);
  216. }
  217. //the assumption is that if size > 0 (so readerPointer and writerPointer are not the same)
  218. //writerHead wrapped and reached readerHead. so I have to copy from readerHead to the end
  219. //and from the start to writerHead (which is the same position of readerHead)
  220. else
  221. {
  222. var byteCountToEnd = capacity - readerHead;
  223. Unsafe.CopyBlock(newPointer, ptr + readerHead, byteCountToEnd);
  224. Unsafe.CopyBlock(newPointer + byteCountToEnd, ptr, (uint)writerHead);
  225. }
  226. }
  227. }
  228. if (ptr != null)
  229. MemoryUtilities.Free((IntPtr) ptr, allocator);
  230. ptr = newPointer;
  231. capacity = newCapacity;
  232. _readIndex = 0;
  233. _writeIndex = (int)size;
  234. }
  235. }
  236. [MethodImpl(MethodImplOptions.AggressiveInlining)]
  237. public void Dispose()
  238. {
  239. unsafe
  240. {
  241. if (ptr != null)
  242. MemoryUtilities.Free((IntPtr) ptr, allocator);
  243. ptr = null;
  244. _writeIndex = 0;
  245. capacity = 0;
  246. }
  247. }
  248. [MethodImpl(MethodImplOptions.AggressiveInlining)]
  249. public void Clear()
  250. {
  251. _writeIndex = 0;
  252. _readIndex = 0;
  253. }
  254. internal int _writeIndex;
  255. internal uint _readIndex;
  256. }
  257. }