// HeapBlockMap.h
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #pragma once
  6. #include "CommonDefines.h"
  7. namespace Memory
  8. {
// Maps a 32-bit address space to the HeapBlock covering each page, and holds the
// GC mark bits for every object in that space. Two-level structure: an L1 array of
// L1Count chunk pointers, each L2MapChunk covering L2Count pages (1 MB of address
// space). On 64-bit, one HeapBlockMap32 instance covers a single 4GB-aligned slice
// (see startAddress below); HeapBlockMap64 chains these per-4GB instances.
class HeapBlockMap32
{
public:
    // Segment mapping
    static const uint L1Count = 4096;
    static const uint L2Count = 256;
    static const uint PageSize = AutoSystemInfo::PageSize; // 4096

    // Mark bit definitions
    // One mark bit per ObjectGranularity-sized slot on a page.
    static const uint PageMarkBitCount = PageSize / HeapConstants::ObjectGranularity;
    static const uint L2ChunkMarkBitCount = L2Count * PageMarkBitCount;

#if defined(TARGET_64)
    // Size of the address slice one HeapBlockMap32 covers on 64-bit.
    static const size_t TotalSize = 0x100000000; // 4GB
#endif

    typedef BVStatic<PageMarkBitCount> PageMarkBitVector;
    typedef BVStatic<L2ChunkMarkBitCount> L2ChunkMarkBitVector;

    // Max size of GetWriteWatch batching.
    // This must be >= PageSegment page count for small heap block segments,
    // so set it to the MaxPageCount for PageSegments.
    static const uint MaxGetWriteWatchPages = PageSegment::MaxPageCount;

#if defined(TARGET_64)
    // startAddress: 4GB-aligned base of the 32-bit slice this map covers.
    HeapBlockMap32(__in char * startAddress);
#else
    HeapBlockMap32();
#endif
    ~HeapBlockMap32();

    // Block registration: associate [address, address + pageCount pages) with a
    // HeapBlock (plus its type/bucket, cached per page for locality in blockInfo).
    bool EnsureHeapBlock(void * address, uint pageCount);
    void SetHeapBlockNoCheck(void * address, uint pageCount, HeapBlock * heapBlock, HeapBlock::HeapBlockType blockType, byte bucketIndex);
    bool SetHeapBlock(void * address, uint pageCount, HeapBlock * heapBlock, HeapBlock::HeapBlockType blockType, byte bucketIndex);
    void ClearHeapBlock(void * address, uint pageCount);
    HeapBlock * GetHeapBlock(void * address);

    // True when no heap blocks are registered (count tracks registrations).
    bool Empty() const { return count == 0; }

    // Heap block map marking
    PageMarkBitVector * GetPageMarkBitVector(void * address);
    template <size_t BitCount>
    BVStatic<BitCount>* GetMarkBitVectorForPages(void * address);
    uint GetMarkCount(void* address, uint pageCount);
    // interlocked: use atomic bit operations (parallel/concurrent marking);
    // doSpecialMark: also invoke the special-mark path (see OnSpecialMark).
    template <bool interlocked, bool doSpecialMark>
    void Mark(void * candidate, MarkContext * markContext);
    // Like Mark, but the candidate may point into the interior of an object.
    template <bool interlocked, bool doSpecialMark>
    void MarkInterior(void * candidate, MarkContext * markContext);
    bool IsMarked(void * address) const;
    void SetMark(void * address);
    bool TestAndSetMark(void * address);
    void ResetMarks();
#if ENABLE_CONCURRENT_GC || ENABLE_PARTIAL_GC
    void ResetDirtyPages(Recycler * recycler);
    uint Rescan(Recycler * recycler, bool resetWriteWatch);
#endif
    void MakeAllPagesReadOnly(Recycler* recycler);
    void MakeAllPagesReadWrite(Recycler* recycler);
    void Cleanup(bool concurrentFindImplicitRoot);
    bool OOMRescan(Recycler * recycler);
#ifdef RECYCLER_STRESS
    void InduceFalsePositives(Recycler * recycler);
#endif
#ifdef RECYCLER_VERIFY_MARK
    bool IsAddressInNewChunk(void * address);
#endif

private:
    friend class PageSegmentBase<VirtualAllocWrapper>;

    template <class Fn>
    void ForEachSegment(Recycler * recycler, Fn func);
    // Flips page protection across all mapped segments; asserts the pages held
    // expectedOldFlags before the change (used by MakeAllPagesReadOnly/ReadWrite).
    void ChangeProtectionLevel(Recycler* recycler, DWORD protectFlags, DWORD expectedOldFlags);

    // Index into the L1 map: which 1 MB (L2Count * PageSize) chunk the address is in.
    static uint GetLevel1Id(void * address)
    {
        return ::Math::PointerCastToIntegralTruncate<uint>(address) / L2Count / PageSize;
    }

public:
    static const uint L2CountPageSizePow2Modulo = (L2Count * PageSize) - 1;
    // Index within an L2 chunk: which page of the 1 MB chunk the address is in.
    // Uses the pow2 mask as a fast modulo; the Assert guards the pow2 assumption.
    static uint GetLevel2Id(void * address)
    {
        Assert((::Math::PointerCastToIntegralTruncate<uint>(address) & L2CountPageSizePow2Modulo) / PageSize
            == (::Math::PointerCastToIntegralTruncate<uint>(address) % (L2Count * PageSize)) / PageSize); // see if L2Count * PageSize is no longer Pow2
        return (::Math::PointerCastToIntegralTruncate<uint>(address) & L2CountPageSizePow2Modulo) / PageSize;
    }

private:
#if ENABLE_CONCURRENT_GC
#ifdef RECYCLER_WRITE_WATCH
    static UINT GetWriteWatchHelper(Recycler * recycler, DWORD writeWatchFlags, void* baseAddress, size_t regionSize,
        void** addresses, ULONG_PTR* count, LPDWORD granularity);
    static UINT GetWriteWatchHelperOnOOM(DWORD writeWatchFlags, _In_ void* baseAddress, size_t regionSize,
        _Out_writes_(*count) void** addresses, _Inout_ ULONG_PTR* count, LPDWORD granularity);
#endif
#endif

    // Inverse of GetLevel1Id/GetLevel2Id: reconstruct the page base address.
    static void * GetAddressFromIds(uint id1, uint id2)
    {
        Assert(id1 < L1Count);
        Assert(id2 < L2Count);
        return (void *)(((id1 * L2Count) + id2) * PageSize);
    }

    // Per-page cache of the owning block's type and bucket index.
    struct HeapBlockInfo
    {
        HeapBlock::HeapBlockType blockType;
        byte bucketIndex;
    };

    // We want HeapBlockInfo to be as small as possible to get the best cache locality for this info.
    CompileAssert(sizeof(HeapBlockInfo) == sizeof(ushort));

    // Second-level chunk: block lookup and mark bits for L2Count contiguous pages.
    class L2MapChunk
    {
    public:
        L2MapChunk();
        ~L2MapChunk();

        HeapBlock * Get(void * address);
        void Set(uint id2, uint pageCount, HeapBlock * heapBlock, HeapBlock::HeapBlockType blockType, byte bucketIndex);
        void Clear(uint id2, uint pageCount);
        bool IsEmpty() const;
        PageMarkBitVector * GetPageMarkBitVector(void * address);
        PageMarkBitVector * GetPageMarkBitVector(uint pageIndex);
        template <size_t BitCount> BVStatic<BitCount>* GetMarkBitVectorForPages(void * address);
        template <size_t BitCount> BVStatic<BitCount>* GetMarkBitVectorForPages(uint pageIndex);
        bool IsMarked(void * address) const;
        void SetMark(void * address);

        // Bit position within markBits for an address inside this chunk's 1 MB range.
        static uint GetMarkBitIndex(void * address)
        {
            uint bitIndex = (::Math::PointerCastToIntegralTruncate<uint>(address) % (L2Count * PageSize)) / HeapConstants::ObjectGranularity;
            Assert(bitIndex < L2ChunkMarkBitCount);
            return bitIndex;
        }

        // Mark bits for objects that live in this particular chunk
        // Each L2 chunk has 1 bit for each object that lives in this chunk
        // The L2 chunk represents 256 * 4096 bytes = 1 MB of address space
        // This means, that on 32 bit systems, where each object is at least 16 bytes, we can have 64K objects
        // On 64 bit systems, where each object is at least 32 bytes, we can have 32K objects
        // Therefore, that on 32 bit systems, the mark bits take up 8KB, and on 64 bit systems, they take up 4KB
        // This is more general purpose than if the mark bits are on the heap block, and is more runtime efficient
        // However, it's less memory efficient (e.g. a large object which is 1 MB + 1 byte, rounded up to 1028 KB,
        // would before take up 1 byte for it's mark bits- now it'll have a cost of 16KB, one for each of the L2 segments it spans)
        L2ChunkMarkBitVector markBits;

        // HeapBlockInfo for each page in our range
        HeapBlockInfo blockInfo[L2Count];

        // HeapBlock * for each page in our range (or nullptr, if no block)
        HeapBlock* map[L2Count];

#ifdef RECYCLER_VERIFY_MARK
        bool isNewChunk;
#endif
#if DBG
        // Debug-only per-page mark counts, cross-checked by VerifyMarkCountForPages.
        ushort pageMarkCount[L2Count];
#endif
    };

    template <bool interlocked>
    bool MarkInternal(L2MapChunk * chunk, void * candidate);
    void OnSpecialMark(L2MapChunk * chunk, void * candidate);
    // updateChunk: whether to refresh the caller's chunk pointer when the real
    // (object-start) candidate lands in a different L2 chunk than the original.
    template <bool interlocked, bool updateChunk>
    bool MarkInteriorInternal(MarkContext * markContext, L2MapChunk *& chunk, void * originalCandidate, void * realCandidate);
    template <typename TBlockType>
    TBlockType* GetHeapBlockForRescan(L2MapChunk* chunk, uint id2) const;
    template <class TBlockType>
    bool RescanHeapBlock(void * dirtyPage, HeapBlock::HeapBlockType blockType, L2MapChunk* chunk, uint id2, bool* anyObjectsMarkedOnPage, Recycler * recycler);
    template <class TBlockType>
    bool RescanHeapBlockOnOOM(TBlockType* heapBlock, char* pageAddress, HeapBlock::HeapBlockType blockType, uint bucketIndex, L2MapChunk * chunk, Recycler * recycler);
    bool RescanPage(void * dirtyPage, bool* anyObjectsMarkedOnPage, Recycler * recycler);

    // Number of registered heap blocks (Empty() tests this against zero).
    uint count;
    // L1 table: one L2MapChunk pointer (or null) per 1 MB of address space.
    L2MapChunk * map[L1Count];
    bool anyHeapBlockRescannedDuringOOM;

#if defined(TARGET_64)
    // On 64 bit, this structure only maps one particular 32 bit space.
    // Store the startAddress of that 32 bit space so we know which it is.
    // This value should always be 4GB aligned.
    char * startAddress;
#endif

public:
#if DBG
    ushort GetPageMarkCount(void * address) const;
    void SetPageMarkCount(void * address, ushort markCount);
    template <uint BitVectorCount>
    void VerifyMarkCountForPages(void * address, uint pageCount);
#endif

private:
    template <class Fn>
    void ForEachChunkInAddressRange(void * address, size_t pageCount, Fn fn);
};
#if defined(TARGET_64)
// 64-bit heap block map: a linked list of per-4GB HeapBlockMap32 nodes, keyed by
// the upper 32 bits of the address. Mirrors the HeapBlockMap32 interface (with
// size_t page counts) and forwards each call to the node covering the address.
class HeapBlockMap64
{
public:
    HeapBlockMap64();
    ~HeapBlockMap64();

    // Block registration — same contract as HeapBlockMap32, but ranges may span
    // multiple 4GB nodes, hence size_t pageCount.
    bool EnsureHeapBlock(void * address, size_t pageCount);
    void SetHeapBlockNoCheck(void * address, size_t pageCount, HeapBlock * heapBlock, HeapBlock::HeapBlockType blockType, byte bucketIndex);
    bool SetHeapBlock(void * address, size_t pageCount, HeapBlock * heapBlock, HeapBlock::HeapBlockType blockType, byte bucketIndex);
    void ClearHeapBlock(void * address, size_t pageCount);
    HeapBlock * GetHeapBlock(void * address);

    // Marking — see HeapBlockMap32 for the interlocked/doSpecialMark semantics.
    HeapBlockMap32::PageMarkBitVector * GetPageMarkBitVector(void * address);
    template <size_t BitCount>
    BVStatic<BitCount>* GetMarkBitVectorForPages(void * address);
    uint GetMarkCount(void* address, uint pageCount);
    template <bool interlocked, bool doSpecialMark>
    void Mark(void * candidate, MarkContext * markContext);
    template <bool interlocked, bool doSpecialMark>
    void MarkInterior(void * candidate, MarkContext * markContext);
    bool IsMarked(void * address) const;
    void SetMark(void * address);
    bool TestAndSetMark(void * address);
    void ResetMarks();
#if ENABLE_CONCURRENT_GC || ENABLE_PARTIAL_GC
    void ResetDirtyPages(Recycler * recycler);
    uint Rescan(Recycler * recycler, bool resetWriteWatch);
#endif
    void MakeAllPagesReadOnly(Recycler* recycler);
    void MakeAllPagesReadWrite(Recycler* recycler);
    bool OOMRescan(Recycler * recycler);
    void Cleanup(bool concurrentFindImplicitRoot);
#ifdef RECYCLER_STRESS
    void InduceFalsePositives(Recycler * recycler);
#endif
#ifdef RECYCLER_VERIFY_MARK
    bool IsAddressInNewChunk(void * address);
#endif

private:
    friend class HeapBlockMap32;

    // One list entry per populated 4GB slice of the address space.
    struct Node
    {
        Node(__in char * startAddress) : map(startAddress) { }

        uint nodeIndex;       // upper 32 bits of the slice's addresses (see GetNodeIndex)
        Node * next;
        HeapBlockMap32 map;   // the 32-bit map for this slice
    };

    static const uint PagesPer4GB = 1 << 20; // = 1M, assume page size = 4K

    static uint GetNodeIndex(void * address)
    {
        return GetNodeIndex((ULONG64)address);
    }

    // Node key: the high 32 bits of the address select the 4GB slice.
    static uint GetNodeIndex(ULONG64 address)
    {
        return (uint)((ULONG64)address >> 32);
    }

    Node * FindOrInsertNode(void * address);
    Node * FindNode(void * address) const;
    template <class Fn>
    void ForEachNodeInAddressRange(void * address, size_t pageCount, Fn fn);

    // Head of the singly linked node list (linear scan per lookup).
    Node * list;

public:
#if DBG
    ushort GetPageMarkCount(void * address) const;
    void SetPageMarkCount(void * address, ushort markCount);
    template <uint BitVectorCount>
    void VerifyMarkCountForPages(void * address, uint pageCount);
#endif

    // Rounds an address down to the 4GB boundary of its node's slice.
    static char * GetNodeStartAddress(void * address)
    {
        return (char *)(((size_t)address) & ~(HeapBlockMap32::TotalSize - 1));
    }
};

typedef HeapBlockMap64 HeapBlockMap;
#else
typedef HeapBlockMap32 HeapBlockMap;
#endif
  256. }