HeapBlockMap.inl
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #pragma once
// Set the mark bit for the given candidate address within the given L2 chunk.
// Returns true if the bit was already set (the object is already marked and
// needs no further processing); returns false if this call newly set the bit.
// When 'interlocked' is true the bit is set atomically, for use by concurrent
// marking.
template <bool interlocked>
inline
bool
HeapBlockMap32::MarkInternal(L2MapChunk * chunk, void * candidate)
{
    uint bitIndex = chunk->GetMarkBitIndex(candidate);

    if (interlocked)
    {
        // Use an interlocked BTS instruction to ensure atomicity.
        // Since this is expensive, do a non-interlocked test first.
        // Mark bits never go from set to clear during marking, so if we find the bit is already set, we're done.
        if (chunk->markBits.TestIntrinsic(bitIndex))
        {
            // Already marked; no further processing needed
            return true;
        }
        if (chunk->markBits.TestAndSetInterlocked(bitIndex))
        {
            // Already marked; no further processing needed
            return true;
        }
    }
    else
    {
        if (chunk->markBits.TestAndSet(bitIndex))
        {
            // Already marked; no further processing needed
            return true;
        }
    }

#if DBG
    // Debug-only bookkeeping: count newly marked objects per page.
    // Incremented atomically since concurrent markers may share a page.
    InterlockedIncrement16((short *)&chunk->pageMarkCount[GetLevel2Id(candidate)]);
#endif

    return false;
}
//
// Mark a particular object.
// If the object is already marked, or if the reference is invalid, there is
// no further processing to be done (aside from the optional special-mark
// callback). If the object is newly marked, dispatch on its heap block type
// to queue it for scanning or finalization processing as appropriate.
//
// Mark the object at 'candidate'. If the address is not in any tracked page
// range, or the object is already marked, returns without further work
// (optionally invoking the special-mark callback). Otherwise dispatches on
// the containing block's type to process the newly marked object.
template <bool interlocked, bool doSpecialMark>
inline
void
HeapBlockMap32::Mark(void * candidate, MarkContext * markContext)
{
    // Level-1 lookup: a null chunk means the address is outside any
    // recycler-managed page range.
    uint id1 = GetLevel1Id(candidate);

    L2MapChunk * chunk = map[id1];
    if (chunk == nullptr)
    {
        // False reference; no further processing needed.
        return;
    }

    if (MarkInternal<interlocked>(chunk, candidate))
    {
        // Already marked. Still give the object its special-mark callback
        // (e.g. FinalizableObject::OnMark) when requested.
        if (doSpecialMark)
        {
            this->OnSpecialMark(chunk, candidate);
        }
        return;
    }

#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
    // Software write barrier verification: confirm the barrier bit was set
    // for the parent slot that referenced this candidate.
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier) && CONFIG_FLAG(VerifyBarrierBit))
    {
        Recycler::WBVerifyBitIsSet((char*)markContext->parentRef, (char*)candidate);
    }
#endif

    uint id2 = GetLevel2Id(candidate);
    HeapBlock::HeapBlockType blockType = chunk->blockInfo[id2].blockType;
    // The cached blockInfo type must agree with the block itself (unless free).
    Assert(blockType == HeapBlock::HeapBlockType::FreeBlockType || chunk->map[id2]->GetHeapBlockType() == blockType);

    // Switch on the HeapBlockType to determine how to process the newly marked object.
    switch (blockType)
    {
    case HeapBlock::HeapBlockType::FreeBlockType:
        // False reference. Do nothing.
        break;

    case HeapBlock::HeapBlockType::SmallLeafBlockType:
    case HeapBlock::HeapBlockType::MediumLeafBlockType:
        // Leaf blocks don't need to be scanned. Do nothing.
        break;

    case HeapBlock::HeapBlockType::SmallNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType:
#endif
        {
            byte bucketIndex = chunk->blockInfo[id2].bucketIndex;
            // See if it's an invalid offset using the invalid bit vector and if so, do nothing.
            if (!HeapInfo::GetInvalidBitVectorForBucket<SmallAllocationBlockAttributes>(bucketIndex)->Test(SmallHeapBlock::GetAddressBitIndex(candidate)))
            {
                uint objectSize = HeapInfo::GetObjectSizeForBucketIndex<SmallAllocationBlockAttributes>(bucketIndex);
                // Queue the object for scanning; on failure, flag the block
                // so it gets rescanned after the OOM condition clears.
                if (!markContext->AddMarkedObject(candidate, objectSize))
                {
                    // Failed to mark due to OOM.
                    ((SmallHeapBlock *)chunk->map[id2])->SetNeedOOMRescan(markContext->GetRecycler());
                }
            }
        }
        break;

    case HeapBlock::HeapBlockType::MediumNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType:
#endif
        {
            byte bucketIndex = chunk->blockInfo[id2].bucketIndex;
            // See if it's an invalid offset using the invalid bit vector and if so, do nothing.
            if (!HeapInfo::GetInvalidBitVectorForBucket<MediumAllocationBlockAttributes>(bucketIndex)->Test(MediumHeapBlock::GetAddressBitIndex(candidate)))
            {
                uint objectSize = HeapInfo::GetObjectSizeForBucketIndex<MediumAllocationBlockAttributes>(bucketIndex);
                // Queue the object for scanning; on failure, flag the block
                // so it gets rescanned after the OOM condition clears.
                if (!markContext->AddMarkedObject(candidate, objectSize))
                {
                    // Failed to mark due to OOM.
                    ((MediumHeapBlock *)chunk->map[id2])->SetNeedOOMRescan(markContext->GetRecycler());
                }
            }
        }
        break;

    case HeapBlock::HeapBlockType::SmallFinalizableBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType:
#endif
        // Finalizable blocks do their own validation/tracking of the object.
        ((SmallFinalizableHeapBlock*)chunk->map[id2])->ProcessMarkedObject<doSpecialMark>(candidate, markContext);
        break;

    case HeapBlock::HeapBlockType::MediumFinalizableBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType:
#endif
        ((MediumFinalizableHeapBlock*)chunk->map[id2])->ProcessMarkedObject<doSpecialMark>(candidate, markContext);
        break;

    case HeapBlock::HeapBlockType::LargeBlockType:
        ((LargeHeapBlock*)chunk->map[id2])->Mark<doSpecialMark>(candidate, markContext);
        break;

    case HeapBlock::HeapBlockType::BlockTypeCount:
        AssertMsg(false, "code should be unreachable");
        break;

#if DBG
    default:
        AssertMsg(false, "what's the new heap block type?");
#endif
    }
}
  146. inline
  147. void
  148. HeapBlockMap32::OnSpecialMark(L2MapChunk * chunk, void * candidate)
  149. {
  150. uint id2 = GetLevel2Id(candidate);
  151. HeapBlock::HeapBlockType blockType = chunk->blockInfo[id2].blockType;
  152. Assert(blockType == HeapBlock::HeapBlockType::FreeBlockType || chunk->map[id2]->GetHeapBlockType() == blockType);
  153. unsigned char attributes = ObjectInfoBits::NoBit;
  154. bool success = false;
  155. // If the block is finalizable, we may still have to take special mark action.
  156. switch (blockType)
  157. {
  158. case HeapBlock::HeapBlockType::SmallFinalizableBlockType:
  159. #ifdef RECYCLER_WRITE_BARRIER
  160. case HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType:
  161. #endif
  162. {
  163. SmallFinalizableHeapBlock *smallBlock = (SmallFinalizableHeapBlock*)chunk->map[id2];
  164. success = smallBlock->TryGetAttributes(candidate, &attributes);
  165. break;
  166. }
  167. case HeapBlock::HeapBlockType::MediumFinalizableBlockType:
  168. #ifdef RECYCLER_WRITE_BARRIER
  169. case HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType:
  170. #endif
  171. {
  172. MediumFinalizableHeapBlock *mediumBlock = (MediumFinalizableHeapBlock*)chunk->map[id2];
  173. success = mediumBlock->TryGetAttributes(candidate, &attributes);
  174. break;
  175. }
  176. case HeapBlock::HeapBlockType::LargeBlockType:
  177. {
  178. LargeHeapBlock *largeBlock = (LargeHeapBlock*)chunk->map[id2];
  179. success = largeBlock->TryGetAttributes(candidate, &attributes);
  180. break;
  181. }
  182. default:
  183. break;
  184. }
  185. if (success && (attributes & FinalizeBit))
  186. {
  187. FinalizableObject *trackedObject = (FinalizableObject*)candidate;
  188. trackedObject->OnMark();
  189. }
  190. }
// Second mark step for an interior pointer. 'originalCandidate' is the
// aligned interior pointer that has already been marked; 'realCandidate' is
// the base address of its containing object (or nullptr if invalid).
// Returns true when no further processing is needed (base pointer already
// handled, invalid interior, offset out of range, or base already marked);
// returns false when the caller should continue processing the newly marked
// base object. May update 'chunk' (by reference) when the base address lands
// in a different L2 chunk than the interior pointer.
template <bool interlocked, bool largeBlockType>
inline
bool
HeapBlockMap32::MarkInteriorInternal(MarkContext * markContext, L2MapChunk *& chunk, void * originalCandidate, void * realCandidate)
{
    if (originalCandidate == realCandidate)
    {
        // The initial mark performed was correct (we had a base pointer)
        return false;
    }

    if (realCandidate == nullptr)
    {
        // We had an invalid interior pointer, so we bail out
        return true;
    }

    if (largeBlockType)
    {
#if defined(_M_IX86_OR_ARM32)
        // we only check the first MaxLargeObjectMarkOffset byte for marking purpuse.
        if ( (size_t)originalCandidate - (size_t)realCandidate > HeapConstants::MaxLargeObjectMarkOffset )
            return true;
#endif
#if defined(_M_X64_OR_ARM64)
        if (HeapBlockMap64::GetNodeIndex(originalCandidate) != HeapBlockMap64::GetNodeIndex(realCandidate))
        {
            // We crossed a node boundary (very rare) so we should just re-start from the real candidate.
            // In this case we are no longer marking an interior reference.
            markContext->GetRecycler()->heapBlockMap.Mark<interlocked, false>(realCandidate, markContext);
            // This mark code therefore has nothing to do (it has already happened).
            return true;
        }
#endif
        // Update the chunk as the interior pointer may cross an L2 boundary (e.g., a large object)
        chunk = map[GetLevel1Id(realCandidate)];
    }

    // Perform the actual mark for the interior pointer
    return MarkInternal<interlocked>(chunk, realCandidate);
}
// Mark via an interior pointer: 'candidate' may point anywhere inside an
// object. Marks the (aligned) interior address first, then translates it to
// the containing object's base address per block type and processes the base
// object, mirroring the dispatch in HeapBlockMap32::Mark.
template <bool interlocked>
inline
void
HeapBlockMap32::MarkInterior(void * candidate, MarkContext * markContext)
{
    // Align the candidate to object granularity
    candidate = reinterpret_cast<void*>(reinterpret_cast<size_t>(candidate) & ~HeapInfo::ObjectAlignmentMask);

    uint id1 = GetLevel1Id(candidate);

    L2MapChunk * chunk = map[id1];
    if (chunk == nullptr)
    {
        // False reference; no further processing needed.
        return;
    }

    if (MarkInternal<interlocked>(chunk, candidate))
    {
        // Already marked (mark internal-then-actual first)
        return;
    }

    uint id2 = GetLevel2Id(candidate);
    HeapBlock::HeapBlockType blockType = chunk->blockInfo[id2].blockType;

    // Switch on the HeapBlockType to determine how to map interior->base and process object.
    switch (blockType)
    {
    case HeapBlock::HeapBlockType::FreeBlockType:
        // False reference. Do nothing.
        break;

    case HeapBlock::HeapBlockType::SmallLeafBlockType:
    case HeapBlock::HeapBlockType::MediumLeafBlockType:
        // Leaf blocks don't need to be scanned. Do nothing.
        break;

    case HeapBlock::HeapBlockType::SmallNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType:
#endif
        {
            byte bucketIndex = chunk->blockInfo[id2].bucketIndex;
            uint objectSize = HeapInfo::GetObjectSizeForBucketIndex<SmallAllocationBlockAttributes>(bucketIndex);
            // Translate interior pointer to the base of the containing object.
            void * realCandidate = SmallHeapBlock::GetRealAddressFromInterior(candidate, objectSize, bucketIndex);
            if (MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate))
            {
                // Base handled already (or invalid); nothing more to do.
                break;
            }

            if (!markContext->AddMarkedObject(realCandidate, objectSize))
            {
                // Failed to mark due to OOM.
                ((SmallHeapBlock *)chunk->map[id2])->SetNeedOOMRescan(markContext->GetRecycler());
            }
        }
        break;

    case HeapBlock::HeapBlockType::MediumNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType:
#endif
        {
            byte bucketIndex = chunk->blockInfo[id2].bucketIndex;
            uint objectSize = HeapInfo::GetObjectSizeForBucketIndex<MediumAllocationBlockAttributes>(bucketIndex);
            void * realCandidate = MediumHeapBlock::GetRealAddressFromInterior(candidate, objectSize, bucketIndex);
            if (MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }

            if (!markContext->AddMarkedObject(realCandidate, objectSize))
            {
                // Failed to mark due to OOM.
                ((MediumHeapBlock *)chunk->map[id2])->SetNeedOOMRescan(markContext->GetRecycler());
            }
        }
        break;

    case HeapBlock::HeapBlockType::SmallFinalizableBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType:
#endif
        {
            void * realCandidate = ((SmallFinalizableHeapBlock*)chunk->map[id2])->GetRealAddressFromInterior(candidate);
            if (MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }

            ((SmallFinalizableHeapBlock*)chunk->map[id2])->ProcessMarkedObject<false>(realCandidate, markContext);
        }
        break;

    case HeapBlock::HeapBlockType::MediumFinalizableBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType:
#endif
        {
            void * realCandidate = ((MediumFinalizableHeapBlock*)chunk->map[id2])->GetRealAddressFromInterior(candidate);
            if (MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }

            ((MediumFinalizableHeapBlock*)chunk->map[id2])->ProcessMarkedObject<false>(realCandidate, markContext);
        }
        break;

    case HeapBlock::HeapBlockType::LargeBlockType:
        {
            void * realCandidate = ((LargeHeapBlock*)chunk->map[id2])->GetRealAddressFromInterior(candidate);
            // Pass largeBlockType=true: the base may be in a different L2
            // chunk, in which case MarkInteriorInternal updates 'chunk'.
            if (MarkInteriorInternal<interlocked, true>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }

            // Re-index with the base address, since the interior pointer and
            // the base can map to different level-2 entries for large objects.
            ((LargeHeapBlock*)chunk->map[GetLevel2Id(realCandidate)])->Mark<false>(realCandidate, markContext);
        }
        break;

    case HeapBlock::HeapBlockType::BlockTypeCount:
        AssertMsg(false, "code should be unreachable");
        break;

#if DBG
    default:
        AssertMsg(false, "what's the new heap block type?");
#endif
    }
}
  343. #if defined(_M_X64_OR_ARM64)
  344. //
  345. // 64-bit Mark
  346. // See HeapBlockMap32::Mark for explanation of return values
  347. //
  348. template <bool interlocked, bool doSpecialMark>
  349. inline
  350. void
  351. HeapBlockMap64::Mark(void * candidate, MarkContext * markContext)
  352. {
  353. uint index = GetNodeIndex(candidate);
  354. Node * node = list;
  355. while (node != nullptr)
  356. {
  357. if (node->nodeIndex == index)
  358. {
  359. // Found the correct Node.
  360. // Process the mark and return.
  361. node->map.Mark<interlocked, doSpecialMark>(candidate, markContext);
  362. return;
  363. }
  364. node = node->next;
  365. }
  366. // No Node found; must be an invalid reference. Do nothing.
  367. }
  368. template <bool interlocked>
  369. inline
  370. void
  371. HeapBlockMap64::MarkInterior(void * candidate, MarkContext * markContext)
  372. {
  373. uint index = GetNodeIndex(candidate);
  374. Node * node = list;
  375. while (node != nullptr)
  376. {
  377. if (node->nodeIndex == index)
  378. {
  379. // Found the correct Node.
  380. // Process the mark and return.
  381. node->map.MarkInterior<interlocked>(candidate, markContext);
  382. return;
  383. }
  384. node = node->next;
  385. }
  386. // No Node found; must be an invalid reference. Do nothing.
  387. }
  388. #endif // defined(_M_X64_OR_ARM64)