//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft Corporation and contributors. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
#pragma once
  6. template <bool interlocked>
  7. inline
  8. bool
  9. HeapBlockMap32::MarkInternal(L2MapChunk * chunk, void * candidate)
  10. {
  11. uint bitIndex = chunk->GetMarkBitIndex(candidate);
  12. if (interlocked)
  13. {
  14. // Use an interlocked BTS instruction to ensure atomicity.
  15. // Since this is expensive, do a non-interlocked test first.
  16. // Mark bits never go from set to clear during marking, so if we find the bit is already set, we're done.
  17. if (chunk->markBits.TestIntrinsic(bitIndex))
  18. {
  19. // Already marked; no further processing needed
  20. return true;
  21. }
  22. if (chunk->markBits.TestAndSetInterlocked(bitIndex))
  23. {
  24. // Already marked; no further processing needed
  25. return true;
  26. }
  27. }
  28. else
  29. {
  30. if (chunk->markBits.TestAndSet(bitIndex))
  31. {
  32. // Already marked; no further processing needed
  33. return true;
  34. }
  35. }
  36. #if DBG
  37. InterlockedIncrement16((short *)&chunk->pageMarkCount[GetLevel2Id(candidate)]);
  38. #endif
  39. return false;
  40. }
//
// Mark a particular object.
// If the object is already marked, or if it's invalid, no further processing
// is done for it (beyond the optional special-mark callback).
// If the object is newly marked, it is dispatched according to its heap block
// type (e.g. queued for scanning via the MarkContext).
//
// Mark the given candidate address and, if it was newly marked, dispatch it for
// further processing based on the heap block type that owns the address.
// `interlocked` selects atomic mark-bit updates (parallel marking);
// `doSpecialMark` enables the OnSpecialMark callback for finalizable objects.
template <bool interlocked, bool doSpecialMark>
inline
void
HeapBlockMap32::Mark(void * candidate, MarkContext * markContext)
{
    uint id1 = GetLevel1Id(candidate);

    L2MapChunk * chunk = map[id1];
    if (chunk == nullptr)
    {
        // False reference; no further processing needed.
        return;
    }

    if (MarkInternal<interlocked>(chunk, candidate))
    {
        // Already marked; optionally give finalizable objects their special-mark
        // callback, then stop — there is nothing more to do for this object.
        if (doSpecialMark)
        {
            this->OnSpecialMark(chunk, candidate);
        }
        return;
    }

#if DBG && GLOBAL_ENABLE_WRITE_BARRIER
    if (CONFIG_FLAG(ForceSoftwareWriteBarrier) && CONFIG_FLAG(VerifyBarrierBit))
    {
        Recycler::WBVerifyBitIsSet((char*)markContext->parentRef, (char*)candidate);
    }
#endif

    uint id2 = GetLevel2Id(candidate);
    HeapBlock::HeapBlockType blockType = chunk->blockInfo[id2].blockType;
    // The cached blockInfo must agree with the actual heap block (unless free).
    Assert(blockType == HeapBlock::HeapBlockType::FreeBlockType || chunk->map[id2]->GetHeapBlockType() == blockType);

    // Switch on the HeapBlockType to determine how to process the newly marked object.
    switch (blockType)
    {
    case HeapBlock::HeapBlockType::FreeBlockType:
        // False reference. Do nothing.
        break;

    case HeapBlock::HeapBlockType::SmallLeafBlockType:
    case HeapBlock::HeapBlockType::MediumLeafBlockType:
        // Leaf blocks don't need to be scanned. Do nothing.
        break;

    case HeapBlock::HeapBlockType::SmallNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType:
#endif
        {
            byte bucketIndex = chunk->blockInfo[id2].bucketIndex;
            // See if it's an invalid offset using the invalid bit vector and if so, do nothing.
            if (!HeapInfo::GetInvalidBitVectorForBucket<SmallAllocationBlockAttributes>(bucketIndex)->Test(SmallHeapBlock::GetAddressBitIndex(candidate)))
            {
                uint objectSize = HeapInfo::GetObjectSizeForBucketIndex<SmallAllocationBlockAttributes>(bucketIndex);
                if (!markContext->AddMarkedObject(candidate, objectSize))
                {
                    // Failed to mark due to OOM; flag the block for rescan later.
                    ((SmallHeapBlock *)chunk->map[id2])->SetNeedOOMRescan(markContext->GetRecycler());
                }
            }
        }
        break;

    case HeapBlock::HeapBlockType::MediumNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType:
#endif
        {
            byte bucketIndex = chunk->blockInfo[id2].bucketIndex;
            // See if it's an invalid offset using the invalid bit vector and if so, do nothing.
            if (!HeapInfo::GetInvalidBitVectorForBucket<MediumAllocationBlockAttributes>(bucketIndex)->Test(MediumHeapBlock::GetAddressBitIndex(candidate)))
            {
                uint objectSize = HeapInfo::GetObjectSizeForBucketIndex<MediumAllocationBlockAttributes>(bucketIndex);
                if (!markContext->AddMarkedObject(candidate, objectSize))
                {
                    // Failed to mark due to OOM; flag the block for rescan later.
                    ((MediumHeapBlock *)chunk->map[id2])->SetNeedOOMRescan(markContext->GetRecycler());
                }
            }
        }
        break;

    case HeapBlock::HeapBlockType::SmallFinalizableBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType:
#endif
        // Finalizable blocks need attribute-aware processing.
        ((SmallFinalizableHeapBlock*)chunk->map[id2])->ProcessMarkedObject<doSpecialMark>(candidate, markContext);
        break;

#ifdef RECYCLER_VISITED_HOST
    case HeapBlock::HeapBlockType::SmallRecyclerVisitedHostBlockType:
        {
            // Host-visited blocks accept interior pointers: map back to the base
            // address first, then mark the base (skip if already marked/invalid).
            void * realCandidate = ((SmallFinalizableHeapBlock*)chunk->map[id2])->GetRealAddressFromInterior(candidate);
            if (MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }
            ((SmallRecyclerVisitedHostHeapBlock*)chunk->map[id2])->ProcessMarkedObject<doSpecialMark>(realCandidate, markContext);
        }
        break;
#endif

    case HeapBlock::HeapBlockType::MediumFinalizableBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType:
#endif
        ((MediumFinalizableHeapBlock*)chunk->map[id2])->ProcessMarkedObject<doSpecialMark>(candidate, markContext);
        break;

#ifdef RECYCLER_VISITED_HOST
    case HeapBlock::HeapBlockType::MediumRecyclerVisitedHostBlockType:
        {
            void * realCandidate = ((MediumFinalizableHeapBlock*)chunk->map[id2])->GetRealAddressFromInterior(candidate);
            if (MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }
            ((MediumRecyclerVisitedHostHeapBlock*)chunk->map[id2])->ProcessMarkedObject<doSpecialMark>(realCandidate, markContext);
        }
        break;
#endif

    case HeapBlock::HeapBlockType::LargeBlockType:
        ((LargeHeapBlock*)chunk->map[id2])->Mark<doSpecialMark>(candidate, markContext);
        break;

    case HeapBlock::HeapBlockType::BlockTypeCount:
        AssertMsg(false, "code should be unreachable");
        break;

#if DBG
    default:
        AssertMsg(false, "what's the new heap block type?");
#endif
    }
}
  170. inline
  171. void
  172. HeapBlockMap32::OnSpecialMark(L2MapChunk * chunk, void * candidate)
  173. {
  174. uint id2 = GetLevel2Id(candidate);
  175. HeapBlock::HeapBlockType blockType = chunk->blockInfo[id2].blockType;
  176. Assert(blockType == HeapBlock::HeapBlockType::FreeBlockType || chunk->map[id2]->GetHeapBlockType() == blockType);
  177. unsigned char attributes = ObjectInfoBits::NoBit;
  178. bool success = false;
  179. // If the block is finalizable, we may still have to take special mark action.
  180. switch (blockType)
  181. {
  182. case HeapBlock::HeapBlockType::SmallFinalizableBlockType:
  183. #ifdef RECYCLER_WRITE_BARRIER
  184. case HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType:
  185. #endif
  186. {
  187. SmallFinalizableHeapBlock *smallBlock = (SmallFinalizableHeapBlock*)chunk->map[id2];
  188. success = smallBlock->TryGetAttributes(candidate, &attributes);
  189. break;
  190. }
  191. case HeapBlock::HeapBlockType::MediumFinalizableBlockType:
  192. #ifdef RECYCLER_WRITE_BARRIER
  193. case HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType:
  194. #endif
  195. {
  196. MediumFinalizableHeapBlock *mediumBlock = (MediumFinalizableHeapBlock*)chunk->map[id2];
  197. success = mediumBlock->TryGetAttributes(candidate, &attributes);
  198. break;
  199. }
  200. case HeapBlock::HeapBlockType::LargeBlockType:
  201. {
  202. LargeHeapBlock *largeBlock = (LargeHeapBlock*)chunk->map[id2];
  203. success = largeBlock->TryGetAttributes(candidate, &attributes);
  204. break;
  205. }
  206. default:
  207. break;
  208. }
  209. if (success && (attributes & FinalizeBit))
  210. {
  211. FinalizableObject *trackedObject = (FinalizableObject*)candidate;
  212. trackedObject->OnMark();
  213. }
  214. }
  215. template <bool interlocked, bool largeBlockType>
  216. inline
  217. bool
  218. HeapBlockMap32::MarkInteriorInternal(MarkContext * markContext, L2MapChunk *& chunk, void * originalCandidate, void * realCandidate)
  219. {
  220. if (originalCandidate == realCandidate)
  221. {
  222. // The initial mark performed was correct (we had a base pointer)
  223. return false;
  224. }
  225. if (realCandidate == nullptr)
  226. {
  227. // We had an invalid interior pointer, so we bail out
  228. return true;
  229. }
  230. if (largeBlockType)
  231. {
  232. #if defined(TARGET_32)
  233. // we only check the first MaxLargeObjectMarkOffset byte for marking purpuse.
  234. if ( (size_t)originalCandidate - (size_t)realCandidate > HeapConstants::MaxLargeObjectMarkOffset )
  235. return true;
  236. #endif
  237. #if defined(TARGET_64)
  238. if (HeapBlockMap64::GetNodeIndex(originalCandidate) != HeapBlockMap64::GetNodeIndex(realCandidate))
  239. {
  240. // We crossed a node boundary (very rare) so we should just re-start from the real candidate.
  241. // In this case we are no longer marking an interior reference.
  242. markContext->GetRecycler()->heapBlockMap.Mark<interlocked, false>(realCandidate, markContext);
  243. // This mark code therefore has nothing to do (it has already happened).
  244. return true;
  245. }
  246. #endif
  247. // Update the chunk as the interior pointer may cross an L2 boundary (e.g., a large object)
  248. chunk = map[GetLevel1Id(realCandidate)];
  249. }
  250. // Perform the actual mark for the interior pointer
  251. return MarkInternal<interlocked>(chunk, realCandidate);
  252. }
// Mark via a (possibly) interior pointer: resolve the pointer to the base
// address of its containing object, mark that base, and dispatch it for
// processing based on heap block type.
template <bool interlocked, bool doSpecialMark>
inline
void
HeapBlockMap32::MarkInterior(void * candidate, MarkContext * markContext)
{
    // Align the candidate down to object allocation granularity.
    candidate = reinterpret_cast<void*>(reinterpret_cast<size_t>(candidate) & ~HeapInfo::ObjectAlignmentMask);

    uint id1 = GetLevel1Id(candidate);

    L2MapChunk * chunk = map[id1];
    if (chunk == nullptr)
    {
        // False reference; no further processing needed.
        return;
    }

    // Mark the interior address first; MarkInteriorInternal (below) fixes up
    // the mark for the base address if the two differ.
    if (MarkInternal<interlocked>(chunk, candidate))
    {
        if (doSpecialMark)
        {
            this->OnSpecialMark(chunk, candidate);
        }
        // Already marked (mark internal-then-actual first)
        return;
    }

    uint id2 = GetLevel2Id(candidate);
    HeapBlock::HeapBlockType blockType = chunk->blockInfo[id2].blockType;

    // Switch on the HeapBlockType to determine how to map interior->base and process object.
    switch (blockType)
    {
    case HeapBlock::HeapBlockType::FreeBlockType:
        // False reference. Do nothing.
        break;

    case HeapBlock::HeapBlockType::SmallLeafBlockType:
        {
            // We want to scan leaf blocks for preventing UAFs due to interior pointers on stack.
            byte bucketIndex = chunk->blockInfo[id2].bucketIndex;
            uint objectSize = HeapInfo::GetObjectSizeForBucketIndex<SmallAllocationBlockAttributes>(bucketIndex);
            void * realCandidate = SmallLeafHeapBlock::GetRealAddressFromInterior(candidate, objectSize, bucketIndex);
            MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate);
            // Leaf object doesn't need to be added to the mark stack.
        }
        break;

    case HeapBlock::HeapBlockType::SmallNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::SmallNormalBlockWithBarrierType:
#endif
        {
            byte bucketIndex = chunk->blockInfo[id2].bucketIndex;
            uint objectSize = HeapInfo::GetObjectSizeForBucketIndex<SmallAllocationBlockAttributes>(bucketIndex);
            void * realCandidate = SmallHeapBlock::GetRealAddressFromInterior(candidate, objectSize, bucketIndex);
            if (MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }
            if (!markContext->AddMarkedObject(realCandidate, objectSize))
            {
                // Failed to mark due to OOM; flag the block for rescan later.
                ((SmallHeapBlock *)chunk->map[id2])->SetNeedOOMRescan(markContext->GetRecycler());
            }
        }
        break;

    case HeapBlock::HeapBlockType::MediumLeafBlockType:
        {
            // We want to scan leaf blocks for preventing UAFs due to interior pointers on stack.
            byte bucketIndex = chunk->blockInfo[id2].bucketIndex;
            uint objectSize = HeapInfo::GetObjectSizeForBucketIndex<MediumAllocationBlockAttributes>(bucketIndex);
            void * realCandidate = MediumLeafHeapBlock::GetRealAddressFromInterior(candidate, objectSize, bucketIndex);
            MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate);
            // Leaf object doesn't need to be added to the mark stack.
        }
        break;

    case HeapBlock::HeapBlockType::MediumNormalBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::MediumNormalBlockWithBarrierType:
#endif
        {
            byte bucketIndex = chunk->blockInfo[id2].bucketIndex;
            uint objectSize = HeapInfo::GetObjectSizeForBucketIndex<MediumAllocationBlockAttributes>(bucketIndex);
            void * realCandidate = MediumHeapBlock::GetRealAddressFromInterior(candidate, objectSize, bucketIndex);
            if (MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }
            if (!markContext->AddMarkedObject(realCandidate, objectSize))
            {
                // Failed to mark due to OOM; flag the block for rescan later.
                ((MediumHeapBlock *)chunk->map[id2])->SetNeedOOMRescan(markContext->GetRecycler());
            }
        }
        break;

    case HeapBlock::HeapBlockType::SmallFinalizableBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::SmallFinalizableBlockWithBarrierType:
#endif
        {
            void * realCandidate = ((SmallFinalizableHeapBlock*)chunk->map[id2])->GetRealAddressFromInterior(candidate);
            if (MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }
            ((SmallFinalizableHeapBlock*)chunk->map[id2])->ProcessMarkedObject<doSpecialMark>(realCandidate, markContext);
        }
        break;

    case HeapBlock::HeapBlockType::MediumFinalizableBlockType:
#ifdef RECYCLER_WRITE_BARRIER
    case HeapBlock::HeapBlockType::MediumFinalizableBlockWithBarrierType:
#endif
        {
            void * realCandidate = ((MediumFinalizableHeapBlock*)chunk->map[id2])->GetRealAddressFromInterior(candidate);
            if (MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }
            ((MediumFinalizableHeapBlock*)chunk->map[id2])->ProcessMarkedObject<doSpecialMark>(realCandidate, markContext);
        }
        break;

#ifdef RECYCLER_VISITED_HOST
    case HeapBlock::HeapBlockType::SmallRecyclerVisitedHostBlockType:
        {
            void * realCandidate = ((SmallFinalizableHeapBlock*)chunk->map[id2])->GetRealAddressFromInterior(candidate);
            if (MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }
            ((SmallRecyclerVisitedHostHeapBlock*)chunk->map[id2])->ProcessMarkedObject<doSpecialMark>(realCandidate, markContext);
        }
        break;

    case HeapBlock::HeapBlockType::MediumRecyclerVisitedHostBlockType:
        {
            void * realCandidate = ((MediumFinalizableHeapBlock*)chunk->map[id2])->GetRealAddressFromInterior(candidate);
            if (MarkInteriorInternal<interlocked, false>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }
            ((MediumRecyclerVisitedHostHeapBlock*)chunk->map[id2])->ProcessMarkedObject<doSpecialMark>(realCandidate, markContext);
        }
        break;
#endif

    case HeapBlock::HeapBlockType::LargeBlockType:
        {
            void * realCandidate = ((LargeHeapBlock*)chunk->map[id2])->GetRealAddressFromInterior(candidate);
            // MarkInteriorInternal may have updated `chunk` (large objects can
            // span L2 chunks), so look the block up again via realCandidate.
            if (MarkInteriorInternal<interlocked, true>(markContext, chunk, candidate, realCandidate))
            {
                break;
            }
            ((LargeHeapBlock*)chunk->map[GetLevel2Id(realCandidate)])->Mark<doSpecialMark>(realCandidate, markContext);
        }
        break;

    case HeapBlock::HeapBlockType::BlockTypeCount:
        AssertMsg(false, "code should be unreachable");
        break;

#if DBG
    default:
        AssertMsg(false, "what's the new heap block type?");
#endif
    }
}
  409. #if defined(TARGET_64)
  410. //
  411. // 64-bit Mark
  412. // See HeapBlockMap32::Mark for explanation of return values
  413. //
  414. template <bool interlocked, bool doSpecialMark>
  415. inline
  416. void
  417. HeapBlockMap64::Mark(void * candidate, MarkContext * markContext)
  418. {
  419. if (!list || !HeapInfo::IsAlignedAddress(candidate) || (size_t)candidate < 0x10000)
  420. {
  421. return;
  422. }
  423. uint index = GetNodeIndex(candidate);
  424. Node * node = list;
  425. while (node != nullptr)
  426. {
  427. if (node->nodeIndex == index)
  428. {
  429. // Found the correct Node.
  430. // Process the mark and return.
  431. node->map.Mark<interlocked, doSpecialMark>(candidate, markContext);
  432. return;
  433. }
  434. node = node->next;
  435. }
  436. // No Node found; must be an invalid reference. Do nothing.
  437. }
  438. template <bool interlocked, bool doSpecialMark>
  439. inline
  440. void
  441. HeapBlockMap64::MarkInterior(void * candidate, MarkContext * markContext)
  442. {
  443. if (!list || (size_t)candidate < 0x10000)
  444. {
  445. return;
  446. }
  447. uint index = GetNodeIndex(candidate);
  448. Node * node = list;
  449. while (node != nullptr)
  450. {
  451. if (node->nodeIndex == index)
  452. {
  453. // Found the correct Node.
  454. // Process the mark and return.
  455. node->map.MarkInterior<interlocked, doSpecialMark>(candidate, markContext);
  456. return;
  457. }
  458. node = node->next;
  459. }
  460. // No Node found; must be an invalid reference. Do nothing.
  461. }
  462. #endif // defined(TARGET_64)