RecyclerSweep.cpp 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "CommonMemoryPch.h"
  6. #if ENABLE_PARTIAL_GC
  7. #define KILOBYTES * 1024
  8. #define MEGABYTES * 1024 KILOBYTES
  9. #define MEGABYTES_OF_PAGES * 1024 * 1024 / AutoSystemInfo::PageSize;
  10. const uint RecyclerSweep::MinPartialUncollectedNewPageCount = 4 MEGABYTES_OF_PAGES;
  11. const uint RecyclerSweep::MaxPartialCollectRescanRootBytes = 5 MEGABYTES;
  12. static const uint MinPartialCollectRescanRootBytes = 128 KILOBYTES;
  13. // Maximum unused partial collect free bytes before we get out of partial GC mode
  14. static const uint MaxUnusedPartialCollectFreeBytes = 16 MEGABYTES;
  15. // Have to collected at least 10% before we would partial GC
  16. // CONSIDER: It may be good to do partial with low efficacy once we have concurrent partial
  17. // because old object are not getting collected as well, but without concurrent partial, we will have to mark
  18. // new objects in thread.
  19. static const double MinPartialCollectEfficacy = 0.1;
  20. #endif
  21. bool
  22. RecyclerSweep::IsMemProtectMode()
  23. {
  24. return recycler->IsMemProtectMode();
  25. }
  26. #if ENABLE_PARTIAL_GC
// Prepares this RecyclerSweep for a sweep pass:
//  - notifies the host via PreSweepCallback,
//  - zero-initializes this object and registers it on the recycler,
//  - flushes objects that were disposed but not yet returned to the heap,
//  - (concurrent GC) detaches the heap's per-flavor "new heap block" lists into
//    this object so they can be merged back before each bucket is swept,
//  - (partial GC) decides whether the upcoming sweep runs in partial-collect
//    mode and resets the appropriate heuristic counters.
// rescanRootBytes / adjustPartialHeuristics exist only when partial GC is enabled.
#if ENABLE_PARTIAL_GC
void
RecyclerSweep::BeginSweep(Recycler * recycler, size_t rescanRootBytes, bool adjustPartialHeuristics)
#else
void
RecyclerSweep::BeginSweep(Recycler * recycler)
#endif
{
    {
        // We are about to sweep, give the runtime a chance to see the now-immutable state of the world.
        // And clean up all the cache not monitor by the GC (e.g. inline caches)
        AUTO_NO_EXCEPTION_REGION;
        recycler->collectionWrapper->PreSweepCallback();
    }
    Assert(!recycler->IsSweeping());
    Assert(recycler->recyclerSweep == nullptr);

    // Reset all sweep state; this object is reused across collections.
    memset(this, 0, sizeof(RecyclerSweep));
    this->recycler = recycler;
    recycler->recyclerSweep = this;

    // We might still have block that has disposed but not put back into the allocable
    // heap block list yet, which happens if we finish disposing object during concurrent
    // reset mark and can't modify the heap block lists
    // CONCURRENT-TODO: Consider doing it during FinishDisposeObjects to get these block
    // available sooner as well. We will still need it here as we only always get to
    // finish dispose before sweep.
    this->FlushPendingTransferDisposedObjects();

#if ENABLE_CONCURRENT_GC
    // Take the small heap block new heap block list and store in RecyclerSweep temporary
    // We get merge later before we start sweeping the bucket.
    leafData.pendingMergeNewHeapBlockList = recycler->autoHeap.newLeafHeapBlockList;
    normalData.pendingMergeNewHeapBlockList = recycler->autoHeap.newNormalHeapBlockList;
#ifdef RECYCLER_WRITE_BARRIER
    withBarrierData.pendingMergeNewHeapBlockList = recycler->autoHeap.newNormalWithBarrierHeapBlockList;
    finalizableWithBarrierData.pendingMergeNewHeapBlockList = recycler->autoHeap.newFinalizableWithBarrierHeapBlockList;
#endif
    finalizableData.pendingMergeNewHeapBlockList = recycler->autoHeap.newFinalizableHeapBlockList;
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostData.pendingMergeNewHeapBlockList = recycler->autoHeap.newRecyclerVisitedHostHeapBlockList;
#endif
    mediumLeafData.pendingMergeNewHeapBlockList = recycler->autoHeap.newMediumLeafHeapBlockList;
    mediumNormalData.pendingMergeNewHeapBlockList = recycler->autoHeap.newMediumNormalHeapBlockList;
#ifdef RECYCLER_WRITE_BARRIER
    mediumWithBarrierData.pendingMergeNewHeapBlockList = recycler->autoHeap.newMediumNormalWithBarrierHeapBlockList;
    mediumFinalizableWithBarrierData.pendingMergeNewHeapBlockList = recycler->autoHeap.newMediumFinalizableWithBarrierHeapBlockList;
#endif
    mediumFinalizableData.pendingMergeNewHeapBlockList = recycler->autoHeap.newMediumFinalizableHeapBlockList;
#ifdef RECYCLER_VISITED_HOST
    mediumRecyclerVisitedHostData.pendingMergeNewHeapBlockList = recycler->autoHeap.newMediumRecyclerVisitedHostHeapBlockList;
#endif

    // The heap's new-block lists are now owned by this RecyclerSweep; clear them
    // so the heap starts fresh lists for blocks created during/after this sweep.
    recycler->autoHeap.newLeafHeapBlockList = nullptr;
    recycler->autoHeap.newNormalHeapBlockList = nullptr;
    recycler->autoHeap.newFinalizableHeapBlockList = nullptr;
#ifdef RECYCLER_VISITED_HOST
    recycler->autoHeap.newRecyclerVisitedHostHeapBlockList = nullptr;
#endif
#ifdef RECYCLER_WRITE_BARRIER
    recycler->autoHeap.newNormalWithBarrierHeapBlockList = nullptr;
    recycler->autoHeap.newFinalizableWithBarrierHeapBlockList = nullptr;
#endif
    recycler->autoHeap.newMediumLeafHeapBlockList = nullptr;
    recycler->autoHeap.newMediumNormalHeapBlockList = nullptr;
    recycler->autoHeap.newMediumFinalizableHeapBlockList = nullptr;
#ifdef RECYCLER_VISITED_HOST
    recycler->autoHeap.newMediumRecyclerVisitedHostHeapBlockList = nullptr;
#endif
#ifdef RECYCLER_WRITE_BARRIER
    recycler->autoHeap.newMediumNormalWithBarrierHeapBlockList = nullptr;
    recycler->autoHeap.newMediumFinalizableWithBarrierHeapBlockList = nullptr;
#endif
#endif

#if ENABLE_PARTIAL_GC
    Assert(recycler->clientTrackedObjectList.Empty());

    // We should not have partialUncollectedAllocBytes unless we are in partial collect at this point
    Assert(recycler->partialUncollectedAllocBytes == 0 || recycler->inPartialCollectMode);
    Assert(recycler->autoHeap.uncollectedAllocBytes >= recycler->partialUncollectedAllocBytes);

    // if the cost of rescan is too high, we want to disable partial GC starting from the
    // upcoming Sweep. We basically move the check up from AdjustPartialHeuristics to here
    // such that we can have the decision before sweep.
    this->rescanRootBytes = rescanRootBytes;
    RECYCLER_STATS_SET(recycler, rescanRootBytes, rescanRootBytes);
    if (this->DoPartialCollectMode())
    {
        // enable partial collect for sweep & next round of GC
        DebugOnly(this->partial = true);
        // REVIEW: is adjustPartialHeuristicsMode the same as in PartialCollectMode?
        this->adjustPartialHeuristics = adjustPartialHeuristics;
        this->StartPartialCollectMode();
    }
    else
    {
        // disable partial collect
        if (recycler->inPartialCollectMode)
        {
            recycler->FinishPartialCollect();
        }
        Assert(recycler->partialUncollectedAllocBytes == 0);
        Assert(!recycler->inPartialCollectMode);
    }

    if (this->inPartialCollect)
    {
        // We just did a partial collect.
        // We only want to count objects that survived this collect towards the next full GC.
        // Thus, clear out uncollectedAllocBytes here; we will adjust to account for objects that
        // survived this partial collect in EndSweep.
        recycler->ResetHeuristicCounters();
    }
    else
#endif
    {
        Assert(!this->inPartialCollect);
        // We just did a full collect.
        // We reset uncollectedAllocBytes when we kicked off the collection,
        // so don't reset it here (but do reset partial heuristics).
        recycler->ResetPartialHeuristicCounters();
    }
}
// Completes the sweep pass (may run on the background thread). When in partial
// collect mode, either commits partial heuristics (reusing partially-free pages
// and resetting write watch) or abandons partial mode via FinishPartialCollect /
// BackgroundFinishPartialCollect. Finally sweeps pending objects (concurrent GC).
void
RecyclerSweep::FinishSweep()
{
#if ENABLE_PARTIAL_GC
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    // ETW bracketing for concurrent sweep Pass 2 (matching GC_STOP at the end).
    if (recycler->collectionState == CollectionStateConcurrentSweepPass2)
    {
        GCETW_INTERNAL(GC_START, (recycler, ETWEvent_ConcurrentSweep_Pass2));
    }
#endif
    Assert(this->partial == recycler->inPartialCollectMode);

    // Adjust heuristics
    if (recycler->inPartialCollectMode)
    {
        if (this->AdjustPartialHeuristics())
        {
            // Partial collect stays on: reuse partially-free pages.
            GCETW(GC_SWEEP_PARTIAL_REUSE_PAGE_START, (recycler));

            // If we are doing a full concurrent GC, all allocated bytes are consider "collected".
            // We only start accumulating uncollected allocate bytes during partial GC.
            // FinishPartialCollect will reset it to 0 if we are not doing a partial GC
            recycler->partialUncollectedAllocBytes = this->InPartialCollect()? this->nextPartialUncollectedAllocBytes : 0;
#ifdef RECYCLER_TRACE
            if (recycler->GetRecyclerFlagsTable().Trace.IsEnabled(Js::PartialCollectPhase))
            {
                Output::Print(_u("AdjustPartialHeuristics returned true\n"));
                Output::Print(_u("   partialUncollectedAllocBytes = %d\n"), recycler->partialUncollectedAllocBytes);
                Output::Print(_u("   nextPartialUncollectedAllocBytes = %d\n"), this->nextPartialUncollectedAllocBytes);
            }
#endif
            recycler->autoHeap.SweepPartialReusePages(*this);
            GCETW(GC_SWEEP_PARTIAL_REUSE_PAGE_STOP, (recycler));

#ifdef RECYCLER_WRITE_WATCH
            if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
            {
                // Write watch can only be reset from the main thread.
                if (!this->IsBackground())
                {
                    RECYCLER_PROFILE_EXEC_BEGIN(recycler, Js::ResetWriteWatchPhase);
                    if (!recycler->recyclerPageAllocator.ResetWriteWatch() ||
                        !recycler->recyclerLargeBlockPageAllocator.ResetWriteWatch())
                    {
                        // Shouldn't happen; if it does, permanently give up on partial collect.
                        Assert(false);
                        recycler->enablePartialCollect = false;
                        recycler->FinishPartialCollect(this);
                    }
                    RECYCLER_PROFILE_EXEC_END(recycler, Js::ResetWriteWatchPhase);
                }
            }
#endif
        }
        else
        {
            // Heuristics rejected partial collect: tear partial mode down.
#ifdef RECYCLER_TRACE
            if (recycler->GetRecyclerFlagsTable().Trace.IsEnabled(Js::PartialCollectPhase))
            {
                Output::Print(_u("AdjustPartialHeuristics returned false\n"));
            }
#endif
#if ENABLE_CONCURRENT_GC
            if (this->IsBackground())
            {
                recycler->BackgroundFinishPartialCollect(this);
            }
            else
#endif
            {
                recycler->FinishPartialCollect(this);
            }
        }
    }
    else
    {
        Assert(!this->adjustPartialHeuristics);
        // Initial value or Sweep should have called FinishPartialCollect to these if we are not doing partial
        Assert(recycler->partialUncollectedAllocBytes == 0);
    }

#if ENABLE_CONCURRENT_GC
    recycler->SweepPendingObjects(*this);
#endif
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    if (recycler->collectionState == CollectionStateConcurrentSweepPass2)
    {
        GCETW_INTERNAL(GC_STOP, (recycler, ETWEvent_ConcurrentSweep_Pass2));
    }
#endif
#endif
}
// Final bookkeeping after a sweep: restores the uncollected-alloc-bytes
// adjustment for a partial collect, unregisters this sweep object from the
// recycler, and releases per-collection heap block map structures.
void
RecyclerSweep::EndSweep()
{
#if ENABLE_PARTIAL_GC
    // We clear out the old uncollectedAllocBytes, restore it now to get the adjustment for partial
    // We clear it again after we are done collecting and if we are not in partial collect
    if (this->inPartialCollect)
    {
        recycler->autoHeap.uncollectedAllocBytes += this->nextPartialUncollectedAllocBytes;
#ifdef RECYCLER_TRACE
        if (recycler->GetRecyclerFlagsTable().Trace.IsEnabled(Js::PartialCollectPhase))
        {
            Output::Print(_u("EndSweep for partial sweep\n"));
            Output::Print(_u("   uncollectedAllocBytes = %d\n"), recycler->autoHeap.uncollectedAllocBytes);
            Output::Print(_u("   nextPartialUncollectedAllocBytes = %d\n"), this->nextPartialUncollectedAllocBytes);
        }
#endif
    }
#endif
    // The sweep is over; the recycler no longer points at this object.
    recycler->recyclerSweep = nullptr;

    // Clean up the HeapBlockMap.
    // This will release any internal structures that are no longer needed after Sweep.
    recycler->heapBlockMap.Cleanup(recycler->IsMemProtectMode());
}
#if ENABLE_CONCURRENT_GC
// Runs the concurrent part of the sweep. Sweeps the small non-finalizable
// buckets, then — unless allocations during concurrent sweep are enabled and
// allowed — completes the sweep and leaves background mode in one go.
void
RecyclerSweep::BackgroundSweep()
{
    // NOTE(review): this passes the current value of the *member*
    // forceForeground back into BeginBackground, re-deriving `background`
    // from it — confirm the member is set as intended before this call.
    this->BeginBackground(forceForeground);

    // Finish the concurrent part of the first pass
    this->recycler->autoHeap.SweepSmallNonFinalizable(*this);

#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    // With concurrent-sweep allocations on, FinishSweep/EndBackground happen
    // elsewhere (in a later pass), so skip them here.
    if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || !this->recycler->AllowAllocationsDuringConcurrentSweep())
#endif
    {
        // Finish the rest of the sweep
        this->FinishSweep();
        this->EndBackground();
    }
}
#endif
  271. Recycler *
  272. RecyclerSweep::GetRecycler() const
  273. {
  274. return recycler;
  275. }
  276. bool
  277. RecyclerSweep::IsBackground() const
  278. {
  279. return this->background;
  280. }
  281. bool
  282. RecyclerSweep::HasSetupBackgroundSweep() const
  283. {
  284. return this->IsBackground() || this->forceForeground;
  285. }
  286. void
  287. RecyclerSweep::FlushPendingTransferDisposedObjects()
  288. {
  289. if (recycler->hasPendingTransferDisposedObjects)
  290. {
  291. // If recycler->inResolveExternalWeakReferences is true, the recycler isn't really disposing anymore
  292. // so it's safe to call transferDisposedObjects
  293. Assert(!recycler->inDispose || recycler->inResolveExternalWeakReferences);
  294. Assert(!recycler->hasDisposableObject);
  295. recycler->autoHeap.TransferDisposedObjects();
  296. }
  297. }
// Releases every heap block list this RecyclerSweep may still hold at recycler
// shutdown: the detached pending-merge "new block" lists, and each bucket's
// pending-sweep and pending-empty lists (small and medium sizes).
void
RecyclerSweep::ShutdownCleanup()
{
    // REVIEW: Does this need to be controlled more granularly, say with ENABLE_PARTIAL_GC?
#if ENABLE_CONCURRENT_GC
    // Pending-merge lists detached from the heap in BeginSweep.
    SmallLeafHeapBucketT<SmallAllocationBlockAttributes>::DeleteHeapBlockList(this->leafData.pendingMergeNewHeapBlockList, recycler);
    SmallNormalHeapBucket::DeleteHeapBlockList(this->normalData.pendingMergeNewHeapBlockList, recycler);
#ifdef RECYCLER_WRITE_BARRIER
    SmallNormalWithBarrierHeapBucket::DeleteHeapBlockList(this->withBarrierData.pendingMergeNewHeapBlockList, recycler);
    SmallFinalizableWithBarrierHeapBucket::DeleteHeapBlockList(this->finalizableWithBarrierData.pendingMergeNewHeapBlockList, recycler);
#endif
    SmallFinalizableHeapBucket::DeleteHeapBlockList(this->finalizableData.pendingMergeNewHeapBlockList, recycler);

    // Per-bucket pending lists for the small heap buckets.
    for (uint i = 0; i < HeapConstants::BucketCount; i++)
    {
        // For leaf, we can always reuse the page as we don't need to rescan them for partial GC
        // It should have been swept immediately during Sweep
        Assert(this->leafData.bucketData[i].pendingSweepList == nullptr);
        SmallNormalHeapBucket::DeleteHeapBlockList(this->normalData.bucketData[i].pendingSweepList, recycler);
        SmallFinalizableHeapBucket::DeleteHeapBlockList(this->finalizableData.bucketData[i].pendingSweepList, recycler);
#ifdef RECYCLER_WRITE_BARRIER
        SmallFinalizableWithBarrierHeapBucket::DeleteHeapBlockList(this->finalizableWithBarrierData.bucketData[i].pendingSweepList, recycler);
#endif

        SmallLeafHeapBucket::DeleteEmptyHeapBlockList(this->leafData.bucketData[i].pendingEmptyBlockList);
        SmallNormalHeapBucket::DeleteEmptyHeapBlockList(this->normalData.bucketData[i].pendingEmptyBlockList);
#ifdef RECYCLER_WRITE_BARRIER
        SmallNormalWithBarrierHeapBucket::DeleteEmptyHeapBlockList(this->withBarrierData.bucketData[i].pendingEmptyBlockList);
        Assert(this->finalizableWithBarrierData.bucketData[i].pendingEmptyBlockList == nullptr);
#endif
        Assert(this->finalizableData.bucketData[i].pendingEmptyBlockList == nullptr);
    }

    // Same cleanup for the medium heap buckets.
    MediumLeafHeapBucket::DeleteHeapBlockList(this->mediumLeafData.pendingMergeNewHeapBlockList, recycler);
    MediumNormalHeapBucket::DeleteHeapBlockList(this->mediumNormalData.pendingMergeNewHeapBlockList, recycler);
#ifdef RECYCLER_WRITE_BARRIER
    MediumNormalWithBarrierHeapBucket::DeleteHeapBlockList(this->mediumWithBarrierData.pendingMergeNewHeapBlockList, recycler);
    MediumFinalizableWithBarrierHeapBucket::DeleteHeapBlockList(this->mediumFinalizableWithBarrierData.pendingMergeNewHeapBlockList, recycler);
#endif
    MediumFinalizableHeapBucket::DeleteHeapBlockList(this->mediumFinalizableData.pendingMergeNewHeapBlockList, recycler);

    for (uint i = 0; i < HeapConstants::MediumBucketCount; i++)
    {
        // For leaf, we can always reuse the page as we don't need to rescan them for partial GC
        // It should have been swept immediately during Sweep
        Assert(this->mediumLeafData.bucketData[i].pendingSweepList == nullptr);
        MediumNormalHeapBucket::DeleteHeapBlockList(this->mediumNormalData.bucketData[i].pendingSweepList, recycler);
        MediumFinalizableHeapBucket::DeleteHeapBlockList(this->mediumFinalizableData.bucketData[i].pendingSweepList, recycler);
#ifdef RECYCLER_WRITE_BARRIER
        MediumFinalizableWithBarrierHeapBucket::DeleteHeapBlockList(this->mediumFinalizableWithBarrierData.bucketData[i].pendingSweepList, recycler);
#endif

        MediumLeafHeapBucket::DeleteEmptyHeapBlockList(this->mediumLeafData.bucketData[i].pendingEmptyBlockList);
        MediumNormalHeapBucket::DeleteEmptyHeapBlockList(this->mediumNormalData.bucketData[i].pendingEmptyBlockList);
#ifdef RECYCLER_WRITE_BARRIER
        MediumNormalWithBarrierHeapBucket::DeleteEmptyHeapBlockList(this->mediumWithBarrierData.bucketData[i].pendingEmptyBlockList);
        Assert(this->mediumFinalizableWithBarrierData.bucketData[i].pendingEmptyBlockList == nullptr);
#endif
        Assert(this->mediumFinalizableData.bucketData[i].pendingEmptyBlockList == nullptr);
    }
#endif
}
  355. #if ENABLE_CONCURRENT_GC
  356. template <typename TBlockType>
  357. void
  358. RecyclerSweep::MergePendingNewHeapBlockList()
  359. {
  360. TBlockType *& blockList = this->GetData<TBlockType>().pendingMergeNewHeapBlockList;
  361. TBlockType * list = blockList;
  362. blockList = nullptr;
  363. HeapInfo& heapInfo = recycler->autoHeap;
  364. HeapBlockList::ForEachEditing(list, [&heapInfo](TBlockType * heapBlock)
  365. {
  366. auto& bucket = heapInfo.GetBucket<TBlockType::RequiredAttributes>(heapBlock->GetObjectSize());
  367. bucket.MergeNewHeapBlock(heapBlock);
  368. });
  369. }
  370. template void RecyclerSweep::MergePendingNewHeapBlockList<SmallLeafHeapBlock>();
  371. template void RecyclerSweep::MergePendingNewHeapBlockList<SmallNormalHeapBlock>();
  372. template void RecyclerSweep::MergePendingNewHeapBlockList<SmallFinalizableHeapBlock>();
  373. #ifdef RECYCLER_VISITED_HOST
  374. template void RecyclerSweep::MergePendingNewHeapBlockList<SmallRecyclerVisitedHostHeapBlock>();
  375. #endif
  376. #ifdef RECYCLER_WRITE_BARRIER
  377. template void RecyclerSweep::MergePendingNewHeapBlockList<SmallNormalWithBarrierHeapBlock>();
  378. template void RecyclerSweep::MergePendingNewHeapBlockList<SmallFinalizableWithBarrierHeapBlock>();
  379. #endif
  380. template <typename TBlockType>
  381. void
  382. RecyclerSweep::MergePendingNewMediumHeapBlockList()
  383. {
  384. TBlockType *& blockList = this->GetData<TBlockType>().pendingMergeNewHeapBlockList;
  385. TBlockType * list = blockList;
  386. blockList = nullptr;
  387. HeapInfo& heapInfo = recycler->autoHeap;
  388. HeapBlockList::ForEachEditing(list, [&heapInfo](TBlockType * heapBlock)
  389. {
  390. auto& bucket = heapInfo.GetMediumBucket<TBlockType::RequiredAttributes>(heapBlock->GetObjectSize());
  391. bucket.MergeNewHeapBlock(heapBlock);
  392. });
  393. }
  394. template void RecyclerSweep::MergePendingNewMediumHeapBlockList<MediumLeafHeapBlock>();
  395. template void RecyclerSweep::MergePendingNewMediumHeapBlockList<MediumNormalHeapBlock>();
  396. template void RecyclerSweep::MergePendingNewMediumHeapBlockList<MediumFinalizableHeapBlock>();
  397. #ifdef RECYCLER_VISITED_HOST
  398. template void RecyclerSweep::MergePendingNewMediumHeapBlockList<MediumRecyclerVisitedHostHeapBlock>();
  399. #endif
  400. #ifdef RECYCLER_WRITE_BARRIER
  401. template void RecyclerSweep::MergePendingNewMediumHeapBlockList<MediumNormalWithBarrierHeapBlock>();
  402. template void RecyclerSweep::MergePendingNewMediumHeapBlockList<MediumFinalizableWithBarrierHeapBlock>();
  403. #endif
  404. bool
  405. RecyclerSweep::HasPendingEmptyBlocks() const
  406. {
  407. return this->hasPendingEmptyBlocks;
  408. }
  409. bool
  410. RecyclerSweep::HasPendingSweepSmallHeapBlocks() const
  411. {
  412. return this->hasPendingSweepSmallHeapBlocks;
  413. }
  414. void
  415. RecyclerSweep::SetHasPendingSweepSmallHeapBlocks()
  416. {
  417. this->hasPendingSweepSmallHeapBlocks = true;
  418. }
  419. void
  420. RecyclerSweep::BeginBackground(bool forceForeground)
  421. {
  422. Assert(!background);
  423. this->background = !forceForeground;
  424. this->forceForeground = forceForeground;
  425. }
  426. void
  427. RecyclerSweep::EndBackground()
  428. {
  429. Assert(this->background || this->forceForeground);
  430. this->background = false;
  431. }
  432. #if DBG
  433. bool
  434. RecyclerSweep::HasPendingNewHeapBlocks() const
  435. {
  436. return leafData.pendingMergeNewHeapBlockList != nullptr
  437. || normalData.pendingMergeNewHeapBlockList != nullptr
  438. || finalizableData.pendingMergeNewHeapBlockList != nullptr
  439. #ifdef RECYCLER_WRITE_BARRIER
  440. || withBarrierData.pendingMergeNewHeapBlockList != nullptr
  441. || finalizableWithBarrierData.pendingMergeNewHeapBlockList != nullptr
  442. #endif
  443. || mediumLeafData.pendingMergeNewHeapBlockList != nullptr
  444. || mediumNormalData.pendingMergeNewHeapBlockList != nullptr
  445. || mediumFinalizableData.pendingMergeNewHeapBlockList != nullptr
  446. #ifdef RECYCLER_WRITE_BARRIER
  447. || mediumWithBarrierData.pendingMergeNewHeapBlockList != nullptr
  448. || mediumFinalizableWithBarrierData.pendingMergeNewHeapBlockList != nullptr
  449. #endif
  450. ;
  451. }
  452. #endif
#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
// Returns the total number of heap blocks across every pending-merge new
// block list (small + medium, all bucket flavors).
// NOTE(review): despite the "Set" prefix this function only computes and
// returns a count — it mutates nothing here; confirm the caller uses the
// result to set its own counter.
size_t
RecyclerSweep::SetPendingMergeNewHeapBlockCount()
{
    return HeapBlockList::Count(leafData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(normalData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(finalizableData.pendingMergeNewHeapBlockList)
#ifdef RECYCLER_VISITED_HOST
        + HeapBlockList::Count(recyclerVisitedHostData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(mediumRecyclerVisitedHostData.pendingMergeNewHeapBlockList)
#endif
#ifdef RECYCLER_WRITE_BARRIER
        + HeapBlockList::Count(withBarrierData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(finalizableWithBarrierData.pendingMergeNewHeapBlockList)
#endif
        + HeapBlockList::Count(mediumLeafData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(mediumNormalData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(mediumFinalizableData.pendingMergeNewHeapBlockList)
#ifdef RECYCLER_WRITE_BARRIER
        + HeapBlockList::Count(mediumWithBarrierData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(mediumFinalizableWithBarrierData.pendingMergeNewHeapBlockList)
#endif
        ;
}
#endif
  478. #endif
  479. #if ENABLE_PARTIAL_GC
  480. bool
  481. RecyclerSweep::InPartialCollectMode() const
  482. {
  483. return recycler->inPartialCollectMode;
  484. }
  485. bool
  486. RecyclerSweep::InPartialCollect() const
  487. {
  488. return this->inPartialCollect;
  489. }
// Turns partial-collect mode on for the upcoming sweep/next GC and seeds the
// byte-accounting used by the partial heuristics:
//   lastPartialUncollectedAllocBytes - carry-over from the previous partial GC
//   partialUncollectedAllocBytes / nextPartialUncollectedAllocBytes -
//       bytes considered "new since the last GC" at the start of this sweep.
void
RecyclerSweep::StartPartialCollectMode()
{
    // Save the in partial collect, the main thread reset it after returning to the script
    // and the background thread still needs it
    this->inPartialCollect = recycler->inPartialCollectMode;
    recycler->inPartialCollectMode = true;

    // Tracks the unallocated alloc bytes for partial GC
    // Keep a copy Last collection's uncollected allocation bytes, so we can use it to calculate
    // the new object that is allocated since the last GC
    Assert(recycler->partialUncollectedAllocBytes == 0 || this->inPartialCollect);
    this->lastPartialUncollectedAllocBytes = recycler->partialUncollectedAllocBytes;

    size_t currentUncollectedAllocBytes = recycler->autoHeap.uncollectedAllocBytes;
    Assert(currentUncollectedAllocBytes >= this->lastPartialUncollectedAllocBytes);
    if (!this->inPartialCollect)
    {
        // If we did a full collect, then we need to include lastUncollectedAllocBytes
        // in the partialUncollectedAllocBytes calculation, because all objects allocated
        // since the previous GC are considered new, but we cleared uncollectedAllocBytes
        // when we kicked off the GC.
        currentUncollectedAllocBytes += recycler->autoHeap.lastUncollectedAllocBytes;
    }

    // Initially, the partial uncollected alloc bytes is the current uncollectedAllocBytes
    recycler->partialUncollectedAllocBytes = currentUncollectedAllocBytes;
    this->nextPartialUncollectedAllocBytes = currentUncollectedAllocBytes;

#ifdef RECYCLER_TRACE
    if (recycler->GetRecyclerFlagsTable().Trace.IsEnabled(Js::PartialCollectPhase))
    {
        Output::Print(_u("StartPartialCollectMode\n"));
        Output::Print(_u("   was inPartialCollectMode = %d\n"), this->inPartialCollect);
        Output::Print(_u("   lastPartialUncollectedAllocBytes = %d\n"), this->lastPartialUncollectedAllocBytes);
        Output::Print(_u("   uncollectedAllocBytes = %d\n"), recycler->autoHeap.uncollectedAllocBytes);
        Output::Print(_u("   nextPartialUncollectedAllocBytes = %d\n"), this->nextPartialUncollectedAllocBytes);
    }
#endif
}
  526. #endif
// Called by prepare sweep to track the new allocated bytes on block that is not fully allocated yet.
// In partial-collect mode the block's unaccounted bytes are folded into the
// recycler-wide and sweep-local "uncollected" totals; otherwise the block's
// counters are simply reset for the next GC.
template <typename TBlockAttributes>
void
RecyclerSweep::AddUnaccountedNewObjectAllocBytes(SmallHeapBlockT<TBlockAttributes> * heapBlock)
{
#if ENABLE_PARTIAL_GC
    // Only need to update the unaccounted alloc bytes if we are in partial collect mode
    if (recycler->inPartialCollectMode)
    {
        // GetAndClearUnaccountedAllocBytes both reads and zeroes the block's counter.
        uint unaccountedAllocBytes = heapBlock->GetAndClearUnaccountedAllocBytes();
        Assert(heapBlock->lastUncollectedAllocBytes == 0 || unaccountedAllocBytes == 0);
        DebugOnly(heapBlock->lastUncollectedAllocBytes += unaccountedAllocBytes);
        recycler->partialUncollectedAllocBytes += unaccountedAllocBytes;
        this->nextPartialUncollectedAllocBytes += unaccountedAllocBytes;
    }
    else
#endif
    {
        // We don't care, clear the unaccounted to start tracking for new object for next GC
        heapBlock->ClearAllAllocBytes();
    }
}
template void RecyclerSweep::AddUnaccountedNewObjectAllocBytes<SmallAllocationBlockAttributes>(SmallHeapBlock * heapBlock);
template void RecyclerSweep::AddUnaccountedNewObjectAllocBytes<MediumAllocationBlockAttributes>(MediumHeapBlock * heapBlock);
  551. #if ENABLE_PARTIAL_GC
// Deducts bytes expected to be freed by the sweep from the running count of
// new-object bytes, so only surviving new objects count toward the next
// partial GC's heuristics. Only valid while in partial-collect mode.
void
RecyclerSweep::SubtractSweepNewObjectAllocBytes(size_t newObjectExpectSweepByteCount)
{
    Assert(recycler->inPartialCollectMode);
    // We shouldn't free more than we allocated
    Assert(this->nextPartialUncollectedAllocBytes >= newObjectExpectSweepByteCount);
    // After the subtraction we must still be at or above the carried-over
    // baseline from the previous partial collect.
    Assert(this->nextPartialUncollectedAllocBytes >= this->lastPartialUncollectedAllocBytes + newObjectExpectSweepByteCount);
    this->nextPartialUncollectedAllocBytes -= newObjectExpectSweepByteCount;
}
  561. /*--------------------------------------------------------------------------------------------
  562. * Determine we want to go into partial collect mode for the next GC before we sweep,
  563. * based on the number bytes needed to rescan (<= 5MB)
  564. *--------------------------------------------------------------------------------------------*/
  565. bool
  566. RecyclerSweep::DoPartialCollectMode()
  567. {
  568. if (!recycler->enablePartialCollect)
  569. {
  570. return false;
  571. }
  572. // If we exceed 16MB of unused memory in partial blocks, get out of partial collect to avoid
  573. // memory fragmentation.
  574. if (recycler->autoHeap.unusedPartialCollectFreeBytes > MaxUnusedPartialCollectFreeBytes)
  575. {
  576. return false;
  577. }
  578. return this->rescanRootBytes <= MaxPartialCollectRescanRootBytes;
  579. }
// Heuristic ratio is ((c * e + (1 - e)) * (1 - p)) + p and use that to linearly scale between min and max
// This give cost/efficacy/pressure equal weight, while each can push it pass where partial GC is not
// beneficial
//
// Where, per the code below:
//   c = collect cost      (adjusted rescan bytes relative to MaxPartialCollectRescanRootBytes)
//   e = collect efficacy  (fraction of newly allocated bytes that this collection freed,
//                          rescaled so MinPartialCollectEfficacy maps to 0.0)
//   p = full-collect pressure (from uncollected alloc bytes and unused partial free bytes)
//
// Returns true if partial GC is still worthwhile (and updates the recycler's partial-GC
// heuristic fields); returns false to indicate we should fall back to a full GC.
bool
RecyclerSweep::AdjustPartialHeuristics()
{
    Assert(recycler->inPartialCollectMode);
    Assert(this->adjustPartialHeuristics);
    Assert(this->InPartialCollect() || recycler->autoHeap.unusedPartialCollectFreeBytes == 0);

    // DoPartialCollectMode should have rejected these already
    Assert(this->rescanRootBytes <= (size_t)MaxPartialCollectRescanRootBytes);
    Assert(recycler->autoHeap.unusedPartialCollectFreeBytes <= MaxUnusedPartialCollectFreeBytes);

    // Page reuse Heuristics
    double collectEfficacy;
    const size_t allocBytes = this->GetNewObjectAllocBytes();
    if (allocBytes == 0)
    {
        // We may get collections without allocating memory (e.g. unpin heuristics).
        collectEfficacy = 1.0; // assume 100% efficacy
        this->partialCollectSmallHeapBlockReuseMinFreeBytes = 0; // reuse all pages
    }
    else
    {
        const size_t freedBytes = this->GetNewObjectFreeBytes();
        Assert(freedBytes <= allocBytes);
        collectEfficacy = (double)freedBytes / (double)allocBytes;

        // If we collected less then 10% of the memory, let's not do partial GC.
        // CONSIDER: It may be good to do partial with low efficacy once we have concurrent partial
        // because old object are not getting collected as well, but without concurrent partial, we will have to mark
        // new objects in thread.
        if (collectEfficacy < MinPartialCollectEfficacy)
        {
            return false;
        }

        // Scale the efficacy linearly such that an efficacy of MinPartialCollectEfficacy translates to an adjusted efficacy of
        // 0.0, and an efficacy of 1.0 translates to an adjusted efficacy of 1.0
        collectEfficacy = (collectEfficacy - MinPartialCollectEfficacy) / (1.0 - MinPartialCollectEfficacy);
        Assert(collectEfficacy <= 1.0);

        // Higher efficacy => require a block to have more free bytes before its pages are reused.
        this->partialCollectSmallHeapBlockReuseMinFreeBytes = (size_t)(AutoSystemInfo::PageSize * collectEfficacy);
    }

#ifdef RECYCLER_STATS
    recycler->collectionStats.collectEfficacy = collectEfficacy;
    recycler->collectionStats.partialCollectSmallHeapBlockReuseMinFreeBytes = this->partialCollectSmallHeapBlockReuseMinFreeBytes;
#endif

    // Blocks which are being reused are likely to be touched again from allocation and contribute to Rescan cost.
    // If there are many of these, adjust rescanRootBytes to account for this.
    const size_t estimatedPartialReuseBlocks = (size_t)((double)this->reuseHeapBlockCount * (1.0 - collectEfficacy));
    const size_t estimatedPartialReuseBytes = estimatedPartialReuseBlocks * AutoSystemInfo::PageSize;
    const size_t newRescanRootBytes = max(this->rescanRootBytes, estimatedPartialReuseBytes);
    RECYCLER_STATS_SET(recycler, estimatedPartialReuseBytes, estimatedPartialReuseBytes);

    // Recheck the rescanRootBytes
    if (newRescanRootBytes > MaxPartialCollectRescanRootBytes)
    {
        return false;
    }

    double collectCost = (double)newRescanRootBytes / MaxPartialCollectRescanRootBytes;
    RECYCLER_STATS_SET(recycler, collectCost, collectCost);

    // Include the efficacy in equal portion, which is related to the cost of marking through new objects.
    // r = c * e + 1 - e;
    const double reuseRatio = 1.0 - collectEfficacy;
    double ratio = collectCost * collectEfficacy + reuseRatio;

    if (this->InPartialCollect())
    {
        // Avoid ratio of uncollectedBytesPressure > 1.0
        if (this->nextPartialUncollectedAllocBytes > RecyclerHeuristic::Instance.MaxUncollectedAllocBytesPartialCollect)
        {
            return false;
        }

        // Only add full collect pressure if we are doing partial collect,
        // account for the amount of uncollected bytes and unused bytes to increase
        // pressure to do a full GC by rising the partial GC new page heuristic
        double uncollectedBytesPressure = (double)this->nextPartialUncollectedAllocBytes / (double)RecyclerHeuristic::Instance.MaxUncollectedAllocBytesPartialCollect;
        double collectFullCollectPressure =
            (double)recycler->autoHeap.unusedPartialCollectFreeBytes / (double)MaxUnusedPartialCollectFreeBytes
            * (1.0 - uncollectedBytesPressure) + uncollectedBytesPressure;
        // Blend the pressure in: ratio' = ratio * (1 - p) + p, matching the formula in the header comment.
        ratio = ratio * (1.0 - collectFullCollectPressure) + collectFullCollectPressure;
    }

    Assert(0.0 <= ratio && ratio <= 1.0);

    // Linear scale the partial GC new page heuristic using the ratio calculated
    recycler->uncollectedNewPageCountPartialCollect = MinPartialUncollectedNewPageCount
        + (size_t)((double)(RecyclerHeuristic::Instance.MaxPartialUncollectedNewPageCount - MinPartialUncollectedNewPageCount) * ratio);
    Assert(recycler->uncollectedNewPageCountPartialCollect >= MinPartialUncollectedNewPageCount &&
        recycler->uncollectedNewPageCountPartialCollect <= RecyclerHeuristic::Instance.MaxPartialUncollectedNewPageCount);

    // If the number of new page to reach the partial heuristics plus the existing uncollectedAllocBytes
    // and the memory we are going to reuse (assume we use it all) is greater then the full GC max size heuristic
    // (with 1M fudge factor), we trigger a full GC anyways, so let's not get into partial GC
    const size_t estimatedPartialReusedFreeByteCount = (size_t)((double)this->reuseByteCount * reuseRatio);
    if (recycler->uncollectedNewPageCountPartialCollect * AutoSystemInfo::PageSize
        + this->nextPartialUncollectedAllocBytes + estimatedPartialReusedFreeByteCount >= RecyclerHeuristic::Instance.MaxUncollectedAllocBytesPartialCollect)
    {
        return false;
    }

#if ENABLE_CONCURRENT_GC
    // Decide whether the next partial collection should also be concurrent, based on the same ratio.
    recycler->partialConcurrentNextCollection = RecyclerHeuristic::PartialConcurrentNextCollection(ratio, recycler->GetRecyclerFlagsTable());
#endif
    return true;
}
  677. size_t
  678. RecyclerSweep::GetNewObjectAllocBytes() const
  679. {
  680. Assert(recycler->inPartialCollectMode);
  681. Assert(recycler->partialUncollectedAllocBytes >= this->lastPartialUncollectedAllocBytes);
  682. return recycler->partialUncollectedAllocBytes - this->lastPartialUncollectedAllocBytes;
  683. }
  684. size_t
  685. RecyclerSweep::GetNewObjectFreeBytes() const
  686. {
  687. Assert(recycler->inPartialCollectMode);
  688. Assert(recycler->partialUncollectedAllocBytes >= this->nextPartialUncollectedAllocBytes);
  689. return recycler->partialUncollectedAllocBytes - this->nextPartialUncollectedAllocBytes;
  690. }
  691. size_t
  692. RecyclerSweep::GetPartialUnusedFreeByteCount() const
  693. {
  694. return partialUnusedFreeByteCount;
  695. }
  696. size_t
  697. RecyclerSweep::GetPartialCollectSmallHeapBlockReuseMinFreeBytes() const
  698. {
  699. return partialCollectSmallHeapBlockReuseMinFreeBytes;
  700. }
  701. template <typename TBlockAttributes>
  702. void
  703. RecyclerSweep::NotifyAllocableObjects(SmallHeapBlockT<TBlockAttributes> * heapBlock)
  704. {
  705. this->reuseByteCount += heapBlock->GetExpectedFreeBytes();
  706. if (!heapBlock->IsLeafBlock())
  707. {
  708. this->reuseHeapBlockCount++;
  709. }
  710. }
  711. template void RecyclerSweep::NotifyAllocableObjects<SmallAllocationBlockAttributes>(SmallHeapBlock* heapBlock);
  712. template void RecyclerSweep::NotifyAllocableObjects<MediumAllocationBlockAttributes>(MediumHeapBlock* heapBlock);
  713. void
  714. RecyclerSweep::AddUnusedFreeByteCount(uint expectFreeByteCount)
  715. {
  716. this->partialUnusedFreeByteCount += expectFreeByteCount;
  717. }
  718. bool
  719. RecyclerSweep::DoAdjustPartialHeuristics() const
  720. {
  721. return this->adjustPartialHeuristics;
  722. }
  723. #endif