RecyclerSweep.cpp 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "CommonMemoryPch.h"
  6. #if ENABLE_PARTIAL_GC
  7. #define KILOBYTES * 1024
  8. #define MEGABYTES * 1024 KILOBYTES
  9. #define MEGABYTES_OF_PAGES * 1024 * 1024 / AutoSystemInfo::PageSize;
  10. const uint RecyclerSweep::MinPartialUncollectedNewPageCount = 4 MEGABYTES_OF_PAGES;
  11. const uint RecyclerSweep::MaxPartialCollectRescanRootBytes = 5 MEGABYTES;
  12. static const uint MinPartialCollectRescanRootBytes = 128 KILOBYTES;
  13. // Maximum unused partial collect free bytes before we get out of partial GC mode
  14. static const uint MaxUnusedPartialCollectFreeBytes = 16 MEGABYTES;
  15. // Have to collected at least 10% before we would partial GC
  16. // CONSIDER: It may be good to do partial with low efficacy once we have concurrent partial
  17. // because old object are not getting collected as well, but without concurrent partial, we will have to mark
  18. // new objects in thread.
  19. static const double MinPartialCollectEfficacy = 0.1;
  20. #endif
  21. bool
  22. RecyclerSweep::IsMemProtectMode()
  23. {
  24. return recycler->IsMemProtectMode();
  25. }
  26. #if ENABLE_PARTIAL_GC
  27. void
  28. RecyclerSweep::BeginSweep(Recycler * recycler, size_t rescanRootBytes, bool adjustPartialHeuristics)
  29. #else
  30. void
  31. RecyclerSweep::BeginSweep(Recycler * recycler)
  32. #endif
  33. {
  34. {
  35. // We are about to sweep, give the runtime a chance to see the now-immutable state of the world.
  36. // And clean up all the cache not monitor by the GC (e.g. inline caches)
  37. AUTO_NO_EXCEPTION_REGION;
  38. recycler->collectionWrapper->PreSweepCallback();
  39. }
  40. Assert(!recycler->IsSweeping());
  41. Assert(recycler->recyclerSweep == nullptr);
  42. memset(this, 0, sizeof(RecyclerSweep));
  43. this->recycler = recycler;
  44. recycler->recyclerSweep = this;
  45. // We might still have block that has disposed but not put back into the allocable
  46. // heap block list yet, which happens if we finish disposing object during concurrent
  47. // reset mark and can't
  48. // modify the heap block lists
  49. // CONCURRENT-TODO: Consider doing it during FinishDisposeObjects to get these block
  50. // available sooner as well. We will still need it here as we only always get to
  51. // finish dispose before sweep.
  52. this->FlushPendingTransferDisposedObjects();
  53. #if ENABLE_CONCURRENT_GC
  54. // Take the small heap block new heap block list and store in RecyclerSweep temporary
  55. // We get merge later before we start sweeping the bucket.
  56. leafData.pendingMergeNewHeapBlockList = recycler->autoHeap.newLeafHeapBlockList;
  57. normalData.pendingMergeNewHeapBlockList = recycler->autoHeap.newNormalHeapBlockList;
  58. #ifdef RECYCLER_WRITE_BARRIER
  59. withBarrierData.pendingMergeNewHeapBlockList = recycler->autoHeap.newNormalWithBarrierHeapBlockList;
  60. finalizableWithBarrierData.pendingMergeNewHeapBlockList = recycler->autoHeap.newFinalizableWithBarrierHeapBlockList;
  61. #endif
  62. finalizableData.pendingMergeNewHeapBlockList = recycler->autoHeap.newFinalizableHeapBlockList;
  63. mediumLeafData.pendingMergeNewHeapBlockList = recycler->autoHeap.newMediumLeafHeapBlockList;
  64. mediumNormalData.pendingMergeNewHeapBlockList = recycler->autoHeap.newMediumNormalHeapBlockList;
  65. #ifdef RECYCLER_WRITE_BARRIER
  66. mediumWithBarrierData.pendingMergeNewHeapBlockList = recycler->autoHeap.newMediumNormalWithBarrierHeapBlockList;
  67. mediumFinalizableWithBarrierData.pendingMergeNewHeapBlockList = recycler->autoHeap.newMediumFinalizableWithBarrierHeapBlockList;
  68. #endif
  69. mediumFinalizableData.pendingMergeNewHeapBlockList = recycler->autoHeap.newMediumFinalizableHeapBlockList;
  70. recycler->autoHeap.newLeafHeapBlockList = nullptr;
  71. recycler->autoHeap.newNormalHeapBlockList = nullptr;
  72. recycler->autoHeap.newFinalizableHeapBlockList = nullptr;
  73. #ifdef RECYCLER_WRITE_BARRIER
  74. recycler->autoHeap.newNormalWithBarrierHeapBlockList = nullptr;
  75. recycler->autoHeap.newFinalizableWithBarrierHeapBlockList = nullptr;
  76. #endif
  77. recycler->autoHeap.newMediumLeafHeapBlockList = nullptr;
  78. recycler->autoHeap.newMediumNormalHeapBlockList = nullptr;
  79. recycler->autoHeap.newMediumFinalizableHeapBlockList = nullptr;
  80. #ifdef RECYCLER_WRITE_BARRIER
  81. recycler->autoHeap.newMediumNormalWithBarrierHeapBlockList = nullptr;
  82. recycler->autoHeap.newMediumFinalizableWithBarrierHeapBlockList = nullptr;
  83. #endif
  84. #endif
  85. #if ENABLE_PARTIAL_GC
  86. Assert(recycler->clientTrackedObjectList.Empty());
  87. // We should not have partialUncollectedAllocBytes unless we are in partial collect at this point
  88. Assert(recycler->partialUncollectedAllocBytes == 0 || recycler->inPartialCollectMode);
  89. Assert(recycler->autoHeap.uncollectedAllocBytes >= recycler->partialUncollectedAllocBytes);
  90. // if the cost of rescan is too high, we want to disable partial GC starting from the
  91. // upcoming Sweep. We basically move the check up from AdjustPartialHeuristics to here
  92. // such that we can have the decision before sweep.
  93. this->rescanRootBytes = rescanRootBytes;
  94. RECYCLER_STATS_SET(recycler, rescanRootBytes, rescanRootBytes);
  95. if (this->DoPartialCollectMode())
  96. {
  97. // enable partial collect for sweep & next round of GC
  98. DebugOnly(this->partial = true);
  99. // REVIEW: is adjustPartialHeuristicsMode the same as in PartialCollectMode?
  100. this->adjustPartialHeuristics = adjustPartialHeuristics;
  101. this->StartPartialCollectMode();
  102. }
  103. else
  104. {
  105. // disable partial collect
  106. if (recycler->inPartialCollectMode)
  107. {
  108. recycler->FinishPartialCollect();
  109. }
  110. Assert(recycler->partialUncollectedAllocBytes == 0);
  111. Assert(!recycler->inPartialCollectMode);
  112. }
  113. if (this->inPartialCollect)
  114. {
  115. // We just did a partial collect.
  116. // We only want to count objects that survived this collect towards the next full GC.
  117. // Thus, clear out uncollectedAllocBytes here; we will adjust to account for objects that
  118. // survived this partial collect in EndSweep.
  119. recycler->ResetHeuristicCounters();
  120. }
  121. else
  122. #endif
  123. {
  124. Assert(!this->inPartialCollect);
  125. // We just did a full collect.
  126. // We reset uncollectedAllocBytes when we kicked off the collection,
  127. // so don't reset it here (but do reset partial heuristics).
  128. recycler->ResetPartialHeuristicCounters();
  129. }
  130. }
// Second half of the sweep: apply the partial-GC decision made by
// AdjustPartialHeuristics (either set up partial page reuse and reset write
// watch, or fall back to finishing the partial collect), then sweep pending
// objects. With concurrent GC this may run on the background thread.
void
RecyclerSweep::FinishSweep()
{
#if ENABLE_PARTIAL_GC
    Assert(this->partial == recycler->inPartialCollectMode);

    // Adjust heuristics
    if (recycler->inPartialCollectMode)
    {
        if (this->AdjustPartialHeuristics())
        {
            GCETW(GC_SWEEP_PARTIAL_REUSE_PAGE_START, (recycler));
            // If we are doing a full concurrent GC, all allocated bytes are consider "collected".
            // We only start accumulating uncollected allocate bytes during partial GC.
            // FinishPartialCollect will reset it to 0 if we are not doing a partial GC
            recycler->partialUncollectedAllocBytes = this->InPartialCollect()? this->nextPartialUncollectedAllocBytes : 0;
#ifdef RECYCLER_TRACE
            if (recycler->GetRecyclerFlagsTable().Trace.IsEnabled(Js::PartialCollectPhase))
            {
                Output::Print(_u("AdjustPartialHeuristics returned true\n"));
                Output::Print(_u(" partialUncollectedAllocBytes = %d\n"), recycler->partialUncollectedAllocBytes);
                Output::Print(_u(" nextPartialUncollectedAllocBytes = %d\n"), this->nextPartialUncollectedAllocBytes);
            }
#endif
            recycler->autoHeap.SweepPartialReusePages(*this);
            GCETW(GC_SWEEP_PARTIAL_REUSE_PAGE_STOP, (recycler));

#ifdef RECYCLER_WRITE_WATCH
            if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
            {
                // Reset the write watch so the next partial GC starts from a
                // clean record of written pages; only done in the foreground.
                if (!this->IsBackground())
                {
                    RECYCLER_PROFILE_EXEC_BEGIN(recycler, Js::ResetWriteWatchPhase);
                    if (!recycler->recyclerPageAllocator.ResetWriteWatch() ||
                        !recycler->recyclerLargeBlockPageAllocator.ResetWriteWatch())
                    {
                        // Shouldn't happen
                        Assert(false);

                        // Failsafe: without valid write-watch data partial GC
                        // would be unsound, so disable it and finish now.
                        recycler->enablePartialCollect = false;
                        recycler->FinishPartialCollect(this);
                    }
                    RECYCLER_PROFILE_EXEC_END(recycler, Js::ResetWriteWatchPhase);
                }
            }
#endif
        }
        else
        {
#ifdef RECYCLER_TRACE
            if (recycler->GetRecyclerFlagsTable().Trace.IsEnabled(Js::PartialCollectPhase))
            {
                Output::Print(_u("AdjustPartialHeuristics returned false\n"));
            }
#endif
            // Heuristics rejected partial GC: finish the partial collect on
            // whichever thread we are currently running on.
#if ENABLE_CONCURRENT_GC
            if (this->IsBackground())
            {
                recycler->BackgroundFinishPartialCollect(this);
            }
            else
#endif
            {
                recycler->FinishPartialCollect(this);
            }
        }
    }
    else
    {
        Assert(!this->adjustPartialHeuristics);
        // Initial value or Sweep should have called FinishPartialCollect to these if we are not doing partial
        Assert(recycler->partialUncollectedAllocBytes == 0);
    }

#if ENABLE_CONCURRENT_GC
    recycler->SweepPendingObjects(*this);
#endif
#endif
}
// Final step of the sweep: restore the uncollected-alloc-bytes accounting for
// a partial collect, detach this sweep object from the recycler, and release
// heap block map structures no longer needed after Sweep.
void
RecyclerSweep::EndSweep()
{
#if ENABLE_PARTIAL_GC
    // We clear out the old uncollectedAllocBytes, restore it now to get the adjustment for partial
    // We clear it again after we are done collecting and if we are not in partial collect
    if (this->inPartialCollect)
    {
        recycler->autoHeap.uncollectedAllocBytes += this->nextPartialUncollectedAllocBytes;
#ifdef RECYCLER_TRACE
        if (recycler->GetRecyclerFlagsTable().Trace.IsEnabled(Js::PartialCollectPhase))
        {
            Output::Print(_u("EndSweep for partial sweep\n"));
            Output::Print(_u(" uncollectedAllocBytes = %d\n"), recycler->autoHeap.uncollectedAllocBytes);
            Output::Print(_u(" nextPartialUncollectedAllocBytes = %d\n"), this->nextPartialUncollectedAllocBytes);
        }
#endif
    }
#endif
    recycler->recyclerSweep = nullptr;

    // Clean up the HeapBlockMap.
    // This will release any internal structures that are no longer needed after Sweep.
    recycler->heapBlockMap.Cleanup(!recycler->IsMemProtectMode());
}
#if ENABLE_CONCURRENT_GC
// Sweep work intended for the concurrent thread: brackets the concurrent part
// of the first sweep pass and the remainder of the sweep between
// BeginBackground/EndBackground.
// NOTE(review): the argument passed to BeginBackground is the forceForeground
// member itself -- presumably set during sweep setup; confirm at the caller.
void
RecyclerSweep::BackgroundSweep()
{
    this->BeginBackground(forceForeground);

    // Finish the concurrent part of the first pass
    this->recycler->autoHeap.SweepSmallNonFinalizable(*this);

    // Finish the rest of the sweep
    this->FinishSweep();

    this->EndBackground();
}
#endif
  242. Recycler *
  243. RecyclerSweep::GetRecycler() const
  244. {
  245. return recycler;
  246. }
  247. bool
  248. RecyclerSweep::IsBackground() const
  249. {
  250. return this->background;
  251. }
  252. bool
  253. RecyclerSweep::HasSetupBackgroundSweep() const
  254. {
  255. return this->IsBackground() || this->forceForeground;
  256. }
  257. void
  258. RecyclerSweep::FlushPendingTransferDisposedObjects()
  259. {
  260. if (recycler->hasPendingTransferDisposedObjects)
  261. {
  262. // If recycler->inResolveExternalWeakReferences is true, the recycler isn't really disposing anymore
  263. // so it's safe to call transferDisposedObjects
  264. Assert(!recycler->inDispose || recycler->inResolveExternalWeakReferences);
  265. Assert(!recycler->hasDisposableObject);
  266. recycler->autoHeap.TransferDisposedObjects();
  267. }
  268. }
// Tear down everything this sweep state may still hold at recycler shutdown:
// pending-merge new-block lists, pending sweep lists, and pending empty block
// lists, for both small and medium buckets in every attribute flavor.
void
RecyclerSweep::ShutdownCleanup()
{
    // REVIEW: Does this need to be controlled more granularly, say with ENABLE_PARTIAL_GC?
#if ENABLE_CONCURRENT_GC
    // Small buckets: free new heap blocks that were never merged back.
    SmallLeafHeapBucketT<SmallAllocationBlockAttributes>::DeleteHeapBlockList(this->leafData.pendingMergeNewHeapBlockList, recycler);
    SmallNormalHeapBucket::DeleteHeapBlockList(this->normalData.pendingMergeNewHeapBlockList, recycler);
#ifdef RECYCLER_WRITE_BARRIER
    SmallNormalWithBarrierHeapBucket::DeleteHeapBlockList(this->withBarrierData.pendingMergeNewHeapBlockList, recycler);
    SmallFinalizableWithBarrierHeapBucket::DeleteHeapBlockList(this->finalizableWithBarrierData.pendingMergeNewHeapBlockList, recycler);
#endif
    SmallFinalizableHeapBucket::DeleteHeapBlockList(this->finalizableData.pendingMergeNewHeapBlockList, recycler);

    // Per-bucket pending lists for the small buckets.
    for (uint i = 0; i < HeapConstants::BucketCount; i++)
    {
        // For leaf, we can always reuse the page as we don't need to rescan them for partial GC
        // It should have been swept immediately during Sweep
        Assert(this->leafData.bucketData[i].pendingSweepList == nullptr);
        SmallNormalHeapBucket::DeleteHeapBlockList(this->normalData.bucketData[i].pendingSweepList, recycler);
        SmallFinalizableHeapBucket::DeleteHeapBlockList(this->finalizableData.bucketData[i].pendingSweepList, recycler);
#ifdef RECYCLER_WRITE_BARRIER
        SmallFinalizableWithBarrierHeapBucket::DeleteHeapBlockList(this->finalizableWithBarrierData.bucketData[i].pendingSweepList, recycler);
#endif
        SmallLeafHeapBucket::DeleteEmptyHeapBlockList(this->leafData.bucketData[i].pendingEmptyBlockList);
        SmallNormalHeapBucket::DeleteEmptyHeapBlockList(this->normalData.bucketData[i].pendingEmptyBlockList);
#ifdef RECYCLER_WRITE_BARRIER
        SmallNormalWithBarrierHeapBucket::DeleteEmptyHeapBlockList(this->withBarrierData.bucketData[i].pendingEmptyBlockList);
        Assert(this->finalizableWithBarrierData.bucketData[i].pendingEmptyBlockList == nullptr);
#endif
        Assert(this->finalizableData.bucketData[i].pendingEmptyBlockList == nullptr);
    }

    // Medium buckets: same teardown as above.
    MediumLeafHeapBucket::DeleteHeapBlockList(this->mediumLeafData.pendingMergeNewHeapBlockList, recycler);
    MediumNormalHeapBucket::DeleteHeapBlockList(this->mediumNormalData.pendingMergeNewHeapBlockList, recycler);
#ifdef RECYCLER_WRITE_BARRIER
    MediumNormalWithBarrierHeapBucket::DeleteHeapBlockList(this->mediumWithBarrierData.pendingMergeNewHeapBlockList, recycler);
    MediumFinalizableWithBarrierHeapBucket::DeleteHeapBlockList(this->mediumFinalizableWithBarrierData.pendingMergeNewHeapBlockList, recycler);
#endif
    MediumFinalizableHeapBucket::DeleteHeapBlockList(this->mediumFinalizableData.pendingMergeNewHeapBlockList, recycler);

    for (uint i = 0; i < HeapConstants::MediumBucketCount; i++)
    {
        // For leaf, we can always reuse the page as we don't need to rescan them for partial GC
        // It should have been swept immediately during Sweep
        Assert(this->mediumLeafData.bucketData[i].pendingSweepList == nullptr);
        MediumNormalHeapBucket::DeleteHeapBlockList(this->mediumNormalData.bucketData[i].pendingSweepList, recycler);
        MediumFinalizableHeapBucket::DeleteHeapBlockList(this->mediumFinalizableData.bucketData[i].pendingSweepList, recycler);
#ifdef RECYCLER_WRITE_BARRIER
        MediumFinalizableWithBarrierHeapBucket::DeleteHeapBlockList(this->mediumFinalizableWithBarrierData.bucketData[i].pendingSweepList, recycler);
#endif
        MediumLeafHeapBucket::DeleteEmptyHeapBlockList(this->mediumLeafData.bucketData[i].pendingEmptyBlockList);
        MediumNormalHeapBucket::DeleteEmptyHeapBlockList(this->mediumNormalData.bucketData[i].pendingEmptyBlockList);
#ifdef RECYCLER_WRITE_BARRIER
        MediumNormalWithBarrierHeapBucket::DeleteEmptyHeapBlockList(this->mediumWithBarrierData.bucketData[i].pendingEmptyBlockList);
        Assert(this->mediumFinalizableWithBarrierData.bucketData[i].pendingEmptyBlockList == nullptr);
#endif
        Assert(this->mediumFinalizableData.bucketData[i].pendingEmptyBlockList == nullptr);
    }
#endif
}
  326. #if ENABLE_CONCURRENT_GC
  327. template <typename TBlockType>
  328. void
  329. RecyclerSweep::MergePendingNewHeapBlockList()
  330. {
  331. TBlockType *& blockList = this->GetData<TBlockType>().pendingMergeNewHeapBlockList;
  332. TBlockType * list = blockList;
  333. blockList = nullptr;
  334. HeapInfo& heapInfo = recycler->autoHeap;
  335. HeapBlockList::ForEachEditing(list, [&heapInfo](TBlockType * heapBlock)
  336. {
  337. auto& bucket = heapInfo.GetBucket<TBlockType::RequiredAttributes>(heapBlock->GetObjectSize());
  338. bucket.MergeNewHeapBlock(heapBlock);
  339. });
  340. }
  341. template void RecyclerSweep::MergePendingNewHeapBlockList<SmallLeafHeapBlock>();
  342. template void RecyclerSweep::MergePendingNewHeapBlockList<SmallNormalHeapBlock>();
  343. template void RecyclerSweep::MergePendingNewHeapBlockList<SmallFinalizableHeapBlock>();
  344. #ifdef RECYCLER_WRITE_BARRIER
  345. template void RecyclerSweep::MergePendingNewHeapBlockList<SmallNormalWithBarrierHeapBlock>();
  346. template void RecyclerSweep::MergePendingNewHeapBlockList<SmallFinalizableWithBarrierHeapBlock>();
  347. #endif
  348. template <typename TBlockType>
  349. void
  350. RecyclerSweep::MergePendingNewMediumHeapBlockList()
  351. {
  352. TBlockType *& blockList = this->GetData<TBlockType>().pendingMergeNewHeapBlockList;
  353. TBlockType * list = blockList;
  354. blockList = nullptr;
  355. HeapInfo& heapInfo = recycler->autoHeap;
  356. HeapBlockList::ForEachEditing(list, [&heapInfo](TBlockType * heapBlock)
  357. {
  358. auto& bucket = heapInfo.GetMediumBucket<TBlockType::RequiredAttributes>(heapBlock->GetObjectSize());
  359. bucket.MergeNewHeapBlock(heapBlock);
  360. });
  361. }
  362. template void RecyclerSweep::MergePendingNewMediumHeapBlockList<MediumLeafHeapBlock>();
  363. template void RecyclerSweep::MergePendingNewMediumHeapBlockList<MediumNormalHeapBlock>();
  364. template void RecyclerSweep::MergePendingNewMediumHeapBlockList<MediumFinalizableHeapBlock>();
  365. #ifdef RECYCLER_WRITE_BARRIER
  366. template void RecyclerSweep::MergePendingNewMediumHeapBlockList<MediumNormalWithBarrierHeapBlock>();
  367. template void RecyclerSweep::MergePendingNewMediumHeapBlockList<MediumFinalizableWithBarrierHeapBlock>();
  368. #endif
  369. bool
  370. RecyclerSweep::HasPendingEmptyBlocks() const
  371. {
  372. return this->hasPendingEmptyBlocks;
  373. }
  374. bool
  375. RecyclerSweep::HasPendingSweepSmallHeapBlocks() const
  376. {
  377. return this->hasPendingSweepSmallHeapBlocks;
  378. }
  379. void
  380. RecyclerSweep::SetHasPendingSweepSmallHeapBlocks()
  381. {
  382. this->hasPendingSweepSmallHeapBlocks = true;
  383. }
  384. void
  385. RecyclerSweep::BeginBackground(bool forceForeground)
  386. {
  387. Assert(!background);
  388. this->background = !forceForeground;
  389. this->forceForeground = forceForeground;
  390. }
  391. void
  392. RecyclerSweep::EndBackground()
  393. {
  394. Assert(this->background || this->forceForeground);
  395. this->background = false;
  396. }
#if DBG
// Debug-only: true when any pending-merge new-heap-block list (small or
// medium, any attribute flavor) is still non-empty.
bool
RecyclerSweep::HasPendingNewHeapBlocks() const
{
    return leafData.pendingMergeNewHeapBlockList != nullptr
        || normalData.pendingMergeNewHeapBlockList != nullptr
        || finalizableData.pendingMergeNewHeapBlockList != nullptr
#ifdef RECYCLER_WRITE_BARRIER
        || withBarrierData.pendingMergeNewHeapBlockList != nullptr
        || finalizableWithBarrierData.pendingMergeNewHeapBlockList != nullptr
#endif
        || mediumLeafData.pendingMergeNewHeapBlockList != nullptr
        || mediumNormalData.pendingMergeNewHeapBlockList != nullptr
        || mediumFinalizableData.pendingMergeNewHeapBlockList != nullptr
#ifdef RECYCLER_WRITE_BARRIER
        || mediumWithBarrierData.pendingMergeNewHeapBlockList != nullptr
        || mediumFinalizableWithBarrierData.pendingMergeNewHeapBlockList != nullptr
#endif
        ;
}
#endif
#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
// Debug/slow-check only: total block count across all pending-merge
// new-heap-block lists (small and medium, every attribute flavor).
// NOTE(review): despite the "Set" prefix this only computes and returns the
// count -- it mutates nothing. Consider renaming at the declaration site.
size_t
RecyclerSweep::SetPendingMergeNewHeapBlockCount()
{
    return HeapBlockList::Count(leafData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(normalData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(finalizableData.pendingMergeNewHeapBlockList)
#ifdef RECYCLER_WRITE_BARRIER
        + HeapBlockList::Count(withBarrierData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(finalizableWithBarrierData.pendingMergeNewHeapBlockList)
#endif
        + HeapBlockList::Count(mediumLeafData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(mediumNormalData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(mediumFinalizableData.pendingMergeNewHeapBlockList)
#ifdef RECYCLER_WRITE_BARRIER
        + HeapBlockList::Count(mediumWithBarrierData.pendingMergeNewHeapBlockList)
        + HeapBlockList::Count(mediumFinalizableWithBarrierData.pendingMergeNewHeapBlockList)
#endif
        ;
}
#endif
  439. #endif
  440. #if ENABLE_PARTIAL_GC
  441. bool
  442. RecyclerSweep::InPartialCollectMode() const
  443. {
  444. return recycler->inPartialCollectMode;
  445. }
  446. bool
  447. RecyclerSweep::InPartialCollect() const
  448. {
  449. return this->inPartialCollect;
  450. }
// Enter partial-collect mode for the upcoming sweep / next GC cycle.
// Captures whether the finishing collection was itself partial and seeds the
// uncollected-allocation-byte accounting the partial heuristics rely on.
void
RecyclerSweep::StartPartialCollectMode()
{
    // Save the in partial collect, the main thread reset it after returning to the script
    // and the background thread still needs it
    this->inPartialCollect = recycler->inPartialCollectMode;
    recycler->inPartialCollectMode = true;

    // Tracks the unallocated alloc bytes for partial GC
    // Keep a copy Last collection's uncollected allocation bytes, so we can use it to calculate
    // the new object that is allocated since the last GC
    Assert(recycler->partialUncollectedAllocBytes == 0 || this->inPartialCollect);
    this->lastPartialUncollectedAllocBytes = recycler->partialUncollectedAllocBytes;

    size_t currentUncollectedAllocBytes = recycler->autoHeap.uncollectedAllocBytes;
    Assert(currentUncollectedAllocBytes >= this->lastPartialUncollectedAllocBytes);
    if (!this->inPartialCollect)
    {
        // If we did a full collect, then we need to include lastUncollectedAllocBytes
        // in the partialUncollectedAllocBytes calculation, because all objects allocated
        // since the previous GC are considered new, but we cleared uncollectedAllocBytes
        // when we kicked off the GC.
        currentUncollectedAllocBytes += recycler->autoHeap.lastUncollectedAllocBytes;
    }

    // Initially, the partial uncollected alloc bytes is the current uncollectedAllocBytes
    recycler->partialUncollectedAllocBytes = currentUncollectedAllocBytes;
    this->nextPartialUncollectedAllocBytes = currentUncollectedAllocBytes;

#ifdef RECYCLER_TRACE
    if (recycler->GetRecyclerFlagsTable().Trace.IsEnabled(Js::PartialCollectPhase))
    {
        Output::Print(_u("StartPartialCollectMode\n"));
        Output::Print(_u(" was inPartialCollectMode = %d\n"), this->inPartialCollect);
        Output::Print(_u(" lastPartialUncollectedAllocBytes = %d\n"), this->lastPartialUncollectedAllocBytes);
        Output::Print(_u(" uncollectedAllocBytes = %d\n"), recycler->autoHeap.uncollectedAllocBytes);
        Output::Print(_u(" nextPartialUncollectedAllocBytes = %d\n"), this->nextPartialUncollectedAllocBytes);
    }
#endif
}
#endif
// Called by prepare sweep to track the new allocated bytes on block that is not fully allocated yet.
// In partial-collect mode the block's unaccounted bytes are folded into the
// partial-GC uncollected-allocation counters; otherwise the block's counters
// are simply cleared so tracking restarts for the next GC.
template <typename TBlockAttributes>
void
RecyclerSweep::AddUnaccountedNewObjectAllocBytes(SmallHeapBlockT<TBlockAttributes> * heapBlock)
{
#if ENABLE_PARTIAL_GC
    // Only need to update the unaccounted alloc bytes if we are in partial collect mode
    if (recycler->inPartialCollectMode)
    {
        uint unaccountedAllocBytes = heapBlock->GetAndClearUnaccountedAllocBytes();
        // A block should not have both leftover lastUncollectedAllocBytes and
        // new unaccounted bytes at the same time.
        Assert(heapBlock->lastUncollectedAllocBytes == 0 || unaccountedAllocBytes == 0);
        DebugOnly(heapBlock->lastUncollectedAllocBytes += unaccountedAllocBytes);
        recycler->partialUncollectedAllocBytes += unaccountedAllocBytes;
        this->nextPartialUncollectedAllocBytes += unaccountedAllocBytes;
    }
    else
#endif
    {
        // We don't care, clear the unaccounted to start tracking for new object for next GC
        heapBlock->ClearAllAllocBytes();
    }
}
template void RecyclerSweep::AddUnaccountedNewObjectAllocBytes<SmallAllocationBlockAttributes>(SmallHeapBlock * heapBlock);
template void RecyclerSweep::AddUnaccountedNewObjectAllocBytes<MediumAllocationBlockAttributes>(MediumHeapBlock * heapBlock);
  512. #if ENABLE_PARTIAL_GC
  513. void
  514. RecyclerSweep::SubtractSweepNewObjectAllocBytes(size_t newObjectExpectSweepByteCount)
  515. {
  516. Assert(recycler->inPartialCollectMode);
  517. // We shouldn't free more then we allocated
  518. Assert(this->nextPartialUncollectedAllocBytes >= newObjectExpectSweepByteCount);
  519. Assert(this->nextPartialUncollectedAllocBytes >= this->lastPartialUncollectedAllocBytes + newObjectExpectSweepByteCount);
  520. this->nextPartialUncollectedAllocBytes -= newObjectExpectSweepByteCount;
  521. }
  522. /*--------------------------------------------------------------------------------------------
  523. * Determine we want to go into partial collect mode for the next GC before we sweep,
  524. * based on the number bytes needed to rescan (<= 5MB)
  525. *--------------------------------------------------------------------------------------------*/
  526. bool
  527. RecyclerSweep::DoPartialCollectMode()
  528. {
  529. if (!recycler->enablePartialCollect)
  530. {
  531. return false;
  532. }
  533. // If we exceed 16MB of unused memory in partial blocks, get out of partial collect to avoid
  534. // memory fragmentation.
  535. if (recycler->autoHeap.unusedPartialCollectFreeBytes > MaxUnusedPartialCollectFreeBytes)
  536. {
  537. return false;
  538. }
  539. return this->rescanRootBytes <= MaxPartialCollectRescanRootBytes;
  540. }
  541. // Heuristic ratio is ((c * e + (1 - e)) * (1 - p)) + p and use that to linearly scale between min and max
  542. // This give cost/efficacy/pressure equal weight, while each can push it pass where partial GC is not
  543. // beneficial
  544. bool
  545. RecyclerSweep::AdjustPartialHeuristics()
  546. {
  547. Assert(recycler->inPartialCollectMode);
  548. Assert(this->adjustPartialHeuristics);
  549. Assert(this->InPartialCollect() || recycler->autoHeap.unusedPartialCollectFreeBytes == 0);
  550. // DoPartialCollectMode should have rejected these already
  551. Assert(this->rescanRootBytes <= (size_t)MaxPartialCollectRescanRootBytes);
  552. Assert(recycler->autoHeap.unusedPartialCollectFreeBytes <= MaxUnusedPartialCollectFreeBytes);
  553. // Page reuse Heuristics
  554. double collectEfficacy;
  555. const size_t allocBytes = this->GetNewObjectAllocBytes();
  556. if (allocBytes == 0)
  557. {
  558. // We may get collections without allocating memory (e.g. unpin heuristics).
  559. collectEfficacy = 1.0; // assume 100% efficacy
  560. this->partialCollectSmallHeapBlockReuseMinFreeBytes = 0; // reuse all pages
  561. }
  562. else
  563. {
  564. const size_t freedBytes = this->GetNewObjectFreeBytes();
  565. Assert(freedBytes <= allocBytes);
  566. collectEfficacy = (double)freedBytes / (double)allocBytes;
  567. // If we collected less then 10% of the memory, let's not do partial GC.
  568. // CONSIDER: It may be good to do partial with low efficacy once we have concurrent partial
  569. // because old object are not getting collected as well, but without concurrent partial, we will have to mark
  570. // new objects in thread.
  571. if (collectEfficacy < MinPartialCollectEfficacy)
  572. {
  573. return false;
  574. }
  575. // Scale the efficacy linearly such that an efficacy of MinPartialCollectEfficacy translates to an adjusted efficacy of
  576. // 0.0, and an efficacy of 1.0 translates to an adjusted efficacy of 1.0
  577. collectEfficacy = (collectEfficacy - MinPartialCollectEfficacy) / (1.0 - MinPartialCollectEfficacy);
  578. Assert(collectEfficacy <= 1.0);
  579. this->partialCollectSmallHeapBlockReuseMinFreeBytes = (size_t)(AutoSystemInfo::PageSize * collectEfficacy);
  580. }
  581. #ifdef RECYCLER_STATS
  582. recycler->collectionStats.collectEfficacy = collectEfficacy;
  583. recycler->collectionStats.partialCollectSmallHeapBlockReuseMinFreeBytes = this->partialCollectSmallHeapBlockReuseMinFreeBytes;
  584. #endif
  585. // Blocks which are being reused are likely to be touched again from allocation and contribute to Rescan cost.
  586. // If there are many of these, adjust rescanRootBytes to account for this.
  587. const size_t estimatedPartialReuseBlocks = (size_t)((double)this->reuseHeapBlockCount * (1.0 - collectEfficacy));
  588. const size_t estimatedPartialReuseBytes = estimatedPartialReuseBlocks * AutoSystemInfo::PageSize;
  589. const size_t newRescanRootBytes = max(this->rescanRootBytes, estimatedPartialReuseBytes);
  590. RECYCLER_STATS_SET(recycler, estimatedPartialReuseBytes, estimatedPartialReuseBytes);
  591. // Recheck the rescanRootBytes
  592. if (newRescanRootBytes > MaxPartialCollectRescanRootBytes)
  593. {
  594. return false;
  595. }
  596. double collectCost = (double)newRescanRootBytes / MaxPartialCollectRescanRootBytes;
  597. RECYCLER_STATS_SET(recycler, collectCost, collectCost);
  598. // Include the efficacy in equal portion, which is related to the cost of marking through new objects.
  599. // r = c * e + 1 - e;
  600. const double reuseRatio = 1.0 - collectEfficacy;
  601. double ratio = collectCost * collectEfficacy + reuseRatio;
  602. if (this->InPartialCollect())
  603. {
  604. // Avoid ratio of uncollectedBytesPressure > 1.0
  605. if (this->nextPartialUncollectedAllocBytes > RecyclerHeuristic::Instance.MaxUncollectedAllocBytesPartialCollect)
  606. {
  607. return false;
  608. }
// Only add full collect pressure if we are doing partial collect,
// account for the amount of uncollected bytes and unused bytes to increase
// pressure to do a full GC by raising the partial GC new page heuristic
  612. double uncollectedBytesPressure = (double)this->nextPartialUncollectedAllocBytes / (double)RecyclerHeuristic::Instance.MaxUncollectedAllocBytesPartialCollect;
  613. double collectFullCollectPressure =
  614. (double)recycler->autoHeap.unusedPartialCollectFreeBytes / (double)MaxUnusedPartialCollectFreeBytes
  615. * (1.0 - uncollectedBytesPressure) + uncollectedBytesPressure;
  616. ratio = ratio * (1.0 - collectFullCollectPressure) + collectFullCollectPressure;
  617. }
  618. Assert(0.0 <= ratio && ratio <= 1.0);
  619. // Linear scale the partial GC new page heuristic using the ratio calculated
  620. recycler->uncollectedNewPageCountPartialCollect = MinPartialUncollectedNewPageCount
  621. + (size_t)((double)(RecyclerHeuristic::Instance.MaxPartialUncollectedNewPageCount - MinPartialUncollectedNewPageCount) * ratio);
  622. Assert(recycler->uncollectedNewPageCountPartialCollect >= MinPartialUncollectedNewPageCount &&
  623. recycler->uncollectedNewPageCountPartialCollect <= RecyclerHeuristic::Instance.MaxPartialUncollectedNewPageCount);
// If the number of new pages to reach the partial heuristics plus the existing uncollectedAllocBytes
// and the memory we are going to reuse (assume we use it all) is greater than the full GC max size heuristic
// (with 1M fudge factor), we trigger a full GC anyway, so let's not get into partial GC
  627. const size_t estimatedPartialReusedFreeByteCount = (size_t)((double)this->reuseByteCount * reuseRatio);
  628. if (recycler->uncollectedNewPageCountPartialCollect * AutoSystemInfo::PageSize
  629. + this->nextPartialUncollectedAllocBytes + estimatedPartialReusedFreeByteCount >= RecyclerHeuristic::Instance.MaxUncollectedAllocBytesPartialCollect)
  630. {
  631. return false;
  632. }
  633. #if ENABLE_CONCURRENT_GC
  634. recycler->partialConcurrentNextCollection = RecyclerHeuristic::PartialConcurrentNextCollection(ratio, recycler->GetRecyclerFlagsTable());
  635. #endif
  636. return true;
  637. }
  638. size_t
  639. RecyclerSweep::GetNewObjectAllocBytes() const
  640. {
  641. Assert(recycler->inPartialCollectMode);
  642. Assert(recycler->partialUncollectedAllocBytes >= this->lastPartialUncollectedAllocBytes);
  643. return recycler->partialUncollectedAllocBytes - this->lastPartialUncollectedAllocBytes;
  644. }
  645. size_t
  646. RecyclerSweep::GetNewObjectFreeBytes() const
  647. {
  648. Assert(recycler->inPartialCollectMode);
  649. Assert(recycler->partialUncollectedAllocBytes >= this->nextPartialUncollectedAllocBytes);
  650. return recycler->partialUncollectedAllocBytes - this->nextPartialUncollectedAllocBytes;
  651. }
  652. size_t
  653. RecyclerSweep::GetPartialUnusedFreeByteCount() const
  654. {
  655. return partialUnusedFreeByteCount;
  656. }
  657. size_t
  658. RecyclerSweep::GetPartialCollectSmallHeapBlockReuseMinFreeBytes() const
  659. {
  660. return partialCollectSmallHeapBlockReuseMinFreeBytes;
  661. }
  662. template <typename TBlockAttributes>
  663. void
  664. RecyclerSweep::NotifyAllocableObjects(SmallHeapBlockT<TBlockAttributes> * heapBlock)
  665. {
  666. this->reuseByteCount += heapBlock->GetExpectedFreeBytes();
  667. if (!heapBlock->IsLeafBlock())
  668. {
  669. this->reuseHeapBlockCount++;
  670. }
  671. }
  672. template void RecyclerSweep::NotifyAllocableObjects<SmallAllocationBlockAttributes>(SmallHeapBlock* heapBlock);
  673. template void RecyclerSweep::NotifyAllocableObjects<MediumAllocationBlockAttributes>(MediumHeapBlock* heapBlock);
  674. void
  675. RecyclerSweep::AddUnusedFreeByteCount(uint expectFreeByteCount)
  676. {
  677. this->partialUnusedFreeByteCount += expectFreeByteCount;
  678. }
  679. bool
  680. RecyclerSweep::DoAdjustPartialHeuristics() const
  681. {
  682. return this->adjustPartialHeuristics;
  683. }
  684. #endif