HeapBucket.cpp 92 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "CommonMemoryPch.h"
  6. HeapBucket::HeapBucket() :
  7. heapInfo(nullptr),
  8. sizeCat(0)
  9. {
  10. #if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
  11. heapBlockCount = 0;
  12. newHeapBlockCount = 0;
  13. #endif
  14. #if defined(RECYCLER_SLOW_CHECK_ENABLED)
  15. emptyHeapBlockCount = 0;
  16. #endif
  17. #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
  18. this->allocationsStartedDuringConcurrentSweep = false;
  19. this->concurrentSweepAllocationsThresholdExceeded = false;
  20. #endif
  21. #ifdef RECYCLER_PAGE_HEAP
  22. isPageHeapEnabled = false;
  23. #endif
  24. }
  25. HeapInfo *
  26. HeapBucket::GetHeapInfo() const
  27. {
  28. return this->heapInfo;
  29. }
  30. uint
  31. HeapBucket::GetSizeCat() const
  32. {
  33. return this->sizeCat;
  34. }
  35. uint
  36. HeapBucket::GetBucketIndex() const
  37. {
  38. return HeapInfo::GetBucketIndex(this->sizeCat);
  39. }
  40. uint
  41. HeapBucket::GetMediumBucketIndex() const
  42. {
  43. return HeapInfo::GetMediumBucketIndex(this->sizeCat);
  44. }
  45. namespace Memory
  46. {
  47. template <typename TBlockType>
  48. HeapBucketT<TBlockType>::HeapBucketT() :
  49. nextAllocableBlockHead(nullptr),
  50. emptyBlockList(nullptr),
  51. fullBlockList(nullptr),
  52. heapBlockList(nullptr),
  53. #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
  54. #if SUPPORT_WIN32_SLIST
  55. lastKnownNextAllocableBlockHead(nullptr),
  56. allocableHeapBlockListHead(nullptr),
  57. sweepableHeapBlockList(nullptr),
  58. #endif
  59. #endif
  60. explicitFreeList(nullptr),
  61. lastExplicitFreeListAllocator(nullptr)
  62. {
  63. #ifdef RECYCLER_PAGE_HEAP
  64. explicitFreeLockBlockList = nullptr;
  65. #endif
  66. isAllocationStopped = false;
  67. }
// Tear down the bucket: release every heap block in every list and verify, in
// checked builds, that the block-count bookkeeping balances to zero.
template <typename TBlockType>
HeapBucketT<TBlockType>::~HeapBucketT()
{
    DeleteHeapBlockList(this->heapBlockList);
    DeleteHeapBlockList(this->fullBlockList);
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
#if SUPPORT_WIN32_SLIST
    if (allocableHeapBlockListHead != nullptr)
    {
        if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
        {
            // Drain and free any SLIST wrapper nodes still parked on the list
            // before releasing the list header itself.
            FlushInterlockedSList(this->allocableHeapBlockListHead);
        }
        _aligned_free(this->allocableHeapBlockListHead);
    }
    DeleteHeapBlockList(this->sweepableHeapBlockList);
#endif
#endif
#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    // All non-empty blocks must have been deleted by the lists above.
    Assert(this->heapBlockCount + this->newHeapBlockCount == 0);
#endif
    RECYCLER_SLOW_CHECK(Assert(this->emptyHeapBlockCount == HeapBlockList::Count(this->emptyBlockList)));
    DeleteEmptyHeapBlockList(this->emptyBlockList);
#if defined(RECYCLER_SLOW_CHECK_ENABLED)
    // After deleting the empty list the full accounting must be zero as well.
    Assert(this->heapBlockCount + this->newHeapBlockCount + this->emptyHeapBlockCount == 0);
#endif
}
  95. };
  96. template <typename TBlockType>
  97. void
  98. HeapBucketT<TBlockType>::DeleteHeapBlockList(TBlockType * list, Recycler * recycler)
  99. {
  100. HeapBlockList::ForEachEditing(list, [recycler](TBlockType * heapBlock)
  101. {
  102. #if DBG
  103. heapBlock->ReleasePagesShutdown(recycler);
  104. #endif
  105. TBlockType::Delete(heapBlock);
  106. });
  107. }
  108. template <typename TBlockType>
  109. void
  110. HeapBucketT<TBlockType>::DeleteEmptyHeapBlockList(TBlockType * list)
  111. {
  112. HeapBlockList::ForEachEditing(list, [](TBlockType * heapBlock)
  113. {
  114. TBlockType::Delete(heapBlock);
  115. });
  116. }
  117. template <typename TBlockType>
  118. void
  119. HeapBucketT<TBlockType>::DeleteHeapBlockList(TBlockType * list)
  120. {
  121. DeleteHeapBlockList(list, this->heapInfo->recycler);
  122. }
  123. #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST
  124. template<typename TBlockType>
  125. bool
  126. HeapBucketT<TBlockType>::PushHeapBlockToSList(PSLIST_HEADER list, TBlockType * heapBlock)
  127. {
  128. Assert(list != nullptr);
  129. HeapBlockSListItem<TBlockType> * currentBlock = (HeapBlockSListItem<TBlockType> *) _aligned_malloc(sizeof(HeapBlockSListItem<TBlockType>), MEMORY_ALLOCATION_ALIGNMENT);
  130. if (currentBlock == nullptr)
  131. {
  132. return false;
  133. }
  134. // While in the SLIST the blocks live as standalone, when they come out they
  135. // will go into appropriate list and the Next block will be set accordingly.
  136. heapBlock->SetNextBlock(nullptr);
  137. currentBlock->itemHeapBlock = heapBlock;
  138. ::InterlockedPushEntrySList(list, &(currentBlock->itemEntry));
  139. return true;
  140. }
  141. template<typename TBlockType>
  142. TBlockType *
  143. HeapBucketT<TBlockType>::PopHeapBlockFromSList(PSLIST_HEADER list)
  144. {
  145. Assert(list != nullptr);
  146. TBlockType * heapBlock = nullptr;
  147. PSLIST_ENTRY top = ::InterlockedPopEntrySList(list);
  148. if (top != nullptr)
  149. {
  150. HeapBlockSListItem<TBlockType> * topItem = (HeapBlockSListItem<TBlockType> *) top;
  151. heapBlock = topItem->itemHeapBlock;
  152. Assert(heapBlock != nullptr);
  153. _aligned_free(top);
  154. }
  155. return heapBlock;
  156. }
  157. template<typename TBlockType>
  158. ushort
  159. HeapBucketT<TBlockType>::QueryDepthInterlockedSList(PSLIST_HEADER list)
  160. {
  161. Assert(list != nullptr);
  162. return ::QueryDepthSList(list);
  163. }
  164. template<typename TBlockType>
  165. void
  166. HeapBucketT<TBlockType>::FlushInterlockedSList(PSLIST_HEADER list)
  167. {
  168. Assert(list != nullptr);
  169. if (::QueryDepthSList(list) > 0)
  170. {
  171. PSLIST_ENTRY listEntry = ::InterlockedPopEntrySList(list);
  172. while (listEntry != nullptr)
  173. {
  174. _aligned_free(listEntry);
  175. listEntry = ::InterlockedPopEntrySList(list);
  176. }
  177. }
  178. ::InterlockedFlushSList(list);
  179. }
  180. #endif
  181. template <typename TBlockType>
  182. void
  183. HeapBucketT<TBlockType>::Initialize(HeapInfo * heapInfo, uint sizeCat)
  184. {
  185. this->heapInfo = heapInfo;
  186. #ifdef RECYCLER_PAGE_HEAP
  187. this->isPageHeapEnabled = heapInfo->IsPageHeapEnabledForBlock<typename TBlockType::HeapBlockAttributes>(sizeCat);
  188. #endif
  189. this->sizeCat = sizeCat;
  190. allocatorHead.Initialize();
  191. #if defined(PROFILE_RECYCLER_ALLOC) || defined(RECYCLER_MEMORY_VERIFY)
  192. allocatorHead.bucket = this;
  193. #endif
  194. this->lastExplicitFreeListAllocator = &allocatorHead;
  195. }
  196. template <typename TBlockType>
  197. template <class Fn>
  198. void
  199. HeapBucketT<TBlockType>::ForEachAllocator(Fn fn)
  200. {
  201. TBlockAllocatorType * current = &allocatorHead;
  202. do
  203. {
  204. fn(current);
  205. current = current->GetNext();
  206. }
  207. while (current != &allocatorHead);
  208. }
  209. template <typename TBlockType>
  210. void
  211. HeapBucketT<TBlockType>::UpdateAllocators()
  212. {
  213. ForEachAllocator([](TBlockAllocatorType * allocator) { allocator->UpdateHeapBlock(); });
  214. }
  215. template <typename TBlockType>
  216. void
  217. HeapBucketT<TBlockType>::ClearAllocators()
  218. {
  219. ForEachAllocator([](TBlockAllocatorType * allocator) { ClearAllocator(allocator); });
  220. #ifdef RECYCLER_PAGE_HEAP
  221. #endif
  222. #ifdef RECYCLER_MEMORY_VERIFY
  223. FreeObject* freeObject = this->explicitFreeList;
  224. while (freeObject)
  225. {
  226. HeapBlock* heapBlock = this->GetRecycler()->FindHeapBlock((void*)freeObject);
  227. Assert(heapBlock != nullptr);
  228. Assert(!heapBlock->IsLargeHeapBlock());
  229. TBlockType* smallBlock = (TBlockType*)heapBlock;
  230. smallBlock->ClearExplicitFreeBitForObject((void*)freeObject);
  231. freeObject = freeObject->GetNext();
  232. }
  233. #endif
  234. this->explicitFreeList = nullptr;
  235. }
  236. #if ENABLE_CONCURRENT_GC
// Pre-sweep hook: detach allocators so the sweep sees consistent free counts.
template <typename TBlockType>
void
HeapBucketT<TBlockType>::PrepareSweep()
{
    // CONCURRENT-TODO: Technically, We don't really need to invalidate allocators here,
    // but currently invalidating may update the unallocateCount which is
    // used to calculate the partial heuristics, so it needs to be done
    // before sweep. When the partial heuristic changes, we can remove this
    // (And remove rescan from leaf bucket, so this function doesn't need to exist)
    ClearAllocators();
}
  248. #endif
  249. template <typename TBlockType>
  250. void
  251. HeapBucketT<TBlockType>::AddAllocator(TBlockAllocatorType * allocator)
  252. {
  253. Assert(allocator != &this->allocatorHead);
  254. allocator->Initialize();
  255. allocator->next = this->allocatorHead.next;
  256. allocator->prev = &this->allocatorHead;
  257. allocator->next->prev = allocator;
  258. this->allocatorHead.next = allocator;
  259. #if defined(PROFILE_RECYCLER_ALLOC) || defined(RECYCLER_MEMORY_VERIFY)
  260. allocator->bucket = this;
  261. #endif
  262. }
  263. template <typename TBlockType>
  264. void
  265. HeapBucketT<TBlockType>::RemoveAllocator(TBlockAllocatorType * allocator)
  266. {
  267. Assert(allocator != &this->allocatorHead);
  268. ClearAllocator(allocator);
  269. allocator->next->prev = allocator->prev;
  270. allocator->prev->next = allocator->next;
  271. if (allocator == this->lastExplicitFreeListAllocator)
  272. {
  273. this->lastExplicitFreeListAllocator = &allocatorHead;
  274. }
  275. }
// Reset a single allocator, releasing its grip on its heap block / free list.
template <typename TBlockType>
void
HeapBucketT<TBlockType>::ClearAllocator(TBlockAllocatorType * allocator)
{
    allocator->Clear();
}
// Adopt an externally provided page (blockAddress within 'segment') into this
// bucket as a fully-allocated heap block. Returns false on failure (no unused
// block available, or the page could not be set up), leaving the bucket
// unchanged. On success the block is placed on the fullBlockList and all
// allocation accounting / perf counters are updated as if every object in the
// block were live.
template <typename TBlockType>
bool
HeapBucketT<TBlockType>::IntegrateBlock(char * blockAddress, PageSegment * segment, Recycler * recycler)
{
    // Add a new heap block
    TBlockType * heapBlock = GetUnusedHeapBlock();
    if (heapBlock == nullptr)
    {
        return false;
    }
    // TODO: Consider supporting guard pages for this codepath
    if (!heapBlock->SetPage(blockAddress, segment, recycler))
    {
        // Return the unused block rather than leaking it.
        FreeHeapBlock(heapBlock);
        return false;
    }
    // The block is treated as fully allocated, so it goes straight to the
    // full-block list rather than the allocable list.
    heapBlock->SetNextBlock(this->fullBlockList);
    this->fullBlockList = heapBlock;
#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    this->heapBlockCount++;
#endif
    // Charge the integrated objects to the uncollected-allocation heuristic.
    recycler->autoHeap.uncollectedAllocBytes += heapBlock->GetAndClearLastFreeCount() * heapBlock->GetObjectSize();
    RecyclerMemoryTracking::ReportAllocation(recycler, blockAddress, heapBlock->GetObjectSize() * heapBlock->GetObjectCount());
    RECYCLER_PERF_COUNTER_ADD(LiveObject,heapBlock->GetObjectCount());
    RECYCLER_PERF_COUNTER_ADD(LiveObjectSize, heapBlock->GetObjectSize() * heapBlock->GetObjectCount());
    // Mirror the counts into the size-class-specific counters.
    if (heapBlock->IsLargeHeapBlock())
    {
        RECYCLER_PERF_COUNTER_ADD(LargeHeapBlockLiveObject,heapBlock->GetObjectCount());
        RECYCLER_PERF_COUNTER_ADD(LargeHeapBlockLiveObjectSize, heapBlock->GetObjectSize() * heapBlock->GetObjectCount());
    }
    else
    {
        RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockLiveObject,heapBlock->GetObjectCount());
        RECYCLER_PERF_COUNTER_ADD(SmallHeapBlockLiveObjectSize, heapBlock->GetObjectSize() * heapBlock->GetObjectCount());
    }
#if DBG
    heapBlock->SetIsIntegratedBlock();
#endif
    return true;
}
  322. #if DBG
  323. template <typename TBlockType>
  324. bool
  325. HeapBucketT<TBlockType>::AllocatorsAreEmpty() const
  326. {
  327. TBlockAllocatorType const * current = &allocatorHead;
  328. do
  329. {
  330. if (current->GetHeapBlock() != nullptr || current->GetExplicitFreeList() != nullptr)
  331. {
  332. return false;
  333. }
  334. current = current->GetNext();
  335. }
  336. while (current != &allocatorHead);
  337. return true;
  338. }
// Debug helper: true when this bucket is a finalizable flavor and its
// pending-dispose list is non-empty. The cast is safe because only
// finalizable buckets (checked first) carry a pendingDisposeList.
template <typename TBlockType>
bool
HeapBucketT<TBlockType>::HasPendingDisposeHeapBlocks() const
{
#ifdef RECYCLER_WRITE_BARRIER
    // Write-barrier builds add a second finalizable bucket flavor to check.
    return (IsFinalizableBucket || IsFinalizableWriteBarrierBucket) &&
        ((SmallFinalizableHeapBucketT<typename TBlockType::HeapBlockAttributes> *)this)->pendingDisposeList != nullptr;
#else
    return IsFinalizableBucket && ((SmallFinalizableHeapBucketT<typename TBlockType::HeapBlockAttributes> *)this)->pendingDisposeList != nullptr;
#endif
}
  350. #endif
// Debug-only sanity check: assert that 'heapBlock' is not already linked into
// any of this bucket's block lists (it is about to be inserted into one).
// Compiles to nothing in release builds.
template <typename TBlockType>
void
HeapBucketT<TBlockType>::AssertCheckHeapBlockNotInAnyList(TBlockType * heapBlock)
{
#if DBG
    AssertMsg(!HeapBlockList::Contains(heapBlock, heapBlockList), "The heap block already exists in the heapBlockList.");
    AssertMsg(!HeapBlockList::Contains(heapBlock, fullBlockList), "The heap block already exists in the fullBlockList.");
    AssertMsg(!HeapBlockList::Contains(heapBlock, emptyBlockList), "The heap block already exists in the emptyBlockList.");
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    AssertMsg(!HeapBlockList::Contains(heapBlock, sweepableHeapBlockList), "The heap block already exists in the sweepableHeapBlockList.");
#endif
#endif
}
  364. #if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
// Debug/slow-check helper: count every non-empty heap block the bucket knows
// about (full + allocable lists, plus blocks parked for concurrent sweep and
// blocks currently owned by the sweep manager). When 'checkCount' is set,
// cross-check the total against the running heapBlockCount.
template <typename TBlockType>
size_t
HeapBucketT<TBlockType>::GetNonEmptyHeapBlockCount(bool checkCount) const
{
    size_t currentHeapBlockCount = HeapBlockList::Count(fullBlockList);
    currentHeapBlockCount += HeapBlockList::Count(heapBlockList);
    bool allocatingDuringConcurrentSweep = false;
#if ENABLE_CONCURRENT_GC
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
#if SUPPORT_WIN32_SLIST
    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
    {
        allocatingDuringConcurrentSweep = true;
        // This lock is needed only in the debug mode while we verify block counts. Not needed otherwise, as this list is never accessed concurrently.
        // Items are added to it by the allocator when allocations are allowed during concurrent sweep. The list is drained during the next sweep while
        // allocation are stopped.
        debugSweepableHeapBlockListLock.Enter();
        if (allocableHeapBlockListHead != nullptr)
        {
            // SLIST depth stands in for a list walk; see the 65535 caveat below.
            currentHeapBlockCount += QueryDepthInterlockedSList(allocableHeapBlockListHead);
        }
        currentHeapBlockCount += HeapBlockList::Count(sweepableHeapBlockList);
        debugSweepableHeapBlockListLock.Leave();
    }
#endif
#endif
    // Recycler can be null if we have OOM in the ctor
    if (this->GetRecycler() && this->GetRecycler()->recyclerSweepManager != nullptr)
    {
        // Blocks checked out to the in-flight sweep still belong to this bucket.
        currentHeapBlockCount += this->GetRecycler()->recyclerSweepManager->GetHeapBlockCount(this);
    }
#endif
    // There is no way to determine the number of item in an SLIST if there are >= 65535 items in the list.
    RECYCLER_SLOW_CHECK(Assert(!checkCount || heapBlockCount == currentHeapBlockCount || (heapBlockCount >= 65535 && allocatingDuringConcurrentSweep)));
    return currentHeapBlockCount;
}
  401. template <typename TBlockType>
  402. size_t
  403. HeapBucketT<TBlockType>::GetEmptyHeapBlockCount() const
  404. {
  405. size_t count = HeapBlockList::Count(this->emptyBlockList);
  406. RECYCLER_SLOW_CHECK(Assert(count == this->emptyHeapBlockCount));
  407. return count;
  408. }
  409. #endif
// Fast-path refill: try to satisfy an allocation by pointing 'allocator' at an
// existing allocable heap block (or, failing that, the bucket's explicit free
// list). Returns nullptr when neither source is available; the caller then
// falls back to creating a new block / collecting.
template <typename TBlockType>
char *
HeapBucketT<TBlockType>::TryAlloc(Recycler * recycler, TBlockAllocatorType * allocator, size_t sizeCat, ObjectInfoBits attributes)
{
    AUTO_NO_EXCEPTION_REGION;
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    ClearAllocator(allocator);
    TBlockType * heapBlock = this->nextAllocableBlockHead;
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
#if SUPPORT_WIN32_SLIST
    bool heapBlockFromAllocableHeapBlockList = false;
    DebugOnly(bool heapBlockInPendingSweepPrepList = false);
    // No regular allocable block: while a concurrent sweep is running, blocks
    // eligible for allocation are parked on an interlocked SLIST instead.
    if (heapBlock == nullptr && this->allocationsStartedDuringConcurrentSweep)
    {
#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
        // This lock is needed only in the debug mode while we verify block counts. Not needed otherwise, as this list is never accessed concurrently.
        // Items are added to it by the allocator when allocations are allowed during concurrent sweep. The list is drained during the next sweep while
        // allocation are stopped.
        debugSweepableHeapBlockListLock.Enter();
#endif
        heapBlock = PopHeapBlockFromSList(this->allocableHeapBlockListHead);
        if (heapBlock != nullptr)
        {
            // Only non-finalizable buckets allocate during concurrent sweep.
            Assert(!this->IsAnyFinalizableBucket());
            DebugOnly(this->AssertCheckHeapBlockNotInAnyList(heapBlock));
#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
            heapBlock->wasAllocatedFromDuringSweep = true;
#endif
#if DBG || defined(RECYCLER_TRACE)
            if (heapBlock->isPendingConcurrentSweepPrep)
            {
                AssertMsg(heapBlock->objectsAllocatedDuringConcurrentSweepCount == 0, "We just picked up this block for allocations during concurrent sweep, we haven't allocated from it yet.");
#ifdef RECYCLER_TRACE
                recycler->PrintBlockStatus(this, heapBlock, _u("[**31**] pending Pass1 prep, picked up for allocations during concurrent sweep."));
#endif
                DebugOnly(heapBlockInPendingSweepPrepList = true);
            }
            else
            {
                // Put the block in the sweepable heap block list so we don't lose track of it. The block will eventually be moved to the
                // heapBlockList or fullBlockList as appropriate during the next sweep.
                AssertMsg(!HeapBlockList::Contains(heapBlock, sweepableHeapBlockList), "The heap block already exists in this list.");
#ifdef RECYCLER_TRACE
                recycler->PrintBlockStatus(this, heapBlock, _u("[**32**] picked up for allocations during concurrent sweep."));
#endif
            }
#endif
            // Track the popped block on the sweepable list until the next sweep
            // files it into the appropriate permanent list.
            heapBlock->SetNextBlock(sweepableHeapBlockList);
            sweepableHeapBlockList = heapBlock;
            heapBlockFromAllocableHeapBlockList = true;
        }
#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
        debugSweepableHeapBlockListLock.Leave();
#endif
    }
#endif
#endif
    if (heapBlock != nullptr)
    {
        Assert(!this->IsAllocationStopped());
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST
        // When allocations are allowed during concurrent sweep we set nextAllocableBlockHead to NULL as the allocator will pick heap blocks from the
        // interlocked SLIST. During that time, the heap block at the top of the SLIST is always the nextAllocableBlockHead.
        // If the heapBlock was just picked from the SLIST and nextAllocableBlockHead is not NULL then we just resumed normal allocations on the background thread
        // while finishing the concurrent sweep, and the nextAllocableBlockHead is already set properly.
        if (this->nextAllocableBlockHead != nullptr && !heapBlockFromAllocableHeapBlockList)
#endif
        {
            this->nextAllocableBlockHead = heapBlock->GetNextBlock();
        }
        allocator->Set(heapBlock);
    }
    else if (this->explicitFreeList != nullptr)
    {
        // No block available, but explicitly freed objects can be recycled.
        allocator->SetExplicitFreeList(this->explicitFreeList);
        this->lastExplicitFreeListAllocator = allocator;
        this->explicitFreeList = nullptr;
    }
    else
    {
        return nullptr;
    }
    // We just found a block we can allocate on
    char * memBlock = allocator->template SlowAlloc<false /* disallow fault injection */>(recycler, sizeCat, attributes);
    Assert(memBlock != nullptr);
    return memBlock;
}
  497. template <typename TBlockType>
  498. char *
  499. HeapBucketT<TBlockType>::TryAllocFromNewHeapBlock(Recycler * recycler, TBlockAllocatorType * allocator, size_t sizeCat, size_t size, ObjectInfoBits attributes)
  500. {
  501. AUTO_NO_EXCEPTION_REGION;
  502. Assert((attributes & InternalObjectInfoBitMask) == attributes);
  503. #ifdef RECYCLER_PAGE_HEAP
  504. if (IsPageHeapEnabled(attributes))
  505. {
  506. return this->PageHeapAlloc(recycler, sizeCat, size, attributes, this->heapInfo->pageHeapMode, true);
  507. }
  508. #endif
  509. TBlockType * heapBlock = CreateHeapBlock(recycler);
  510. if (heapBlock == nullptr)
  511. {
  512. return nullptr;
  513. }
  514. // new heap block added, allocate from that.
  515. allocator->SetNew(heapBlock);
  516. // We just created a block we can allocate on
  517. char * memBlock = allocator->template SlowAlloc<false /* disallow fault injection */>(recycler, sizeCat, attributes);
  518. Assert(memBlock != nullptr || IS_FAULTINJECT_NO_THROW_ON);
  519. return memBlock;
  520. }
  521. Recycler *
  522. HeapBucket::GetRecycler() const
  523. {
  524. return this->heapInfo->recycler;
  525. }
// Whether this bucket has begun handing out objects while a concurrent sweep
// is in progress. Always false when the feature is compiled out.
bool
HeapBucket::AllocationsStartedDuringConcurrentSweep() const
{
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    return this->allocationsStartedDuringConcurrentSweep;
#else
    return false;
#endif
}
  535. #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
  536. bool
  537. HeapBucket::ConcurrentSweepAllocationsThresholdExceeded() const
  538. {
  539. return this->concurrentSweepAllocationsThresholdExceeded;
  540. }
// Decide (and record) whether this bucket has enough heap blocks to exceed the
// threshold for allowing allocations during a two-pass concurrent sweep.
// Returns the recorded decision.
bool
HeapBucket::DoTwoPassConcurrentSweepPreCheck()
{
    this->concurrentSweepAllocationsThresholdExceeded = ((this->heapBlockCount + this->newHeapBlockCount) > RecyclerHeuristic::AllocDuringConcurrentSweepHeapBlockThreshold);
#ifdef RECYCLER_TRACE
    // Verbose tracing of the decision when ConcurrentSweepPhase tracing is on.
    if (this->GetRecycler()->GetRecyclerFlagsTable().Trace.IsEnabled(Js::ConcurrentSweepPhase) && CONFIG_FLAG_RELEASE(Verbose))
    {
        if (this->concurrentSweepAllocationsThresholdExceeded)
        {
            Output::Print(_u("[HeapBucket 0x%p] exceeded concurrent sweep allocations threshold (%d). Total heap block count: %d \n"), this, RecyclerHeuristic::AllocDuringConcurrentSweepHeapBlockThreshold, this->heapBlockCount + this->newHeapBlockCount);
        }
    }
#endif
    return this->concurrentSweepAllocationsThresholdExceeded;
}
  556. #endif
  557. #ifdef RECYCLER_PAGE_HEAP
  558. template <typename TBlockType>
  559. char *
  560. HeapBucketT<TBlockType>::PageHeapAlloc(Recycler * recycler, DECLSPEC_GUARD_OVERFLOW size_t sizeCat, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow)
  561. {
  562. AllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("In PageHeapAlloc [Size: 0x%x, Attributes: 0x%x]\n"), size, attributes);
  563. char* addr = heapInfo->largeObjectBucket.PageHeapAlloc(recycler, sizeCat, size, attributes, mode, nothrow);
  564. if (addr)
  565. {
  566. this->GetRecycler()->autoHeap.uncollectedAllocBytes += sizeCat;
  567. }
  568. return addr;
  569. }
  570. #endif
  571. template <typename TBlockType>
  572. char *
  573. HeapBucketT<TBlockType>::SnailAlloc(Recycler * recycler, TBlockAllocatorType * allocator, DECLSPEC_GUARD_OVERFLOW size_t sizeCat, size_t size, ObjectInfoBits attributes, bool nothrow)
  574. {
  575. AllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("In SnailAlloc [Size: 0x%x, Attributes: 0x%x]\n"), sizeCat, attributes);
  576. Assert(sizeCat == this->sizeCat);
  577. Assert((attributes & InternalObjectInfoBitMask) == attributes);
  578. char * memBlock = this->TryAlloc(recycler, allocator, sizeCat, attributes);
  579. if (memBlock != nullptr)
  580. {
  581. return memBlock;
  582. }
  583. #if ENABLE_CONCURRENT_GC
  584. // No free memory, try to collect with allocated bytes and time heuristic, and concurrently
  585. BOOL collected = recycler->disableCollectOnAllocationHeuristics ? recycler->FinishConcurrent<FinishConcurrentOnAllocation>() :
  586. recycler->CollectNow<CollectOnAllocation>();
  587. #else
  588. BOOL collected = recycler->disableCollectOnAllocationHeuristics ? FALSE : recycler->CollectNow<CollectOnAllocation>();
  589. #endif
  590. AllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("TryAlloc failed, forced collection on allocation [Collected: %d]\n"), collected);
  591. if (!collected)
  592. {
  593. #if ENABLE_CONCURRENT_GC
  594. #if ENABLE_PARTIAL_GC
  595. // wait for background sweeping finish if there are too many pages allocated during background sweeping
  596. if (recycler->IsConcurrentSweepExecutingState() && recycler->autoHeap.uncollectedNewPageCount > (uint)CONFIG_FLAG(NewPagesCapDuringBGSweeping))
  597. #else
  598. if (recycler->IsConcurrentSweepExecutingState())
  599. #endif
  600. {
  601. recycler->FinishConcurrent<ForceFinishCollection>();
  602. memBlock = this->TryAlloc(recycler, allocator, sizeCat, attributes);
  603. if (memBlock != nullptr)
  604. {
  605. return memBlock;
  606. }
  607. }
  608. #endif
  609. // We didn't collect, try to add a new heap block
  610. memBlock = TryAllocFromNewHeapBlock(recycler, allocator, sizeCat, size, attributes);
  611. if (memBlock != nullptr)
  612. {
  613. return memBlock;
  614. }
  615. // Can't even allocate a new block, we need force a collection and
  616. //allocate some free memory, add a new heap block again, or throw out of memory
  617. AllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("TryAllocFromNewHeapBlock failed, forcing in-thread collection\n"));
  618. recycler->CollectNow<CollectNowForceInThread>();
  619. }
  620. // Collection might trigger finalizer, which might allocate memory. So the allocator
  621. // might have a heap block already, try to allocate from that first
  622. memBlock = allocator->template SlowAlloc<true /* allow fault injection */>(recycler, sizeCat, attributes);
  623. if (memBlock != nullptr)
  624. {
  625. return memBlock;
  626. }
  627. AllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("SlowAlloc failed\n"));
  628. // do the allocation
  629. memBlock = this->TryAlloc(recycler, allocator, sizeCat, attributes);
  630. if (memBlock != nullptr)
  631. {
  632. return memBlock;
  633. }
  634. AllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("TryAlloc failed\n"));
  635. // add a heap block if there are no preallocated memory left.
  636. memBlock = TryAllocFromNewHeapBlock(recycler, allocator, sizeCat, size, attributes);
  637. if (memBlock != nullptr)
  638. {
  639. return memBlock;
  640. }
  641. AllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("TryAllocFromNewHeapBlock failed- triggering OOM handler"));
  642. if (nothrow == false)
  643. {
  644. // Can't add a heap block, we are out of memory
  645. // Since we're allowed to throw, throw right here
  646. recycler->OutOfMemory();
  647. }
  648. return nullptr;
  649. }
  650. template <typename TBlockType>
  651. TBlockType*
  652. HeapBucketT<TBlockType>::GetUnusedHeapBlock()
  653. {
  654. // Add a new heap block
  655. TBlockType * heapBlock = emptyBlockList;
  656. if (heapBlock == nullptr)
  657. {
  658. // We couldn't find a reusable heap block
  659. heapBlock = TBlockType::New(this);
  660. #if defined(RECYCLER_SLOW_CHECK_ENABLED)
  661. Assert(this->emptyHeapBlockCount == 0);
  662. #endif
  663. }
  664. else
  665. {
  666. emptyBlockList = heapBlock->GetNextBlock();
  667. #if defined(RECYCLER_SLOW_CHECK_ENABLED)
  668. this->emptyHeapBlockCount--;
  669. #endif
  670. }
  671. return heapBlock;
  672. }
// Create (or reuse) a heap block, back it with pages, and register it with the
// owning HeapInfo. Returns nullptr on failure (no block object, or page
// reassignment failed — in the latter case the block is returned to the empty list).
template <typename TBlockType>
TBlockType *
HeapBucketT<TBlockType>::CreateHeapBlock(Recycler * recycler)
{
    FAULTINJECT_MEMORY_NOTHROW(_u("HeapBlock"), sizeof(TBlockType));
    // Add a new heap block
    TBlockType * heapBlock = GetUnusedHeapBlock();
    if (heapBlock == nullptr)
    {
        return nullptr;
    }
    // Back the block with pages; on failure, recycle the block object itself.
    if (!heapBlock->ReassignPages(recycler))
    {
        FreeHeapBlock(heapBlock);
        return nullptr;
    }
    // Add it to head of heap block list so we will keep track of the block
    this->heapInfo->AppendNewHeapBlock(heapBlock, this);
#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
#if ENABLE_CONCURRENT_GC
    // Interlocked: the count may be read/updated while a concurrent GC pass runs.
    ::InterlockedIncrement(&this->newHeapBlockCount);
#else
    this->heapBlockCount++;
#endif
#endif
    return heapBlock;
}
  700. template <typename TBlockType>
  701. void
  702. HeapBucketT<TBlockType>::FreeHeapBlock(TBlockType * heapBlock)
  703. {
  704. heapBlock->Reset();
  705. heapBlock->SetNextBlock(emptyBlockList);
  706. emptyBlockList = heapBlock;
  707. #if defined(RECYCLER_SLOW_CHECK_ENABLED)
  708. this->emptyHeapBlockCount++;
  709. #endif
  710. }
// Prepare this bucket for a new mark phase: clear allocators when running
// in-thread, and (optionally) re-mark implicit roots across all block lists.
// Mark bits themselves are cleared elsewhere (HeapBlockMap32::ResetMarks).
template <typename TBlockType>
void
HeapBucketT<TBlockType>::ResetMarks(ResetMarkFlags flags)
{
    RECYCLER_SLOW_CHECK(this->VerifyHeapBlockCount((flags & ResetMarkFlags_Background) != 0));
#if !ENABLE_CONCURRENT_GC
    Assert((flags & ResetMarkFlags_Background) == 0);
#endif
    if ((flags & ResetMarkFlags_Background) == 0)
    {
        // This is equivalent to the ClearAllocators in Rescan.
        // But since we are not doing concurrent, we need to do it here.
        ClearAllocators();
    }
    // Note, mark bits are now cleared in HeapBlockMap32::ResetMarks, so we don't need to clear them here.
    if ((flags & ResetMarkFlags_ScanImplicitRoot) != 0)
    {
        // Full blocks by definition have no free objects left.
        HeapBlockList::ForEach(fullBlockList, [flags](TBlockType * heapBlock)
        {
            heapBlock->MarkImplicitRoots();
            Assert(!heapBlock->HasFreeObject());
        });
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST
        // Non-finalizable buckets may also hold blocks that were allocated from
        // during the previous concurrent sweep; mark those too.
        if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
        {
            HeapBlockList::ForEach(sweepableHeapBlockList, [flags](TBlockType * heapBlock)
            {
                heapBlock->MarkImplicitRoots();
            });
        }
#endif
        HeapBlockList::ForEach(heapBlockList, [flags](TBlockType * heapBlock)
        {
            heapBlock->MarkImplicitRoots();
        });
    }
#if DBG
    if ((flags & ResetMarkFlags_Background) == 0)
    {
        // When allocations are enabled for buckets during concurrent sweep we don't keep track of the nextAllocableBlockHead as it directly
        // comes out of the SLIST. As a result, the below validations can't be performed reliably on a heap block.
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
        if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || this->IsAnyFinalizableBucket())
#endif
        {
            // Verify that if you are in the heapBlockList, before the nextAllocableBlockHead, we have fully allocated from
            // the block already, except if we have cleared from the allocator, or it is still in the allocator
            HeapBlockList::ForEach(heapBlockList, nextAllocableBlockHead, [](TBlockType * heapBlock)
            {
                // If the heap block is in the allocator, then the heap block may or may not have free object still
                // So we can't assert. Otherwise, we have free object iff we were cleared from allocator
                Assert(heapBlock->IsInAllocator() || heapBlock->HasFreeObject() == heapBlock->IsClearedFromAllocator());
            });
            // We should still have allocable free object after nextAllocableBlockHead
            HeapBlockList::ForEach(nextAllocableBlockHead, [](TBlockType * heapBlock)
            {
                Assert(heapBlock->HasFreeObject());
            });
        }
    }
#endif
}
  773. template <typename TBlockType>
  774. void
  775. HeapBucketT<TBlockType>::ScanNewImplicitRoots(Recycler * recycler)
  776. {
  777. HeapBlockList::ForEach(fullBlockList, [recycler](TBlockType * heapBlock)
  778. {
  779. heapBlock->ScanNewImplicitRoots(recycler);
  780. });
  781. #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST
  782. if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
  783. {
  784. HeapBlockList::ForEach(sweepableHeapBlockList, [recycler](TBlockType * heapBlock)
  785. {
  786. heapBlock->ScanNewImplicitRoots(recycler);
  787. });
  788. }
  789. #endif
  790. HeapBlockList::ForEach(heapBlockList, [recycler](TBlockType * heapBlock)
  791. {
  792. heapBlock->ScanNewImplicitRoots(recycler);
  793. });
  794. }
  795. #if DBG
// Debug-only: validate one heap block's state while walking a list before
// sweep, updating the expectation flags in recyclerSweep's consistency data as
// the walk crosses nextAllocableBlockHead.
template <typename TBlockType>
void
HeapBucketT<TBlockType>::VerifyBlockConsistencyInList(TBlockType * heapBlock, RecyclerVerifyListConsistencyData& recyclerSweep)
{
    bool* expectFull = nullptr;
    bool* expectDispose = nullptr;
    HeapBlock* nextAllocableBlockHead = nullptr;
    // Select the consistency data matching this block's size class.
    if (TBlockType::HeapBlockAttributes::IsSmallBlock)
    {
        expectFull = &recyclerSweep.smallBlockVerifyListConsistencyData.expectFull;
        expectDispose = &recyclerSweep.smallBlockVerifyListConsistencyData.expectDispose;
        nextAllocableBlockHead = recyclerSweep.smallBlockVerifyListConsistencyData.nextAllocableBlockHead;
    }
    else if (TBlockType::HeapBlockAttributes::IsMediumBlock)
    {
        expectFull = &recyclerSweep.mediumBlockVerifyListConsistencyData.expectFull;
        expectDispose = &recyclerSweep.mediumBlockVerifyListConsistencyData.expectDispose;
        nextAllocableBlockHead = recyclerSweep.mediumBlockVerifyListConsistencyData.nextAllocableBlockHead;
    }
    else
    {
        Assert(false);
    }
    // Once the walk reaches nextAllocableBlockHead, blocks are no longer
    // expected to be full.
    if (heapBlock == nextAllocableBlockHead)
    {
        (*expectFull) = false;
    }
    if (heapBlock->IsClearedFromAllocator())
    {
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
        // As the blocks are added to a SLIST and used from there during concurrent sweep, the expectFull assertion doesn't hold anymore.
        // We could do some work to make this work again but there may be perf hit and it may be fragile.
        if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
#endif
        {
            Assert(*expectFull && !*expectDispose);
            Assert(heapBlock->HasFreeObject());
            Assert(!heapBlock->HasAnyDisposeObjects());
        }
    }
    else if (*expectDispose)
    {
        Assert(heapBlock->IsAnyFinalizableBlock() && heapBlock->template AsFinalizableBlock<typename TBlockType::HeapBlockAttributes>()->IsPendingDispose());
        Assert(heapBlock->HasAnyDisposeObjects());
    }
    else
    {
        Assert(!heapBlock->HasAnyDisposeObjects());
        // ExpectFull is a bit of a misnomer if the list in question is the heap block list. It's there to check
        // of the heap block in question is before the nextAllocableBlockHead or not. This is to ensure that
        // blocks before nextAllocableBlockHead that are not being bump allocated from must be considered "full".
        // However, the exception is if this is the only heap block in this bucket, in which case nextAllocableBlockHead
        // would be null
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
        // As the blocks are added to the SLIST and used from there during concurrent sweep, the expectFull assertion doesn't hold anymore.
        if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
#endif
        {
            Assert(*expectFull == (!heapBlock->HasFreeObject() || heapBlock->IsInAllocator()) || nextAllocableBlockHead == nullptr);
        }
    }
}
  858. template <typename TBlockType>
  859. void
  860. HeapBucketT<TBlockType>::VerifyBlockConsistencyInList(TBlockType * heapBlock, RecyclerVerifyListConsistencyData const& recyclerSweep, SweepState state)
  861. {
  862. bool expectFull = false;
  863. bool expectDispose = false;
  864. if (TBlockType::HeapBlockAttributes::IsSmallBlock)
  865. {
  866. expectFull = recyclerSweep.smallBlockVerifyListConsistencyData.expectFull;
  867. expectDispose = recyclerSweep.smallBlockVerifyListConsistencyData.expectDispose;
  868. }
  869. else if (TBlockType::HeapBlockAttributes::IsMediumBlock)
  870. {
  871. expectFull = recyclerSweep.mediumBlockVerifyListConsistencyData.expectFull;
  872. expectDispose = recyclerSweep.mediumBlockVerifyListConsistencyData.expectDispose;
  873. }
  874. else
  875. {
  876. Assert(false);
  877. }
  878. if (heapBlock->IsClearedFromAllocator())
  879. {
  880. // this function is called during sweep and we are recreating the heap block list
  881. // which would make all the block to be in it's rightful place
  882. heapBlock->SetIsClearedFromAllocator(false);
  883. Assert(SweepStateFull != state);
  884. }
  885. else
  886. {
  887. // You can still be full only if you are full before.
  888. Assert(expectFull || SweepStateFull != state);
  889. }
  890. // If you were pending dispose before, you can only be pending dispose after
  891. Assert(!expectDispose || SweepStatePendingDispose == state);
  892. }
  893. #endif // DBG
  894. #if ENABLE_PARTIAL_GC
// Returns whether swept blocks should be queued for a pending (concurrent)
// sweep: only normal buckets do so, and only while a partial collect is active.
template <typename TBlockType>
bool
HeapBucketT<TBlockType>::DoQueuePendingSweep(Recycler * recycler)
{
    return IsNormalBucket && recycler->inPartialCollectMode;
}
// Returns whether this bucket participates in a partial-reuse sweep.
template <typename TBlockType>
bool
HeapBucketT<TBlockType>::DoPartialReuseSweep(Recycler * recycler)
{
    // With leaf, we don't need to do a partial sweep
    // WriteBarrier-TODO: We shouldn't need to do this for write barrier heap buckets either
    return !IsLeafBucket && recycler->inPartialCollectMode;
}
  909. #endif
// Sweep Pass 1 over one list of heap blocks. Each block's Sweep() reports a
// SweepState, and the block is routed accordingly:
//   PendingSweep   -> recyclerSweep's pendingSweepList (concurrent sweep later)
//   PendingDispose -> the finalizable bucket's pendingDisposeList
//   Swept          -> back onto heapBlockList (or the allocable SLIST when
//                     allocations are running during concurrent sweep)
//   Full           -> fullBlockList
//   Empty          -> freed (queued for background zeroing, or released in-thread)
// 'allocable' is forwarded to each block's Sweep().
template <typename TBlockType>
void
HeapBucketT<TBlockType>::SweepHeapBlockList(RecyclerSweep& recyclerSweep, TBlockType * heapBlockList, bool allocable)
{
#if DBG
    // Consume the list-consistency data the caller set up for this walk.
    if (TBlockType::HeapBlockAttributes::IsSmallBlock)
    {
        Assert(recyclerSweep.smallBlockVerifyListConsistencyData.hasSetupVerifyListConsistencyData);
        recyclerSweep.smallBlockVerifyListConsistencyData.hasSetupVerifyListConsistencyData = false;
    }
    else if (TBlockType::HeapBlockAttributes::IsMediumBlock)
    {
        Assert(recyclerSweep.mediumBlockVerifyListConsistencyData.hasSetupVerifyListConsistencyData);
        recyclerSweep.mediumBlockVerifyListConsistencyData.hasSetupVerifyListConsistencyData = false;
    }
    else
    {
        Assert(false);
    }
#endif
    Recycler * recycler = recyclerSweep.GetRecycler();
    // Whether we run in thread or background thread, we want to queue up pending sweep
    // only if we are doing partial GC so we can calculate the heuristics before
    // determining whether we want to fully sweep the block or partially sweep the block
#if ENABLE_PARTIAL_GC
    // CONCURRENT-TODO: Add a mode where we can do in thread sweep, and concurrent partial sweep?
    bool const queuePendingSweep = this->DoQueuePendingSweep(recycler);
#else
    bool const queuePendingSweep = false;
#endif
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    Assert(this->IsAllocationStopped() || this->AllocationsStartedDuringConcurrentSweep());
#else
    Assert(this->IsAllocationStopped());
#endif
    HeapBlockList::ForEachEditing(heapBlockList, [=, &recyclerSweep](TBlockType * heapBlock)
    {
        // The whole list needs to be consistent
        DebugOnly(VerifyBlockConsistencyInList(heapBlock, recyclerSweep));
#ifdef RECYCLER_TRACE
        recyclerSweep.GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**1**] starting Sweep Pass1."));
#endif
        SweepState state = heapBlock->Sweep(recyclerSweep, queuePendingSweep, allocable);
        DebugOnly(VerifyBlockConsistencyInList(heapBlock, recyclerSweep, state));
        switch (state)
        {
#if ENABLE_CONCURRENT_GC
        case SweepStatePendingSweep:
        {
            Assert(IsNormalBucket);
            // blocks that have swept object. Queue up the block for concurrent sweep.
            Assert(queuePendingSweep);
            TBlockType *& pendingSweepList = recyclerSweep.GetPendingSweepBlockList(this);
            DebugOnly(this->AssertCheckHeapBlockNotInAnyList(heapBlock));
            AssertMsg(!HeapBlockList::Contains(heapBlock, pendingSweepList), "The heap block already exists in the pendingSweepList.");
            heapBlock->SetNextBlock(pendingSweepList);
            pendingSweepList = heapBlock;
#ifdef RECYCLER_TRACE
            recyclerSweep.GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**2**] finished Sweep Pass1, heapblock added to pendingSweepList."));
#endif
#if ENABLE_PARTIAL_GC
            recyclerSweep.GetManager()->NotifyAllocableObjects(heapBlock);
#endif
            break;
        }
#endif
        case SweepStatePendingDispose:
        {
            // Dispose must run in-thread; finalizable buckets only.
            Assert(!recyclerSweep.IsBackground());
#ifdef RECYCLER_WRITE_BARRIER
            Assert(IsFinalizableBucket || IsFinalizableWriteBarrierBucket);
#else
            Assert(IsFinalizableBucket);
#endif
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
            if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
            {
                AssertMsg(!heapBlock->isPendingConcurrentSweepPrep, "Finalizable blocks don't support allocations during concurrent sweep.");
            }
#endif
            DebugOnly(heapBlock->template AsFinalizableBlock<typename TBlockType::HeapBlockAttributes>()->SetIsPendingDispose());
            // These are the blocks that have swept finalizable object
            // We already transferred the non finalizable swept objects when we are not doing
            // concurrent collection, so we only need to queue up the blocks that have
            // finalizable objects, so that we can go through and call the dispose, and then
            // transfer the finalizable object back to the free list.
            SmallFinalizableHeapBucketT<typename TBlockType::HeapBlockAttributes> * finalizableHeapBucket = (SmallFinalizableHeapBucketT<typename TBlockType::HeapBlockAttributes>*)this;
            DebugOnly(this->AssertCheckHeapBlockNotInAnyList(heapBlock));
            //AssertMsg(!HeapBlockList::Contains(heapBlock, finalizableHeapBucket->pendingDisposeList), "The heap block already exists in the pendingDisposeList.");
            heapBlock->template AsFinalizableBlock<typename TBlockType::HeapBlockAttributes>()->SetNextBlock(finalizableHeapBucket->pendingDisposeList);
            finalizableHeapBucket->pendingDisposeList = heapBlock->template AsFinalizableBlock<typename TBlockType::HeapBlockAttributes>();
            Assert(!this->heapInfo->hasPendingTransferDisposedObjects);
            recycler->hasDisposableObject = true;
#ifdef RECYCLER_TRACE
            recyclerSweep.GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**3**] finished Sweep Pass1, heapblock added to pendingDisposeList."));
#endif
            break;
        }
        case SweepStateSwept:
        {
            Assert(this->nextAllocableBlockHead == nullptr);
            Assert(heapBlock->HasFreeObject());
            DebugOnly(this->AssertCheckHeapBlockNotInAnyList(heapBlock));
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST
            // While allocations run during concurrent sweep, swept blocks go to
            // the interlocked SLIST the allocator draws from.
            if (this->AllocationsStartedDuringConcurrentSweep())
            {
                Assert(!this->IsAnyFinalizableBucket());
                Assert(!heapBlock->isPendingConcurrentSweepPrep);
                bool blockAddedToSList = HeapBucketT<TBlockType>::PushHeapBlockToSList(this->allocableHeapBlockListHead, heapBlock);
                // If we encountered OOM while pushing the heapBlock to the SLIST we must add it to the heapBlockList so we don't lose track of it.
                if (!blockAddedToSList)
                {
                    //TODO: akatti: We should handle this gracefully and try to recover from this state.
                    AssertOrFailFastMsg(false, "OOM while adding a heap block to the SLIST during concurrent sweep.");
                }
#ifdef RECYCLER_TRACE
                else
                {
                    this->GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**4**] swept and added to SLIST allocableHeapBlockListHead during Pass1."));
                }
#endif
            }
            else
#endif
            {
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
                AssertMsg(!this->AllowAllocationsDuringConcurrentSweep(), "Why are allocations not started during concurrent sweep?");
#endif
                heapBlock->SetNextBlock(this->heapBlockList);
                this->heapBlockList = heapBlock;
            }
#ifdef RECYCLER_TRACE
            recyclerSweep.GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**6**] finished Sweep Pass1, heapblock added to heapBlockList."));
#endif
#if ENABLE_PARTIAL_GC
            recyclerSweep.GetManager()->NotifyAllocableObjects(heapBlock);
#endif
            break;
        }
        case SweepStateFull:
        {
            Assert(!heapBlock->HasFreeObject());
            DebugOnly(this->AssertCheckHeapBlockNotInAnyList(heapBlock));
            heapBlock->SetNextBlock(this->fullBlockList);
            this->fullBlockList = heapBlock;
#ifdef RECYCLER_TRACE
            recyclerSweep.GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**7**] finished Sweep Pass1, heapblock FULL added to fullBlockList."));
#endif
            break;
        }
        case SweepStateEmpty:
        {
            // the block is empty, just free them
#ifdef RECYCLER_MEMORY_VERIFY
            // Let's verify it before we free it
            if (recycler->VerifyEnabled())
            {
                heapBlock->Verify();
            }
#endif
            RECYCLER_STATS_INC(recycler, numEmptySmallBlocks[heapBlock->GetHeapBlockType()]);
#if ENABLE_CONCURRENT_GC
            // CONCURRENT-TODO: Finalizable block never have background == true and always be processed
            // in thread, so it will not queue up the pages even if we are doing concurrent GC
            DebugOnly(this->AssertCheckHeapBlockNotInAnyList(heapBlock));
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
            if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
            {
                AssertMsg(heapBlock->objectsAllocatedDuringConcurrentSweepCount == 0, "We allocated to this block during concurrent sweep; it's not EMPTY anymore, it should NOT be freed or queued as EMPTY.");
            }
#endif
            if (recyclerSweep.IsBackground())
            {
#ifdef RECYCLER_WRITE_BARRIER
                Assert(!(IsFinalizableBucket || IsFinalizableWriteBarrierBucket));
#else
                Assert(!IsFinalizableBucket);
#endif
                // CONCURRENT-TODO: We will zero heap block even if the number free page pool exceed
                // the maximum and will get decommitted anyway
                recyclerSweep.template QueueEmptyHeapBlock<TBlockType>(this, heapBlock);
                RECYCLER_STATS_INC(recycler, numZeroedOutSmallBlocks);
#ifdef RECYCLER_TRACE
                recyclerSweep.GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**8**] finished Sweep Pass1, heapblock EMPTY added to pendingEmptyBlockList."));
#endif
            }
            else
#endif
            {
                // Just free the page in thread (and zero the page)
                heapBlock->ReleasePagesSweep(recycler);
                FreeHeapBlock(heapBlock);
#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
                this->heapBlockCount--;
#endif
#ifdef RECYCLER_TRACE
                recyclerSweep.GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**9**] finished Sweep Pass1, heapblock EMPTY, was FREED in-thread."));
#endif
            }
            break;
        }
        }
    });
}
// Sweep this bucket: stop allocation (unless background sweep setup already
// did), drain the sweepableHeapBlockList left over from a previous concurrent
// sweep, then detach fullBlockList/heapBlockList and run Sweep Pass 1 over both.
template <typename TBlockType>
void
HeapBucketT<TBlockType>::SweepBucket(RecyclerSweep& recyclerSweep)
{
    DebugOnly(TBlockType * savedNextAllocableBlockHead);
    RECYCLER_SLOW_CHECK(this->VerifyHeapBlockCount(recyclerSweep.IsBackground()));
#if ENABLE_CONCURRENT_GC
    if (recyclerSweep.GetManager()->HasSetupBackgroundSweep())
    {
        // SetupBackgroundSweep set nextAllocableBlockHead to null already
        Assert(IsAllocationStopped());
        DebugOnly(savedNextAllocableBlockHead = recyclerSweep.GetSavedNextAllocableBlockHead(this));
    }
    else
#endif
    {
        Assert(AllocatorsAreEmpty());
        DebugOnly(savedNextAllocableBlockHead = this->nextAllocableBlockHead);
        this->StopAllocationBeforeSweep();
    }
    // We just started sweeping. These pending lists should be empty
#if ENABLE_CONCURRENT_GC
    Assert(recyclerSweep.GetPendingSweepBlockList(this) == nullptr);
#else
    Assert(!recyclerSweep.IsBackground());
#endif
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST
    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->sweepableHeapBlockList != nullptr)
    {
        Assert(!this->IsAnyFinalizableBucket());
#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
        // This lock is needed only in the debug mode while we verify block counts. Not needed otherwise, as this list is never accessed concurrently.
        // Items are added to it by the allocator when allocations are allowed during concurrent sweep. The list is drained during the next sweep while
        // allocation are stopped.
        debugSweepableHeapBlockListLock.Enter();
#endif
        // Return the blocks we may have allocated from during the previous concurrent sweep back to the fullBlockList.
        // We need to rebuild the free bit vectors for these blocks.
        HeapBlockList::ForEachEditing(this->sweepableHeapBlockList, [this](TBlockType * heapBlock)
        {
            heapBlock->BuildFreeBitVector();
            AssertMsg(!HeapBlockList::Contains(heapBlock, heapBlockList), "The heap block already exists in the heapBlockList.");
            AssertMsg(!HeapBlockList::Contains(heapBlock, fullBlockList), "The heap block already exists in the fullBlockList.");
            AssertMsg(!HeapBlockList::Contains(heapBlock, emptyBlockList), "The heap block already exists in the emptyBlockList.");
            heapBlock->SetNextBlock(this->fullBlockList);
            this->fullBlockList = heapBlock;
        });
        this->sweepableHeapBlockList = nullptr;
#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
        debugSweepableHeapBlockListLock.Leave();
#endif
    }
#endif
#if DBG
    // First pass walks the (previously allocable) heapBlockList, so set up the
    // consistency data with the saved nextAllocableBlockHead.
    if (TBlockType::HeapBlockAttributes::IsSmallBlock)
    {
        recyclerSweep.SetupVerifyListConsistencyDataForSmallBlock((SmallHeapBlock*) savedNextAllocableBlockHead, true, false);
    }
    else if (TBlockType::HeapBlockAttributes::IsMediumBlock)
    {
        recyclerSweep.SetupVerifyListConsistencyDataForMediumBlock((MediumHeapBlock*) savedNextAllocableBlockHead, true, false);
    }
    else
    {
        Assert(false);
    }
#endif
    // Move the list locally. We will relink them during sweep
    TBlockType * currentFullBlockList = fullBlockList;
    TBlockType * currentHeapBlockList = heapBlockList;
    this->heapBlockList = nullptr;
    this->fullBlockList = nullptr;
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    // In order to allow allocations during sweep (Pass-1) we will set aside blocks after nextAllocableBlockHead (excluding) and allow
    // allocations to these blocks as we know that these blocks are not full yet. These will need to be swept later though before starting Pass-2
    // of the sweep.
    this->PrepareForAllocationsDuringConcurrentSweep(currentHeapBlockList);
#endif
    this->SweepHeapBlockList(recyclerSweep, currentHeapBlockList, true);
#if DBG
    // Second pass walks the full list: every block is expected full (no
    // nextAllocableBlockHead applies).
    if (TBlockType::HeapBlockAttributes::IsSmallBlock)
    {
        recyclerSweep.SetupVerifyListConsistencyDataForSmallBlock(nullptr, true, false);
    }
    else if (TBlockType::HeapBlockAttributes::IsMediumBlock)
    {
        recyclerSweep.SetupVerifyListConsistencyDataForMediumBlock(nullptr, true, false);
    }
    else
    {
        Assert(false);
    }
#endif
    this->SweepHeapBlockList(recyclerSweep, currentFullBlockList, false);
    // We shouldn't have allocated from any block yet
    Assert(this->nextAllocableBlockHead == nullptr);
}
// Returns whether this bucket may serve allocations while a concurrent sweep
// is in progress. Requires the feature flag, the recycler's permission, the
// per-bucket block-count threshold (see DoTwoPassConcurrentSweepPreCheck), a
// non-finalizable bucket, a sweep (or sweep-setup) state, and no partial GC.
template <typename TBlockType>
bool
HeapBucketT<TBlockType>::AllowAllocationsDuringConcurrentSweep()
{
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    Recycler * recycler = this->GetRecycler();
    if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || !recycler->AllowAllocationsDuringConcurrentSweep() || !this->concurrentSweepAllocationsThresholdExceeded)
    {
        return false;
    }
#if ENABLE_PARTIAL_GC
    bool isPartialGC = (recycler->recyclerSweepManager != nullptr) && recycler->recyclerSweepManager->InPartialCollect();
#else
    bool isPartialGC = false;
#endif
    // Allocations are allowed during concurrent sweep for small non-finalizable buckets while not doing a Partial GC.
    return (recycler->IsConcurrentSweepSetupState() || recycler->InConcurrentSweep()) && !this->IsAnyFinalizableBucket() && !isPartialGC;
#else
    return false;
#endif
}
// Halt allocation in this bucket ahead of a sweep: clear the
// concurrent-sweep-allocation flag, remember the last nextAllocableBlockHead
// (for SLIST bookkeeping), and null out the allocable head.
template <typename TBlockType>
void
HeapBucketT<TBlockType>::StopAllocationBeforeSweep()
{
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    this->allocationsStartedDuringConcurrentSweep = false;
#if SUPPORT_WIN32_SLIST
    // Remember where allocation would have resumed before we null it below.
    this->lastKnownNextAllocableBlockHead = this->nextAllocableBlockHead;
#endif
#endif
    Assert(!this->IsAllocationStopped());
    this->isAllocationStopped = true;
    this->nextAllocableBlockHead = nullptr;
}
// Re-enable allocation once sweep is done; allocation resumes from the head of
// the rebuilt heap block list.
template <typename TBlockType>
void
HeapBucketT<TBlockType>::StartAllocationAfterSweep()
{
    Assert(this->IsAllocationStopped());
    this->isAllocationStopped = false;
    this->nextAllocableBlockHead = this->heapBlockList;
}
  1254. #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
// Switch this bucket into SLIST-based allocation while the concurrent sweep
// runs. Only legal for non-finalizable buckets outside a partial collect.
template <typename TBlockType>
void
HeapBucketT<TBlockType>::StartAllocationDuringConcurrentSweep()
{
    Recycler * recycler = this->GetRecycler();
    Assert(!recycler->recyclerSweepManager->InPartialCollect());
    Assert(!this->IsAnyFinalizableBucket());
    Assert(this->IsAllocationStopped());
    this->isAllocationStopped = false;
    Assert(!this->allocationsStartedDuringConcurrentSweep);
    this->allocationsStartedDuringConcurrentSweep = true;
#if SUPPORT_WIN32_SLIST
    // When allocations are allowed during concurrent sweep we set nextAllocableBlockHead to NULL as the allocator will pick heap blocks from the
    // interlocked SLIST. During that time, the heap block at the top of the SLIST is always the nextAllocableBlockHead.
    this->nextAllocableBlockHead = nullptr;
    this->lastKnownNextAllocableBlockHead = nullptr;
#endif
}
template <typename TBlockType>
void
HeapBucketT<TBlockType>::ResumeNormalAllocationAfterConcurrentSweep(TBlockType * newNextAllocableBlockHead)
{
    // Concurrent sweep is over: switch back from SLIST-based allocation to
    // normal allocation, resuming at the block the caller determined.
    this->allocationsStartedDuringConcurrentSweep = false;
    this->isAllocationStopped = false;
    // If the newNextAllocableBlockHead is NULL at this point that means we have exhausted usable blocks and will have to allocate a new block the next time.
    this->nextAllocableBlockHead = newNextAllocableBlockHead;
}
  1282. /*//////////////////////////////////////////////////////////////////////////////////////////////////////
  1283. If allocations are to be allowed to existing heap blocks during concurrent sweep, we set aside a few
heap blocks from the heapBlockList prior to beginning sweep. However, we need to then go back and make
sure these blocks are also swept before this sweep finishes. In order to do this we clearly define concurrent
sweep having 2 passes now. These passes existed before but were not distinguished as they would always start
  1287. and finish in one go on the background thread. However, whenever allocations are allowed during concurrent
  1288. sweep; the concurrent sweep will start Pass1 on the background thread, wait to finish Pass1 of the blocks
  1289. we set aside to allocate from on the main thread and then go back to finish Pass2 for all heap blocks on
  1290. the background thread. Note that, due to this need to finish Pass1 on the foreground thread the overall
background sweep will now appear to take longer whenever we choose to do such a two-pass sweep.
  1292. The sequence of things we do to allow allocations during concurrent sweep is described below:
1. At the beginning of concurrent sweep we decide if we will benefit from allowing allocations during concurrent
  1294. sweep for any of the buckets. If there is at-least one bucket for which we think we will benefit we will turn on
  1295. allocations during concurrent sweep. Once turned on we will attempt to enable allocations during concurrent sweep
for all supported buckets (i.e. small/medium, normal/leaf, non-finalizable buckets; write barrier buckets are supported
as well).
  1298. 2. If allocations are turned on during concurrent sweep, we will see if there are any allocable blocks in the
  1299. heapBlockList after the nextAllocableBlockHead. If we find any such blocks, we move them to a SLIST that the
  1300. allocator can pick these blocks from during sweep.
  1301. 3. CollectionStateConcurrentSweepPass1: We will finish Pass1 of the sweep for all the remaining blocks (other than the
  1302. ones we put in the SLIST in step 2 above) This will generally happen on the background thread unless we are forcing
  1303. in-thread sweep. This state is now specifically identified as CollectionStateConcurrentSweepPass1.
  1304. 4. CollectionStateConcurrentSweepPass1Wait: At this point we need to wait for all the blocks that we put in the SLIST
  1305. to also finish the Pass1 of the sweep. This needs to happen on the foreground thread so we prevent the allocator from
  1306. picking up the blocks from SLIST while we do this. This state is now identified as CollectionStateConcurrentSweepPass1Wait.
  1307. 5. CollectionStateConcurrentSweepPass2: At this point we will do the actual sweeping of all the blocks that are not yet swept,
for example, any blocks that were put onto the pendingSweepList. As these blocks get swept we keep adding them to the
  1309. SLIST again to allow allocators to allocate from them as soon as they are swept.
6. Before Pass2 can finish and we can call this concurrent sweep done, we need to move all the blocks off of the SLIST so
  1311. that normal allocations can begin after the sweep. This is the last step of the concurrent sweep.
  1312. //////////////////////////////////////////////////////////////////////////////////////////////////////*/
template<typename TBlockType>
void
HeapBucketT<TBlockType>::PrepareForAllocationsDuringConcurrentSweep(TBlockType * &currentHeapBlockList)
{
    // Set aside the allocable blocks that follow the last known allocation
    // point and publish them on the interlocked SLIST so the allocator can
    // keep allocating while the rest of the bucket is swept concurrently.
    // NOTE(review): the currentHeapBlockList parameter is not used in this
    // body - presumably kept for signature symmetry with callers; confirm.
#if SUPPORT_WIN32_SLIST
    if (this->AllowAllocationsDuringConcurrentSweep())
    {
        this->EnsureAllocableHeapBlockList();
        Assert(!this->IsAnyFinalizableBucket());
        // Both transfer lists must be empty before we start handing blocks out.
        Assert(HeapBucketT<TBlockType>::QueryDepthInterlockedSList(this->allocableHeapBlockListHead) == 0);
        Assert(HeapBlockList::Count(this->sweepableHeapBlockList) == 0);

        TBlockType* startingNextAllocableBlockHead = this->lastKnownNextAllocableBlockHead;
        bool allocationsStarted = false;
        if (startingNextAllocableBlockHead != nullptr)
        {
            // To avoid a race condition between the allocator attempting to allocate from the lastKnownNextAllocableBlockHead and this code
            // where we are adding it to the SLIST we skip the lastKnownNextAllocableBlockHead and pick up the next block to start with.
            // Allocations should have stopped by then; so allocator shouldn't pick up the lastKnownNextAllocableBlockHead->Next block.
            TBlockType* savedNextAllocableBlockHead = startingNextAllocableBlockHead->GetNextBlock();
            startingNextAllocableBlockHead->SetNextBlock(nullptr);
            startingNextAllocableBlockHead = savedNextAllocableBlockHead;
            if (startingNextAllocableBlockHead != nullptr)
            {
                // The allocable blocks, if any are available, will now be added to the allocable blocks SLIST at this time; start allocations now.
                this->StartAllocationDuringConcurrentSweep();
                allocationsStarted = true;
                HeapBlockList::ForEachEditing(startingNextAllocableBlockHead, [this, &allocationsStarted](TBlockType * heapBlock)
                {
                    // This heap block is NOT ready to be swept concurrently as it hasn't yet been through sweep prep (i.e. Pass1 of sweep).
                    heapBlock->isPendingConcurrentSweepPrep = true;
                    DebugOnly(this->AssertCheckHeapBlockNotInAnyList(heapBlock));
                    bool blockAddedToSList = HeapBucketT<TBlockType>::PushHeapBlockToSList(this->allocableHeapBlockListHead, heapBlock);
                    // If we encountered OOM while pushing the heapBlock to the SLIST we must add it to the heapBlockList so we don't lose track of it.
                    if (!blockAddedToSList)
                    {
                        //TODO: akatti: We should handle this gracefully and try to recover from this state.
                        AssertOrFailFastMsg(false, "OOM while adding a heap block to the SLIST during concurrent sweep.");
                    }
                    else
                    {
#ifdef RECYCLER_TRACE
                        this->GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**5**] added to SLIST before Pass1."));
#endif
                    }
                });

#ifdef RECYCLER_TRACE
                if (this->GetRecycler()->GetRecyclerFlagsTable().Trace.IsEnabled(Js::ConcurrentSweepPhase) && CONFIG_FLAG_RELEASE(Verbose))
                {
                    size_t currentHeapBlockCount = QueryDepthInterlockedSList(allocableHeapBlockListHead);
                    CollectionState collectionState = this->GetRecycler()->collectionState;
                    Output::Print(_u("[GC #%d] [HeapBucket 0x%p] Starting allocations during concurrent sweep with %d blocks. [CollectionState: %d] \n"), this->GetRecycler()->collectionCount, this, currentHeapBlockCount, collectionState);
                    Output::Print(_u("[GC #%d] [HeapBucket 0x%p] The heapBlockList has %d blocks. Total heapBlockCount is %d.\n\n"), this->GetRecycler()->collectionCount, this, HeapBlockList::Count(this->heapBlockList), this->heapBlockCount);
                }
#endif
            }
        }

        if (!allocationsStarted)
        {
            // If we didn't start allocations yet, start them now in anticipation of blocks becoming available later as blocks complete sweep.
            this->StartAllocationDuringConcurrentSweep();
            allocationsStarted = true;
        }
        Assert(!this->IsAllocationStopped());
    }
#endif
}
#endif
  1380. template <typename TBlockType>
  1381. bool
  1382. HeapBucketT<TBlockType>::IsAllocationStopped() const
  1383. {
  1384. if (this->isAllocationStopped)
  1385. {
  1386. Assert(this->nextAllocableBlockHead == nullptr);
  1387. return true;
  1388. }
  1389. return false;
  1390. }
template <typename TBlockType>
uint
HeapBucketT<TBlockType>::Rescan(Recycler * recycler, RescanFlags flags)
{
    // Base-bucket rescan: verifies block counts and, for in-thread rescans,
    // runs sweep preparation. Returns the number of rescanned bytes/objects;
    // the base implementation rescans nothing (derived buckets override).
#if ENABLE_CONCURRENT_GC
    RECYCLER_SLOW_CHECK(this->VerifyHeapBlockCount(!!recycler->IsConcurrentMarkState()));
#else
    RECYCLER_SLOW_CHECK(this->VerifyHeapBlockCount(false /* background */));
#endif

#if ENABLE_CONCURRENT_GC
    // If we do the final rescan concurrently, the main thread will prepare for sweep concurrently
    // If we do rescan in thread, we will need to prepare sweep here.
    // However, if we are in the rescan for OOM, we have already done it, so no need to do it again
    if (!recycler->IsConcurrentMarkState() && !recycler->inEndMarkOnLowMemory)
    {
        this->PrepareSweep();
    }
#endif
    // By default heap bucket doesn't rescan anything
    return 0;
}
#if ENABLE_CONCURRENT_GC
template <typename TBlockType>
void
HeapBucketT<TBlockType>::MergeNewHeapBlock(TBlockType * heapBlock)
{
    // Merge a newly created heap block (tracked via newHeapBlockCount) into
    // this bucket's main heap block list.
    Assert(heapBlock->GetObjectSize() == this->sizeCat);
    heapBlock->SetNextBlock(this->heapBlockList);
    this->heapBlockList = heapBlock;
#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
    // newHeapBlockCount is decremented with an interlocked op; heapBlockCount
    // uses a plain increment. NOTE(review): assumed heapBlockCount is only
    // mutated on a single thread here - confirm against callers.
    ::InterlockedDecrement(&this->newHeapBlockCount);
    this->heapBlockCount++;
#endif
}
template <typename TBlockType>
void
HeapBucketT<TBlockType>::SetupBackgroundSweep(RecyclerSweep& recyclerSweep)
{
    // Prepare this bucket for a background (concurrent) sweep: allocators
    // must already be empty, and allocation is stopped for the duration.
    // Don't allocate from existing blocks temporarily while concurrent sweeping.
    // Currently Rescan clears the allocators; if we removed the
    // uncollectedAllocBytes accounting there, we could avoid it there and do it here.
    Assert(this->AllocatorsAreEmpty());
    DebugOnly(recyclerSweep.SaveNextAllocableBlockHead(this));
    Assert(recyclerSweep.GetPendingSweepBlockList(this) == nullptr);
    this->StopAllocationBeforeSweep();
}
#endif
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
template <typename TBlockType>
void
HeapBucketT<TBlockType>::FinishConcurrentSweepPass1(RecyclerSweep& recyclerSweep)
{
    // End of sweep Pass1 for a bucket that allowed allocations during
    // concurrent sweep: account for objects the allocator handed out while
    // sweeping, drain the unused blocks off the allocable SLIST, and run
    // Pass1 on any blocks that still need sweep prep.
    if (this->concurrentSweepAllocationsThresholdExceeded)
    {
        AssertMsg(this->AllowAllocationsDuringConcurrentSweep(), "Why are we in two pass concurrent sweep?");
        Assert(!this->IsAnyFinalizableBucket());

        // Rebuild the free bit vectors for the blocks we allocated from during concurrent sweep.
        TBlockType * currentPendingSweepPrepHeapBlockList = nullptr;
        TBlockType * currentSweepableHeapBlockList = this->sweepableHeapBlockList;
        this->sweepableHeapBlockList = nullptr;
        HeapBlockList::ForEachEditing(currentSweepableHeapBlockList, [this, &currentPendingSweepPrepHeapBlockList](TBlockType * heapBlock)
        {
            if (heapBlock->isPendingConcurrentSweepPrep)
            {
                ushort previousFreeCount = heapBlock->freeCount;
                heapBlock->BuildFreeBitVector();
#if ENABLE_PARTIAL_GC
                heapBlock->oldFreeCount = heapBlock->lastFreeCount = heapBlock->freeCount;
#else
                heapBlock->lastFreeCount = heapBlock->freeCount;
#endif
                // Free entries consumed since the last rebuild are exactly the
                // objects the allocator handed out during concurrent sweep.
                ushort newAllocatedObjects = previousFreeCount - heapBlock->freeCount;
                AssertMsg(newAllocatedObjects == heapBlock->objectsMarkedDuringSweep, "The counts of objects allocated during sweep should match the objects marked during sweep.");
#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
                heapBlock->objectsAllocatedDuringConcurrentSweepCount = newAllocatedObjects;
#endif
                ushort currentMarkCount = (ushort)heapBlock->GetMarkCountForSweep();
                heapBlock->markCount = currentMarkCount;
#if DBG
                heapBlock->GetRecycler()->heapBlockMap.SetPageMarkCount(heapBlock->GetAddress(), currentMarkCount);
#endif
#ifdef RECYCLER_TRACE
                heapBlock->GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**13**] ending sweep Pass1, rebuilt free bit vector and set page mark count to match."));
#endif
                // Needs Pass1 sweep prep: queue it for SweepHeapBlockList below.
                heapBlock->SetNextBlock(currentPendingSweepPrepHeapBlockList);
                currentPendingSweepPrepHeapBlockList = heapBlock;
            }
            else
            {
                // Already prepped; keep it on the sweepable list for later processing.
                heapBlock->SetNextBlock(this->sweepableHeapBlockList);
                this->sweepableHeapBlockList = heapBlock;
            }
        });

#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
        // This lock is needed only in the debug mode while we verify block counts. Not needed otherwise, as this list is never accessed concurrently.
        // Items are added to it by the allocator when allocations are allowed during concurrent sweep. The list is drained during the next sweep while
        // allocation are stopped.
        debugSweepableHeapBlockListLock.Enter();
#endif
        // Pull the blocks from the allocable SLIST that we didn't use. We need to finish the Pass-1 sweep of these blocks too.
        TBlockType * heapBlock = PopHeapBlockFromSList(this->allocableHeapBlockListHead);
        while (heapBlock != nullptr)
        {
            DebugOnly(this->AssertCheckHeapBlockNotInAnyList(heapBlock));
            if (heapBlock->isPendingConcurrentSweepPrep)
            {
#ifdef RECYCLER_TRACE
                heapBlock->GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**19**] ending sweep Pass1, removed from SLIST."));
#endif
                heapBlock->SetNextBlock(currentPendingSweepPrepHeapBlockList);
                currentPendingSweepPrepHeapBlockList = heapBlock;
            }
            else
            {
#ifdef RECYCLER_TRACE
                heapBlock->GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**23**] ending sweep Pass1, removed from SLIST and added to sweepableHeapBlockList."));
#endif
                // Already swept, put it back to the sweepableHeapBlockList list; so it can be processed later.
                heapBlock->SetNextBlock(this->sweepableHeapBlockList);
                this->sweepableHeapBlockList = heapBlock;
            }
            heapBlock = PopHeapBlockFromSList(this->allocableHeapBlockListHead);
        }
        Assert(QueryDepthInterlockedSList(this->allocableHeapBlockListHead) == 0);
#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
        debugSweepableHeapBlockListLock.Leave();
#endif

#if DBG
        // Reset list-consistency verification state before sweeping the
        // pending-prep list below.
        if (TBlockType::HeapBlockAttributes::IsSmallBlock)
        {
            recyclerSweep.SetupVerifyListConsistencyDataForSmallBlock(nullptr, true, false);
        }
        else if (TBlockType::HeapBlockAttributes::IsMediumBlock)
        {
            recyclerSweep.SetupVerifyListConsistencyDataForMediumBlock(nullptr, true, false);
        }
        else
        {
            Assert(false);
        }
#endif

        // Start allocations now as we may start adding blocks to the SLIST during Pass1 sweep below.
        this->StartAllocationDuringConcurrentSweep();
        this->SweepHeapBlockList(recyclerSweep, currentPendingSweepPrepHeapBlockList, true /*allocable*/);
    }
}
  1537. template <typename TBlockType>
  1538. void
  1539. HeapBucketT<TBlockType>::EnsureAllocableHeapBlockList()
  1540. {
  1541. #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST
  1542. if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
  1543. {
  1544. if (allocableHeapBlockListHead == nullptr)
  1545. {
  1546. allocableHeapBlockListHead = ((PSLIST_HEADER)_aligned_malloc(sizeof(SLIST_HEADER), MEMORY_ALLOCATION_ALIGNMENT));
  1547. if (allocableHeapBlockListHead == nullptr)
  1548. {
  1549. this->heapInfo->recycler->OutOfMemory();
  1550. }
  1551. else
  1552. {
  1553. ::InitializeSListHead(allocableHeapBlockListHead);
  1554. }
  1555. }
  1556. }
  1557. #endif
  1558. }
template <typename TBlockType>
void
HeapBucketT<TBlockType>::FinishSweepPrep(RecyclerSweep& recyclerSweep)
{
    // If the allocator was running against this bucket during concurrent
    // sweep, stop it again and clear the allocators so the remaining sweep
    // phases see a quiesced bucket.
    if (this->AllocationsStartedDuringConcurrentSweep())
    {
        AssertMsg(this->AllowAllocationsDuringConcurrentSweep(), "Why are allocations started during concurrent sweep, if not allowed?");
        Assert(!this->IsAnyFinalizableBucket());
        this->StopAllocationBeforeSweep();
        this->ClearAllocators();
    }
}
template <typename TBlockType>
void
HeapBucketT<TBlockType>::FinishConcurrentSweep()
{
    // Final step of a concurrent sweep with allocations enabled: drain every
    // remaining block off the allocable SLIST back onto heapBlockList, then
    // resume normal allocation from the last block drained.
    if (this->AllocationsStartedDuringConcurrentSweep())
    {
#if SUPPORT_WIN32_SLIST
        Assert(!this->IsAnyFinalizableBucket());
        Assert(this->allocableHeapBlockListHead != nullptr);

#ifdef RECYCLER_TRACE
        if (this->GetRecycler()->GetRecyclerFlagsTable().Trace.IsEnabled(Js::ConcurrentSweepPhase) && CONFIG_FLAG_RELEASE(Verbose))
        {
            CollectionState collectionState = this->GetRecycler()->collectionState;
            Output::Print(_u("[GC #%d] [HeapBucket 0x%p] starting FinishConcurrentSweep [CollectionState: %d] \n"), this->GetRecycler()->collectionCount, this, collectionState);
        }
#endif

        TBlockType * newNextAllocableBlockHead = nullptr;
        // Put the blocks from the allocable SLIST into the heapBlockList.
        TBlockType * heapBlock = PopHeapBlockFromSList(this->allocableHeapBlockListHead);
        while (heapBlock != nullptr)
        {
            DebugOnly(this->AssertCheckHeapBlockNotInAnyList(heapBlock));
            AssertMsg(!heapBlock->isPendingConcurrentSweepPrep, "The blocks in the SLIST at this time should NOT have sweep prep i.e. sweep-Pass1 pending.");
            // The last block popped ends up at the head of heapBlockList and
            // becomes the resume point for normal allocation.
            newNextAllocableBlockHead = heapBlock;
            heapBlock->SetNextBlock(this->heapBlockList);
            this->heapBlockList = heapBlock;
#ifdef RECYCLER_TRACE
            this->GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**40**] finished FinishConcurrentSweep, heapblock removed from SLIST and added to heapBlockList."));
#endif
            heapBlock = PopHeapBlockFromSList(this->allocableHeapBlockListHead);
        }
        Assert(QueryDepthInterlockedSList(this->allocableHeapBlockListHead) == 0);

        this->ResumeNormalAllocationAfterConcurrentSweep(newNextAllocableBlockHead);
#endif
        Assert(!this->IsAllocationStopped());
    }
}
#endif
template <typename TBlockType>
void
HeapBucketT<TBlockType>::AppendAllocableHeapBlockList(TBlockType * list)
{
    // Append a list of allocable heap blocks to the end of heapBlockList and,
    // if allocation had run out of blocks, resume allocation from the new list.
#ifdef RECYCLER_TRACE
    if (this->GetRecycler()->GetRecyclerFlagsTable().Trace.IsEnabled(Js::ConcurrentSweepPhase) && CONFIG_FLAG_RELEASE(Verbose))
    {
        CollectionState collectionState = this->GetRecycler()->collectionState;
        Output::Print(_u("[GC #%d] [HeapBucket 0x%p] in AppendAllocableHeapBlockList [CollectionState: %d] \n"), this->GetRecycler()->collectionCount, this, collectionState);
    }
#endif
    // Add the list to the end of the current list
    TBlockType * currentHeapBlockList = this->heapBlockList;
    if (currentHeapBlockList == nullptr)
    {
        // There wasn't any heap block list before; just move the list over and start allocating from it.
        this->heapBlockList = list;
        this->nextAllocableBlockHead = list;
    }
    else
    {
        // Find the last block and append the list
        TBlockType * tail = HeapBlockList::Tail(currentHeapBlockList);
        Assert(tail != nullptr);
        tail->SetNextBlock(list);
        // If we are not currently allocating from the existing heapBlockList,
        // that means we have already filled all the existing blocks; we should start with what we just appended.
        if (this->nextAllocableBlockHead == nullptr)
        {
            this->nextAllocableBlockHead = list;
        }
    }
}
template <typename TBlockType>
void
HeapBucketT<TBlockType>::EnumerateObjects(ObjectInfoBits infoBits, void (*CallBackFunction)(void * address, size_t size))
{
    // Enumerate objects matching infoBits across every block list this
    // bucket tracks (full, sweepable when concurrent-sweep alloc applies,
    // and the regular list).
    UpdateAllocators();
    HeapBucket::EnumerateObjects(fullBlockList, infoBits, CallBackFunction);
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST
    // sweepableHeapBlockList is only populated for non-finalizable buckets
    // when allocation during concurrent sweep is enabled.
    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
    {
        HeapBucket::EnumerateObjects(sweepableHeapBlockList, infoBits, CallBackFunction);
    }
#endif
    HeapBucket::EnumerateObjects(heapBlockList, infoBits, CallBackFunction);
}
#ifdef RECYCLER_SLOW_CHECK_ENABLED
template <typename TBlockType>
void
HeapBucketT<TBlockType>::VerifyHeapBlockCount(bool background)
{
    // Slow-check validation of block counts. Empty-block counting is skipped
    // on the background thread.
    // TODO-REFACTOR: GetNonEmptyHeapBlockCount really should be virtual
    static_cast<typename SmallHeapBlockType<TBlockType::RequiredAttributes, typename TBlockType::HeapBlockAttributes>::BucketType *>(this)->GetNonEmptyHeapBlockCount(true);
    if (!background)
    {
        this->GetEmptyHeapBlockCount();
    }
}
template <typename TBlockType>
size_t
HeapBucketT<TBlockType>::Check(bool checkCount)
{
    // Consistency-check all block lists and return the total number of small
    // heap blocks counted. Only valid outside of sweep (see assert).
    Assert(this->GetRecycler()->recyclerSweepManager == nullptr);
    UpdateAllocators();
    size_t smallHeapBlockCount = HeapInfo::Check(true, false, this->fullBlockList);
    bool allocatingDuringConcurrentSweep = false;
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST
    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
    {
        allocatingDuringConcurrentSweep = true;
        // This lock is needed only in the debug mode while we verify block counts. Not needed otherwise, as this list is never accessed concurrently.
        // Items are added to it by the allocator when allocations are allowed during concurrent sweep. The list is drained during the next sweep while
        // allocation are stopped.
        debugSweepableHeapBlockListLock.Enter();
        smallHeapBlockCount += HeapInfo::Check(true, false, this->sweepableHeapBlockList);
        debugSweepableHeapBlockListLock.Leave();
    }
#endif
    // Blocks before nextAllocableBlockHead are checked as "allocated"; the
    // remainder are still allocable.
    smallHeapBlockCount += HeapInfo::Check(true, false, this->heapBlockList, this->nextAllocableBlockHead);
    smallHeapBlockCount += HeapInfo::Check(false, false, this->nextAllocableBlockHead);
    // NOTE(review): the heapBlockCount >= 65535 escape hatch appears to
    // tolerate count mismatches when allocating during concurrent sweep -
    // confirm the intended saturation semantics of heapBlockCount.
    Assert(!checkCount || this->heapBlockCount == smallHeapBlockCount || (this->heapBlockCount >= 65535 && allocatingDuringConcurrentSweep));
    return smallHeapBlockCount;
}
#endif
#if ENABLE_MEM_STATS
template <typename TBlockType>
void
HeapBucketT<TBlockType>::AggregateBucketStats()
{
    // Aggregate per-block memory statistics into this bucket's memStats,
    // covering blocks held by allocators as well as every tracked block list.
    HeapBucket::AggregateBucketStats(); // call super

    // The allocators form a circular list; walk it once starting at the head.
    auto allocatorHead = &this->allocatorHead;
    auto allocatorCurr = allocatorHead;
    do
    {
        TBlockType* allocatorHeapBlock = allocatorCurr->GetHeapBlock();
        if (allocatorHeapBlock)
        {
            // Blocks owned by an allocator need the allocator's free list and
            // bump-allocation state to compute accurate stats.
            allocatorHeapBlock->AggregateBlockStats(this->memStats, true, allocatorCurr->freeObjectList, allocatorCurr->endAddress != 0);
        }
        allocatorCurr = allocatorCurr->GetNext();
    } while (allocatorCurr != allocatorHead);

    auto blockStatsAggregator = [this](TBlockType* heapBlock) {
        heapBlock->AggregateBlockStats(this->memStats);
    };
    HeapBlockList::ForEach(fullBlockList, blockStatsAggregator);
#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST
    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
    {
        HeapBlockList::ForEach(sweepableHeapBlockList, blockStatsAggregator);
    }
#endif
    HeapBlockList::ForEach(heapBlockList, blockStatsAggregator);
}
#endif
#ifdef RECYCLER_MEMORY_VERIFY
template <typename TBlockType>
void
HeapBucketT<TBlockType>::Verify()
{
    // Verify memory contents of every block in this bucket. The DBG-only
    // list-consistency data is re-initialized before each list walk so each
    // list's invariants are checked independently.
    UpdateAllocators();
#if DBG
    RecyclerVerifyListConsistencyData recyclerVerifyListConsistencyData;
    if (TBlockType::HeapBlockAttributes::IsSmallBlock)
    {
        recyclerVerifyListConsistencyData.smallBlockVerifyListConsistencyData.SetupVerifyListConsistencyData((SmallHeapBlock*) nullptr, true, false);
    }
    else if (TBlockType::HeapBlockAttributes::IsMediumBlock)
    {
        recyclerVerifyListConsistencyData.mediumBlockVerifyListConsistencyData.SetupVerifyListConsistencyData((MediumHeapBlock*) nullptr, true, false);
    }
    else
    {
        Assert(false);
    }
#endif

    HeapBlockList::ForEach(fullBlockList, [DebugOnly(&recyclerVerifyListConsistencyData)](TBlockType * heapBlock)
    {
        DebugOnly(VerifyBlockConsistencyInList(heapBlock, recyclerVerifyListConsistencyData));
        heapBlock->Verify();
    });

#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST
    // sweepableHeapBlockList only exists for non-finalizable buckets when
    // allocation during concurrent sweep is enabled.
    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
    {
#if DBG
        if (TBlockType::HeapBlockAttributes::IsSmallBlock)
        {
            recyclerVerifyListConsistencyData.smallBlockVerifyListConsistencyData.SetupVerifyListConsistencyData((SmallHeapBlock*) nullptr, true, false);
        }
        else if (TBlockType::HeapBlockAttributes::IsMediumBlock)
        {
            recyclerVerifyListConsistencyData.mediumBlockVerifyListConsistencyData.SetupVerifyListConsistencyData((MediumHeapBlock*) nullptr, true, false);
        }
        else
        {
            Assert(false);
        }
#endif
        HeapBlockList::ForEach(sweepableHeapBlockList, [DebugOnly(&recyclerVerifyListConsistencyData)](TBlockType * heapBlock)
        {
            DebugOnly(VerifyBlockConsistencyInList(heapBlock, recyclerVerifyListConsistencyData));
            heapBlock->Verify();
        });
    }
#endif

#if DBG
    // For the main list, the consistency data starts at nextAllocableBlockHead
    // so allocable vs. allocated positioning can be validated.
    if (TBlockType::HeapBlockAttributes::IsSmallBlock)
    {
        recyclerVerifyListConsistencyData.smallBlockVerifyListConsistencyData.SetupVerifyListConsistencyData((SmallHeapBlock*) this->nextAllocableBlockHead, true, false);
    }
    else if (TBlockType::HeapBlockAttributes::IsMediumBlock)
    {
        recyclerVerifyListConsistencyData.mediumBlockVerifyListConsistencyData.SetupVerifyListConsistencyData((MediumHeapBlock*) this->nextAllocableBlockHead, true, false);
    }
    else
    {
        Assert(false);
    }
#endif

    HeapBlockList::ForEach(heapBlockList, [this, DebugOnly(&recyclerVerifyListConsistencyData)](TBlockType * heapBlock)
    {
        DebugOnly(VerifyBlockConsistencyInList(heapBlock, recyclerVerifyListConsistencyData));
        // A block currently bump-allocated from by one of this bucket's
        // allocators can only be verified up to the bump pointer.
        char * bumpAllocateAddress = nullptr;
        this->ForEachAllocator([heapBlock, &bumpAllocateAddress](TBlockAllocatorType * allocator)
        {
            if (allocator->GetHeapBlock() == heapBlock && allocator->GetEndAddress() != nullptr)
            {
                Assert(bumpAllocateAddress == nullptr);
                bumpAllocateAddress = (char *)allocator->GetFreeObjectList();
            }
        });
        if (bumpAllocateAddress != nullptr)
        {
            heapBlock->VerifyBumpAllocated(bumpAllocateAddress);
        }
        else
        {
            heapBlock->Verify(false);
        }
    });
}
#endif
  1811. #ifdef RECYCLER_VERIFY_MARK
  1812. template <typename TBlockType>
  1813. void
  1814. HeapBucketT<TBlockType>::VerifyMark()
  1815. {
  1816. HeapBlockList::ForEach(this->fullBlockList, [](TBlockType * heapBlock)
  1817. {
  1818. heapBlock->VerifyMark();
  1819. });
  1820. #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST
  1821. if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
  1822. {
  1823. HeapBlockList::ForEach(this->sweepableHeapBlockList, [](TBlockType * heapBlock)
  1824. {
  1825. heapBlock->VerifyMark();
  1826. });
  1827. }
  1828. #endif
  1829. HeapBlockList::ForEach(this->heapBlockList, [](TBlockType * heapBlock)
  1830. {
  1831. heapBlock->VerifyMark();
  1832. });
  1833. }
  1834. #endif
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::Initialize(HeapInfo * heapInfo, uint sizeCat)
{
    // Initialize every bucket variant in this size-category group.
    heapBucket.Initialize(heapInfo, sizeCat);
    leafHeapBucket.Initialize(heapInfo, sizeCat);
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.Initialize(heapInfo, sizeCat);
    smallFinalizableWithBarrierHeapBucket.Initialize(heapInfo, sizeCat);
#endif
    finalizableHeapBucket.Initialize(heapInfo, sizeCat);
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.Initialize(heapInfo, sizeCat);
#endif
}
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::ResetMarks(ResetMarkFlags flags)
{
    // Reset mark bits on every bucket variant in this group.
    heapBucket.ResetMarks(flags);
    leafHeapBucket.ResetMarks(flags);
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.ResetMarks(flags);
    smallFinalizableWithBarrierHeapBucket.ResetMarks(flags);
#endif
    // Although we pass in premarkFreeObjects, the finalizable heap bucket ignores
    // this parameter and never pre-marks free objects
    finalizableHeapBucket.ResetMarks(flags);
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.ResetMarks(flags);
#endif
}
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::ScanInitialImplicitRoots(Recycler * recycler)
{
    // Scan implicit roots in buckets whose objects can hold references.
    heapBucket.ScanInitialImplicitRoots(recycler);
    // Don't need to scan implicit roots on leaf heap bucket
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.ScanInitialImplicitRoots(recycler);
    smallFinalizableWithBarrierHeapBucket.ScanInitialImplicitRoots(recycler);
#endif
    finalizableHeapBucket.ScanInitialImplicitRoots(recycler);
}
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::ScanNewImplicitRoots(Recycler * recycler)
{
    // Scan newly added implicit roots in every bucket variant; unlike the
    // initial scan, this includes the leaf bucket.
    heapBucket.ScanNewImplicitRoots(recycler);
    // Need to scan new implicit roots on leaf heap bucket
    leafHeapBucket.ScanNewImplicitRoots(recycler);
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.ScanNewImplicitRoots(recycler);
    smallFinalizableWithBarrierHeapBucket.ScanNewImplicitRoots(recycler);
#endif
    finalizableHeapBucket.ScanNewImplicitRoots(recycler);
}
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::Sweep(RecyclerSweep& recyclerSweep)
{
    // Sweep the non-finalizable buckets of this group; finalizable buckets
    // are swept separately via SweepFinalizableObjects.
    heapBucket.Sweep(recyclerSweep);
    leafHeapBucket.Sweep(recyclerSweep);
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.Sweep(recyclerSweep);
#endif
}
// Sweep finalizable objects first to ensure that if they reference any other
// objects in the finalizer - they are valid
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::SweepFinalizableObjects(RecyclerSweep& recyclerSweep)
{
    // Sweep all finalizable bucket variants in this group.
    finalizableHeapBucket.Sweep(recyclerSweep);
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.Sweep(recyclerSweep);
#endif
#ifdef RECYCLER_WRITE_BARRIER
    smallFinalizableWithBarrierHeapBucket.Sweep(recyclerSweep);
#endif
}
// Runs Dispose on the bucket flavors that can hold finalizable objects;
// the non-finalizable flavors have nothing to dispose.
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::DisposeObjects()
{
    finalizableHeapBucket.DisposeObjects();
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.DisposeObjects();
#endif
#ifdef RECYCLER_WRITE_BARRIER
    smallFinalizableWithBarrierHeapBucket.DisposeObjects();
#endif
}
// Transfers already-disposed objects back for reuse on each finalizable
// bucket flavor (mirrors the set of buckets touched by DisposeObjects).
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::TransferDisposedObjects()
{
    finalizableHeapBucket.TransferDisposedObjects();
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.TransferDisposedObjects();
#endif
#ifdef RECYCLER_WRITE_BARRIER
    smallFinalizableWithBarrierHeapBucket.TransferDisposedObjects();
#endif
}
// Invokes CallBackFunction(address, size) for matching objects (per infoBits)
// in every bucket flavor of this group.
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::EnumerateObjects(ObjectInfoBits infoBits, void (*CallBackFunction)(void * address, size_t size))
{
    heapBucket.EnumerateObjects(infoBits, CallBackFunction);
    leafHeapBucket.EnumerateObjects(infoBits, CallBackFunction);
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.EnumerateObjects(infoBits, CallBackFunction);
    smallFinalizableWithBarrierHeapBucket.EnumerateObjects(infoBits, CallBackFunction);
#endif
    finalizableHeapBucket.EnumerateObjects(infoBits, CallBackFunction);
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.EnumerateObjects(infoBits, CallBackFunction);
#endif
}
// Finalizes every object in the bucket flavors that can hold finalizable
// objects (same bucket set as DisposeObjects/TransferDisposedObjects).
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::FinalizeAllObjects()
{
    finalizableHeapBucket.FinalizeAllObjects();
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.FinalizeAllObjects();
#endif
#ifdef RECYCLER_WRITE_BARRIER
    smallFinalizableWithBarrierHeapBucket.FinalizeAllObjects();
#endif
}
// Rescans every bucket flavor and returns the accumulated rescan count.
// Note: this is a single '+'-chained expression spliced by #ifdefs, so the
// order in which the individual Rescan calls execute is unspecified.
template <class TBlockAttributes>
uint
HeapBucketGroup<TBlockAttributes>::Rescan(Recycler * recycler, RescanFlags flags)
{
    return heapBucket.Rescan(recycler, flags) +
        leafHeapBucket.Rescan(recycler, flags) +
#ifdef RECYCLER_WRITE_BARRIER
        smallNormalWithBarrierHeapBucket.Rescan(recycler, flags) +
        smallFinalizableWithBarrierHeapBucket.Rescan(recycler, flags) +
#endif
#ifdef RECYCLER_VISITED_HOST
        recyclerVisitedHostHeapBucket.Rescan(recycler, flags) +
#endif
        finalizableHeapBucket.Rescan(recycler, flags);
}
  1982. #if ENABLE_CONCURRENT_GC
// Concurrent-GC hook: prepares every bucket flavor in this group for the
// upcoming sweep.
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::PrepareSweep()
{
    heapBucket.PrepareSweep();
    leafHeapBucket.PrepareSweep();
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.PrepareSweep();
    smallFinalizableWithBarrierHeapBucket.PrepareSweep();
#endif
    finalizableHeapBucket.PrepareSweep();
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.PrepareSweep();
#endif
}
// Sets up background sweeping for the non-finalizable bucket flavors only;
// the finalizable flavors (and the barrier-finalizable / visited-host
// buckets) are deliberately not included here.
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::SetupBackgroundSweep(RecyclerSweep& recyclerSweep)
{
    heapBucket.SetupBackgroundSweep(recyclerSweep);
    leafHeapBucket.SetupBackgroundSweep(recyclerSweep);
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.SetupBackgroundSweep(recyclerSweep);
#endif
}
  2008. #endif
  2009. #if ENABLE_PARTIAL_GC
// Partial-GC hook: sweeps pages that can be reused for allocation on every
// non-leaf bucket flavor.
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::SweepPartialReusePages(RecyclerSweep& recyclerSweep)
{
    // Leaf heap bucket are always reused for allocation and can be done on the concurrent thread
    // WriteBarrier-TODO: Do the same for write barrier buckets
    heapBucket.SweepPartialReusePages(recyclerSweep);
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.SweepPartialReusePages(recyclerSweep);
    smallFinalizableWithBarrierHeapBucket.SweepPartialReusePages(recyclerSweep);
#endif
    finalizableHeapBucket.SweepPartialReusePages(recyclerSweep);
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.SweepPartialReusePages(recyclerSweep);
#endif
}
// Partial-GC hook: finishes the partial collect on every non-leaf bucket
// flavor, then (slow-check builds only) verifies the leaf bucket's block
// count instead, since leaf buckets never sweep partially.
// recyclerSweep may be null (see the VerifyHeapBlockCount call).
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::FinishPartialCollect(RecyclerSweep * recyclerSweep)
{
    heapBucket.FinishPartialCollect(recyclerSweep);
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.FinishPartialCollect(recyclerSweep);
    smallFinalizableWithBarrierHeapBucket.FinishPartialCollect(recyclerSweep);
#endif
    finalizableHeapBucket.FinishPartialCollect(recyclerSweep);
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.FinishPartialCollect(recyclerSweep);
#endif
    // Leaf heap block always do a full sweep instead of partial sweep
    // (since touching the page doesn't affect rescan)
    // So just need to verify heap block count (which finishPartialCollect would have done)
    // WriteBarrier-TODO: Do that same for write barrier buckets
    RECYCLER_SLOW_CHECK(leafHeapBucket.VerifyHeapBlockCount(recyclerSweep != nullptr && recyclerSweep->IsBackground()));
}
  2045. #endif
  2046. #if ENABLE_CONCURRENT_GC
// Concurrent-GC hook: sweeps objects whose sweep was deferred, on every
// non-leaf bucket flavor. Asserts that the leaf bucket has no pending-sweep
// blocks (it is swept immediately during Sweep).
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::SweepPendingObjects(RecyclerSweep& recyclerSweep)
{
    // For leaf buckets, we can always reuse the page as we don't need to rescan them for partial GC
    // It should have been swept immediately during Sweep
    // WriteBarrier-TODO: Do the same for write barrier buckets
    Assert(recyclerSweep.GetPendingSweepBlockList(&leafHeapBucket) == nullptr);
    heapBucket.SweepPendingObjects(recyclerSweep);
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.SweepPendingObjects(recyclerSweep);
    smallFinalizableWithBarrierHeapBucket.SweepPendingObjects(recyclerSweep);
#endif
    finalizableHeapBucket.SweepPendingObjects(recyclerSweep);
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.SweepPendingObjects(recyclerSweep);
#endif
}
// Concurrent-GC hook: asks the sweep state to hand back pending empty heap
// blocks for every bucket flavor in this group.
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::TransferPendingEmptyHeapBlocks(RecyclerSweep& recyclerSweep)
{
    recyclerSweep.TransferPendingEmptyHeapBlocks(&heapBucket);
    recyclerSweep.TransferPendingEmptyHeapBlocks(&leafHeapBucket);
#ifdef RECYCLER_WRITE_BARRIER
    recyclerSweep.TransferPendingEmptyHeapBlocks(&smallNormalWithBarrierHeapBucket);
    recyclerSweep.TransferPendingEmptyHeapBlocks(&smallFinalizableWithBarrierHeapBucket);
#endif
    recyclerSweep.TransferPendingEmptyHeapBlocks(&finalizableHeapBucket);
#ifdef RECYCLER_VISITED_HOST
    recyclerSweep.TransferPendingEmptyHeapBlocks(&recyclerVisitedHostHeapBucket);
#endif
}
  2080. #endif
  2081. #if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
  2082. template <class TBlockAttributes>
  2083. size_t
  2084. HeapBucketGroup<TBlockAttributes>::GetNonEmptyHeapBlockCount(bool checkCount) const
  2085. {
  2086. return heapBucket.GetNonEmptyHeapBlockCount(checkCount) +
  2087. finalizableHeapBucket.GetNonEmptyHeapBlockCount(checkCount) +
  2088. #ifdef RECYCLER_VISITED_HOST
  2089. recyclerVisitedHostHeapBucket.GetNonEmptyHeapBlockCount(checkCount) +
  2090. #endif
  2091. #ifdef RECYCLER_WRITE_BARRIER
  2092. smallNormalWithBarrierHeapBucket.GetNonEmptyHeapBlockCount(checkCount) +
  2093. smallFinalizableWithBarrierHeapBucket.GetNonEmptyHeapBlockCount(checkCount) +
  2094. #endif
  2095. leafHeapBucket.GetNonEmptyHeapBlockCount(checkCount);
  2096. }
  2097. template <class TBlockAttributes>
  2098. size_t
  2099. HeapBucketGroup<TBlockAttributes>::GetEmptyHeapBlockCount() const
  2100. {
  2101. return heapBucket.GetEmptyHeapBlockCount() +
  2102. finalizableHeapBucket.GetEmptyHeapBlockCount() +
  2103. #ifdef RECYCLER_VISITED_HOST
  2104. recyclerVisitedHostHeapBucket.GetEmptyHeapBlockCount() +
  2105. #endif
  2106. #ifdef RECYCLER_WRITE_BARRIER
  2107. smallNormalWithBarrierHeapBucket.GetEmptyHeapBlockCount() +
  2108. smallFinalizableWithBarrierHeapBucket.GetEmptyHeapBlockCount() +
  2109. #endif
  2110. leafHeapBucket.GetEmptyHeapBlockCount();
  2111. }
  2112. #endif
  2113. #ifdef RECYCLER_SLOW_CHECK_ENABLED
  2114. template <class TBlockAttributes>
  2115. size_t
  2116. HeapBucketGroup<TBlockAttributes>::Check()
  2117. {
  2118. return heapBucket.Check() + finalizableHeapBucket.Check() + leafHeapBucket.Check()
  2119. #ifdef RECYCLER_VISITED_HOST
  2120. + recyclerVisitedHostHeapBucket.Check()
  2121. #endif
  2122. #ifdef RECYCLER_WRITE_BARRIER
  2123. + smallNormalWithBarrierHeapBucket.Check() + smallFinalizableWithBarrierHeapBucket.Check()
  2124. #endif
  2125. ;
  2126. }
  2127. #endif
  2128. #ifdef RECYCLER_MEMORY_VERIFY
// Memory-verify build hook: runs Verify() on every bucket flavor in this
// group.
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::Verify()
{
    heapBucket.Verify();
    finalizableHeapBucket.Verify();
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.Verify();
#endif
    leafHeapBucket.Verify();
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.Verify();
    smallFinalizableWithBarrierHeapBucket.Verify();
#endif
}
  2144. #endif
  2145. #ifdef RECYCLER_VERIFY_MARK
// Mark-verify build hook: runs VerifyMark() on every bucket flavor in this
// group.
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::VerifyMark()
{
    heapBucket.VerifyMark();
    finalizableHeapBucket.VerifyMark();
#ifdef RECYCLER_VISITED_HOST
    recyclerVisitedHostHeapBucket.VerifyMark();
#endif
    leafHeapBucket.VerifyMark();
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.VerifyMark();
    smallFinalizableWithBarrierHeapBucket.VerifyMark();
#endif
}
  2161. #endif
  2162. #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
// Re-enables allocation during concurrent sweep on the buckets that support
// it, but only for buckets whose allocation is currently stopped. (A bucket
// with no allocable heap blocks never had its allocation stopped, so there is
// nothing to restart for it.)
// NOTE(review): the previous comment here said "Stop allocations" -- this
// function starts them; wording corrected, code untouched.
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::StartAllocationDuringConcurrentSweep()
{
    if (heapBucket.IsAllocationStopped())
    {
        heapBucket.StartAllocationDuringConcurrentSweep();
    }
    if (leafHeapBucket.IsAllocationStopped())
    {
        leafHeapBucket.StartAllocationDuringConcurrentSweep();
    }
#ifdef RECYCLER_WRITE_BARRIER
    if (smallNormalWithBarrierHeapBucket.IsAllocationStopped())
    {
        smallNormalWithBarrierHeapBucket.StartAllocationDuringConcurrentSweep();
    }
#endif
}
  2183. template <class TBlockAttributes>
  2184. bool
  2185. HeapBucketGroup<TBlockAttributes>::DoTwoPassConcurrentSweepPreCheck()
  2186. {
  2187. return heapBucket.DoTwoPassConcurrentSweepPreCheck() ||
  2188. leafHeapBucket.DoTwoPassConcurrentSweepPreCheck()
  2189. #ifdef RECYCLER_WRITE_BARRIER
  2190. || smallNormalWithBarrierHeapBucket.DoTwoPassConcurrentSweepPreCheck();
  2191. #endif
  2192. }
// Finishes pass 1 of the concurrent sweep on the bucket flavors that
// participate in allocation-during-concurrent-sweep (same set as
// SetupBackgroundSweep: no finalizable buckets).
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::FinishConcurrentSweepPass1(RecyclerSweep& recyclerSweep)
{
    heapBucket.FinishConcurrentSweepPass1(recyclerSweep);
    leafHeapBucket.FinishConcurrentSweepPass1(recyclerSweep);
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.FinishConcurrentSweepPass1(recyclerSweep);
#endif
}
// Runs the sweep-finish preparation step on the bucket flavors that
// participate in allocation-during-concurrent-sweep.
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::FinishSweepPrep(RecyclerSweep& recyclerSweep)
{
    heapBucket.FinishSweepPrep(recyclerSweep);
    leafHeapBucket.FinishSweepPrep(recyclerSweep);
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.FinishSweepPrep(recyclerSweep);
#endif
}
// Final step of the concurrent sweep for the bucket flavors that participate
// in allocation-during-concurrent-sweep.
template <class TBlockAttributes>
void
HeapBucketGroup<TBlockAttributes>::FinishConcurrentSweep()
{
    heapBucket.FinishConcurrentSweep();
    leafHeapBucket.FinishConcurrentSweep();
#ifdef RECYCLER_WRITE_BARRIER
    smallNormalWithBarrierHeapBucket.FinishConcurrentSweep();
#endif
}
  2223. #endif
  2224. #if DBG
  2225. template <class TBlockAttributes>
  2226. bool
  2227. HeapBucketGroup<TBlockAttributes>::AllocatorsAreEmpty()
  2228. {
  2229. return heapBucket.AllocatorsAreEmpty()
  2230. && finalizableHeapBucket.AllocatorsAreEmpty()
  2231. #ifdef RECYCLER_VISITED_HOST
  2232. && recyclerVisitedHostHeapBucket.AllocatorsAreEmpty()
  2233. #endif
  2234. && leafHeapBucket.AllocatorsAreEmpty()
  2235. #ifdef RECYCLER_WRITE_BARRIER
  2236. && smallNormalWithBarrierHeapBucket.AllocatorsAreEmpty()
  2237. && smallFinalizableWithBarrierHeapBucket.AllocatorsAreEmpty()
  2238. #endif
  2239. ;
  2240. }
  2241. #endif
namespace Memory
{
// Explicit instantiations: the small and medium block-attribute flavors of
// HeapBucketGroup are emitted from this translation unit so other TUs can
// link against them. EXPLICIT_INSTANTIATE_WITH_SMALL_HEAP_BLOCK_TYPE
// presumably expands to the matching HeapBucketT instantiations -- the macro
// is defined elsewhere.
template class HeapBucketGroup<SmallAllocationBlockAttributes>;
template class HeapBucketGroup<MediumAllocationBlockAttributes>;
EXPLICIT_INSTANTIATE_WITH_SMALL_HEAP_BLOCK_TYPE(HeapBucketT);
};