LargeHeapBlock.cpp
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "CommonMemoryPch.h"
// The large-object header must be exactly one or two allocation granules so
// that object payloads stay granularity-aligned.
CompileAssert(
    sizeof(LargeObjectHeader) == HeapConstants::ObjectGranularity ||
    sizeof(LargeObjectHeader) == HeapConstants::ObjectGranularity * 2);
  9. void *
  10. LargeObjectHeader::GetAddress() { return ((char *)this) + sizeof(LargeObjectHeader); }
  11. #ifdef LARGEHEAPBLOCK_ENCODING
  12. // decodedNext = decoded next field
  13. // decodedAttributes = decoded attributes part of attributesAndChecksum
  14. // Decode 'next' and 'attributes' using _cookie
// checksum = XOR of the bytes of 'next' together with the attributes byte
  16. // Encode 'next' and 'attributes' using _cookie
  17. unsigned char
  18. LargeObjectHeader::CalculateCheckSum(LargeObjectHeader* decodedNext, unsigned char decodedAttributes)
  19. {
  20. unsigned char checksum = 0;
  21. byte *nextField = (byte *)&decodedNext;
  22. checksum = nextField[0] ^ nextField[1] ^ nextField[2] ^ nextField[3] ^ decodedAttributes;
  23. return checksum;
  24. }
  25. LargeObjectHeader*
  26. LargeObjectHeader::EncodeNext(uint cookie, LargeObjectHeader* next)
  27. {
  28. return (LargeObjectHeader *)((uintptr_t)next ^ cookie);
  29. }
  30. ushort
  31. LargeObjectHeader::EncodeAttributesAndChecksum(uint cookie, ushort attributesAndChecksum)
  32. {
  33. return attributesAndChecksum ^ (ushort)cookie;
  34. }
  35. LargeObjectHeader*
  36. LargeObjectHeader::DecodeNext(uint cookie, LargeObjectHeader* next) { return EncodeNext(cookie, next); }
  37. ushort
  38. LargeObjectHeader::DecodeAttributesAndChecksum(uint cookie) { return EncodeAttributesAndChecksum(cookie, this->attributesAndChecksum); }
  39. #else
// If heap block encoding is disabled, expose a pointer to the attributes
// byte so that it can be passed to RecyclerHeapObjectInfo(), which updates
// the attributes field in place.
unsigned char *
LargeObjectHeader::GetAttributesPtr()
{
    return &this->attributes;
}
  48. #endif
// Stores 'next', keeping the encoded (attributes, checksum) word coherent
// with the new pointer value.
void
LargeObjectHeader::SetNext(uint cookie, LargeObjectHeader* next)
{
#ifdef LARGEHEAPBLOCK_ENCODING
    // Decode the current (attributes << 8 | checksum) word.
    ushort decodedAttributesAndChecksum = this->DecodeAttributesAndChecksum(cookie);
    // Calculate the checksum value with new next (attributes byte unchanged)
    unsigned char newCheckSumValue = this->CalculateCheckSum(next, (unsigned char)(decodedAttributesAndChecksum >> 8));
    // pack the (attribute + checksum)
    ushort newAttributeWithCheckSum = (decodedAttributesAndChecksum & 0xFF00) | newCheckSumValue;
    // encode the packed (attribute + checksum), next and set them
    this->attributesAndChecksum = this->EncodeAttributesAndChecksum(cookie, newAttributeWithCheckSum);
    this->next = this->EncodeNext(cookie, next);
#else
    this->next = next;
#endif
}
// Returns the decoded next pointer, validating the checksum first; a
// mismatch indicates heap metadata corruption and fails fast.
LargeObjectHeader *
LargeObjectHeader::GetNext(uint cookie)
{
#ifdef LARGEHEAPBLOCK_ENCODING
    LargeObjectHeader *decodedNext = this->DecodeNext(cookie, this->next);
    ushort decodedAttributesAndChecksum = this->DecodeAttributesAndChecksum(cookie);
    // Low byte carries the checksum; high byte carries the attributes.
    unsigned char checkSum = (unsigned char)(decodedAttributesAndChecksum & 0xFF);
    unsigned char calculatedCheckSumField = this->CalculateCheckSum(decodedNext, (unsigned char)(decodedAttributesAndChecksum >> 8));
    if (checkSum != calculatedCheckSumField)
    {
        LargeHeapBlock_Metadata_Corrupted((ULONG_PTR)this, calculatedCheckSumField);
    }
    // If checksum matches return the up-to-date next (in case other thread changed it from last time
    // we read it in this method.
    return this->DecodeNext(cookie, this->next);
#else
    return this->next;
#endif
}
// Stores 'attributes' (high byte), recomputing the checksum (low byte) so
// the encoded word stays self-consistent with the current next pointer.
void
LargeObjectHeader::SetAttributes(uint cookie, unsigned char attributes)
{
#ifdef LARGEHEAPBLOCK_ENCODING
    LargeObjectHeader *decodedNext = this->DecodeNext(cookie, this->next);
    // Calculate the checksum value with new attribute
    unsigned char newCheckSumValue = this->CalculateCheckSum(decodedNext, attributes);
    // pack the (attribute + checksum)
    ushort newAttributeWithCheckSum = ((ushort)attributes << 8) | newCheckSumValue;
    // encode the packed (attribute + checksum) and set it
    this->attributesAndChecksum = this->EncodeAttributesAndChecksum(cookie, newAttributeWithCheckSum);
#else
    this->attributes = attributes;
#endif
}
// Returns the decoded attributes byte, validating the checksum first; a
// mismatch indicates heap metadata corruption and fails fast.
unsigned char
LargeObjectHeader::GetAttributes(uint cookie)
{
#ifdef LARGEHEAPBLOCK_ENCODING
    LargeObjectHeader *decodedNext = this->DecodeNext(cookie, this->next);
    ushort decodedAttributesAndChecksum = this->DecodeAttributesAndChecksum(cookie);
    // Low byte carries the checksum; high byte carries the attributes.
    unsigned char checkSum = (unsigned char)(decodedAttributesAndChecksum & 0xFF);
    unsigned char calculatedCheckSumField = this->CalculateCheckSum(decodedNext, (unsigned char)(decodedAttributesAndChecksum >> 8));
    if (checkSum != calculatedCheckSumField)
    {
        LargeHeapBlock_Metadata_Corrupted((ULONG_PTR)this, calculatedCheckSumField);
    }
    // If checksum matches return the up-to-date attributes (in case other thread changed it from last time
    // we read it in this method.
    return this->DecodeAttributesAndChecksum(cookie) >> 8;
#else
    return this->attributes;
#endif
}
// Extra bytes to allocate beyond sizeof(LargeHeapBlock) for the trailing
// per-object arrays.
size_t
LargeHeapBlock::GetAllocPlusSize(uint objectCount)
{
    // Large Heap Block Layout:
    //      LargeHeapBlock
    //      LargeObjectHeader * [objectCount]
    //      TrackerData * [objectCount] (Optional)
    size_t allocPlusSize = objectCount * (sizeof(LargeObjectHeader *));
#ifdef PROFILE_RECYCLER_ALLOC
    // Tracker data slots are only needed when allocation tracking is profiled.
    if (Recycler::DoProfileAllocTracker())
    {
        allocPlusSize += objectCount * sizeof(void *);
    }
#endif
    return allocPlusSize;
}
// Allocates a LargeHeapBlock with room for the trailing header/tracker
// arrays (the "...NoThrowPlusZ" macro presumably allocates non-throwing and
// zero-filled — the ctor Asserts rely on zeroed members).
LargeHeapBlock *
LargeHeapBlock::New(__in char * address, size_t pageCount, Segment * segment, uint objectCount, LargeHeapBucket* bucket)
{
    return NoMemProtectHeapNewNoThrowPlusZ(GetAllocPlusSize(objectCount), LargeHeapBlock, address, pageCount, segment, objectCount, bucket);
}
// Frees a block created by New, accounting for the trailing arrays so the
// plus-size matches the allocation.
void
LargeHeapBlock::Delete(LargeHeapBlock * heapBlock)
{
    NoMemProtectHeapDeletePlus(GetAllocPlusSize(heapBlock->objectCount), heapBlock);
}
// Constructs a large heap block spanning [address, address + pageCount pages).
// Bump allocation starts at 'address' (allocAddressEnd) and may place up to
// objectCount objects.
LargeHeapBlock::LargeHeapBlock(__in char * address, size_t pageCount, Segment * segment, uint objectCount, LargeHeapBucket* bucket)
    : HeapBlock(LargeBlockType), pageCount(pageCount), allocAddressEnd(address), objectCount(objectCount), bucket(bucket), freeList(this)
{
    Assert(address != nullptr);
    Assert(pageCount != 0);
    Assert(objectCount != 0);
    // These members are expected to already be zero — presumably from the
    // zero-filling allocation in New (see NoMemProtectHeapNewNoThrowPlusZ).
    Assert(lastCollectAllocCount == 0);
    Assert(finalizeCount == 0);
    Assert(next == nullptr);
    Assert(!hasPartialFreeObjects);
    this->address = address;
    this->segment = segment;
    this->isPendingConcurrentSweep = false;
    this->addressEnd = this->address + this->pageCount * AutoSystemInfo::PageSize;
    RECYCLER_PERF_COUNTER_INC(LargeHeapBlockCount);
    RECYCLER_PERF_COUNTER_ADD(LargeHeapBlockPageSize, pageCount * AutoSystemInfo::PageSize);
}
// Pages must already have been released (segment == nullptr) unless the
// page allocator is closed, in which case shutdown releases them in bulk.
LargeHeapBlock::~LargeHeapBlock()
{
    AssertMsg(this->segment == nullptr || this->heapInfo->recycler->recyclerLargeBlockPageAllocator.IsClosed(),
        "ReleasePages needs to be called before delete");
    RECYCLER_PERF_COUNTER_DEC(LargeHeapBlockCount);
#ifdef RECYCLER_PAGE_HEAP
    // Free the page-heap stack traces owned by this block.
    if (this->pageHeapAllocStack != nullptr)
    {
        this->pageHeapAllocStack->Delete(&NoCheckHeapAllocator::Instance);
        this->pageHeapAllocStack = nullptr;
    }
    // REVIEW: This means that the old free stack is lost when we get free the heap block
    // Is this okay? Should we delay freeing heap blocks till process/thread shutdown time?
    if (this->pageHeapFreeStack != nullptr)
    {
        this->pageHeapFreeStack->Delete(&NoCheckHeapAllocator::Instance);
        this->pageHeapFreeStack = nullptr;
    }
#endif
}
  181. Recycler *
  182. LargeHeapBlock::GetRecycler() const
  183. {
  184. return this->bucket->heapInfo->recycler;
  185. }
  186. LargeObjectHeader **
  187. LargeHeapBlock::HeaderList()
  188. {
  189. // See LargeHeapBlock::GetAllocPlusSize for layout description
  190. return (LargeObjectHeader **)(((byte *)this) + sizeof(LargeHeapBlock));
  191. }
// Finalizes and disposes every remaining finalizable object in this block,
// then disposes everything already queued on the pending-dispose list.
void
LargeHeapBlock::FinalizeAllObjects()
{
    if (this->finalizeCount != 0)
    {
        DebugOnly(uint processedCount = 0);
        for (uint i = 0; i < allocCount; i++)
        {
            LargeObjectHeader * header = this->GetHeader(i);
            // Skip freed slots and objects without the finalize bit.
            if (header == nullptr || ((header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit) == 0))
            {
                continue;
            }
            FinalizableObject * finalizableObject = ((FinalizableObject *)header->GetAddress());
            // NOTE(review): both calls pass 'true' — presumably a shutdown/
            // teardown flag; confirm against FinalizableObject's declaration.
            finalizableObject->Finalize(true);
            finalizableObject->Dispose(true);
#ifdef RECYCLER_FINALIZE_CHECK
            this->heapInfo->liveFinalizableObjectCount--;
#endif
            DebugOnly(processedCount++);
        }
        // Objects on the pending-dispose list were already finalized; their
        // header-list slots are cleared, only Dispose remains.
        while (pendingDisposeObject != nullptr)
        {
            LargeObjectHeader * header = pendingDisposeObject;
            pendingDisposeObject = header->GetNext(this->heapInfo->recycler->Cookie);
            Assert(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit);
            Assert(this->HeaderList()[header->objectIndex] == nullptr);
            void * objectAddress = header->GetAddress();
            ((FinalizableObject *)objectAddress)->Dispose(true);
#ifdef RECYCLER_FINALIZE_CHECK
            this->heapInfo->liveFinalizableObjectCount--;
            this->heapInfo->pendingDisposableObjectCount--;
#endif
            DebugOnly(processedCount++);
        }
        // Every finalizable object must have been processed exactly once.
        Assert(this->finalizeCount == processedCount);
    }
}
// Shutdown path: only clears this block's heap-block-map entries (DBG); the
// pages themselves are left for the closing page allocator to release.
void
LargeHeapBlock::ReleasePagesShutdown(Recycler * recycler)
{
#if DBG
    recycler->heapBlockMap.ClearHeapBlock(this->address, this->pageCount);
    // Don't release the page in shut down, the page allocator will release them faster
    Assert(recycler->recyclerLargeBlockPageAllocator.IsClosed());
#endif
}
template void LargeHeapBlock::ReleasePagesSweep<true>(Recycler * recycler);
template void LargeHeapBlock::ReleasePagesSweep<false>(Recycler * recycler);
// Sweep path: unregister this block from the heap block map, then return
// its pages to the page allocator.
template<bool pageheap>
void
LargeHeapBlock::ReleasePagesSweep(Recycler * recycler)
{
    recycler->heapBlockMap.ClearHeapBlock(this->address, this->pageCount);
    ReleasePages<pageheap>(recycler);
}
template void LargeHeapBlock::ReleasePages<true>(Recycler * recycler);
template void LargeHeapBlock::ReleasePages<false>(Recycler * recycler);
// Returns this block's pages to the large-block page allocator and clears
// the segment. In page-heap mode the guard page's protection is restored
// first and the released range is widened to include it.
template<bool pageheap>
void
LargeHeapBlock::ReleasePages(Recycler * recycler)
{
    Assert(segment != nullptr);
    char* pageAddress = address;
    size_t realPageCount = pageCount;
#ifdef RECYCLER_PAGE_HEAP
    if (pageheap)
    {
        if (InPageHeapMode())
        {
            if (guardPageAddress != nullptr)
            {
                DWORD noAccess;
                // Make the guard page accessible again before handing it back.
                if (::VirtualProtect(static_cast<LPVOID>(guardPageAddress), AutoSystemInfo::PageSize, guardPageOldProtectFlags, &noAccess) == FALSE)
                {
                    AssertMsg(false, "Unable to set permission for guard page.");
                    return;
                }
                AssertMsg(noAccess == PAGE_NOACCESS, "Guard page should be PAGE_NOACCESS");
                if (this->pageHeapMode == PageHeapMode::PageHeapModeBlockStart)
                {
                    // Guard page precedes the block, so release starting there.
                    pageAddress = guardPageAddress;
                }
                realPageCount = actualPageCount;
            }
        }
    }
#endif
#ifdef RECYCLER_FREE_MEM_FILL
    // Poison the freed memory so stale reads are recognizable in debugging.
    memset(this->address, DbgMemFill, AutoSystemInfo::PageSize * pageCount);
#endif
    recycler->recyclerLargeBlockPageAllocator.Release(pageAddress, realPageCount, segment);
    RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockPageSize, pageCount * AutoSystemInfo::PageSize);
    this->segment = nullptr;
}
  287. BOOL
  288. LargeHeapBlock::IsValidObject(void* objectAddress)
  289. {
  290. LargeObjectHeader * header = GetHeader(objectAddress);
  291. return ((char *)header >= this->address && header->objectIndex < this->allocCount && this->HeaderList()[header->objectIndex] == header);
  292. }
  293. #if DBG
// DBG-only: true when the address points at a header whose slot in the
// header list has been cleared (i.e. the object has been freed).
BOOL
LargeHeapBlock::IsFreeObject(void * objectAddress)
{
    LargeObjectHeader * header = GetHeader(objectAddress);
    return ((char *)header >= this->address && header->objectIndex < this->allocCount && this->GetHeader(header->objectIndex) == nullptr);
}
  300. #endif
  301. size_t
  302. LargeHeapBlock::GetPagesNeeded(size_t size, bool multiplyRequest)
  303. {
  304. if (multiplyRequest)
  305. {
  306. size = AllocSizeMath::Mul(size, 4);
  307. }
  308. uint pageSize = AutoSystemInfo::PageSize;
  309. size = AllocSizeMath::Add(size, sizeof(LargeObjectHeader) + (pageSize - 1));
  310. if (size == (size_t)-1)
  311. {
  312. return 0;
  313. }
  314. size_t pageCount = size / pageSize;
  315. return pageCount;
  316. }
// First-fit scan of this block's free list for an entry big enough for
// 'size'. On success the entry is unlinked and handed to AllocFreeListEntry;
// if the list ends up empty, it is unregistered from the bucket.
char*
LargeHeapBlock::TryAllocFromFreeList(size_t size, ObjectInfoBits attributes)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    LargeHeapBlockFreeListEntry** prev = &this->freeList.entries;
    LargeHeapBlockFreeListEntry* freeListEntry = this->freeList.entries;
    char* memBlock = nullptr;
    // Walk through the free list, find the first entry that can fit our desired size
    while (freeListEntry)
    {
        LargeHeapBlockFreeListEntry* next = freeListEntry->next;
        LargeHeapBlock* heapBlock = freeListEntry->heapBlock;
        if (freeListEntry->objectSize >= size)
        {
            memBlock = heapBlock->AllocFreeListEntry(size, attributes, freeListEntry);
            if (memBlock)
            {
                // Unlink the consumed entry from the list.
                (*prev) = next;
                break;
            }
        }
        prev = &freeListEntry->next;
        freeListEntry = freeListEntry->next;
    }
    // Nothing left to hand out from this block's free list.
    if (this->freeList.entries == nullptr)
    {
        this->bucket->UnregisterFreeList(&this->freeList);
    }
    return memBlock;
}
  347. char*
  348. LargeHeapBlock::AllocFreeListEntry(size_t size, ObjectInfoBits attributes, LargeHeapBlockFreeListEntry* entry)
  349. {
  350. Assert((attributes & InternalObjectInfoBitMask) == attributes);
  351. Assert(HeapInfo::IsAlignedSize(size));
  352. AssertMsg((attributes & TrackBit) == 0, "Large tracked object collection not implemented");
  353. Assert(entry->heapBlock == this);
  354. Assert(entry->headerIndex < this->objectCount);
  355. Assert(this->HeaderList()[entry->headerIndex] == nullptr);
  356. uint headerIndex = entry->headerIndex;
  357. size_t originalSize = entry->objectSize;
  358. LargeObjectHeader * header = (LargeObjectHeader *) entry;
  359. char * allocObject = ((char*) entry) + sizeof(LargeObjectHeader); // shouldn't overflow
  360. char * newAllocAddressEnd = allocObject + size;
  361. char * originalAllocEnd = allocObject + originalSize;
  362. if (newAllocAddressEnd > addressEnd || newAllocAddressEnd < allocObject || (originalAllocEnd < newAllocAddressEnd))
  363. {
  364. return nullptr;
  365. }
  366. #ifdef RECYCLER_MEMORY_VERIFY
  367. if (this->heapInfo->recycler->VerifyEnabled())
  368. {
  369. this->heapInfo->recycler->VerifyCheckFill(allocObject , originalSize);
  370. }
  371. #endif
  372. memset(entry, 0, sizeof(LargeObjectHeader) + originalSize);
  373. #ifdef RECYCLER_MEMORY_VERIFY
  374. // If we're in recyclerVerify mode, fill the non-header part of the allocation
  375. // with the verification pattern
  376. if (this->heapInfo->recycler->VerifyEnabled())
  377. {
  378. memset(allocObject, Recycler::VerifyMemFill, originalSize);
  379. }
  380. #endif
  381. #if DBG
  382. LargeAllocationVerboseTrace(this->heapInfo->recycler->GetRecyclerFlagsTable(), L"Allocated object of size 0x%x in from free list entry at address 0x%p\n", size, allocObject);
  383. #endif
  384. Assert(allocCount <= objectCount);
  385. header->objectIndex = headerIndex;
  386. header->objectSize = originalSize;
  387. header->SetAttributes(this->heapInfo->recycler->Cookie, (attributes & StoredObjectInfoBitMask));
  388. header->markOnOOMRescan = nullptr;
  389. header->SetNext(this->heapInfo->recycler->Cookie, nullptr);
  390. HeaderList()[headerIndex] = header;
  391. finalizeCount += ((attributes & FinalizeBit) != 0);
  392. #ifdef RECYCLER_FINALIZE_CHECK
  393. if (attributes & FinalizeBit)
  394. {
  395. HeapInfo * heapInfo = this->heapInfo;
  396. heapInfo->liveFinalizableObjectCount++;
  397. heapInfo->newFinalizableObjectCount++;
  398. }
  399. #endif
  400. return allocObject;
  401. }
// Bump-allocates 'size' bytes (plus a header) from the unused tail of the
// block. Returns nullptr when there is not enough room left.
char*
LargeHeapBlock::Alloc(size_t size, ObjectInfoBits attributes)
{
    Assert(HeapInfo::IsAlignedSize(size));
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    AssertMsg((attributes & TrackBit) == 0, "Large tracked object collection not implemented");
    // The header occupies the start of the newly carved region.
    LargeObjectHeader * header = (LargeObjectHeader *)allocAddressEnd;
#if defined(PARTIAL_GC_ENABLED) && defined(CONCURRENT_GC_ENABLED)
    Assert(!IsPartialSweptHeader(header));
#endif
    char * allocObject = allocAddressEnd + sizeof(LargeObjectHeader); // shouldn't overflow
    char * newAllocAddressEnd = allocObject + size;
    // Out of space, or the pointer arithmetic wrapped: caller needs a new block.
    if (newAllocAddressEnd > addressEnd || newAllocAddressEnd < allocObject)
    {
        return nullptr;
    }
    Recycler* recycler = this->heapInfo->recycler;
#if DBG
    LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), L"Allocated object of size 0x%x in existing heap block at address 0x%p\n", size, allocObject);
#endif
    Assert(allocCount < objectCount);
    allocAddressEnd = newAllocAddressEnd;
#ifdef RECYCLER_ZERO_MEM_CHECK
    // Freshly carved header memory is expected to be zero-filled already.
    recycler->VerifyZeroFill(header, sizeof(LargeObjectHeader));
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    if (recycler->VerifyEnabled())
    {
        memset(header, 0, sizeof(LargeObjectHeader));
    }
#endif
    header->objectIndex = allocCount;
    header->objectSize = size;
    header->SetAttributes(recycler->Cookie, (attributes & StoredObjectInfoBitMask));
    HeaderList()[allocCount++] = header;
    // Track how many finalizable objects live in this block.
    finalizeCount += ((attributes & FinalizeBit) != 0);
#ifdef RECYCLER_FINALIZE_CHECK
    if (attributes & FinalizeBit)
    {
        HeapInfo * heapInfo = this->heapInfo;
        heapInfo->liveFinalizableObjectCount++;
        heapInfo->newFinalizableObjectCount++;
    }
#endif
    return allocObject;
}
// Marks the object at objectAddress and queues its contents for scanning.
// Addresses that don't resolve to a real allocated object are ignored.
__declspec(noinline)
void
LargeHeapBlock::Mark(void* objectAddress, MarkContext * markContext)
{
    LargeObjectHeader * header = GetHeader(objectAddress);
    if ((char *)header < this->address)
    {
        // Header would precede the block start: not an object address.
        return;
    }
    uint index = header->objectIndex;
    if (index >= this->allocCount)
    {
        // Not allocated yet.
        return;
    }
    if (this->HeaderList()[index] != header)
    {
        // header doesn't match, not a real object
        return;
    }
    DUMP_OBJECT_REFERENCE(markContext->GetRecycler(), objectAddress);
    if (!UpdateAttributesOfMarkedObjects(markContext, objectAddress, header->objectSize, header->GetAttributes(this->heapInfo->recycler->Cookie),
        [&](unsigned char attributes) { header->SetAttributes(this->heapInfo->recycler->Cookie, attributes); }))
    {
        // Couldn't mark children- bail out and come back later
        this->SetNeedOOMRescan(markContext->GetRecycler());
        // Single page large heap block rescan all marked object on oom rescan
        if (this->GetPageCount() != 1)
        {
            // Failed to mark the objects referenced by this object, so we'll
            // revisit this on rescan
            header->markOnOOMRescan = true;
        }
    }
}
  483. bool
  484. LargeHeapBlock::TestObjectMarkedBit(void* objectAddress)
  485. {
  486. Assert(IsValidObject(objectAddress));
  487. LargeObjectHeader* pHeader = nullptr;
  488. if (GetObjectHeader(objectAddress, &pHeader))
  489. {
  490. Recycler* recycler = this->heapInfo->recycler;
  491. return recycler->heapBlockMap.IsMarked(objectAddress);
  492. }
  493. return FALSE;
  494. }
  495. void
  496. LargeHeapBlock::SetObjectMarkedBit(void* objectAddress)
  497. {
  498. Assert(IsValidObject(objectAddress));
  499. LargeObjectHeader* pHeader = nullptr;
  500. if (GetObjectHeader(objectAddress, &pHeader))
  501. {
  502. Recycler* recycler = this->heapInfo->recycler;
  503. recycler->heapBlockMap.SetMark(objectAddress);
  504. }
  505. }
// Fills heapObject with the info for objectAddress if it is a valid object
// of this block; returns false otherwise.
bool
LargeHeapBlock::FindImplicitRootObject(void* objectAddress, Recycler * recycler, RecyclerHeapObjectInfo& heapObject)
{
    if (!IsValidObject(objectAddress))
    {
        return false;
    }
    LargeObjectHeader* pHeader = nullptr;
    if (!GetObjectHeader(objectAddress, &pHeader))
    {
        return false;
    }
#ifdef LARGEHEAPBLOCK_ENCODING
    // With encoding enabled the attributes cannot be exposed as a raw
    // pointer; store the header so attributes go through its accessors.
    heapObject = RecyclerHeapObjectInfo(objectAddress, recycler, this, nullptr);
    heapObject.SetLargeHeapBlockHeader(pHeader);
#else
    heapObject = RecyclerHeapObjectInfo(objectAddress, recycler, this, pHeader->GetAttributesPtr());
#endif
    return true;
}
// Large blocks don't distinguish lookup flavors, so the flags parameter is
// unnamed and the implicit-root lookup is reused.
bool
LargeHeapBlock::FindHeapObject(void* objectAddress, Recycler * recycler, FindHeapObjectFlags, RecyclerHeapObjectInfo& heapObject)
{
    // Currently the same actual implementation (flags is ignored)
    return FindImplicitRootObject(objectAddress, recycler, heapObject);
}
  532. bool
  533. LargeHeapBlock::GetObjectHeader(void* objectAddress, LargeObjectHeader** ppHeader)
  534. {
  535. (*ppHeader) = nullptr;
  536. LargeObjectHeader * header = GetHeader(objectAddress);
  537. if ((char *)header < this->address)
  538. {
  539. return false;
  540. }
  541. uint index = header->objectIndex;
  542. if (this->HeaderList()[index] != header)
  543. {
  544. // header doesn't match, not a real object
  545. return false;
  546. }
  547. Assert(index < this->allocCount);
  548. (*ppHeader) = header;
  549. return true;
  550. }
// Prepares the block for a new mark phase: snapshots the allocation count
// for the coming sweep and, when requested, pre-marks implicit-root objects.
void
LargeHeapBlock::ResetMarks(ResetMarkFlags flags, Recycler* recycler)
{
    Assert(!this->needOOMRescan);
    // Update the lastCollectAllocCount for sweep
    this->lastCollectAllocCount = this->allocCount;
    Assert(this->GetMarkCount() == 0);
    Assert(!this->isPendingConcurrentSweep);
    if (flags & ResetMarkFlags_ScanImplicitRoot)
    {
        for (uint objectIndex = 0; objectIndex < allocCount; objectIndex++)
        {
            LargeObjectHeader * header = this->GetHeader(objectIndex);
            // Skip slots whose object has been freed.
            if (header == nullptr)
            {
                continue;
            }
            // Implicit roots are live by definition, so mark them up front.
            // (Note: this tests ImplicitRootBit — a stale copy of this comment
            // used to claim it was a leaf check.)
            if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & ImplicitRootBit) != 0)
            {
                recycler->heapBlockMap.SetMark(header->GetAddress());
            }
        }
    }
}
// Returns the header that would immediately precede objectAddress; the
// address must lie within this block's range.
LargeObjectHeader *
LargeHeapBlock::GetHeader(void * objectAddress)
{
    Assert(objectAddress >= this->address && objectAddress < this->addressEnd);
    return GetHeaderFromAddress(objectAddress);
}
  584. LargeObjectHeader *
  585. LargeHeapBlock::GetHeaderFromAddress(void * objectAddress)
  586. {
  587. return (LargeObjectHeader*)(((char *)objectAddress) - sizeof(LargeObjectHeader));
  588. }
  589. byte *
  590. LargeHeapBlock::GetRealAddressFromInterior(void * interiorAddress)
  591. {
  592. for (uint i = 0; i < allocCount; i++)
  593. {
  594. LargeObjectHeader * header = this->HeaderList()[i];
  595. if (header != nullptr && !IsPartialSweptHeader(header))
  596. {
  597. Assert(header->objectIndex == i);
  598. byte * startAddress = (byte *)header->GetAddress();
  599. if (startAddress <= interiorAddress && (startAddress + header->objectSize > interiorAddress))
  600. {
  601. return startAddress;
  602. }
  603. }
  604. }
  605. return nullptr;
  606. }
  607. #ifdef RECYCLER_VERIFY_MARK
// RECYCLER_VERIFY_MARK: for every marked, non-leaf object in this block,
// verify that every pointer-sized value in its payload refers to marked data.
void
LargeHeapBlock::VerifyMark()
{
    Assert(!this->needOOMRescan);
    Recycler* recycler = this->heapInfo->recycler;
    for (uint i = 0; i < allocCount; i++)
    {
        LargeObjectHeader * header = this->GetHeader(i);
        if (header == nullptr)
        {
            continue;
        }
        char * objectAddress = (char *)header->GetAddress();
        // Unmarked objects are dead; their contents need not be verified.
        if (!recycler->heapBlockMap.IsMarked(objectAddress))
        {
            continue;
        }
        unsigned char attributes = header->GetAttributes(this->heapInfo->recycler->Cookie);
        Assert((attributes & NewFinalizeBit) == 0);
        // Leaf objects contain no pointers to follow.
        if ((attributes & LeafBit) != 0)
        {
            continue;
        }
        Assert(!header->markOnOOMRescan);
        // Treat the payload as an array of pointer-sized slots and verify each.
        char * objectAddressEnd = objectAddress + header->objectSize;
        while (objectAddress + sizeof(void *) <= objectAddressEnd)
        {
            void* target = *(void **)objectAddress;
            recycler->VerifyMark(target);
            objectAddress += sizeof(void *);
        }
    }
}
// Debug-only validation of a single candidate address: if the address is a
// real allocated object in this block, assert/break if it is not marked.
// Addresses that don't correspond to live objects are ignored.
void
LargeHeapBlock::VerifyMark(void * objectAddress)
{
    LargeObjectHeader * header = GetHeader(objectAddress);
    // The presumed header would start before the block - not a valid object
    if ((char *)header < this->address)
    {
        return;
    }
    uint index = header->objectIndex;
    if (index >= this->allocCount)
    {
        // object not allocated
        return;
    }
    if (this->HeaderList()[index] != header)
    {
        // header doesn't match, not a real object
        return;
    }
    bool isMarked = this->heapInfo->recycler->heapBlockMap.IsMarked(objectAddress);
#if DBG
    Assert(isMarked);
#else
    // In non-DBG verify builds, break into the debugger on a missed mark
    if (!isMarked)
    {
        DebugBreak();
    }
#endif
}
  670. #endif
  671. void
  672. LargeHeapBlock::ScanInitialImplicitRoots(Recycler * recycler)
  673. {
  674. Assert(recycler->enableScanImplicitRoots);
  675. const HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
  676. for (uint objectIndex = 0; objectIndex < allocCount; objectIndex++)
  677. {
  678. // object is allocated during the concurrent mark or it is marked, do rescan
  679. LargeObjectHeader * header = this->GetHeader(objectIndex);
  680. // check if the object index is not allocated
  681. if (header == nullptr)
  682. {
  683. continue;
  684. }
  685. // check whether the object is a leaf and doesn't need to be scanned
  686. if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & LeafBit) != 0)
  687. {
  688. continue;
  689. }
  690. char * objectAddress = (char *)header->GetAddress();
  691. // it is not marked, don't scan implicit root
  692. if (!heapBlockMap.IsMarked(objectAddress))
  693. {
  694. continue;
  695. }
  696. // TODO: Assume scan interior?
  697. DUMP_IMPLICIT_ROOT(recycler, objectAddress);
  698. recycler->ScanObjectInlineInterior((void **)objectAddress, header->objectSize);
  699. }
  700. }
// Marks every implicit-root object in this block and scans the ones that were
// not previously marked (unless they are leaves).
void
LargeHeapBlock::ScanNewImplicitRoots(Recycler * recycler)
{
    Assert(recycler->enableScanImplicitRoots);
    uint objectIndex = 0;
    HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
    while (objectIndex < allocCount)
    {
        // object is allocated during the concurrent mark or it is marked, do rescan
        LargeObjectHeader * header = this->GetHeader(objectIndex);
        objectIndex++;
        // check if the object index is not allocated
        if (header == nullptr)
        {
            continue;
        }
        // check whether the object is an implicit root
        if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & ImplicitRootBit) == 0)
        {
            continue;
        }
        char * objectAddress = (char *)header->GetAddress();
        // Set the mark bit; only objects that were not already marked are
        // scanned below (marked == previous state, per the use here)
        bool marked = heapBlockMap.TestAndSetMark(objectAddress);
        if (!marked)
        {
            DUMP_IMPLICIT_ROOT(recycler, objectAddress);
            // check whether the object is a leaf and doesn't need to be scanned
            if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & LeafBit) != 0)
            {
                continue;
            }
            // TODO: Assume scan interior
            recycler->ScanObjectInlineInterior((void **)objectAddress, header->objectSize);
        }
    }
}
  737. #if defined(PARTIAL_GC_ENABLED) || defined(CONCURRENT_GC_ENABLED)
// Rescans a single-page large heap block: if the page was written to since the
// last write-watch reset (or a previous rescan OOMed), re-queue every marked,
// non-leaf object on the page for marking. Returns true if a rescan happened.
bool
LargeHeapBlock::RescanOnePage(Recycler * recycler, DWORD const writeWatchFlags)
{
    Assert(this->GetPageCount() == 1);
    bool const oldNeedOOMRescan = this->needOOMRescan;
    // Reset this, we'll increment this if we OOM again
    this->needOOMRescan = false;
    // don't need to get the write watch bit if we already need to oom rescan
    if (!oldNeedOOMRescan)
    {
        if (recycler->inEndMarkOnLowMemory)
        {
            // we only do oom rescan if we are on low memory mark
            return false;
        }
        // Check the write watch bit to see if we need to rescan
        ULONG_PTR count = 1;
        DWORD pageSize = AutoSystemInfo::PageSize;
        void * written;
        // GetWriteWatch returns 0 on success; count != 1 then means the page
        // was not reported as written, so nothing changed and no rescan needed
        if (GetWriteWatch(writeWatchFlags, this->GetBeginAddress(), AutoSystemInfo::PageSize, &written, &count, &pageSize) == 0 && (count != 1))
        {
            return false;
        }
    }
    RECYCLER_STATS_INC(recycler, markData.rescanLargePageCount);
    for (uint objectIndex = 0; objectIndex < allocCount; objectIndex++)
    {
        // object is allocated during the concurrent mark or it is marked, do rescan
        LargeObjectHeader * header = this->GetHeader(objectIndex);
        // check if the object index is not allocated
        if (header == nullptr)
        {
            continue;
        }
        char * objectAddress = (char *)header->GetAddress();
        // it is not marked, don't rescan
        if (!recycler->heapBlockMap.IsMarked(objectAddress))
        {
            continue;
        }
        unsigned char attributes = header->GetAttributes(this->heapInfo->recycler->Cookie);
#ifdef RECYCLER_STATS
        if (((attributes & FinalizeBit) != 0) && ((attributes & NewFinalizeBit) != 0))
        {
            // The concurrent thread saw a false reference to this object and marked it before the attribute was set.
            // As such, our finalizeCount is not correct. Update it now.
            RECYCLER_STATS_INC(recycler, finalizeCount);
            header->SetAttributes(this->heapInfo->recycler->Cookie, (attributes & ~NewFinalizeBit));
        }
#endif
        // check whether the object is a leaf and doesn't need to be scanned
        if ((attributes & LeafBit) != 0)
        {
            continue;
        }
        RECYCLER_STATS_INC(recycler, markData.rescanLargeObjectCount);
        RECYCLER_STATS_ADD(recycler, markData.rescanLargeByteCount, header->objectSize);
        // AddMark failure means we ran out of mark-stack memory; remember to
        // rescan this block again later
        if (!recycler->AddMark(objectAddress, header->objectSize))
        {
            this->SetNeedOOMRescan(recycler);
        }
    }
    return true;
}
// Dispatches a rescan of this block, choosing the single-page or multi-page
// strategy. Returns the number of regions queued for rescan (the single-page
// path's bool converts to 0/1).
// NOTE(review): isPartialSwept is unused in this body - presumably kept for
// interface parity with other heap block types; confirm against callers.
size_t
LargeHeapBlock::Rescan(Recycler * recycler, bool isPartialSwept, RescanFlags flags)
{
    // Update the lastCollectAllocCount for sweep
    this->lastCollectAllocCount = this->allocCount;
    Assert(recycler->collectionState != CollectionStateConcurrentFinishMark || (flags & RescanFlags_ResetWriteWatch));
    DWORD const writeWatchFlags = (flags & RescanFlags_ResetWriteWatch? WRITE_WATCH_FLAG_RESET : 0);
    if (this->GetPageCount() == 1)
    {
        return RescanOnePage(recycler, writeWatchFlags);
    }
    // Need to rescan for finish mark even if it is done on the background thread
    if (recycler->collectionState != CollectionStateConcurrentFinishMark && recycler->IsConcurrentMarkState())
    {
        // CONCURRENT-TODO: Don't do background rescan for pages with multiple pages because
        // we don't track which page we have queued up
        return 0;
    }
    return RescanMultiPage(recycler, writeWatchFlags);
}
// Rescans a multi-page large heap block. For each marked, non-leaf object,
// either re-queues the whole object (if a previous rescan OOMed on it) or
// walks it page by page, queueing only the dirty (written-to) regions for
// marking. Returns the number of regions queued.
size_t
LargeHeapBlock::RescanMultiPage(Recycler * recycler, DWORD const writeWatchFlags)
{
    Assert(this->GetPageCount() != 1);
    DebugOnly(bool oldNeedOOMRescan = this->needOOMRescan);
    // Reset this, we'll increment this if we OOM again
    this->needOOMRescan = false;
    size_t rescanCount = 0;
    DWORD pageSize = AutoSystemInfo::PageSize;
    uint objectIndex = 0;
    // Cache of the last page probed with GetWriteWatch, so a page shared by
    // two objects is not probed (and write-watch-reset) twice
    char * lastPageCheckedForWriteWatch = nullptr;
    bool isLastPageCheckedForWriteWatchDirty = false;
    const HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
    while (objectIndex < allocCount)
    {
        // object is allocated during the concurrent mark or it is marked, do rescan
        LargeObjectHeader * header = this->GetHeader(objectIndex);
        objectIndex++;
        // check if the object index is not allocated
        if (header == nullptr)
        {
            continue;
        }
        char * objectAddress = (char *)header->GetAddress();
        // it is not marked, don't rescan
        if (!heapBlockMap.IsMarked(objectAddress))
        {
            continue;
        }
        unsigned char attributes = header->GetAttributes(this->heapInfo->recycler->Cookie);
#ifdef RECYCLER_STATS
        if (((attributes & FinalizeBit) != 0) && ((attributes & NewFinalizeBit) != 0))
        {
            // The concurrent thread saw a false reference to this object and marked it before the attribute was set.
            // As such, our finalizeCount is not correct. Update it now.
            RECYCLER_STATS_INC(recycler, finalizeCount);
            header->SetAttributes(this->heapInfo->recycler->Cookie, (attributes & ~NewFinalizeBit));
        }
#endif
        // check whether the object is a leaf and doesn't need to be scanned
        if ((attributes & LeafBit) != 0)
        {
            continue;
        }
#ifdef RECYCLER_STATS
        bool objectScanned = false;
#endif
        Assert(oldNeedOOMRescan || !header->markOnOOMRescan);
        // Avoid writing to the page unnecessary by checking first
        if (header->markOnOOMRescan)
        {
            // A previous rescan OOMed on this object: re-queue the whole object
            if (!recycler->AddMark(objectAddress, header->objectSize))
            {
                this->SetNeedOOMRescan(recycler);
                header->markOnOOMRescan = true;
                // We need to bail out of rescan early only if the recycler is
                // trying to finish marking because of low memory. If this is
                // a regular rescan, we want to try and rescan all the objects
                // on the page. It's possible that the rescan OOMs but if the
                // object rescan does OOM, we'll set the right bit on the
                // object header. When we later rescan it in a low memory
                // situation, when the bit is set, we don't need to check for
                // write-watch etc. since we'd have already done that before
                // setting the bit in the non-low-memory rescan case.
                if (!recycler->inEndMarkOnLowMemory)
                {
                    continue;
                }
                return rescanCount;
            }
            header->markOnOOMRescan = false;
#ifdef RECYCLER_STATS
            objectScanned = true;
#endif
        }
        else if (!recycler->inEndMarkOnLowMemory)
        {
            char * objectAddressEnd = objectAddress + header->objectSize;
            // Walk through the object, checking if any of its pages have been written to
            // If it has, then queue up this object for marking
            do
            {
                // Round the current position down to its page start
                char * pageStart = (char *)(((size_t)objectAddress) & ~(size_t)(AutoSystemInfo::PageSize - 1));
                /*
                 * The rescan logic for large object is as follows:
                 * - We rescan the object if it was marked during concurrent mark
                 * - If it was marked, since the large object has multiple pages, we'll rescan only the parts that were changed
                 * - So for each page in the large object, check if it's been written to, and if it hasn't, skip looking at that region
                 * - If we can't get the write watch, rescan that region
                 * - However, this logic applies only if we're not rescanning because of an OOM
                 * - If we are rescanning this object because of OOM (i.e !rescanBecauseOfOOM = false), rescan the whole object
                 *
                 * We cache the result of the write watch and the page that it was checked on so that we don't call GetWriteWatch on the same
                 * page twice and inadvertently reset the write watch on a page where we've already scanned an object
                 */
                if (lastPageCheckedForWriteWatch != pageStart)
                {
                    void * written = nullptr;
                    ULONG_PTR count = 1;
                    lastPageCheckedForWriteWatch = pageStart;
                    isLastPageCheckedForWriteWatchDirty = true;
                    if (GetWriteWatch(writeWatchFlags, pageStart, AutoSystemInfo::PageSize, &written, &count, &pageSize) == 0 && (count != 1))
                    {
                        // Fall through to the case below where we'll update objectAddress and continue
                        isLastPageCheckedForWriteWatchDirty = false;
                    }
                }
                if (!isLastPageCheckedForWriteWatchDirty)
                {
                    // Clean page: skip to the next page of the object
                    objectAddress = pageStart + AutoSystemInfo::PageSize;
                    continue;
                }
                // We're interested in only rescanning the parts of the object that have changed, not the whole
                // object. So just queue that up for marking
                char * checkEnd = min(pageStart + AutoSystemInfo::PageSize, objectAddressEnd);
                if (!recycler->AddMark(objectAddress, (checkEnd - objectAddress)))
                {
                    this->SetNeedOOMRescan(recycler);
                    header->markOnOOMRescan = true;
                }
#ifdef RECYCLER_STATS
                objectScanned = true;
                recycler->collectionStats.markData.rescanLargePageCount++;
                recycler->collectionStats.markData.rescanLargeByteCount += (checkEnd - objectAddress);
#endif
                objectAddress = checkEnd;
                rescanCount++;
            }
            while (objectAddress < objectAddressEnd);
        }
        RECYCLER_STATS_ADD(recycler, markData.rescanLargeObjectCount, objectScanned);
    }
    return rescanCount;
}
  956. #endif
  957. /*
  958. * Sweep the large heap block
  959. *
  960. * If there are no finalizable or weak referenced objects, and if nothing is marked
  961. * that means that everything in this heap block is considered free. So the heap block
  962. * can be released.
  963. * In that case, return SweepStateEmpty
* If there are objects to be freed, first see if there are any finalizable objects. If there
  965. * aren't any in this heap block, then this heap block can be swept concurrently. So return SweepStatePendingSweep
  966. * If there are finalizable objects, sweep them in thread. They would have been added to the pendingDispose list
  967. * during the finalize phase, so we return SweepStatePendingDispose.
  968. * In any case, if the pendingDispose list is not empty, we return SweepStatePendingDispose.
  969. * If the allocCount equals the max object count, or if there's no more space to allocate a large object,
  970. * we return SweepStateFull, so that the HeapInfo can move this to the full block list. Otherwise,
  971. * we return SweepStateSwept.
  972. */
// Explicit instantiations for both page-heap modes
template SweepState LargeHeapBlock::Sweep<true>(RecyclerSweep& recyclerSweep, bool queuePendingSweep);
template SweepState LargeHeapBlock::Sweep<false>(RecyclerSweep& recyclerSweep, bool queuePendingSweep);
// Sweeps this block; see the block comment above for the meaning of each
// SweepState return value.
template<bool pageheap>
SweepState
LargeHeapBlock::Sweep(RecyclerSweep& recyclerSweep, bool queuePendingSweep)
{
    Recycler * recycler = recyclerSweep.GetRecycler();
    uint markCount = GetMarkCount();
#if DBG
    Assert(this->lastCollectAllocCount == this->allocCount);
    Assert(markCount <= allocCount);
#endif
    RECYCLER_STATS_INC(recycler, heapBlockCount[HeapBlock::LargeBlockType]);
#if DBG
    // Every unmarked object is expected to be swept
    this->expectedSweepCount = allocCount - markCount;
#endif
    Assert(!this->isPendingConcurrentSweep);
    // Nothing marked and nothing finalizable: the whole block is garbage and
    // can be released
    bool isAllFreed = (finalizeCount == 0 && markCount == 0);
    if (isAllFreed)
    {
        recycler->NotifyFree<pageheap>(this);
        Assert(this->pendingDisposeObject == nullptr);
        return SweepStateEmpty;
    }
    RECYCLER_STATS_ADD(recycler, largeHeapBlockTotalByteCount, this->pageCount * AutoSystemInfo::PageSize);
    RECYCLER_STATS_ADD(recycler, heapBlockFreeByteCount[HeapBlock::LargeBlockType],
        addressEnd - allocAddressEnd <= HeapConstants::MaxSmallObjectSize? 0 : (size_t)(addressEnd - allocAddressEnd));
    // If the number of objects marked is not equal to the number of objects
    // that have been allocated by this large heap block, that means that there
    // could be some objects that need to be swept
    if (markCount != allocCount)
    {
        Assert(this->expectedSweepCount != 0);
        // We need to sweep in thread if there are any finalizable objects so
        // that the PrepareFinalize() can be called before concurrent sweep
        // and other finalizers. This gives the object an opportunity before any
        // other script can be ran to clean up its references/states that are not
        // valid since we've determined that the object is not live any more.
        //
        // An example is the ITrackable's tracking alias. The reference to the alias
        // object needs to be clear so that the reference will not be given out again
        // in other script during concurrent sweep or finalizer called before.
        Assert(!recyclerSweep.IsBackground());
#ifdef CONCURRENT_GC_ENABLED
        if (queuePendingSweep && finalizeCount == 0)
        {
            // No finalizers: defer the sweep to the concurrent sweep phase
            this->isPendingConcurrentSweep = true;
            return SweepStatePendingSweep;
        }
#else
        Assert(!queuePendingSweep);
#endif
        SweepObjects<pageheap, SweepMode_InThread>(recycler);
        if (TransferSweptObjects())
        {
            return SweepStatePendingDispose;
        }
    }
#ifdef RECYCLER_STATS
    else
    {
        // Stats builds force-sweep even fully-marked blocks so usage stats
        // get collected
        Assert(expectedSweepCount == 0);
        isForceSweeping = true;
        SweepObjects<pageheap, SweepMode_InThread>(recycler);
        isForceSweeping = false;
    }
#endif
    if (this->pendingDisposeObject != nullptr)
    {
        return SweepStatePendingDispose;
    }
    // Full when no slot or space remains for another allocation and the free
    // list is empty; otherwise the block can still take allocations
    return (allocCount == objectCount || addressEnd - allocAddressEnd <= HeapConstants::MaxSmallObjectSize) && this->freeList.entries == nullptr ?
        SweepStateFull : SweepStateSwept;
}
// Releases whole free pages back to the page allocator when a large
// (multi-page) object is freed, shrinking this block from the front.
// header       - header of the freed object (must be the first object, index 0)
// sizeOfObject - size of the freed object's payload
// inDispose    - true when called from DisposeObjects; idle decommit is
//                suspended around the page release in that case
// Returns true if pages were trimmed (the remaining partial page is filled
// here, so the caller must then skip its own FillFreeMemory).
bool
LargeHeapBlock::TrimObject(Recycler* recycler, LargeObjectHeader* header, size_t sizeOfObject, bool inDispose)
{
    IdleDecommitPageAllocator* pageAllocator = recycler->GetRecyclerLargeBlockPageAllocator();
    uint pageSize = AutoSystemInfo::PageSize ;
    // If we have to trim an object, either we need to have more than one object in the
    // heap block or we're being called as a part of force-sweep or dispose
    Assert(this->allocCount > 1 || this->isForceSweeping || inDispose);
    // If we have more than 1 page of bytes to free
    // make sure that the number of bytes doesn't exceed the cap for a PageSegment
    // since this optimization can only be applied to heap blocks using page segments.
    // We also skip this optimization if the allocCount is 1 since that means
    // the heap block is empty and we've been called only because we're force sweeping.
    // So, skip the opt since we're going to be marking the heap block as empty soon
    if (sizeOfObject > pageSize &&
        this->segment->GetPageCount() <= pageAllocator->GetMaxAllocPageCount() &&
        this->allocCount > 1)
    {
        Assert(!this->hadTrimmed);
        // We want to decommit the free pages beyond 4K (the page size)
        // The way large allocations work is that at most we can have 4 objects in a large heap block
        // The first object can span multiple pages, the remaining 3 objects must all fit within a page
        // So if the object being freed is greater than 1 page, then it must be the first object
        // The objectIndex must be 0 and the header must be same as this->address
        // The end address is (baseAddress + objectSize) & ~(4k - 1)
        // The number of pages to free is (freePageEnd - freePageStart) / pageSize
        char* objectAddress = (char*) header;
        char* objectEndAddress = objectAddress + sizeof(LargeObjectHeader) + header->objectSize;
        uintptr_t alignmentMask = ~((uintptr_t) (AutoSystemInfo::PageSize - 1));
        uintptr_t objectFreeAddress = (uintptr_t) objectAddress;
        // Round the end of the object down to a page boundary; only whole
        // pages can be released
        uintptr_t objectFreeEndAddress = ((uintptr_t) objectEndAddress) & alignmentMask;
        size_t bytesToFree = (objectFreeEndAddress - objectFreeAddress);
        // Verify assumptions
        // Make sure that the object being freed is the first object since
        // the expectation in a large heap block is that the first object is the largest
        // object.
        // The amount of bytes to free is always less than the size of the object being freed including its header
        // The exception is if the original object's size + header size is a multiple of the page size
        Assert(objectAddress == this->address);
        Assert(header->objectIndex == 0);
        Assert(objectFreeEndAddress <= (uintptr_t) objectEndAddress);
        Assert(objectFreeAddress <= objectFreeEndAddress);
        Assert(bytesToFree < sizeOfObject + sizeof(LargeObjectHeader) || (uintptr_t) objectEndAddress == objectFreeEndAddress);
        // If we actually have something to free, release those pages
        // Move the heap block to start from the new start address
        // Change the heap block map to contain an entry for only the pages that haven't been freed
        // Fill up the old object's unreleased memory if we have to
        Assert(bytesToFree > 0);
        Assert((bytesToFree & (AutoSystemInfo::PageSize - 1)) == 0);
        size_t freePageCount = bytesToFree / AutoSystemInfo::PageSize;
        Assert(freePageCount > 0);
        Assert(freePageCount < this->pageCount);
        // If this call to trim needs idle decommit to be suspended (e.g. dispose case)
        // check if IdleDecommit has been suspended already. If it hasn't, suspend it
        // This is to prevent reentrant idle decommits (e.g. sometimes dispose is called with
        if (inDispose)
        {
            pageAllocator->SuspendIdleDecommit();
        }
        pageAllocator->Release((char*) objectFreeAddress, freePageCount, this->GetSegment());
        if (inDispose)
        {
            pageAllocator->ResumeIdleDecommit();
        }
        // Remove the freed pages from the heap block map
        // and move the heap block to start from after the pages that were freed
        // and update the page count
        recycler->heapBlockMap.ClearHeapBlock(this->address, freePageCount);
        this->address = (char*) objectFreeEndAddress;
        this->pageCount -= freePageCount;
        // Fill the leftover tail of the object on the page that was kept
        FillFreeMemory(recycler, (void*) objectFreeEndAddress, (size_t) (objectEndAddress - objectFreeEndAddress));
#if DBG
        this->hadTrimmed = true;
#endif
        return true;
    }
    return false;
}
// In-thread sweep of one dead object: clears its header slot and trims or
// fills its memory. Finalizable objects are left intact here - their memory
// is reclaimed during dispose instead.
template <>
void
LargeHeapBlock::SweepObject<SweepMode_InThread>(Recycler * recycler, LargeObjectHeader * header)
{
    Assert(this->HeaderList()[header->objectIndex] == header);
    // Set the header and object to null only if this is not a finalizable object
    // If it's finalizable, it'll be zeroed out during dispose
    if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit) != FinalizeBit)
    {
        this->HeaderList()[header->objectIndex] = nullptr;
        size_t sizeOfObject = header->objectSize;
        bool objectTrimmed = false;
        // Only blocks without a bucket attempt to trim pages back to the
        // page allocator
        if (this->bucket == nullptr)
        {
            objectTrimmed = TrimObject(recycler, header, sizeOfObject);
        }
        if (!objectTrimmed)
        {
            FillFreeMemory(recycler, header, sizeof(LargeObjectHeader) + sizeOfObject);
        }
    }
}
  1147. //
  1148. // Call the finalizer on the heapblock object and add it to the pending dispose list
  1149. //
// Calls Finalize on a dead finalizable object and moves it onto this block's
// pending-dispose list; the header-list slot is nulled to record that the
// object is finalized and awaiting dispose.
void
LargeHeapBlock::FinalizeObject(Recycler* recycler, LargeObjectHeader* header)
{
    // The header count can also be null if this object has already been finalized
    // but this method should never be called if the header list header is null
    Assert(this->HeaderList()[header->objectIndex] == header);
    Assert(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit);
    // Call finalize to do clean up that needs to be done immediately
    // (e.g. Clear the ITrackable alias reference, so it can't be revived during
    // other finalizers or concurrent sweep)
    // Call it only if it hasn't already been finalized
    ((FinalizableObject *)header->GetAddress())->Finalize(false);
    // Push onto the singly-linked pending dispose list
    header->SetNext(this->heapInfo->recycler->Cookie, this->pendingDisposeObject);
    this->pendingDisposeObject = header;
    // Null out the header in the header list- this means that this object has already
    // been finalized and is just pending dispose
    this->HeaderList()[header->objectIndex] = nullptr;
#ifdef RECYCLER_FINALIZE_CHECK
    recycler->autoHeap.pendingDisposableObjectCount++;
#endif
}
// Explicitly instantiate all the sweep modes
template void LargeHeapBlock::SweepObjects<false, SweepMode_InThread>(Recycler * recycler);
template void LargeHeapBlock::SweepObjects<true, SweepMode_InThread>(Recycler * recycler);
  1174. #ifdef CONCURRENT_GC_ENABLED
// Concurrent sweep of one dead, non-finalizable object: clear its header slot
// and fill its memory from the background sweep.
template <>
void
LargeHeapBlock::SweepObject<SweepMode_Concurrent>(Recycler * recycler, LargeObjectHeader * header)
{
    Assert(!(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit));
    Assert(this->HeaderList()[header->objectIndex] == header);
    this->HeaderList()[header->objectIndex] = nullptr;
    FillFreeMemory(recycler, header, sizeof(LargeObjectHeader) + header->objectSize);
}
// Explicitly instantiate all the sweep modes
template void LargeHeapBlock::SweepObjects<false, SweepMode_Concurrent>(Recycler * recycler);
  1186. #ifdef PARTIAL_GC_ENABLED
// Partial concurrent sweep of one dead, non-finalizable object: instead of
// freeing it now, tag its header-list entry with PartialFreeBit so that
// FinishPartialCollect can free it later.
template <>
void
LargeHeapBlock::SweepObject<SweepMode_ConcurrentPartial>(Recycler * recycler, LargeObjectHeader * header)
{
    Assert(!(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit));
    Assert(this->HeaderList()[header->objectIndex] == header);
    this->HeaderList()[header->objectIndex] = (LargeObjectHeader *)((size_t)header | PartialFreeBit);
    DebugOnly(this->hasPartialFreeObjects = true);
}
// Explicitly instantiate all the sweep modes
template void LargeHeapBlock::SweepObjects<false, SweepMode_ConcurrentPartial>(Recycler * recycler);
  1198. #endif
  1199. #endif
  1200. //
  1201. // Walk through the objects in this heap block and call finalize
  1202. // on them if they're not marked and finalizable.
  1203. //
  1204. // At the end of this phase, if there were any finalizable objects,
  1205. // they would be in the pendingDisposeObject list. When we later call
  1206. // sweep on this heapblock, we'd simply null out the header and zero out the memory
  1207. // and then Sweep would return PendingDispose as its state
  1208. //
  1209. void LargeHeapBlock::FinalizeObjects(Recycler* recycler)
  1210. {
  1211. const HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
  1212. for (uint i = 0; i < this->lastCollectAllocCount; i++)
  1213. {
  1214. LargeObjectHeader * header = this->GetHeader(i);
  1215. if (header == nullptr)
  1216. {
  1217. continue;
  1218. }
  1219. Assert(header->objectIndex == i);
  1220. // Skip finalization if the object is alive
  1221. if (heapBlockMap.IsMarked(header->GetAddress()))
  1222. {
  1223. continue;
  1224. }
  1225. if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit) == FinalizeBit)
  1226. {
  1227. recycler->NotifyFree((char *)header->GetAddress(), header->objectSize);
  1228. FinalizeObject(recycler, header);
  1229. }
  1230. }
  1231. }
// Sweeps all dead (unmarked) objects in this block using the given SweepMode,
// optionally free-listing the reclaimed slots when the block has a bucket.
template <bool pageheap, SweepMode mode>
void
LargeHeapBlock::SweepObjects(Recycler * recycler)
{
    Assert(mode == SweepMode_InThread || this->isPendingConcurrentSweep);
    const HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
#if DBG
    uint markCount = GetMarkCount();
    // mark count included newly allocated objects
#ifdef CONCURRENT_GC_ENABLED
    Assert(expectedSweepCount == allocCount - markCount || recycler->collectionState == CollectionStateConcurrentSweep);
#else
    Assert(expectedSweepCount == allocCount - markCount);
#endif
    Assert(expectedSweepCount != 0 || isForceSweeping);
    uint sweepCount = 0;
#endif
    for (uint i = 0; i < lastCollectAllocCount; i++)
    {
        RECYCLER_STATS_ADD(recycler, objectSweepScanCount, !isForceSweeping);
        LargeObjectHeader * header = this->GetHeader(i);
        // Empty slot: already freed/finalized, nothing to sweep
        if (header == nullptr)
        {
#if DBG
            Assert(expectedSweepCount != 0);
            expectedSweepCount--;
#endif
#if DBG
            LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), L"Index %d empty\n", i);
#endif
            continue;
        }
        Assert(header->objectIndex == i);
        // Skip sweep if the object is alive
        if (heapBlockMap.IsMarked(header->GetAddress()))
        {
#if DBG
            Assert((header->GetAttributes(recycler->Cookie) & NewFinalizeBit) == 0);
#endif
            RECYCLER_STATS_ADD(recycler, largeHeapBlockUsedByteCount, this->GetHeader(i)->objectSize);
            continue;
        }
        // Capture the size before SweepObject may fill/trim the memory
        size_t objectSize = header->objectSize;
        recycler->NotifyFree((char *)header->GetAddress(), objectSize);
        SweepObject<mode>(recycler, header);
        // Free-list the reclaimed slot when this block belongs to a bucket
        // (the freed header memory is reused as the free list entry)
        if (this->bucket != nullptr
#ifdef RECYCLER_STATS
            && !isForceSweeping
#endif
            )
        {
            LargeHeapBlockFreeListEntry* head = this->freeList.entries;
            LargeHeapBlockFreeListEntry* entry = (LargeHeapBlockFreeListEntry*) header;
            entry->headerIndex = i;
            entry->heapBlock = this;
            entry->next = head;
            entry->objectSize = objectSize;
            this->freeList.entries = entry;
        }
#if DBG
        sweepCount++;
#endif
    }
    Assert(sweepCount == expectedSweepCount);
    this->isPendingConcurrentSweep = false;
}
  1298. bool
  1299. LargeHeapBlock::TransferSweptObjects()
  1300. {
  1301. // TODO : Large heap block doesn't do free listing yet
  1302. return pendingDisposeObject != nullptr;
  1303. }
// Drains the pending-dispose list: calls Dispose on each finalized object,
// updates finalize bookkeeping, then trims or fills the freed memory.
void
LargeHeapBlock::DisposeObjects(Recycler * recycler)
{
    Assert(this->pendingDisposeObject != nullptr || this->hasDisposeBeenCalled);
    while (pendingDisposeObject != nullptr)
    {
#if DBG
        this->hasDisposeBeenCalled = true;
#endif
        // Pop the next finalized object off the pending dispose list
        LargeObjectHeader * header = pendingDisposeObject;
        pendingDisposeObject = header->GetNext(this->heapInfo->recycler->Cookie);
        Assert(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit);
        Assert(this->HeaderList()[header->objectIndex] == nullptr);
        void * objectAddress = header->GetAddress();
        ((FinalizableObject *)objectAddress)->Dispose(false);
        Assert(finalizeCount != 0);
        finalizeCount--;
        bool objectTrimmed = false;
        // Only bucket-less blocks trim pages; dispose requires idle-decommit
        // suspension around the release (inDispose = true)
        if (this->bucket == nullptr)
        {
            objectTrimmed = TrimObject(recycler, header, header->objectSize, true /* need suspend */);
        }
        // GCTODO: Consider free listing items after Dispose too
        // GCTODO: Consider compacting heap blocks- if the last n items are free, move the address pointer
        // back to before the nth item so we can bump allocate from this heap block
        if (!objectTrimmed)
        {
            FillFreeMemory(recycler, header, sizeof(LargeObjectHeader) + header->objectSize);
        }
        RECYCLER_STATS_INC(recycler, finalizeSweepCount);
#ifdef RECYCLER_FINALIZE_CHECK
        // NOTE(review): FinalizeObject increments recycler->autoHeap.pendingDisposableObjectCount,
        // while here the heapInfo counters are decremented - confirm both resolve
        // to the same bookkeeping.
        this->heapInfo->liveFinalizableObjectCount--;
        this->heapInfo->pendingDisposableObjectCount--;
#endif
    }
}
  1340. #if defined(PARTIAL_GC_ENABLED) && defined(CONCURRENT_GC_ENABLED)
// Partial-sweep counterpart of TransferSweptObjects: partially swept objects
// remain tagged in the header list until FinishPartialCollect, so there is
// nothing to transfer here.
void
LargeHeapBlock::PartialTransferSweptObjects()
{
    // Nothing to do
    Assert(this->hasPartialFreeObjects);
}
  1347. void
  1348. LargeHeapBlock::FinishPartialCollect(Recycler * recycler)
  1349. {
  1350. Assert(this->hasPartialFreeObjects);
  1351. for (uint i = 0; i < allocCount; i++)
  1352. {
  1353. LargeObjectHeader * header = this->HeaderList()[i];
  1354. if (header != nullptr && IsPartialSweptHeader(header))
  1355. {
  1356. header = (LargeObjectHeader *)((size_t)header & ~PartialFreeBit);
  1357. Assert(header->objectIndex == i);
  1358. this->HeaderList()[i] = nullptr;
  1359. FillFreeMemory(recycler, header, sizeof(LargeObjectHeader) + header->objectSize);
  1360. }
  1361. }
  1362. DebugOnly(this->hasPartialFreeObjects = false);
  1363. }
  1364. #endif
  1365. void
  1366. LargeHeapBlock::EnumerateObjects(ObjectInfoBits infoBits, void (*CallBackFunction)(void * address, size_t size))
  1367. {
  1368. for (uint i = 0; i < allocCount; i++)
  1369. {
  1370. LargeObjectHeader * header = this->GetHeader(i);
  1371. if (header == nullptr)
  1372. {
  1373. continue;
  1374. }
  1375. if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & infoBits) != 0)
  1376. {
  1377. CallBackFunction(header->GetAddress(), header->objectSize);
  1378. }
  1379. }
  1380. }
  1381. uint
  1382. LargeHeapBlock::GetMaxLargeObjectCount(size_t pageCount, size_t firstAllocationSize)
  1383. {
  1384. size_t freeSize = (AutoSystemInfo::PageSize * pageCount) - firstAllocationSize - sizeof(LargeObjectHeader);
  1385. Assert(freeSize < AutoSystemInfo::Data.dwAllocationGranularity);
  1386. size_t objectCount = (freeSize / HeapConstants::MaxSmallObjectSize) + 1;
  1387. Assert(objectCount <= UINT_MAX);
  1388. return (uint)objectCount;
  1389. }
  1390. #ifdef RECYCLER_SLOW_CHECK_ENABLED
  1391. void
  1392. LargeHeapBlock::Check(bool expectFull, bool expectPending)
  1393. {
  1394. for (uint i = 0; i < allocCount; i++)
  1395. {
  1396. LargeObjectHeader * header = this->HeaderList()[i];
  1397. if (header == nullptr)
  1398. {
  1399. continue;
  1400. }
  1401. #if defined(PARTIAL_GC_ENABLED) && defined(CONCURRENT_GC_ENABLED)
  1402. header = (LargeObjectHeader *)((size_t)header & ~PartialFreeBit);
  1403. Assert(this->hasPartialFreeObjects || header == this->HeaderList()[i]);
  1404. #endif
  1405. Assert(header->objectIndex == i);
  1406. }
  1407. }
  1408. #endif
// Fill freed memory at [address, address + size) with a recognizable
// pattern. Only active in verify/debug-fill builds; release builds rely on
// the page allocator zeroing pages before reuse.
void LargeHeapBlock::FillFreeMemory(Recycler * recycler, __in_bcount(size) void * address, size_t size)
{
    // For now, we don't do anything in release build because we don't reuse this memory until we return
    // the pages to the allocator which will zero out the whole page
#ifdef RECYCLER_MEMORY_VERIFY
    if (recycler->VerifyEnabled())
    {
        // Verify builds expect VerifyMemFill in freed ranges; Verify() checks
        // gaps between objects with Recycler::VerifyCheckFill.
        memset(address, Recycler::VerifyMemFill, size);
        return;
    }
#endif
#ifdef RECYCLER_FREE_MEM_FILL
    // Debug fill so use-after-free reads are easy to recognize in a dump.
    memset(address, DbgMemFill, size);
#endif
}
  1424. size_t LargeHeapBlock::GetObjectSize(void* objectAddress)
  1425. {
  1426. LargeObjectHeader * header = GetHeader(objectAddress);
  1427. Assert((char *)header >= this->address);
  1428. return header->objectSize;
  1429. }
#ifdef RECYCLER_MEMORY_VERIFY
// Walk the block from its base address and verify its layout:
// - headers (and free-list entries for empty slots) appear in address order,
// - the gaps between objects carry the verify fill pattern,
// - each object's pad bytes are intact,
// - each header records its own slot index,
// - the number of objects with FinalizeBit matches this->finalizeCount.
void
LargeHeapBlock::Verify(Recycler * recycler)
{
    // Running cursor: end of the previously verified object; everything
    // between lastAddress and the next header must be fill pattern.
    char * lastAddress = this->address;
    uint verifyFinalizeCount = 0;
    for (uint i = 0; i < allocCount; i++)
    {
        LargeObjectHeader * header = this->HeaderList()[i];
        if (header == nullptr)
        {
            // Check if the object is on the free list
            LargeHeapBlockFreeListEntry* current = this->freeList.entries;
            while (current != nullptr)
            {
                // Verify the free listed object
                if (current->headerIndex == i)
                {
                    // The free-list entry occupies the old header location;
                    // the object payload follows it.
                    BYTE* objectAddress = (BYTE *)current + sizeof(LargeObjectHeader);
                    Recycler::VerifyCheck(current->heapBlock == this, L"Invalid heap block", this, current->heapBlock);
                    Recycler::VerifyCheck((char *)current >= lastAddress, L"LargeHeapBlock invalid object header order", this->address, current);
                    Recycler::VerifyCheckFill(lastAddress, (char *)current - lastAddress);
                    recycler->VerifyCheckPad(objectAddress, current->objectSize);
                    lastAddress = (char *) objectAddress + current->objectSize;
                    break;
                }
                current = current->next;
            }
            continue;
        }
        // Live object: header must come after the previous object, the gap
        // must be fill pattern, and the header must name this slot.
        Recycler::VerifyCheck((char *)header >= lastAddress, L"LargeHeapBlock invalid object header order", this->address, header);
        Recycler::VerifyCheckFill(lastAddress, (char *)header - lastAddress);
        Recycler::VerifyCheck(header->objectIndex == i, L"LargeHeapBlock object index mismatch", this->address, &header->objectIndex);
        recycler->VerifyCheckPad((BYTE *)header->GetAddress(), header->objectSize);
        verifyFinalizeCount += ((header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit) != 0);
        lastAddress = (char *)header->GetAddress() + header->objectSize;
    }
    Recycler::VerifyCheck(verifyFinalizeCount == this->finalizeCount, L"LargeHeapBlock finalize object count mismatch", this->address, &this->finalizeCount);
}
#endif
  1470. uint
  1471. LargeHeapBlock::GetMarkCount()
  1472. {
  1473. uint markCount = 0;
  1474. const HeapBlockMap& heapBlockMap = this->heapInfo->recycler->heapBlockMap;
  1475. for (uint i = 0; i < allocCount; i++)
  1476. {
  1477. LargeObjectHeader* header = this->HeaderList()[i];
  1478. if (header && header->objectIndex == i && heapBlockMap.IsMarked(header->GetAddress()))
  1479. {
  1480. markCount++;
  1481. }
  1482. }
  1483. return markCount;
  1484. }
  1485. #ifdef RECYCLER_PERF_COUNTERS
  1486. void
  1487. LargeHeapBlock::UpdatePerfCountersOnFree()
  1488. {
  1489. Assert(GetMarkCount() == 0);
  1490. size_t usedCount = 0;
  1491. size_t usedBytes = 0;
  1492. for (uint i = 0; i < allocCount; i++)
  1493. {
  1494. LargeObjectHeader * header = this->HeaderList()[i];
  1495. if (header == nullptr)
  1496. {
  1497. continue;
  1498. }
  1499. usedCount++;
  1500. usedBytes += header->objectSize;
  1501. }
  1502. RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockLiveObject, usedCount);
  1503. RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockLiveObjectSize, usedBytes);
  1504. RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockFreeObjectSize, this->GetPageCount() * AutoSystemInfo::PageSize - usedBytes);
  1505. RECYCLER_PERF_COUNTER_SUB(LiveObject, usedCount);
  1506. RECYCLER_PERF_COUNTER_SUB(LiveObjectSize, usedBytes);
  1507. RECYCLER_PERF_COUNTER_SUB(FreeObjectSize, this->GetPageCount() * AutoSystemInfo::PageSize - usedBytes);
  1508. }
  1509. #endif
  1510. #ifdef PROFILE_RECYCLER_ALLOC
  1511. void *
  1512. LargeHeapBlock::GetTrackerData(void * address)
  1513. {
  1514. Assert(Recycler::DoProfileAllocTracker());
  1515. LargeObjectHeader * header = GetHeader(address);
  1516. Assert((char *)header >= this->address);
  1517. uint index = header->objectIndex;
  1518. Assert(index < this->allocCount);
  1519. Assert(this->HeaderList()[index] == header);
  1520. return this->GetTrackerDataArray()[index];
  1521. }
  1522. void
  1523. LargeHeapBlock::SetTrackerData(void * address, void * data)
  1524. {
  1525. Assert(Recycler::DoProfileAllocTracker());
  1526. LargeObjectHeader * header = GetHeader(address);
  1527. Assert((char *)header >= this->address);
  1528. uint index = header->objectIndex;
  1529. Assert(index < this->allocCount);
  1530. Assert(this->HeaderList()[index] == header);
  1531. this->GetTrackerDataArray()[index] = data;
  1532. }
  1533. void **
  1534. LargeHeapBlock::GetTrackerDataArray()
  1535. {
  1536. // See LargeHeapBlock::GetAllocPlusSize for layout description
  1537. return (void **)((char *)(this + 1) + LargeHeapBlock::GetAllocPlusSize(this->objectCount) - this->objectCount * sizeof(void *));
  1538. }
  1539. #endif