LargeHeapBlock.cpp 70 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "CommonMemoryPch.h"
// The large object header must pack into exactly one or two allocation
// granules so object payloads stay aligned on ObjectGranularity.
CompileAssert(
    sizeof(LargeObjectHeader) == HeapConstants::ObjectGranularity ||
    sizeof(LargeObjectHeader) == HeapConstants::ObjectGranularity * 2);
#ifdef STACK_BACK_TRACE
// Non-null, non-dereferenceable sentinel meaning "capturing the page-heap
// allocation stack trace failed" (distinct from "no trace captured").
const StackBackTrace* LargeHeapBlock::s_StackTraceAllocFailed = (StackBackTrace*)1;
#endif
  12. void *
  13. LargeObjectHeader::GetAddress() { return ((char *)this) + sizeof(LargeObjectHeader); }
  14. #ifdef LARGEHEAPBLOCK_ENCODING
  15. // decodedNext = decoded next field
  16. // decodedAttributes = decoded attributes part of attributesAndChecksum
  17. // Decode 'next' and 'attributes' using _cookie
  18. // If next=B1B2B3B4, checksum = (B1^B2^B3^B4^attributes)
  19. // Encode 'next' and 'attributes' using _cookie
  20. unsigned char
  21. LargeObjectHeader::CalculateCheckSum(LargeObjectHeader* decodedNext, unsigned char decodedAttributes)
  22. {
  23. unsigned char checksum = 0;
  24. byte *nextField = (byte *)&decodedNext;
  25. checksum = nextField[0] ^ nextField[1] ^ nextField[2] ^ nextField[3] ^ decodedAttributes;
  26. return checksum;
  27. }
  28. LargeObjectHeader*
  29. LargeObjectHeader::EncodeNext(uint cookie, LargeObjectHeader* next)
  30. {
  31. return (LargeObjectHeader *)((uintptr_t)next ^ cookie);
  32. }
  33. ushort
  34. LargeObjectHeader::EncodeAttributesAndChecksum(uint cookie, ushort attributesAndChecksum)
  35. {
  36. return attributesAndChecksum ^ (ushort)cookie;
  37. }
  38. LargeObjectHeader*
  39. LargeObjectHeader::DecodeNext(uint cookie, LargeObjectHeader* next) { return EncodeNext(cookie, next); }
  40. ushort
  41. LargeObjectHeader::DecodeAttributesAndChecksum(uint cookie) { return EncodeAttributesAndChecksum(cookie, this->attributesAndChecksum); }
#else
// If heap block encoding is disabled then have an API to expose
// pointer to attributes so that can be passed to RecyclerHeapObjectInfo()
// which updates the attributes field.
unsigned char *
LargeObjectHeader::GetAttributesPtr()
{
    // Handing out a raw pointer is only safe because the attribute byte is
    // stored unencoded in this configuration.
    return &this->attributes;
}
#endif
// Stores 'next' in the header. With encoding enabled the checksum covers the
// next pointer's bytes, so it is recomputed and the packed
// (attributes << 8 | checksum) word plus the pointer are re-encoded with the
// recycler cookie before being written back.
void
LargeObjectHeader::SetNext(uint cookie, LargeObjectHeader* next)
{
#ifdef LARGEHEAPBLOCK_ENCODING
    ushort decodedAttributesAndChecksum = this->DecodeAttributesAndChecksum(cookie);
    // Calculate the checksum value with new next
    unsigned char newCheckSumValue = this->CalculateCheckSum(next, (unsigned char)(decodedAttributesAndChecksum >> 8));
    // pack the (attribute + checksum): attributes stay in the high byte,
    // fresh checksum goes in the low byte
    ushort newAttributeWithCheckSum = (decodedAttributesAndChecksum & 0xFF00) | newCheckSumValue;
    // encode the packed (attribute + checksum), next and set them
    this->attributesAndChecksum = this->EncodeAttributesAndChecksum(cookie, newAttributeWithCheckSum);
    this->next = this->EncodeNext(cookie, next);
#else
    this->next = next;
#endif
}
// Returns the decoded next pointer after validating the metadata checksum.
// A mismatch indicates header corruption and fails fast via
// LargeHeapBlock_Metadata_Corrupted.
LargeObjectHeader *
LargeObjectHeader::GetNext(uint cookie)
{
#ifdef LARGEHEAPBLOCK_ENCODING
    LargeObjectHeader *decodedNext = this->DecodeNext(cookie, this->next);
    ushort decodedAttributesAndChecksum = this->DecodeAttributesAndChecksum(cookie);
    // Low byte holds the stored checksum; high byte holds the attributes.
    unsigned char checkSum = (unsigned char)(decodedAttributesAndChecksum & 0xFF);
    unsigned char calculatedCheckSumField = this->CalculateCheckSum(decodedNext, (unsigned char)(decodedAttributesAndChecksum >> 8));
    if (checkSum != calculatedCheckSumField)
    {
        LargeHeapBlock_Metadata_Corrupted((ULONG_PTR)this, calculatedCheckSumField);
    }
    // If checksum matches return the up-to-date next (in case other thread changed it from last time
    // we read it in this method.
    return this->DecodeNext(cookie, this->next);
#else
    return this->next;
#endif
}
// Stores a new attribute byte. With encoding enabled the checksum also covers
// the next pointer, so it is recomputed against the current (decoded) next
// and the packed word is re-encoded with the cookie.
void
LargeObjectHeader::SetAttributes(uint cookie, unsigned char attributes)
{
#ifdef LARGEHEAPBLOCK_ENCODING
    LargeObjectHeader *decodedNext = this->DecodeNext(cookie, this->next);
    // Calculate the checksum value with new attribute
    unsigned char newCheckSumValue = this->CalculateCheckSum(decodedNext, attributes);
    // pack the (attribute + checksum): attributes high byte, checksum low byte
    ushort newAttributeWithCheckSum = ((ushort)attributes << 8) | newCheckSumValue;
    // encode the packed (attribute + checksum) and set it
    this->attributesAndChecksum = this->EncodeAttributesAndChecksum(cookie, newAttributeWithCheckSum);
#else
    this->attributes = attributes;
#endif
}
// Returns the decoded attribute byte after validating the metadata checksum;
// fails fast via LargeHeapBlock_Metadata_Corrupted on a mismatch.
unsigned char
LargeObjectHeader::GetAttributes(uint cookie)
{
#ifdef LARGEHEAPBLOCK_ENCODING
    LargeObjectHeader *decodedNext = this->DecodeNext(cookie, this->next);
    ushort decodedAttributesAndChecksum = this->DecodeAttributesAndChecksum(cookie);
    // Low byte holds the stored checksum; high byte holds the attributes.
    unsigned char checkSum = (unsigned char)(decodedAttributesAndChecksum & 0xFF);
    unsigned char calculatedCheckSumField = this->CalculateCheckSum(decodedNext, (unsigned char)(decodedAttributesAndChecksum >> 8));
    if (checkSum != calculatedCheckSumField)
    {
        LargeHeapBlock_Metadata_Corrupted((ULONG_PTR)this, calculatedCheckSumField);
    }
    // If checksum matches return the up-to-date attributes (in case other thread changed it from last time
    // we read it in this method.
    return this->DecodeAttributesAndChecksum(cookie) >> 8;
#else
    return this->attributes;
#endif
}
  121. size_t
  122. LargeHeapBlock::GetAllocPlusSize(uint objectCount)
  123. {
  124. // Large Heap Block Layout:
  125. // LargeHeapBlock
  126. // LargeObjectHeader * [objectCount]
  127. // TrackerData * [objectCount] (Optional)
  128. size_t allocPlusSize = objectCount * (sizeof(LargeObjectHeader *));
  129. #ifdef PROFILE_RECYCLER_ALLOC
  130. if (Recycler::DoProfileAllocTracker())
  131. {
  132. allocPlusSize += objectCount * sizeof(void *);
  133. }
  134. #endif
  135. return allocPlusSize;
  136. }
// Allocates a LargeHeapBlock plus its trailing per-object metadata (see
// GetAllocPlusSize) from the NoMemProtect heap, zero-initialized; returns
// null on failure (no-throw variant).
LargeHeapBlock *
LargeHeapBlock::New(__in char * address, size_t pageCount, Segment * segment, uint objectCount, LargeHeapBucket* bucket)
{
    return NoMemProtectHeapNewNoThrowPlusZ(GetAllocPlusSize(objectCount), LargeHeapBlock, address, pageCount, segment, objectCount, bucket);
}
// Frees a block created by LargeHeapBlock::New, including the trailing
// per-object metadata that was allocated together with it.
void
LargeHeapBlock::Delete(LargeHeapBlock * heapBlock)
{
    NoMemProtectHeapDeletePlus(GetAllocPlusSize(heapBlock->objectCount), heapBlock);
}
// Constructs a block spanning 'pageCount' pages at 'address' inside
// 'segment', holding up to 'objectCount' objects for 'bucket'. The block is
// allocated zero-filled (see New), which the Asserts below rely on for the
// members not explicitly initialized here.
LargeHeapBlock::LargeHeapBlock(__in char * address, size_t pageCount, Segment * segment, uint objectCount, LargeHeapBucket* bucket)
    : HeapBlock(LargeBlockType), pageCount(pageCount), allocAddressEnd(address), objectCount(objectCount), bucket(bucket), freeList(this)
#if defined(RECYCLER_PAGE_HEAP) && defined(STACK_BACK_TRACE)
    , pageHeapAllocStack(nullptr), pageHeapFreeStack(nullptr)
#endif
#if DBG
    , wbVerifyBits(&HeapAllocator::Instance)
#endif
{
    Assert(address != nullptr);
    Assert(pageCount != 0);
    Assert(objectCount != 0);
    // Zero-initialized allocation guarantees these start out cleared.
    Assert(lastCollectAllocCount == 0);
    Assert(finalizeCount == 0);
    Assert(next == nullptr);
    Assert(!hasPartialFreeObjects);
    this->address = address;
    this->segment = segment;
#if ENABLE_CONCURRENT_GC
    this->isPendingConcurrentSweep = false;
#endif
    this->addressEnd = this->address + this->pageCount * AutoSystemInfo::PageSize;
    RECYCLER_PERF_COUNTER_INC(LargeHeapBlockCount);
    RECYCLER_PERF_COUNTER_ADD(LargeHeapBlockPageSize, pageCount * AutoSystemInfo::PageSize);
}
// The block's pages must already have been released (segment == nullptr)
// unless the whole large-block page allocator is being torn down.
LargeHeapBlock::~LargeHeapBlock()
{
    AssertMsg(this->segment == nullptr || this->heapInfo->recycler->recyclerLargeBlockPageAllocator.IsClosed(),
        "ReleasePages needs to be called before delete");
    RECYCLER_PERF_COUNTER_DEC(LargeHeapBlockCount);
#if defined(RECYCLER_PAGE_HEAP) && defined(STACK_BACK_TRACE)
    if (this->pageHeapAllocStack != nullptr)
    {
        // s_StackTraceAllocFailed is a sentinel, not a real trace; don't free it.
        if (this->pageHeapAllocStack != s_StackTraceAllocFailed)
        {
            this->pageHeapAllocStack->Delete(&NoThrowHeapAllocator::Instance);
        }
        this->pageHeapAllocStack = nullptr;
    }
    // REVIEW: This means that the old free stack is lost when we get free the heap block
    // Is this okay? Should we delay freeing heap blocks till process/thread shutdown time?
    if (this->pageHeapFreeStack != nullptr)
    {
        this->pageHeapFreeStack->Delete(&NoThrowHeapAllocator::Instance);
        this->pageHeapFreeStack = nullptr;
    }
#endif
}
  195. Recycler *
  196. LargeHeapBlock::GetRecycler() const
  197. {
  198. return this->bucket->heapInfo->recycler;
  199. }
  200. LargeObjectHeader **
  201. LargeHeapBlock::HeaderList()
  202. {
  203. // See LargeHeapBlock::GetAllocPlusSize for layout description
  204. return (LargeObjectHeader **)(((byte *)this) + sizeof(LargeHeapBlock));
  205. }
// Runs Finalize + Dispose on every live finalizable object in the block,
// then disposes everything queued on the pending-dispose list. The 'true'
// argument to Finalize/Dispose presumably signals shutdown — confirm against
// FinalizableObject's contract.
void
LargeHeapBlock::FinalizeAllObjects()
{
    if (this->finalizeCount != 0)
    {
        DebugOnly(uint processedCount = 0);
        // Pass 1: live objects still present in the header list.
        for (uint i = 0; i < allocCount; i++)
        {
            LargeObjectHeader * header = this->GetHeader(i);
            // Skip freed slots and non-finalizable objects.
            if (header == nullptr || ((header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit) == 0))
            {
                continue;
            }
            FinalizableObject * finalizableObject = ((FinalizableObject *)header->GetAddress());
            finalizableObject->Finalize(true);
            finalizableObject->Dispose(true);
#ifdef RECYCLER_FINALIZE_CHECK
            this->heapInfo->liveFinalizableObjectCount--;
#endif
            DebugOnly(processedCount++);
        }
        // Pass 2: objects already finalized and queued for dispose; their
        // header-list slots were cleared when they were queued (asserted).
        while (pendingDisposeObject != nullptr)
        {
            LargeObjectHeader * header = pendingDisposeObject;
            pendingDisposeObject = header->GetNext(this->heapInfo->recycler->Cookie);
            Assert(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit);
            Assert(this->HeaderList()[header->objectIndex] == nullptr);
            void * objectAddress = header->GetAddress();
            ((FinalizableObject *)objectAddress)->Dispose(true);
#ifdef RECYCLER_FINALIZE_CHECK
            this->heapInfo->liveFinalizableObjectCount--;
            this->heapInfo->pendingDisposableObjectCount--;
#endif
            DebugOnly(processedCount++);
        }
        // Every finalizable object must have been handled by one of the passes.
        Assert(this->finalizeCount == processedCount);
    }
}
// Shutdown path: only clears debug bookkeeping. The pages themselves are
// intentionally not released — the (already closed) page allocator frees
// them in bulk, which is faster.
void
LargeHeapBlock::ReleasePagesShutdown(Recycler * recycler)
{
#if DBG
    recycler->heapBlockMap.ClearHeapBlock(this->address, this->pageCount);
    // Don't release the page in shut down, the page allocator will release them faster
    Assert(recycler->recyclerLargeBlockPageAllocator.IsClosed());
#endif
}
// Sweep path: unregister the block from the heap block map first, then return
// its pages to the page allocator.
void
LargeHeapBlock::ReleasePagesSweep(Recycler * recycler)
{
    recycler->heapBlockMap.ClearHeapBlock(this->address, this->pageCount);
    ReleasePages(recycler);
}
  259. #ifdef RECYCLER_PAGE_HEAP
  260. _NOINLINE
  261. void LargeHeapBlock::VerifyPageHeapPattern()
  262. {
  263. Assert(InPageHeapMode());
  264. Assert(this->allocCount > 0);
  265. byte* objectEndAddress = (byte*)this->allocAddressEnd;
  266. byte* addrEnd = (byte*)this->addressEnd;
  267. for (int i = 0; objectEndAddress + i < (byte*)addrEnd; i++)
  268. {
  269. byte current = objectEndAddress[i];
  270. if (current != 0xF0u)
  271. {
  272. Assert(false);
  273. ReportFatalException(NULL, E_FAIL, Fatal_Recycler_MemoryCorruption, 2);
  274. }
  275. }
  276. }
  277. #endif
// Returns this block's pages to the recycler's large-block page allocator.
// In page-heap mode it also validates the trailing fill pattern and, when a
// guard page was placed, restores its protection and includes it in the
// release. Clears 'segment' to mark the block as released.
void
LargeHeapBlock::ReleasePages(Recycler * recycler)
{
    Assert(segment != nullptr);
    char* blockStartAddress = this->address;
    size_t realPageCount = this->pageCount;
#ifdef RECYCLER_PAGE_HEAP
    if (InPageHeapMode())
    {
        Assert(((LargeObjectHeader*)this->address)->isPageHeapFillVerified || this->allocCount == 0);
        if (this->allocCount > 0) // in case OOM while adding heapblock to heapBlockMap, we release page before setting the pattern
        {
            Assert(this->allocCount == 1); // one object per heapblock in pageheap
            VerifyPageHeapPattern();
        }
        if (guardPageAddress != nullptr)
        {
            if (this->pageHeapMode == PageHeapMode::PageHeapModeBlockStart)
            {
                // Guard page precedes the block, so the release starts there.
                blockStartAddress = guardPageAddress;
            }
            realPageCount = this->actualPageCount;
            size_t guardPageCount = this->actualPageCount - this->pageCount;
            // Guard pages were PAGE_NOACCESS; restore read/write before handing
            // them back to the allocator (asserted below).
            DWORD oldProtect;
            BOOL ret = ::VirtualProtect(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, PAGE_READWRITE, &oldProtect);
            Assert(ret && oldProtect == PAGE_NOACCESS);
        }
    }
#endif
#ifdef RECYCLER_FREE_MEM_FILL
    // Debug fill so stale reads of freed memory are recognizable.
    memset(this->address, DbgMemFill, AutoSystemInfo::PageSize * pageCount);
#endif
    IdleDecommitPageAllocator* pageAllocator = recycler->GetRecyclerLargeBlockPageAllocator();
    pageAllocator->Release(blockStartAddress, realPageCount, segment);
    RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockPageSize, pageCount * AutoSystemInfo::PageSize);
    this->segment = nullptr;
}
  315. BOOL
  316. LargeHeapBlock::IsValidObject(void* objectAddress)
  317. {
  318. LargeObjectHeader * header = GetHeader(objectAddress);
  319. return ((char *)header >= this->address && header->objectIndex < this->allocCount && this->HeaderList()[header->objectIndex] == header);
  320. }
  321. #if DBG
  322. BOOL
  323. LargeHeapBlock::IsFreeObject(void * objectAddress)
  324. {
  325. LargeObjectHeader * header = GetHeader(objectAddress);
  326. return ((char *)header >= this->address && header->objectIndex < this->allocCount && this->GetHeader(header->objectIndex) == nullptr);
  327. }
  328. #endif
  329. bool
  330. LargeHeapBlock::TryGetAttributes(void* objectAddress, unsigned char * pAttr)
  331. {
  332. return this->TryGetAttributes(GetHeader(objectAddress), pAttr);
  333. }
// Retrieves the attributes for 'header' into *pAttr. Returns false when the
// header is not a live, allocated object of this block: pointer before the
// block start, index beyond the allocated range, or header-list slot not
// pointing back at the header.
bool
LargeHeapBlock::TryGetAttributes(LargeObjectHeader * header, unsigned char * pAttr)
{
    if ((char *)header < this->address)
    {
        return false;
    }
    uint index = header->objectIndex;
    if (index >= this->allocCount)
    {
        // Not allocated yet.
        return false;
    }
    if (this->HeaderList()[index] != header)
    {
        // header doesn't match, not a real object
        return false;
    }
    if (this->InPageHeapMode())
    {
        // Validate the trailing fill pattern before trusting the header.
        this->VerifyPageHeapPattern();
    }
    *pAttr = header->GetAttributes(this->heapInfo->recycler->Cookie);
    return true;
}
  359. size_t
  360. LargeHeapBlock::GetPagesNeeded(size_t size, bool multiplyRequest)
  361. {
  362. if (multiplyRequest)
  363. {
  364. size = AllocSizeMath::Mul(size, 4);
  365. }
  366. uint pageSize = AutoSystemInfo::PageSize;
  367. size = AllocSizeMath::Add(size, sizeof(LargeObjectHeader) + (pageSize - 1));
  368. if (size == (size_t)-1)
  369. {
  370. return 0;
  371. }
  372. size_t pageCount = size / pageSize;
  373. return pageCount;
  374. }
// First-fit allocation from this block's free list. Returns the allocated
// object's address, or null when no entry can hold 'size'. Unlinks the
// consumed entry and unregisters the free list from the bucket once empty.
char*
LargeHeapBlock::TryAllocFromFreeList(size_t size, ObjectInfoBits attributes)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    LargeHeapBlockFreeListEntry** prev = &this->freeList.entries;
    LargeHeapBlockFreeListEntry* freeListEntry = this->freeList.entries;
    char* memBlock = nullptr;
    // Walk through the free list, find the first entry that can fit our desired size
    while (freeListEntry)
    {
        LargeHeapBlockFreeListEntry* next = freeListEntry->next;
        LargeHeapBlock* heapBlock = freeListEntry->heapBlock;
        if (freeListEntry->objectSize >= size)
        {
            memBlock = heapBlock->AllocFreeListEntry(size, attributes, freeListEntry);
            if (memBlock)
            {
                // Unlink the consumed entry and stop searching.
                (*prev) = next;
                break;
            }
        }
        prev = &freeListEntry->next;
        freeListEntry = freeListEntry->next;
    }
    if (this->freeList.entries == nullptr)
    {
        // List drained; the bucket no longer needs to consider it.
        this->bucket->UnregisterFreeList(&this->freeList);
    }
    return memBlock;
}
// Re-allocates the freed slot described by 'entry' for a new object of
// 'size' bytes (size must fit within the slot's original object size).
// Returns the object address, or null when the request doesn't fit. The
// free-list entry overlays the old object header, which is re-initialized
// in place.
char*
LargeHeapBlock::AllocFreeListEntry(size_t size, ObjectInfoBits attributes, LargeHeapBlockFreeListEntry* entry)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    Assert(HeapInfo::IsAlignedSize(size));
    AssertMsg((attributes & TrackBit) == 0, "Large tracked object collection not implemented");
    Assert(entry->heapBlock == this);
    Assert(entry->headerIndex < this->objectCount);
    Assert(this->HeaderList()[entry->headerIndex] == nullptr);
    uint headerIndex = entry->headerIndex;
    size_t originalSize = entry->objectSize;
    LargeObjectHeader * header = (LargeObjectHeader *) entry;
    char * allocObject = ((char*) entry) + sizeof(LargeObjectHeader); // shouldn't overflow
    char * newAllocAddressEnd = allocObject + size;
    char * originalAllocEnd = allocObject + originalSize;
    // Reject on running past the block, pointer wrap-around, or exceeding the
    // freed slot's original extent.
    if (newAllocAddressEnd > addressEnd || newAllocAddressEnd < allocObject || (originalAllocEnd < newAllocAddressEnd))
    {
        return nullptr;
    }
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->heapInfo->recycler->VerifyEnabled())
    {
        // The freed payload must still carry the verification fill.
        this->heapInfo->recycler->VerifyCheckFill(allocObject , originalSize);
    }
#endif
    // Clear the whole slot (header + payload) before reuse.
    memset(entry, 0, sizeof(LargeObjectHeader) + originalSize);
#ifdef RECYCLER_MEMORY_VERIFY
    // If we're in recyclerVerify mode, fill the non-header part of the allocation
    // with the verification pattern
    if (this->heapInfo->recycler->VerifyEnabled())
    {
        memset(allocObject, Recycler::VerifyMemFill, originalSize);
    }
#endif
#if DBG
    LargeAllocationVerboseTrace(this->heapInfo->recycler->GetRecyclerFlagsTable(), _u("Allocated object of size 0x%x in from free list entry at address 0x%p\n"), size, allocObject);
#endif
    Assert(allocCount <= objectCount);
    header->objectIndex = headerIndex;
    // Keep the slot's original size so the full extent can return to the
    // free list again when this object is freed.
    header->objectSize = originalSize;
#ifdef RECYCLER_WRITE_BARRIER
    header->hasWriteBarrier = (attributes & WithBarrierBit) == WithBarrierBit;
#endif
    header->SetAttributes(this->heapInfo->recycler->Cookie, (attributes & StoredObjectInfoBitMask));
    header->markOnOOMRescan = false;
    header->SetNext(this->heapInfo->recycler->Cookie, nullptr);
    HeaderList()[headerIndex] = header;
    finalizeCount += ((attributes & FinalizeBit) != 0);
#ifdef RECYCLER_FINALIZE_CHECK
    if (attributes & FinalizeBit)
    {
        HeapInfo * heapInfo = this->heapInfo;
        heapInfo->liveFinalizableObjectCount++;
        heapInfo->newFinalizableObjectCount++;
    }
#endif
    return allocObject;
}
// Bump-allocates 'size' bytes (plus a header) from the block's unused tail.
// Returns the object address, or null when the remaining space can't hold
// the header plus object.
char*
LargeHeapBlock::Alloc(size_t size, ObjectInfoBits attributes)
{
    Assert(HeapInfo::IsAlignedSize(size) || InPageHeapMode());
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    AssertMsg((attributes & TrackBit) == 0, "Large tracked object collection not implemented");
    LargeObjectHeader * header = (LargeObjectHeader *)allocAddressEnd;
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
    Assert(!IsPartialSweptHeader(header));
#endif
    char * allocObject = allocAddressEnd + sizeof(LargeObjectHeader); // shouldn't overflow
    char * newAllocAddressEnd = allocObject + size;
    // Reject if the allocation runs past the block or the pointer wraps.
    if (newAllocAddressEnd > addressEnd || newAllocAddressEnd < allocObject)
    {
        return nullptr;
    }
    Recycler* recycler = this->heapInfo->recycler;
#if DBG
    LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("Allocated object of size 0x%x in existing heap block at address 0x%p\n"), size, allocObject);
#endif
    Assert(allocCount < objectCount);
    allocAddressEnd = newAllocAddressEnd;
#ifdef RECYCLER_ZERO_MEM_CHECK
    // Freshly carved header memory must still be zero-filled.
    recycler->VerifyZeroFill(header, sizeof(LargeObjectHeader));
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    if (recycler->VerifyEnabled())
    {
        memset(header, 0, sizeof(LargeObjectHeader));
    }
#endif
    header->objectIndex = allocCount;
    header->objectSize = size;
#ifdef RECYCLER_WRITE_BARRIER
    header->hasWriteBarrier = (attributes&WithBarrierBit) == WithBarrierBit;
#endif
    // Only the persistently stored attribute bits go into the header.
    header->SetAttributes(recycler->Cookie, (attributes & StoredObjectInfoBitMask));
    HeaderList()[allocCount++] = header;
    finalizeCount += ((attributes & FinalizeBit) != 0);
#ifdef RECYCLER_FINALIZE_CHECK
    if (attributes & FinalizeBit)
    {
        HeapInfo * heapInfo = this->heapInfo;
        heapInfo->liveFinalizableObjectCount++;
        heapInfo->newFinalizableObjectCount++;
    }
#endif
    return allocObject;
}
// Marks the object at 'objectAddress' and queues its contents for scanning.
// On mark-stack OOM the block is flagged for rescan and, for multi-page
// blocks, the individual object is flagged as well.
template <bool doSpecialMark>
_NOINLINE
void
LargeHeapBlock::Mark(void* objectAddress, MarkContext * markContext)
{
    LargeObjectHeader * header = GetHeader(objectAddress);
    unsigned char attributes = ObjectInfoBits::NoBit;
    // Ignore addresses that don't resolve to a live object in this block.
    if (!this->TryGetAttributes(header, &attributes))
    {
        return;
    }
    DUMP_OBJECT_REFERENCE(markContext->GetRecycler(), objectAddress);
    size_t objectSize = header->objectSize;
    if (this->InPageHeapMode())
    {
        // trim off the trailing part which is not a pointer
        objectSize = HeapInfo::RoundObjectSize(objectSize);
        if (objectSize == 0)
        {
            // finalizable object must bigger than a pointer size because of the vtable
            Assert((attributes & FinalizeBit) == 0);
            return;
        }
    }
    if (!UpdateAttributesOfMarkedObjects<doSpecialMark>(markContext, objectAddress, objectSize, attributes,
        [&](unsigned char attributes) { header->SetAttributes(this->heapInfo->recycler->Cookie, attributes); }))
    {
        // Couldn't mark children- bail out and come back later
        this->SetNeedOOMRescan(markContext->GetRecycler());
        // Single page large heap block rescan all marked object on oom rescan
        if (this->GetPageCount() != 1)
        {
            // Failed to mark the objects referenced by this object, so we'll
            // revisit this on rescan
            header->markOnOOMRescan = true;
        }
    }
}
template void LargeHeapBlock::Mark<true>(void* objectAddress, MarkContext * markContext);
template void LargeHeapBlock::Mark<false>(void* objectAddress, MarkContext * markContext);
  552. bool
  553. LargeHeapBlock::TestObjectMarkedBit(void* objectAddress)
  554. {
  555. Assert(IsValidObject(objectAddress));
  556. LargeObjectHeader* pHeader = nullptr;
  557. if (GetObjectHeader(objectAddress, &pHeader))
  558. {
  559. Recycler* recycler = this->heapInfo->recycler;
  560. return recycler->heapBlockMap.IsMarked(objectAddress);
  561. }
  562. return FALSE;
  563. }
  564. void
  565. LargeHeapBlock::SetObjectMarkedBit(void* objectAddress)
  566. {
  567. Assert(IsValidObject(objectAddress));
  568. LargeObjectHeader* pHeader = nullptr;
  569. if (GetObjectHeader(objectAddress, &pHeader))
  570. {
  571. Recycler* recycler = this->heapInfo->recycler;
  572. recycler->heapBlockMap.SetMark(objectAddress);
  573. }
  574. }
// Populates 'heapObject' for the object at 'objectAddress'; returns false
// when the address is not a live object of this block. With header encoding
// enabled, the header itself is attached to the info object instead of a raw
// attributes pointer (the attribute byte is encoded in the header).
bool
LargeHeapBlock::FindImplicitRootObject(void* objectAddress, Recycler * recycler, RecyclerHeapObjectInfo& heapObject)
{
    if (!IsValidObject(objectAddress))
    {
        return false;
    }
    LargeObjectHeader* pHeader = nullptr;
    if (!GetObjectHeader(objectAddress, &pHeader))
    {
        return false;
    }
#ifdef LARGEHEAPBLOCK_ENCODING
    heapObject = RecyclerHeapObjectInfo(objectAddress, recycler, this, nullptr);
    heapObject.SetLargeHeapBlockHeader(pHeader);
#else
    heapObject = RecyclerHeapObjectInfo(objectAddress, recycler, this, pHeader->GetAttributesPtr());
#endif
    return true;
}
// FindHeapObjectFlags are ignored for large heap blocks; lookup is identical
// to FindImplicitRootObject.
bool
LargeHeapBlock::FindHeapObject(void* objectAddress, Recycler * recycler, FindHeapObjectFlags, RecyclerHeapObjectInfo& heapObject)
{
    // Currently the same actual implementation (flags is ignored)
    return FindImplicitRootObject(objectAddress, recycler, heapObject);
}
  601. bool
  602. LargeHeapBlock::GetObjectHeader(void* objectAddress, LargeObjectHeader** ppHeader)
  603. {
  604. (*ppHeader) = nullptr;
  605. LargeObjectHeader * header = GetHeader(objectAddress);
  606. if ((char *)header < this->address)
  607. {
  608. return false;
  609. }
  610. uint index = header->objectIndex;
  611. if (this->HeaderList()[index] != header)
  612. {
  613. // header doesn't match, not a real object
  614. return false;
  615. }
  616. Assert(index < this->allocCount);
  617. (*ppHeader) = header;
  618. return true;
  619. }
// Prepares this block for a new mark phase: records the current
// allocCount for the subsequent sweep and, when requested, pre-marks
// implicit-root objects so they are treated as live.
void
LargeHeapBlock::ResetMarks(ResetMarkFlags flags, Recycler* recycler)
{
    Assert(!this->needOOMRescan);

    // Update the lastCollectAllocCount for sweep
    this->lastCollectAllocCount = this->allocCount;

    Assert(this->GetMarkCount() == 0);
#if ENABLE_CONCURRENT_GC
    Assert(!this->isPendingConcurrentSweep);
#endif

    if (flags & ResetMarkFlags_ScanImplicitRoot)
    {
        for (uint objectIndex = 0; objectIndex < allocCount; objectIndex++)
        {
            LargeObjectHeader * header = this->GetHeader(objectIndex);

            // check if the object index is not allocated
            if (header == nullptr)
            {
                continue;
            }

            // Implicit roots are considered live from the start of the
            // collection, so mark them up front.
            if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & ImplicitRootBit) != 0)
            {
                recycler->heapBlockMap.SetMark(header->GetAddress());
            }
        }
    }
}
// Returns the (possibly invalid) header location for an address inside
// this block; callers must validate it (see GetObjectHeader).
LargeObjectHeader *
LargeHeapBlock::GetHeader(void * objectAddress)
{
    // The address must lie within this block's address range.
    Assert(objectAddress >= this->address && objectAddress < this->addressEnd);
    return GetHeaderFromAddress(objectAddress);
}
  655. LargeObjectHeader *
  656. LargeHeapBlock::GetHeaderFromAddress(void * objectAddress)
  657. {
  658. return (LargeObjectHeader*)(((char *)objectAddress) - sizeof(LargeObjectHeader));
  659. }
  660. byte *
  661. LargeHeapBlock::GetRealAddressFromInterior(void * interiorAddress)
  662. {
  663. for (uint i = 0; i < allocCount; i++)
  664. {
  665. LargeObjectHeader * header = this->HeaderList()[i];
  666. #if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
  667. if (header != nullptr && !IsPartialSweptHeader(header))
  668. #else
  669. if (header != nullptr)
  670. #endif
  671. {
  672. Assert(header->objectIndex == i);
  673. byte * startAddress = (byte *)header->GetAddress();
  674. if (startAddress <= interiorAddress && (startAddress + header->objectSize > interiorAddress))
  675. {
  676. return startAddress;
  677. }
  678. }
  679. }
  680. return nullptr;
  681. }
  682. #ifdef RECYCLER_VERIFY_MARK
// Debug-only consistency pass: for every marked, non-leaf object in this
// block, verify each pointer-sized slot against the recycler's mark state.
void
LargeHeapBlock::VerifyMark()
{
    Assert(!this->needOOMRescan);
    Recycler* recycler = this->heapInfo->recycler;

    for (uint i = 0; i < allocCount; i++)
    {
        LargeObjectHeader * header = this->GetHeader(i);
        if (header == nullptr)
        {
            continue;
        }
        char * objectAddress = (char *)header->GetAddress();
        // Unmarked objects are dead; nothing to verify.
        if (!recycler->heapBlockMap.IsMarked(objectAddress))
        {
            continue;
        }

        unsigned char attributes = header->GetAttributes(this->heapInfo->recycler->Cookie);
        Assert((attributes & NewFinalizeBit) == 0);
        // Leaf objects contain no recycler pointers; skip scanning them.
        if ((attributes & LeafBit) != 0)
        {
            continue;
        }
        Assert(!header->markOnOOMRescan);

        // Scan the payload one pointer-sized slot at a time.
        char * objectAddressEnd = objectAddress + header->objectSize;
        while (objectAddress + sizeof(void *) <= objectAddressEnd)
        {
            void* target = *(void **)objectAddress;
            if (recycler->VerifyMark(target))
            {
                // Slot holds a verified object reference; the corresponding
                // write-barrier verification bit for this slot must be set.
                Assert(this->wbVerifyBits.Test((BVIndex)(objectAddress - this->address) / sizeof(void*)));
            }
            objectAddress += sizeof(void *);
        }
    }
}
// Debug-only check that objectAddress is a real allocated object in this
// block and is marked. Returns false for addresses that don't resolve to
// an object; flags an unmarked object via Assert (DBG) or DebugBreak.
bool
LargeHeapBlock::VerifyMark(void * objectAddress)
{
    LargeObjectHeader * header = GetHeader(objectAddress);
    if ((char *)header < this->address)
    {
        // Header would fall before the block's memory; not an object here.
        return false;
    }
    uint index = header->objectIndex;
    if (index >= this->allocCount)
    {
        // object not allocated
        return false;
    }
    if (this->HeaderList()[index] != header)
    {
        // header doesn't match, not a real object
        return false;
    }
    bool isMarked = this->heapInfo->recycler->heapBlockMap.IsMarked(objectAddress);
#if DBG
    Assert(isMarked);
#else
    // In non-DBG verification builds, surface the inconsistency loudly.
    if (!isMarked)
    {
        DebugBreak();
    }
#endif
    return isMarked;
}
  749. #endif
// Scans already-marked, non-leaf objects in this block for pointers,
// as part of the initial implicit-root scanning pass.
void
LargeHeapBlock::ScanInitialImplicitRoots(Recycler * recycler)
{
    Assert(recycler->enableScanImplicitRoots);

    const HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
    for (uint objectIndex = 0; objectIndex < allocCount; objectIndex++)
    {
        LargeObjectHeader * header = this->GetHeader(objectIndex);

        // check if the object index is not allocated
        if (header == nullptr)
        {
            continue;
        }

        // check whether the object is a leaf and doesn't need to be scanned
        if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & LeafBit) != 0)
        {
            continue;
        }

        char * objectAddress = (char *)header->GetAddress();
        // it is not marked, don't scan implicit root
        if (!heapBlockMap.IsMarked(objectAddress))
        {
            continue;
        }

        // TODO: Assume scan interior?
        DUMP_IMPLICIT_ROOT(recycler, objectAddress);

        if (this->InPageHeapMode())
        {
            size_t objectSize = header->objectSize;
            // trim off the trailing part which is not a pointer
            objectSize = HeapInfo::RoundObjectSize(objectSize);
            if (objectSize > 0) // otherwise the object total size is less than a pointer size
            {
                recycler->ScanObjectInlineInterior((void **)objectAddress, objectSize);
            }
        }
        else
        {
            recycler->ScanObjectInlineInterior((void **)objectAddress, header->objectSize);
        }
    }
}
// Marks (and scans, if non-leaf) objects flagged as implicit roots.
// TestAndSetMark ensures an already-marked object is not scanned again.
void
LargeHeapBlock::ScanNewImplicitRoots(Recycler * recycler)
{
    Assert(recycler->enableScanImplicitRoots);

    uint objectIndex = 0;
    HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
    while (objectIndex < allocCount)
    {
        LargeObjectHeader * header = this->GetHeader(objectIndex);
        objectIndex++;

        // check if the object index is not allocated
        if (header == nullptr)
        {
            continue;
        }

        // check whether the object is an implicit root
        if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & ImplicitRootBit) == 0)
        {
            continue;
        }

        char * objectAddress = (char *)header->GetAddress();
        bool marked = heapBlockMap.TestAndSetMark(objectAddress);
        if (!marked)
        {
            DUMP_IMPLICIT_ROOT(recycler, objectAddress);

            // check whether the object is a leaf and doesn't need to be scanned
            if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & LeafBit) != 0)
            {
                continue;
            }

            if (this->InPageHeapMode())
            {
                size_t objectSize = header->objectSize;
                // trim off the trailing part which is not a pointer
                objectSize = HeapInfo::RoundObjectSize(objectSize);
                if (objectSize > 0) // otherwise the object total size is less than a pointer size
                {
                    recycler->ScanObjectInlineInterior((void **)objectAddress, objectSize);
                }
            }
            else
            {
                // TODO: Assume scan interior
                recycler->ScanObjectInlineInterior((void **)objectAddress, header->objectSize);
            }
        }
    }
}
  842. #if ENABLE_CONCURRENT_GC
// Returns whether the given page was written to since the last reset,
// using either the software write barrier or OS write watch. Used during
// rescan to decide whether a page must be re-examined.
bool LargeHeapBlock::IsPageDirty(char* page, RescanFlags flags, bool isWriteBarrier)
{
#ifdef RECYCLER_WRITE_BARRIER
    // TODO: SWB, use special page allocator for large block with write barrier?
    if (CONFIG_FLAG(WriteBarrierTest))
    {
        Assert(isWriteBarrier);
    }
    if (isWriteBarrier)
    {
        // Software write barrier: consult the barrier table for this page.
        return (RecyclerWriteBarrierManager::GetWriteBarrier(page) & DIRTYBIT) == DIRTYBIT;
    }
#endif
#ifdef RECYCLER_WRITE_WATCH
    if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        ULONG_PTR count = 1;
        DWORD pageSize = AutoSystemInfo::PageSize;
        // Optionally reset the watch so subsequent writes are tracked anew.
        DWORD const writeWatchFlags = (flags & RescanFlags_ResetWriteWatch ? WRITE_WATCH_FLAG_RESET : 0);
        void * written = nullptr;
        UINT ret = GetWriteWatch(writeWatchFlags, page, AutoSystemInfo::PageSize, &written, &count, &pageSize);
        // A failed GetWriteWatch call is conservatively treated as dirty.
        bool isDirty = (ret != 0) || (count == 1);
        return isDirty;
    }
    else
    {
        Js::Throw::FatalInternalError();
    }
#else
    Js::Throw::FatalInternalError();
#endif
}
  875. #endif
// Rescans a single-page large block: if the page is dirty (or an OOM
// rescan is pending), queues every marked, non-leaf object on it for
// re-marking. Returns true if the page was processed.
#if ENABLE_CONCURRENT_GC
bool
LargeHeapBlock::RescanOnePage(Recycler * recycler, RescanFlags flags)
#else
bool
LargeHeapBlock::RescanOnePage(Recycler * recycler)
#endif
{
    Assert(this->GetPageCount() == 1);

    bool const oldNeedOOMRescan = this->needOOMRescan;
    // Reset this, we'll increment this if we OOM again
    this->needOOMRescan = false;

#if ENABLE_CONCURRENT_GC
    // don't need to get the write watch bit if we already need to oom rescan
    if (!oldNeedOOMRescan)
    {
        if (recycler->inEndMarkOnLowMemory)
        {
            // we only do oom rescan if we are on low memory mark
            return false;
        }

        // Check the write watch bit to see if we need to rescan
        // REVIEW: large object size if bigger than one page, to use header index 0 here should be OK
        bool hasWriteBarrier = false;
#ifdef RECYCLER_WRITE_BARRIER
        hasWriteBarrier = this->GetHeader(0u)->hasWriteBarrier;
#endif
        if (!IsPageDirty(this->GetBeginAddress(), flags, hasWriteBarrier))
        {
            // Nothing was written to the page since the last scan.
            return false;
        }
    }
#else
    // Shouldn't be rescanning in cases other than OOM if GetWriteWatch
    Assert(oldNeedOOMRescan);
#endif
    RECYCLER_STATS_INC(recycler, markData.rescanLargePageCount);

    for (uint objectIndex = 0; objectIndex < allocCount; objectIndex++)
    {
        LargeObjectHeader * header = this->GetHeader(objectIndex);

        // check if the object index is not allocated
        if (header == nullptr)
        {
            continue;
        }

        char * objectAddress = (char *)header->GetAddress();
        // it is not marked, don't rescan
        if (!recycler->heapBlockMap.IsMarked(objectAddress))
        {
            continue;
        }
        unsigned char attributes = header->GetAttributes(this->heapInfo->recycler->Cookie);

#ifdef RECYCLER_STATS
        if (((attributes & FinalizeBit) != 0) && ((attributes & NewFinalizeBit) != 0))
        {
            // The concurrent thread saw a false reference to this object and marked it before the attribute was set.
            // As such, our finalizeCount is not correct. Update it now.
            RECYCLER_STATS_INC(recycler, finalizeCount);
            header->SetAttributes(this->heapInfo->recycler->Cookie, (attributes & ~NewFinalizeBit));
        }
#endif

        // check whether the object is a leaf and doesn't need to be scanned
        if ((attributes & LeafBit) != 0)
        {
            continue;
        }

        RECYCLER_STATS_INC(recycler, markData.rescanLargeObjectCount);
        RECYCLER_STATS_ADD(recycler, markData.rescanLargeByteCount, header->objectSize);

        size_t objectSize = header->objectSize;
        if (this->InPageHeapMode())
        {
            // trim off the trailing part which is not a pointer
            objectSize = HeapInfo::RoundObjectSize(objectSize);
        }
        if (objectSize > 0) // otherwise the object total size is less than a pointer size
        {
            // Queue the whole object for re-marking; on OOM, remember that
            // this block needs another rescan pass.
            if (!recycler->AddMark(objectAddress, objectSize))
            {
                this->SetNeedOOMRescan(recycler);
            }
        }
    }
    return true;
}
// Entry point for rescanning this block after concurrent mark (or a mark
// OOM). Returns the number of regions queued for re-marking (for the
// single-page path, the bool result converts to 1/0).
// NOTE(review): isPartialSwept is unused in this function.
size_t
LargeHeapBlock::Rescan(Recycler * recycler, bool isPartialSwept, RescanFlags flags)
{
    // Update the lastCollectAllocCount for sweep
    this->lastCollectAllocCount = this->allocCount;

#if ENABLE_CONCURRENT_GC
    Assert(recycler->collectionState != CollectionStateConcurrentFinishMark || (flags & RescanFlags_ResetWriteWatch));
    if (this->GetPageCount() == 1)
    {
        return RescanOnePage(recycler, flags);
    }

    // Need to rescan for finish mark even if it is done on the background thread
    if (recycler->collectionState != CollectionStateConcurrentFinishMark && recycler->IsConcurrentMarkState())
    {
        // CONCURRENT-TODO: Don't do background rescan for pages with multiple pages because
        // we don't track which page we have queued up
        return 0;
    }

    return RescanMultiPage(recycler, flags);
#else
    return this->GetPageCount() == 1 ? RescanOnePage(recycler) : RescanMultiPage(recycler);
#endif
}
// Rescans a multi-page large block. Marked, non-leaf objects are queued
// for re-marking; for objects spanning multiple pages, only the dirty
// pages are queued. Returns the number of regions queued.
#if ENABLE_CONCURRENT_GC
size_t
LargeHeapBlock::RescanMultiPage(Recycler * recycler, RescanFlags flags)
#else
size_t
LargeHeapBlock::RescanMultiPage(Recycler * recycler)
#endif
{
    Assert(this->GetPageCount() != 1);
    DebugOnly(bool oldNeedOOMRescan = this->needOOMRescan);

    // Reset this, we'll increment this if we OOM again
    this->needOOMRescan = false;

    size_t rescanCount = 0;
    uint objectIndex = 0;
#if ENABLE_CONCURRENT_GC
    // Cache of the last write-watch query, so two objects sharing a page
    // don't query (and potentially reset) the same page twice.
    char * lastPageCheckedForWriteWatch = nullptr;
    bool isLastPageCheckedForWriteWatchDirty = false;
#endif

    const HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
    while (objectIndex < allocCount)
    {
        LargeObjectHeader * header = this->GetHeader(objectIndex);
        objectIndex++;

        // check if the object index is not allocated
        if (header == nullptr)
        {
            continue;
        }

        char * objectAddress = (char *)header->GetAddress();
        // it is not marked, don't rescan
        if (!heapBlockMap.IsMarked(objectAddress))
        {
            continue;
        }
        unsigned char attributes = header->GetAttributes(this->heapInfo->recycler->Cookie);

#ifdef RECYCLER_STATS
        if (((attributes & FinalizeBit) != 0) && ((attributes & NewFinalizeBit) != 0))
        {
            // The concurrent thread saw a false reference to this object and marked it before the attribute was set.
            // As such, our finalizeCount is not correct. Update it now.
            RECYCLER_STATS_INC(recycler, finalizeCount);
            header->SetAttributes(this->heapInfo->recycler->Cookie, (attributes & ~NewFinalizeBit));
        }
#endif

        // check whether the object is a leaf and doesn't need to be scanned
        if ((attributes & LeafBit) != 0)
        {
            continue;
        }

#ifdef RECYCLER_STATS
        bool objectScanned = false;
#endif
        size_t objectSize = header->objectSize;
        if (this->InPageHeapMode())
        {
            // trim off the trailing part which is not a pointer
            objectSize = HeapInfo::RoundObjectSize(objectSize);
        }
        Assert(objectSize > 0);
        Assert(oldNeedOOMRescan || !header->markOnOOMRescan);

        // Avoid writing to the page unnecessarily by checking first
        if (header->markOnOOMRescan)
        {
            // A previous rescan of this object OOM'ed; re-queue the whole object.
            if (!recycler->AddMark(objectAddress, objectSize))
            {
                this->SetNeedOOMRescan(recycler);
                header->markOnOOMRescan = true;

                // We need to bail out of rescan early only if the recycler is
                // trying to finish marking because of low memory. If this is
                // a regular rescan, we want to try and rescan all the objects
                // on the page. It's possible that the rescan OOMs but if the
                // object rescan does OOM, we'll set the right bit on the
                // object header. When we later rescan it in a low memory
                // situation, when the bit is set, we don't need to check for
                // write-watch etc. since we'd have already done that before
                // setting the bit in the non-low-memory rescan case.
                if (!recycler->inEndMarkOnLowMemory)
                {
                    continue;
                }
                return rescanCount;
            }
            header->markOnOOMRescan = false;
#ifdef RECYCLER_STATS
            objectScanned = true;
#endif
        }
#if ENABLE_CONCURRENT_GC
        else if (!recycler->inEndMarkOnLowMemory)
        {
            char * objectAddressEnd = objectAddress + objectSize;

            // Walk through the object, checking if any of its pages have been written to
            // If it has, then queue up this object for marking
            do
            {
                char * pageStart = (char *)(((size_t)objectAddress) & ~(size_t)(AutoSystemInfo::PageSize - 1));

                /*
                 * The rescan logic for large object is as follows:
                 * - We rescan the object if it was marked during concurrent mark
                 * - If it was marked, since the large object has multiple pages, we'll rescan only the parts that were changed
                 * - So for each page in the large object, check if it's been written to, and if it hasn't, skip looking at that region
                 * - If we can't get the write watch, rescan that region
                 * - However, this logic applies only if we're not rescanning because of an OOM
                 * - If we are rescanning this object because of OOM (i.e !rescanBecauseOfOOM = false), rescan the whole object
                 *
                 * We cache the result of the write watch and the page that it was checked on so that we don't call GetWriteWatch on the same
                 * page twice and inadvertently reset the write watch on a page where we've already scanned an object
                 */
                if (lastPageCheckedForWriteWatch != pageStart)
                {
                    lastPageCheckedForWriteWatch = pageStart;
                    isLastPageCheckedForWriteWatchDirty = true;

                    bool hasWriteBarrier = false;
#ifdef RECYCLER_WRITE_BARRIER
                    hasWriteBarrier = header->hasWriteBarrier;
#endif
                    if (!IsPageDirty(pageStart, flags, hasWriteBarrier))
                    {
                        // Fall through to the case below where we'll update objectAddress and continue
                        isLastPageCheckedForWriteWatchDirty = false;
                    }
                }

                if (!isLastPageCheckedForWriteWatchDirty)
                {
                    // Clean page: skip ahead to the next page boundary.
                    objectAddress = pageStart + AutoSystemInfo::PageSize;
                    continue;
                }

                // We're interested in only rescanning the parts of the object that have changed, not the whole
                // object. So just queue that up for marking
                char * checkEnd = min(pageStart + AutoSystemInfo::PageSize, objectAddressEnd);
                if (!recycler->AddMark(objectAddress, (checkEnd - objectAddress)))
                {
                    this->SetNeedOOMRescan(recycler);
                    header->markOnOOMRescan = true;
                }

#ifdef RECYCLER_STATS
                objectScanned = true;
                recycler->collectionStats.markData.rescanLargePageCount++;
                recycler->collectionStats.markData.rescanLargeByteCount += (checkEnd - objectAddress);
#endif
                objectAddress = checkEnd;
                rescanCount++;
            }
            while (objectAddress < objectAddressEnd);
        }
#else
        else
        {
            Assert(recycler->inEndMarkOnLowMemory);
        }
#endif
        RECYCLER_STATS_ADD(recycler, markData.rescanLargeObjectCount, objectScanned);
    }

    return rescanCount;
}
  1140. /*
  1141. * Sweep the large heap block
  1142. *
  1143. * If there are no finalizable or weak referenced objects, and if nothing is marked
  1144. * that means that everything in this heap block is considered free. So the heap block
  1145. * can be released.
  1146. * In that case, return SweepStateEmpty
 * If there are objects to be freed, first see if there are any finalizable objects. If there
  1148. * aren't any in this heap block, then this heap block can be swept concurrently. So return SweepStatePendingSweep
  1149. * If there are finalizable objects, sweep them in thread. They would have been added to the pendingDispose list
  1150. * during the finalize phase, so we return SweepStatePendingDispose.
  1151. * In any case, if the pendingDispose list is not empty, we return SweepStatePendingDispose.
  1152. * If the allocCount equals the max object count, or if there's no more space to allocate a large object,
  1153. * we return SweepStateFull, so that the HeapInfo can move this to the full block list. Otherwise,
  1154. * we return SweepStateSwept.
  1155. */
SweepState
LargeHeapBlock::Sweep(RecyclerSweep& recyclerSweep, bool queuePendingSweep)
{
    Recycler * recycler = recyclerSweep.GetRecycler();
    uint markCount = GetMarkCount();

#if DBG
    Assert(this->lastCollectAllocCount == this->allocCount);
    Assert(markCount <= allocCount);
#endif
    RECYCLER_STATS_INC(recycler, heapBlockCount[HeapBlock::LargeBlockType]);
#if DBG
    this->expectedSweepCount = allocCount - markCount;
#endif
#if ENABLE_CONCURRENT_GC
    Assert(!this->isPendingConcurrentSweep);
#endif

    // Nothing marked and nothing to finalize: the entire block is garbage
    // and can be released.
    bool isAllFreed = (finalizeCount == 0 && markCount == 0);
    if (isAllFreed)
    {
        recycler->NotifyFree(this);
        Assert(this->pendingDisposeObject == nullptr);
        return SweepStateEmpty;
    }

    RECYCLER_STATS_ADD(recycler, largeHeapBlockTotalByteCount, this->pageCount * AutoSystemInfo::PageSize);
    RECYCLER_STATS_ADD(recycler, heapBlockFreeByteCount[HeapBlock::LargeBlockType],
        addressEnd - allocAddressEnd <= HeapConstants::MaxSmallObjectSize? 0 : (size_t)(addressEnd - allocAddressEnd));

    // If the number of objects marked is not equal to the number of objects
    // that have been allocated by this large heap block, that means that there
    // could be some objects that need to be swept
    if (markCount != allocCount)
    {
        Assert(this->expectedSweepCount != 0);

        // We need to sweep in thread if there are any finalizable objects so
        // that the PrepareFinalize() can be called before concurrent sweep
        // and other finalizers. This gives the object an opportunity before any
        // other script can be ran to clean up its references/states that are not
        // valid since we've determined that the object is not live any more.
        //
        // An example is the ITrackable's tracking alias. The reference to the alias
        // object needs to be clear so that the reference will not be given out again
        // in other script during concurrent sweep or finalizer called before.
        Assert(!recyclerSweep.IsBackground());
#if ENABLE_CONCURRENT_GC
        if (queuePendingSweep && finalizeCount == 0)
        {
            // No finalizers: the actual sweep can be deferred to the
            // concurrent sweep thread.
            this->isPendingConcurrentSweep = true;
            return SweepStatePendingSweep;
        }
#else
        Assert(!queuePendingSweep);
#endif

        SweepObjects<SweepMode_InThread>(recycler);
        if (TransferSweptObjects())
        {
            return SweepStatePendingDispose;
        }
    }
#ifdef RECYCLER_STATS
    else
    {
        // Everything is live; force-sweep only to collect statistics.
        Assert(expectedSweepCount == 0);
        isForceSweeping = true;
        SweepObjects<SweepMode_InThread>(recycler);
        isForceSweeping = false;
    }
#endif
    if (this->pendingDisposeObject != nullptr)
    {
        return SweepStatePendingDispose;
    }

    // Full when no further object can be allocated from this block and the
    // free list is empty; otherwise the block can still serve allocations.
    return (allocCount == objectCount || addressEnd - allocAddressEnd <= HeapConstants::MaxSmallObjectSize) && this->freeList.entries == nullptr ?
        SweepStateFull : SweepStateSwept;
}
// Releases the whole free pages occupied by a large dead object back to
// the page allocator, shrinking this heap block in place. Returns true if
// pages were released; false when the optimization does not apply.
bool
LargeHeapBlock::TrimObject(Recycler* recycler, LargeObjectHeader* header, size_t sizeOfObject, bool inDispose)
{
    IdleDecommitPageAllocator* pageAllocator = recycler->GetRecyclerLargeBlockPageAllocator();
    uint pageSize = AutoSystemInfo::PageSize ;

    // If we have to trim an object, either we need to have more than one object in the
    // heap block or we're being called as a part of force-sweep or dispose
    Assert(this->allocCount > 1 || this->isForceSweeping || inDispose);

    // If we have more than 1 page of bytes to free
    // make sure that the number of bytes doesn't exceed the cap for a PageSegment
    // since this optimization can only be applied to heap blocks using page segments.
    // We also skip this optimization if the allocCount is 1 since that means
    // the heap block is empty and we've been called only because we're force sweeping.
    // So, skip the opt since we're going to be marking the heap block as empty soon
    if (sizeOfObject > pageSize &&
        this->segment->GetPageCount() <= pageAllocator->GetMaxAllocPageCount() &&
        this->allocCount > 1)
    {
        Assert(!this->hadTrimmed);

        // We want to decommit the free pages beyond 4K (the page size)
        // The way large allocations work is that at most we can have 4 objects in a large heap block
        // The first object can span multiple pages, the remaining 3 objects must all fit within a page
        // So if the object being freed is greater than 1 page, then it must be the first object
        // The objectIndex must be 0 and the header must be same as this->address
        // The end address is (baseAddress + objectSize) & ~(4k - 1)
        // The number of pages to free is (freePageEnd - freePageStart) / pageSize
        char* objectAddress = (char*) header;
        char* objectEndAddress = objectAddress + sizeof(LargeObjectHeader) + header->objectSize;
        uintptr_t alignmentMask = ~((uintptr_t) (AutoSystemInfo::PageSize - 1));
        uintptr_t objectFreeAddress = (uintptr_t) objectAddress;
        // Round the end down to a page boundary: only whole pages are freed.
        uintptr_t objectFreeEndAddress = ((uintptr_t) objectEndAddress) & alignmentMask;
        size_t bytesToFree = (objectFreeEndAddress - objectFreeAddress);

        // Verify assumptions
        // Make sure that the object being freed is the first object since
        // the expectation in a large heap block is that the first object is the largest
        // object.
        // The amount of bytes to free is always less than the size of the object being freed including its header
        // The exception is if the original object's size + header size is a multiple of the page size
        Assert(objectAddress == this->address);
        Assert(header->objectIndex == 0);
        Assert(objectFreeEndAddress <= (uintptr_t) objectEndAddress);
        Assert(objectFreeAddress <= objectFreeEndAddress);
        Assert(bytesToFree < sizeOfObject + sizeof(LargeObjectHeader) || (uintptr_t) objectEndAddress == objectFreeEndAddress);

        // If we actually have something to free, release those pages
        // Move the heap block to start from the new start address
        // Change the heap block map to contain an entry for only the pages that haven't been freed
        // Fill up the old object's unreleased memory if we have to
        Assert(bytesToFree > 0);
        Assert((bytesToFree & (AutoSystemInfo::PageSize - 1)) == 0);
        size_t freePageCount = bytesToFree / AutoSystemInfo::PageSize;
        Assert(freePageCount > 0);
        Assert(freePageCount < this->pageCount);

        // If this call to trim needs idle decommit to be suspended (e.g. dispose case)
        // check if IdleDecommit has been suspended already. If it hasn't, suspend it
        // This is to prevent reentrant idle decommits (e.g. sometimes dispose is called with
        if (inDispose)
        {
            pageAllocator->SuspendIdleDecommit();
        }
        pageAllocator->Release((char*) objectFreeAddress, freePageCount, this->GetSegment());
        if (inDispose)
        {
            pageAllocator->ResumeIdleDecommit();
        }

        // Remove the freed pages from the heap block map
        // and move the heap block to start from after the pages that were freed
        // and update the page count
        recycler->heapBlockMap.ClearHeapBlock(this->address, freePageCount);
        this->address = (char*) objectFreeEndAddress;
        this->pageCount -= freePageCount;

        // Fill the remaining tail fragment (between the last freed page
        // boundary and the object's end) that stays inside the block.
        FillFreeMemory(recycler, (void*) objectFreeEndAddress, (size_t) (objectEndAddress - objectFreeEndAddress));
#if DBG
        this->hadTrimmed = true;
#endif
        return true;
    }
    return false;
}
// In-thread sweep of a single dead object: clears its header-list slot
// and fills its memory, unless it is finalizable (finalizable objects are
// cleared later during dispose).
template <>
void
LargeHeapBlock::SweepObject<SweepMode_InThread>(Recycler * recycler, LargeObjectHeader * header)
{
    Assert(this->HeaderList()[header->objectIndex] == header);

    // Set the header and object to null only if this is not a finalizable object
    // If it's finalizable, it'll be zeroed out during dispose
    if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit) != FinalizeBit)
    {
        this->HeaderList()[header->objectIndex] = nullptr;
        size_t sizeOfObject = header->objectSize;
        bool objectTrimmed = false;

        // Try to give whole free pages back to the page allocator
        // (presumably only for blocks not tied to a bucket — TODO confirm).
        if (this->bucket == nullptr)
        {
            objectTrimmed = TrimObject(recycler, header, sizeOfObject);
        }

        if (!objectTrimmed)
        {
            FillFreeMemory(recycler, header, sizeof(LargeObjectHeader) + sizeOfObject);
        }
    }
}
//
// Call the finalizer on the heapblock object and add it to the pending dispose list
//
void
LargeHeapBlock::FinalizeObject(Recycler* recycler, LargeObjectHeader* header)
{
    // The header-list entry can also be null if this object has already been
    // finalized, but this method should never be called in that case.
    Assert(this->HeaderList()[header->objectIndex] == header);
    Assert(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit);

    // Call finalize to do clean up that needs to be done immediately
    // (e.g. Clear the ITrackable alias reference, so it can't be revived during
    // other finalizers or concurrent sweep)
    // Call it only if it hasn't already been finalized
    ((FinalizableObject *)header->GetAddress())->Finalize(false);

    // Chain the object onto the pending-dispose list.
    header->SetNext(this->heapInfo->recycler->Cookie, this->pendingDisposeObject);
    this->pendingDisposeObject = header;

    // Null out the header in the header list- this means that this object has already
    // been finalized and is just pending dispose
    this->HeaderList()[header->objectIndex] = nullptr;

#ifdef RECYCLER_FINALIZE_CHECK
    recycler->autoHeap.pendingDisposableObjectCount++;
#endif
}
// Explicitly instantiate all the sweep modes
// (the in-thread mode is always available; concurrent modes follow below)
template void LargeHeapBlock::SweepObjects<SweepMode_InThread>(Recycler * recycler);
  1355. #if ENABLE_CONCURRENT_GC
// Concurrent-thread sweep of a single dead object: finalizable objects
// never reach this path (they are swept in thread), so the slot can be
// cleared and the memory filled immediately.
template <>
void
LargeHeapBlock::SweepObject<SweepMode_Concurrent>(Recycler * recycler, LargeObjectHeader * header)
{
    Assert(!(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit));
    Assert(this->HeaderList()[header->objectIndex] == header);
    this->HeaderList()[header->objectIndex] = nullptr;
    FillFreeMemory(recycler, header, sizeof(LargeObjectHeader) + header->objectSize);
}
// Explicitly instantiate all the sweep modes
template void LargeHeapBlock::SweepObjects<SweepMode_Concurrent>(Recycler * recycler);
  1367. #if ENABLE_PARTIAL_GC
template <>
void
LargeHeapBlock::SweepObject<SweepMode_ConcurrentPartial>(Recycler * recycler, LargeObjectHeader * header)
{
    // Partial sweep never sees finalizable objects either.
    Assert(!(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit));
    Assert(this->HeaderList()[header->objectIndex] == header);

    // Instead of freeing, tag the header-list entry with PartialFreeBit; the
    // memory is actually reclaimed later by FinishPartialCollect.
    this->HeaderList()[header->objectIndex] = (LargeObjectHeader *)((size_t)header | PartialFreeBit);
    DebugOnly(this->hasPartialFreeObjects = true);
}

// Explicitly instantiate all the sweep modes
template void LargeHeapBlock::SweepObjects<SweepMode_ConcurrentPartial>(Recycler * recycler);
  1379. #endif
  1380. #endif
  1381. //
  1382. // Walk through the objects in this heap block and call finalize
  1383. // on them if they're not marked and finalizable.
  1384. //
  1385. // At the end of this phase, if there were any finalizable objects,
  1386. // they would be in the pendingDisposeObject list. When we later call
  1387. // sweep on this heapblock, we'd simply null out the header and zero out the memory
  1388. // and then Sweep would return PendingDispose as its state
  1389. //
  1390. void LargeHeapBlock::FinalizeObjects(Recycler* recycler)
  1391. {
  1392. const HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
  1393. for (uint i = 0; i < this->lastCollectAllocCount; i++)
  1394. {
  1395. LargeObjectHeader * header = this->GetHeader(i);
  1396. if (header == nullptr)
  1397. {
  1398. continue;
  1399. }
  1400. Assert(header->objectIndex == i);
  1401. // Skip finalization if the object is alive
  1402. if (heapBlockMap.IsMarked(header->GetAddress()))
  1403. {
  1404. continue;
  1405. }
  1406. if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit) == FinalizeBit)
  1407. {
  1408. recycler->NotifyFree((char *)header->GetAddress(), header->objectSize);
  1409. FinalizeObject(recycler, header);
  1410. }
  1411. }
  1412. }
//
// Sweep all dead (unmarked) objects in this block. SweepObject<mode> decides
// how each individual dead object is released (in-thread, concurrent, partial).
//
template <SweepMode mode>
void
LargeHeapBlock::SweepObjects(Recycler * recycler)
{
#if ENABLE_CONCURRENT_GC
    Assert(mode == SweepMode_InThread || this->isPendingConcurrentSweep);
#else
    Assert(mode == SweepMode_InThread);
#endif
    const HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
#if DBG
    uint markCount = GetMarkCount();
    // mark count included newly allocated objects
#if ENABLE_CONCURRENT_GC
    Assert(expectedSweepCount == allocCount - markCount || recycler->collectionState == CollectionStateConcurrentSweep);
#else
    Assert(expectedSweepCount == allocCount - markCount);
#endif
    Assert(expectedSweepCount != 0 || isForceSweeping);
    uint sweepCount = 0;
#endif
    for (uint i = 0; i < lastCollectAllocCount; i++)
    {
        RECYCLER_STATS_ADD(recycler, objectSweepScanCount, !isForceSweeping);
        LargeObjectHeader * header = this->GetHeader(i);
        if (header == nullptr)
        {
            // Slot already released (e.g. the object was finalized earlier).
#if DBG
            Assert(expectedSweepCount != 0);
            expectedSweepCount--;
#endif
#if DBG
            LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("Index %d empty\n"), i);
#endif
            continue;
        }
        Assert(header->objectIndex == i);
        // Skip sweep if the object is alive
        if (heapBlockMap.IsMarked(header->GetAddress()))
        {
#if DBG
            Assert((header->GetAttributes(recycler->Cookie) & NewFinalizeBit) == 0);
#endif
            RECYCLER_STATS_ADD(recycler, largeHeapBlockUsedByteCount, this->GetHeader(i)->objectSize);
            continue;
        }
        // Capture the size before SweepObject, which may scrub the header memory.
        size_t objectSize = header->objectSize;
        recycler->NotifyFree((char *)header->GetAddress(), objectSize);
        SweepObject<mode>(recycler, header);
        // Thread the freed region onto this block's free list so it can be
        // reused (skipped while force-sweeping when RECYCLER_STATS is defined).
        if (this->bucket != nullptr
#ifdef RECYCLER_STATS
            && !isForceSweeping
#endif
            )
        {
            LargeHeapBlockFreeListEntry* head = this->freeList.entries;
            LargeHeapBlockFreeListEntry* entry = (LargeHeapBlockFreeListEntry*) header;
            entry->headerIndex = i;
            entry->heapBlock = this;
            entry->next = head;
            entry->objectSize = objectSize;
            this->freeList.entries = entry;
        }
#if DBG
        sweepCount++;
#endif
    }
    Assert(sweepCount == expectedSweepCount);
#if ENABLE_CONCURRENT_GC
    this->isPendingConcurrentSweep = false;
#endif
}
  1485. bool
  1486. LargeHeapBlock::TransferSweptObjects()
  1487. {
  1488. // TODO : Large heap block doesn't do free listing yet
  1489. return pendingDisposeObject != nullptr;
  1490. }
void
LargeHeapBlock::DisposeObjects(Recycler * recycler)
{
    Assert(this->pendingDisposeObject != nullptr || this->hasDisposeBeenCalled);

    // Drain the pending-dispose list that FinalizeObject built.
    while (pendingDisposeObject != nullptr)
    {
#if DBG
        this->hasDisposeBeenCalled = true;
#endif
        LargeObjectHeader * header = pendingDisposeObject;
        pendingDisposeObject = header->GetNext(this->heapInfo->recycler->Cookie);
        Assert(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit);
        // FinalizeObject already nulled this slot when it queued the object.
        Assert(this->HeaderList()[header->objectIndex] == nullptr);

        void * objectAddress = header->GetAddress();
        ((FinalizableObject *)objectAddress)->Dispose(false);

        Assert(finalizeCount != 0);
        finalizeCount--;

        // Blocks without a bucket try TrimObject first (with suspend); only if
        // the trim doesn't happen is the memory scrubbed in place below.
        bool objectTrimmed = false;
        if (this->bucket == nullptr)
        {
            objectTrimmed = TrimObject(recycler, header, header->objectSize, true /* need suspend */);
        }

        // GCTODO: Consider free listing items after Dispose too
        // GCTODO: Consider compacting heap blocks- if the last n items are free, move the address pointer
        // back to before the nth item so we can bump allocate from this heap block
        if (!objectTrimmed)
        {
            FillFreeMemory(recycler, header, sizeof(LargeObjectHeader) + header->objectSize);
        }

        RECYCLER_STATS_INC(recycler, finalizeSweepCount);
#ifdef RECYCLER_FINALIZE_CHECK
        this->heapInfo->liveFinalizableObjectCount--;
        this->heapInfo->pendingDisposableObjectCount--;
#endif
    }
}
  1527. #if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
void
LargeHeapBlock::PartialTransferSweptObjects()
{
    // Nothing to do for large blocks: partial-swept objects stay tagged in the
    // header list until FinishPartialCollect reclaims them.
    Assert(this->hasPartialFreeObjects);
}
  1534. void
  1535. LargeHeapBlock::FinishPartialCollect(Recycler * recycler)
  1536. {
  1537. Assert(this->hasPartialFreeObjects);
  1538. for (uint i = 0; i < allocCount; i++)
  1539. {
  1540. LargeObjectHeader * header = this->HeaderList()[i];
  1541. if (header != nullptr && IsPartialSweptHeader(header))
  1542. {
  1543. header = (LargeObjectHeader *)((size_t)header & ~PartialFreeBit);
  1544. Assert(header->objectIndex == i);
  1545. this->HeaderList()[i] = nullptr;
  1546. FillFreeMemory(recycler, header, sizeof(LargeObjectHeader) + header->objectSize);
  1547. }
  1548. }
  1549. DebugOnly(this->hasPartialFreeObjects = false);
  1550. }
  1551. #endif
  1552. void
  1553. LargeHeapBlock::EnumerateObjects(ObjectInfoBits infoBits, void (*CallBackFunction)(void * address, size_t size))
  1554. {
  1555. for (uint i = 0; i < allocCount; i++)
  1556. {
  1557. LargeObjectHeader * header = this->GetHeader(i);
  1558. if (header == nullptr)
  1559. {
  1560. continue;
  1561. }
  1562. if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & infoBits) != 0)
  1563. {
  1564. CallBackFunction(header->GetAddress(), header->objectSize);
  1565. }
  1566. }
  1567. }
  1568. uint
  1569. LargeHeapBlock::GetMaxLargeObjectCount(size_t pageCount, size_t firstAllocationSize)
  1570. {
  1571. size_t freeSize = (AutoSystemInfo::PageSize * pageCount) - firstAllocationSize - sizeof(LargeObjectHeader);
  1572. Assert(freeSize < AutoSystemInfo::Data.dwAllocationGranularity);
  1573. size_t objectCount = (freeSize / HeapConstants::MaxSmallObjectSize) + 1;
  1574. Assert(objectCount <= UINT_MAX);
  1575. return (uint)objectCount;
  1576. }
  1577. #ifdef RECYCLER_SLOW_CHECK_ENABLED
  1578. void
  1579. LargeHeapBlock::Check(bool expectFull, bool expectPending)
  1580. {
  1581. for (uint i = 0; i < allocCount; i++)
  1582. {
  1583. LargeObjectHeader * header = this->HeaderList()[i];
  1584. if (header == nullptr)
  1585. {
  1586. continue;
  1587. }
  1588. #if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
  1589. header = (LargeObjectHeader *)((size_t)header & ~PartialFreeBit);
  1590. Assert(this->hasPartialFreeObjects || header == this->HeaderList()[i]);
  1591. #endif
  1592. Assert(header->objectIndex == i);
  1593. }
  1594. }
  1595. #endif
void LargeHeapBlock::FillFreeMemory(Recycler * recycler, __in_bcount(size) void * address, size_t size)
{
    // For now, we don't do anything in release build because we don't reuse this memory until we return
    // the pages to the allocator which will zero out the whole page
#ifdef RECYCLER_MEMORY_VERIFY
    if (recycler->VerifyEnabled())
    {
        // Verify builds: stamp the region with the verify pattern so stale reuse
        // of freed memory can be detected.
        memset(address, Recycler::VerifyMemFill, size);
        return;
    }
#endif
#ifdef RECYCLER_FREE_MEM_FILL
    // Debug fill so use-after-free reads are recognizable.
    memset(address, DbgMemFill, size);
#endif
}
  1611. size_t LargeHeapBlock::GetObjectSize(void* objectAddress)
  1612. {
  1613. LargeObjectHeader * header = GetHeader(objectAddress);
  1614. Assert((char *)header >= this->address);
  1615. return header->objectSize;
  1616. }
  1617. #ifdef RECYCLER_MEMORY_VERIFY
void
LargeHeapBlock::Verify(Recycler * recycler)
{
    // Walk the block front-to-back verifying that headers and free-list entries
    // appear in address order, that fill patterns between objects are intact,
    // that pad bytes are untouched, and that the finalizable count matches.
    char * lastAddress = this->address;
    uint verifyFinalizeCount = 0;
    for (uint i = 0; i < allocCount; i++)
    {
        LargeObjectHeader * header = this->HeaderList()[i];
        if (header == nullptr)
        {
            // Check if the object is on the free list
            LargeHeapBlockFreeListEntry* current = this->freeList.entries;
            while (current != nullptr)
            {
                // Verify the free listed object
                if (current->headerIndex == i)
                {
                    BYTE* objectAddress = (BYTE *)current + sizeof(LargeObjectHeader);
                    Recycler::VerifyCheck(current->heapBlock == this, _u("Invalid heap block"), this, current->heapBlock);
                    Recycler::VerifyCheck((char *)current >= lastAddress, _u("LargeHeapBlock invalid object header order"), this->address, current);
                    Recycler::VerifyCheckFill(lastAddress, (char *)current - lastAddress);
                    recycler->VerifyCheckPad(objectAddress, current->objectSize);
                    lastAddress = (char *) objectAddress + current->objectSize;
                    break;
                }
                current = current->next;
            }
            continue;
        }
        Recycler::VerifyCheck((char *)header >= lastAddress, _u("LargeHeapBlock invalid object header order"), this->address, header);
        Recycler::VerifyCheckFill(lastAddress, (char *)header - lastAddress);
        Recycler::VerifyCheck(header->objectIndex == i, _u("LargeHeapBlock object index mismatch"), this->address, &header->objectIndex);
        recycler->VerifyCheckPad((BYTE *)header->GetAddress(), header->objectSize);
        // Tally finalizable objects so the total can be cross-checked below.
        verifyFinalizeCount += ((header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit) != 0);
        lastAddress = (char *)header->GetAddress() + header->objectSize;
    }
    Recycler::VerifyCheck(verifyFinalizeCount == this->finalizeCount, _u("LargeHeapBlock finalize object count mismatch"), this->address, &this->finalizeCount);
}
  1656. #endif
  1657. uint
  1658. LargeHeapBlock::GetMarkCount()
  1659. {
  1660. uint markCount = 0;
  1661. const HeapBlockMap& heapBlockMap = this->heapInfo->recycler->heapBlockMap;
  1662. for (uint i = 0; i < allocCount; i++)
  1663. {
  1664. LargeObjectHeader* header = this->HeaderList()[i];
  1665. if (header && header->objectIndex == i && heapBlockMap.IsMarked(header->GetAddress()))
  1666. {
  1667. markCount++;
  1668. }
  1669. }
  1670. return markCount;
  1671. }
  1672. #ifdef RECYCLER_PERF_COUNTERS
  1673. void
  1674. LargeHeapBlock::UpdatePerfCountersOnFree()
  1675. {
  1676. Assert(GetMarkCount() == 0);
  1677. size_t usedCount = 0;
  1678. size_t usedBytes = 0;
  1679. for (uint i = 0; i < allocCount; i++)
  1680. {
  1681. LargeObjectHeader * header = this->HeaderList()[i];
  1682. if (header == nullptr)
  1683. {
  1684. continue;
  1685. }
  1686. usedCount++;
  1687. usedBytes += header->objectSize;
  1688. }
  1689. RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockLiveObject, usedCount);
  1690. RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockLiveObjectSize, usedBytes);
  1691. RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockFreeObjectSize, this->GetPageCount() * AutoSystemInfo::PageSize - usedBytes);
  1692. RECYCLER_PERF_COUNTER_SUB(LiveObject, usedCount);
  1693. RECYCLER_PERF_COUNTER_SUB(LiveObjectSize, usedBytes);
  1694. RECYCLER_PERF_COUNTER_SUB(FreeObjectSize, this->GetPageCount() * AutoSystemInfo::PageSize - usedBytes);
  1695. }
  1696. #endif
  1697. #ifdef PROFILE_RECYCLER_ALLOC
  1698. void *
  1699. LargeHeapBlock::GetTrackerData(void * address)
  1700. {
  1701. Assert(Recycler::DoProfileAllocTracker());
  1702. LargeObjectHeader * header = GetHeader(address);
  1703. Assert((char *)header >= this->address);
  1704. uint index = header->objectIndex;
  1705. Assert(index < this->allocCount);
  1706. Assert(this->HeaderList()[index] == header);
  1707. return this->GetTrackerDataArray()[index];
  1708. }
  1709. void
  1710. LargeHeapBlock::SetTrackerData(void * address, void * data)
  1711. {
  1712. Assert(Recycler::DoProfileAllocTracker());
  1713. LargeObjectHeader * header = GetHeader(address);
  1714. Assert((char *)header >= this->address);
  1715. uint index = header->objectIndex;
  1716. Assert(index < this->allocCount);
  1717. Assert(this->HeaderList()[index] == header);
  1718. this->GetTrackerDataArray()[index] = data;
  1719. }
void **
LargeHeapBlock::GetTrackerDataArray()
{
    // See LargeHeapBlock::GetAllocPlusSize for layout description
    // The tracker array occupies the last objectCount * sizeof(void *) bytes of
    // the trailing allocation that follows this LargeHeapBlock object.
    return (void **)((char *)(this + 1) + LargeHeapBlock::GetAllocPlusSize(this->objectCount) - this->objectCount * sizeof(void *));
}
  1726. #endif
  1727. #ifdef RECYCLER_PAGE_HEAP
void
LargeHeapBlock::CapturePageHeapAllocStack()
{
#ifdef STACK_BACK_TRACE
    if (this->InPageHeapMode()) // pageheap can be enabled only for some of the buckets
    {
        // These asserts are true because explicit free is disallowed in
        // page heap mode. If they weren't, we'd have to modify the asserts
        Assert(this->pageHeapFreeStack == nullptr);
        Assert(this->pageHeapAllocStack == nullptr);

        // Note: NoCheckHeapAllocator will fail fast if we can't allocate the stack to capture
        // REVIEW: Should we have a flag to configure the number of frames captured?
        // NOTE(review): the Assert above says pageHeapAllocStack is null here, so in
        // debug builds this first branch should be unreachable - presumably it handles
        // release-build reuse of an earlier capture; confirm intent.
        if (pageHeapAllocStack != nullptr && this->pageHeapAllocStack != s_StackTraceAllocFailed)
        {
            this->pageHeapAllocStack->Capture(Recycler::s_numFramesToSkipForPageHeapAlloc);
        }
        else
        {
            this->pageHeapAllocStack = StackBackTrace::Capture(&NoThrowHeapAllocator::Instance,
                Recycler::s_numFramesToSkipForPageHeapAlloc, Recycler::s_numFramesToCaptureForPageHeap);
        }
        if (this->pageHeapAllocStack == nullptr)
        {
            this->pageHeapAllocStack = const_cast<StackBackTrace*>(s_StackTraceAllocFailed); // allocate failed, mark it we have tried
        }
    }
#endif
}
void
LargeHeapBlock::CapturePageHeapFreeStack()
{
#ifdef STACK_BACK_TRACE
    if (this->InPageHeapMode()) // pageheap can be enabled only for some of the buckets
    {
        // These asserts are true because explicit free is disallowed in
        // page heap mode. If they weren't, we'd have to modify the asserts
        Assert(this->pageHeapFreeStack == nullptr);
        Assert(this->pageHeapAllocStack != nullptr);

        // NOTE(review): the Assert above says pageHeapFreeStack is null here, so in
        // debug builds this first branch should be unreachable - presumably it handles
        // release-build reuse of an earlier capture; confirm intent.
        if (this->pageHeapFreeStack != nullptr)
        {
            this->pageHeapFreeStack->Capture(Recycler::s_numFramesToSkipForPageHeapFree);
        }
        else
        {
            this->pageHeapFreeStack = StackBackTrace::Capture(&NoThrowHeapAllocator::Instance,
                Recycler::s_numFramesToSkipForPageHeapFree, Recycler::s_numFramesToCaptureForPageHeap);
        }
    }
#endif
}
  1778. #endif
  1779. #if DBG
  1780. void LargeHeapBlock::WBSetBit(char* addr)
  1781. {
  1782. uint index = (uint)(addr - this->address) / sizeof(void*);
  1783. try
  1784. {
  1785. AUTO_NESTED_HANDLED_EXCEPTION_TYPE(static_cast<ExceptionType>(ExceptionType_DisableCheck));
  1786. wbVerifyBits.Set(index);
  1787. }
  1788. catch (Js::OutOfMemoryException&)
  1789. {
  1790. }
  1791. }
  1792. void LargeHeapBlock::WBSetBits(char* addr, uint length)
  1793. {
  1794. uint index = (uint)(addr - this->address) / sizeof(void*);
  1795. try
  1796. {
  1797. AUTO_NESTED_HANDLED_EXCEPTION_TYPE(static_cast<ExceptionType>(ExceptionType_DisableCheck));
  1798. for (uint i = 0; i < length; i++)
  1799. {
  1800. wbVerifyBits.Set(index + i);
  1801. }
  1802. }
  1803. catch (Js::OutOfMemoryException&)
  1804. {
  1805. }
  1806. }
  1807. void LargeHeapBlock::WBClearBits(char* addr)
  1808. {
  1809. uint index = (uint)(addr - this->address) / sizeof(void*);
  1810. size_t objectSize = this->GetHeader(addr)->objectSize;
  1811. for (uint i = 0; i < (uint)objectSize / sizeof(void*); i++)
  1812. {
  1813. wbVerifyBits.Clear(index + i);
  1814. }
  1815. }
  1816. #endif