| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761
27712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761
77717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130 |
- //-------------------------------------------------------------------------------------------------------
- // Copyright (C) Microsoft. All rights reserved.
- // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
- //-------------------------------------------------------------------------------------------------------
- #include "CommonMemoryPch.h"
// The header must occupy exactly one or two allocation-granularity units so
// that the object payload that follows it stays ObjectGranularity-aligned.
CompileAssert(
    sizeof(LargeObjectHeader) == HeapConstants::ObjectGranularity ||
    sizeof(LargeObjectHeader) == HeapConstants::ObjectGranularity * 2);

#ifdef STACK_BACK_TRACE
// Sentinel (non-dereferenceable) pointer meaning "capturing the page-heap
// alloc stack trace failed", as distinct from nullptr ("no trace captured").
// The destructor must not try to delete this value.
const StackBackTrace* LargeHeapBlock::s_StackTraceAllocFailed = (StackBackTrace*)1;
#endif
- void *
- LargeObjectHeader::GetAddress() { return ((char *)this) + sizeof(LargeObjectHeader); }
#ifdef LARGEHEAPBLOCK_ENCODING
// With encoding enabled, the 'next' pointer and the packed word
// (attributes << 8 | checksum) are XOR-encoded with the recycler cookie so
// that heap-metadata corruption is detectable:
//   if next = B1 B2 B3 B4 (byte-wise), checksum = B1 ^ B2 ^ B3 ^ B4 ^ attributes
// Encode and Decode are the same operation (XOR with the cookie is self-inverse).
// Computes the one-byte checksum guarding the header metadata: XOR of the
// low four bytes of the decoded 'next' pointer with the decoded attributes
// byte. NOTE(review): only nextField[0..3] are folded in, so on 64-bit
// targets the upper four pointer bytes are not covered by the checksum —
// presumably intentional/legacy behavior; confirm before changing.
unsigned char
LargeObjectHeader::CalculateCheckSum(LargeObjectHeader* decodedNext, unsigned char decodedAttributes)
{
    unsigned char checksum = 0;
    byte *nextField = (byte *)&decodedNext;
    checksum = nextField[0] ^ nextField[1] ^ nextField[2] ^ nextField[3] ^ decodedAttributes;
    return checksum;
}
- LargeObjectHeader*
- LargeObjectHeader::EncodeNext(uint cookie, LargeObjectHeader* next)
- {
- return (LargeObjectHeader *)((uintptr_t)next ^ cookie);
- }
- ushort
- LargeObjectHeader::EncodeAttributesAndChecksum(uint cookie, ushort attributesAndChecksum)
- {
- return attributesAndChecksum ^ (ushort)cookie;
- }
- LargeObjectHeader*
- LargeObjectHeader::DecodeNext(uint cookie, LargeObjectHeader* next) { return EncodeNext(cookie, next); }
- ushort
- LargeObjectHeader::DecodeAttributesAndChecksum(uint cookie) { return EncodeAttributesAndChecksum(cookie, this->attributesAndChecksum); }
- #else
- // If heap block encoding is disabled then have an API to expose
- // pointer to attributes so that can be passed to RecyclerHeapObjectInfo()
- // which updates the attributes field.
// Exposes the raw attributes byte by address; only valid when header
// encoding is disabled (there is no checksum to keep consistent).
unsigned char *
LargeObjectHeader::GetAttributesPtr()
{
    return &this->attributes;
}
- #endif
// Stores a new 'next' link. With encoding enabled, the checksum embedded in
// attributesAndChecksum must be recomputed against the new pointer, and both
// fields are re-encoded with the cookie before being written back.
void
LargeObjectHeader::SetNext(uint cookie, LargeObjectHeader* next)
{
#ifdef LARGEHEAPBLOCK_ENCODING
    ushort decodedAttributesAndChecksum = this->DecodeAttributesAndChecksum(cookie);
    // Calculate the checksum value with new next (attributes are the high byte)
    unsigned char newCheckSumValue = this->CalculateCheckSum(next, (unsigned char)(decodedAttributesAndChecksum >> 8));
    // pack the (attribute + checksum)
    ushort newAttributeWithCheckSum = (decodedAttributesAndChecksum & 0xFF00) | newCheckSumValue;
    // encode the packed (attribute + checksum), next and set them
    this->attributesAndChecksum = this->EncodeAttributesAndChecksum(cookie, newAttributeWithCheckSum);
    this->next = this->EncodeNext(cookie, next);
#else
    this->next = next;
#endif
}
// Reads the 'next' link. With encoding enabled, the stored checksum is
// validated against one recomputed from the decoded fields; a mismatch means
// recycler heap metadata was corrupted and is fatal.
LargeObjectHeader *
LargeHeapBlock::GetNext(uint cookie)
{
#ifdef LARGEHEAPBLOCK_ENCODING
    LargeObjectHeader *decodedNext = this->DecodeNext(cookie, this->next);
    ushort decodedAttributesAndChecksum = this->DecodeAttributesAndChecksum(cookie);
    // Low byte is the stored checksum; high byte holds the attributes.
    unsigned char checkSum = (unsigned char)(decodedAttributesAndChecksum & 0xFF);
    unsigned char calculatedCheckSumField = this->CalculateCheckSum(decodedNext, (unsigned char)(decodedAttributesAndChecksum >> 8));
    if (checkSum != calculatedCheckSumField)
    {
        LargeHeapBlock_Metadata_Corrupted((ULONG_PTR)this, calculatedCheckSumField);
    }
    // If checksum matches return the up-to-date next (in case other thread changed it from last time
    // we read it in this method).
    return this->DecodeNext(cookie, this->next);
#else
    return this->next;
#endif
}
// Stores a new attributes byte. With encoding enabled, the checksum must be
// recomputed against the current (decoded) next pointer, packed with the new
// attributes (high byte), and re-encoded before being written back.
void
LargeObjectHeader::SetAttributes(uint cookie, unsigned char attributes)
{
#ifdef LARGEHEAPBLOCK_ENCODING
    LargeObjectHeader *decodedNext = this->DecodeNext(cookie, this->next);
    // Calculate the checksum value with new attribute
    unsigned char newCheckSumValue = this->CalculateCheckSum(decodedNext, attributes);
    // pack the (attribute + checksum)
    ushort newAttributeWithCheckSum = ((ushort)attributes << 8) | newCheckSumValue;
    // encode the packed (attribute + checksum) and set it
    this->attributesAndChecksum = this->EncodeAttributesAndChecksum(cookie, newAttributeWithCheckSum);
#else
    this->attributes = attributes;
#endif
}
// Reads the attributes byte. With encoding enabled, the stored checksum is
// validated first (fatal on mismatch), then the field is re-read and decoded
// so a concurrent update between the two reads is not lost.
unsigned char
LargeObjectHeader::GetAttributes(uint cookie)
{
#ifdef LARGEHEAPBLOCK_ENCODING
    LargeObjectHeader *decodedNext = this->DecodeNext(cookie, this->next);
    ushort decodedAttributesAndChecksum = this->DecodeAttributesAndChecksum(cookie);
    unsigned char checkSum = (unsigned char)(decodedAttributesAndChecksum & 0xFF);
    unsigned char calculatedCheckSumField = this->CalculateCheckSum(decodedNext, (unsigned char)(decodedAttributesAndChecksum >> 8));
    if (checkSum != calculatedCheckSumField)
    {
        LargeHeapBlock_Metadata_Corrupted((ULONG_PTR)this, calculatedCheckSumField);
    }
    // If checksum matches return the up-to-date attributes (in case other thread changed it from last time
    // we read it in this method).
    return this->DecodeAttributesAndChecksum(cookie) >> 8;
#else
    return this->attributes;
#endif
}
// Number of extra bytes to allocate beyond sizeof(LargeHeapBlock) for a
// block that can hold up to objectCount objects.
size_t
LargeHeapBlock::GetAllocPlusSize(uint objectCount)
{
    // Large Heap Block Layout:
    //   LargeHeapBlock
    //   LargeObjectHeader * [objectCount]
    //   TrackerData * [objectCount]  (optional, only when alloc tracking is profiled)
    size_t allocPlusSize = objectCount * (sizeof(LargeObjectHeader *));
#ifdef PROFILE_RECYCLER_ALLOC
    if (Recycler::DoProfileAllocTracker())
    {
        allocPlusSize += objectCount * sizeof(void *);
    }
#endif
    return allocPlusSize;
}
// Factory: allocates (non-throwing, zero-initialized) a LargeHeapBlock plus
// the trailing header/tracker arrays sized by GetAllocPlusSize.
// Returns nullptr on OOM.
LargeHeapBlock *
LargeHeapBlock::New(__in char * address, size_t pageCount, Segment * segment, uint objectCount, LargeHeapBucket* bucket)
{
    return NoMemProtectHeapNewNoThrowPlusZ(GetAllocPlusSize(objectCount), LargeHeapBlock, address, pageCount, segment, objectCount, bucket);
}
// Frees a block created by New, including its trailing plus-size arrays.
void
LargeHeapBlock::Delete(LargeHeapBlock * heapBlock)
{
    NoMemProtectHeapDeletePlus(GetAllocPlusSize(heapBlock->objectCount), heapBlock);
}
// Constructs a large heap block over [address, address + pageCount pages).
// Bump allocation starts at 'address' (allocAddressEnd tracks the current end
// of allocated space). Fields not in the init list are relied on to be zero
// from the zeroing allocation in New (see the Asserts below).
LargeHeapBlock::LargeHeapBlock(__in char * address, size_t pageCount, Segment * segment, uint objectCount, LargeHeapBucket* bucket)
    : HeapBlock(LargeBlockType), pageCount(pageCount), allocAddressEnd(address), objectCount(objectCount), bucket(bucket), freeList(this)
#if defined(RECYCLER_PAGE_HEAP) && defined(STACK_BACK_TRACE)
    , pageHeapAllocStack(nullptr), pageHeapFreeStack(nullptr)
#endif
#if DBG
    ,wbVerifyBits(&HeapAllocator::Instance)
#endif
{
    Assert(address != nullptr);
    Assert(pageCount != 0);
    Assert(objectCount != 0);
    // These members were zero-initialized by the allocator:
    Assert(lastCollectAllocCount == 0);
    Assert(finalizeCount == 0);
    Assert(next == nullptr);
    Assert(!hasPartialFreeObjects);
    this->address = address;
    this->segment = segment;
#if ENABLE_CONCURRENT_GC
    this->isPendingConcurrentSweep = false;
#endif
    this->addressEnd = this->address + this->pageCount * AutoSystemInfo::PageSize;
    RECYCLER_PERF_COUNTER_INC(LargeHeapBlockCount);
    RECYCLER_PERF_COUNTER_ADD(LargeHeapBlockPageSize, pageCount * AutoSystemInfo::PageSize);
}
// The destructor only releases page-heap stack traces and updates counters;
// the block's pages must already be gone (ReleasePagesSweep) unless the page
// allocator itself is closed (shutdown path).
LargeHeapBlock::~LargeHeapBlock()
{
    AssertMsg(this->segment == nullptr || this->heapInfo->recycler->recyclerLargeBlockPageAllocator.IsClosed(),
        "ReleasePages needs to be called before delete");
    RECYCLER_PERF_COUNTER_DEC(LargeHeapBlockCount);
#if defined(RECYCLER_PAGE_HEAP) && defined(STACK_BACK_TRACE)
    if (this->pageHeapAllocStack != nullptr)
    {
        // s_StackTraceAllocFailed is a sentinel, not a real trace — don't delete it.
        if (this->pageHeapAllocStack != s_StackTraceAllocFailed)
        {
            this->pageHeapAllocStack->Delete(&NoThrowHeapAllocator::Instance);
        }
        this->pageHeapAllocStack = nullptr;
    }
    // REVIEW: This means that the old free stack is lost when we get free the heap block
    // Is this okay? Should we delay freeing heap blocks till process/thread shutdown time?
    if (this->pageHeapFreeStack != nullptr)
    {
        this->pageHeapFreeStack->Delete(&NoThrowHeapAllocator::Instance);
        this->pageHeapFreeStack = nullptr;
    }
#endif
}
- Recycler *
- LargeHeapBlock::GetRecycler() const
- {
- return this->bucket->heapInfo->recycler;
- }
- LargeObjectHeader **
- LargeHeapBlock::HeaderList()
- {
- // See LargeHeapBlock::GetAllocPlusSize for layout description
- return (LargeObjectHeader **)(((byte *)this) + sizeof(LargeHeapBlock));
- }
// Runs Finalize+Dispose on every live finalizable object in this block, then
// Disposes everything already parked on the pending-dispose list. The 'true'
// argument to Finalize/Dispose presumably flags shutdown-time teardown —
// TODO(review): confirm against FinalizableObject's contract.
void
LargeHeapBlock::FinalizeAllObjects()
{
    if (this->finalizeCount != 0)
    {
        DebugOnly(uint processedCount = 0);
        for (uint i = 0; i < allocCount; i++)
        {
            LargeObjectHeader * header = this->GetHeader(i);
            // Skip free slots and non-finalizable objects.
            if (header == nullptr || ((header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit) == 0))
            {
                continue;
            }
            FinalizableObject * finalizableObject = ((FinalizableObject *)header->GetAddress());
            finalizableObject->Finalize(true);
            finalizableObject->Dispose(true);
#ifdef RECYCLER_FINALIZE_CHECK
            this->heapInfo->liveFinalizableObjectCount--;
#endif
            DebugOnly(processedCount++);
        }
        // Objects already finalized sit on pendingDisposeObject with their
        // HeaderList slot nulled out; they still need Dispose.
        while (pendingDisposeObject != nullptr)
        {
            LargeObjectHeader * header = pendingDisposeObject;
            pendingDisposeObject = header->GetNext(this->heapInfo->recycler->Cookie);
            Assert(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit);
            Assert(this->HeaderList()[header->objectIndex] == nullptr);
            void * objectAddress = header->GetAddress();
            ((FinalizableObject *)objectAddress)->Dispose(true);
#ifdef RECYCLER_FINALIZE_CHECK
            this->heapInfo->liveFinalizableObjectCount--;
            this->heapInfo->pendingDisposableObjectCount--;
#endif
            DebugOnly(processedCount++);
        }
        // Every finalizable object must be processed exactly once.
        Assert(this->finalizeCount == processedCount);
    }
}
// Shutdown-time variant: only clears the heap block map entry (DBG builds)
// and asserts the allocator is closed — the page allocator releases all its
// pages in bulk at shutdown, which is faster than per-block release.
void
LargeHeapBlock::ReleasePagesShutdown(Recycler * recycler)
{
#if DBG
    recycler->heapBlockMap.ClearHeapBlock(this->address, this->pageCount);
    // Don't release the page in shut down, the page allocator will release them faster
    Assert(recycler->recyclerLargeBlockPageAllocator.IsClosed());
#endif
}
// Sweep-time variant: unmap the block from the heap block map, then return
// its pages to the page allocator.
void
LargeHeapBlock::ReleasePagesSweep(Recycler * recycler)
{
    recycler->heapBlockMap.ClearHeapBlock(this->address, this->pageCount);
    ReleasePages(recycler);
}
#ifdef RECYCLER_PAGE_HEAP
// Page-heap integrity check: every byte between the end of the allocated
// object and the end of the block must still hold the 0xF0 fill pattern;
// anything else is treated as memory corruption and is fatal.
_NOINLINE
void LargeHeapBlock::VerifyPageHeapPattern()
{
    Assert(InPageHeapMode());
    Assert(this->allocCount > 0);

    byte* cursor = (byte*)this->allocAddressEnd;
    byte* const limit = (byte*)this->addressEnd;
    for (; cursor < limit; ++cursor)
    {
        if (*cursor != 0xF0u)
        {
            Assert(false);
            ReportFatalException(NULL, E_FAIL, Fatal_Recycler_MemoryCorruption, 2);
        }
    }
}
#endif
// Returns this block's pages to the large-block page allocator. In page-heap
// mode it first verifies the trailing fill pattern and restores the guard
// page(s) to PAGE_READWRITE so the allocator can reuse them; the released
// range is widened to cover the guard pages (actualPageCount).
void
LargeHeapBlock::ReleasePages(Recycler * recycler)
{
    Assert(segment != nullptr);
    char* blockStartAddress = this->address;
    size_t realPageCount = this->pageCount;
#ifdef RECYCLER_PAGE_HEAP
    if (InPageHeapMode())
    {
        Assert(((LargeObjectHeader*)this->address)->isPageHeapFillVerified || this->allocCount == 0);
        if (this->allocCount > 0) // in case OOM while adding heapblock to heapBlockMap, we release page before setting the pattern
        {
            Assert(this->allocCount == 1); // one object per heapblock in pageheap
            VerifyPageHeapPattern();
        }
        if (guardPageAddress != nullptr)
        {
            if (this->pageHeapMode == PageHeapMode::PageHeapModeBlockStart)
            {
                // Guard page precedes the block, so the release starts there.
                blockStartAddress = guardPageAddress;
            }
            realPageCount = this->actualPageCount;
            size_t guardPageCount = this->actualPageCount - this->pageCount;
            DWORD oldProtect;
            BOOL ret = ::VirtualProtect(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, PAGE_READWRITE, &oldProtect);
            Assert(ret && oldProtect == PAGE_NOACCESS);
        }
    }
#endif
#ifdef RECYCLER_FREE_MEM_FILL
    // Poison the freed pages so stale references are easier to spot.
    memset(this->address, DbgMemFill, AutoSystemInfo::PageSize * pageCount);
#endif
    IdleDecommitPageAllocator* pageAllocator = recycler->GetRecyclerLargeBlockPageAllocator();
    pageAllocator->Release(blockStartAddress, realPageCount, segment);
    RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockPageSize, pageCount * AutoSystemInfo::PageSize);
    this->segment = nullptr;
}
- BOOL
- LargeHeapBlock::IsValidObject(void* objectAddress)
- {
- LargeObjectHeader * header = GetHeader(objectAddress);
- return ((char *)header >= this->address && header->objectIndex < this->allocCount && this->HeaderList()[header->objectIndex] == header);
- }
#if DBG
// Debug helper: an address is a "free object" if its header lies in this
// block with an in-range index, but the corresponding slot has been nulled
// out (as happens when an object is swept/freed).
BOOL
LargeHeapBlock::IsFreeObject(void * objectAddress)
{
    LargeObjectHeader * header = GetHeader(objectAddress);
    return ((char *)header >= this->address && header->objectIndex < this->allocCount && this->GetHeader(header->objectIndex) == nullptr);
}
#endif
- bool
- LargeHeapBlock::TryGetAttributes(void* objectAddress, unsigned char * pAttr)
- {
- return this->TryGetAttributes(GetHeader(objectAddress), pAttr);
- }
// Validates that 'header' denotes a live object in this block and, if so,
// stores its decoded attributes into *pAttr. Returns false when the header
// lies before the block, its index is beyond allocCount, or the header does
// not match its HeaderList slot.
bool
LargeHeapBlock::TryGetAttributes(LargeObjectHeader * header, unsigned char * pAttr)
{
    if ((char *)header < this->address)
    {
        return false;
    }
    uint index = header->objectIndex;
    if (index >= this->allocCount)
    {
        // Not allocated yet.
        return false;
    }
    if (this->HeaderList()[index] != header)
    {
        // header doesn't match, not a real object
        return false;
    }
    if (this->InPageHeapMode())
    {
        // In page-heap mode also verify the trailing fill pattern is intact.
        this->VerifyPageHeapPattern();
    }
    *pAttr = header->GetAttributes(this->heapInfo->recycler->Cookie);
    return true;
}
// Computes the page count needed for an object of 'size' bytes plus its
// LargeObjectHeader, rounded up to whole pages. When multiplyRequest is set,
// the request is padded 4x (overflow-checked) to leave room for bump
// allocation of further objects in the same block. Returns 0 on overflow.
size_t
LargeHeapBlock::GetPagesNeeded(size_t size, bool multiplyRequest)
{
    if (multiplyRequest)
    {
        size = AllocSizeMath::Mul(size, 4);
    }
    uint pageSize = AutoSystemInfo::PageSize;
    // Adding (pageSize - 1) makes the division below round up.
    size = AllocSizeMath::Add(size, sizeof(LargeObjectHeader) + (pageSize - 1));
    if (size == (size_t)-1)
    {
        // AllocSizeMath presumably saturates to (size_t)-1 on overflow — confirm.
        return 0;
    }
    size_t pageCount = size / pageSize;
    return pageCount;
}
// First-fit allocation from this bucket's free list: walks the entries,
// tries AllocFreeListEntry on the first one large enough (entries may belong
// to other heap blocks — note entry->heapBlock), unlinks the entry on
// success, and unregisters the free list if it ended up empty.
// Returns nullptr if no entry could satisfy the request.
char*
LargeHeapBlock::TryAllocFromFreeList(size_t size, ObjectInfoBits attributes)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    LargeHeapBlockFreeListEntry** prev = &this->freeList.entries;
    LargeHeapBlockFreeListEntry* freeListEntry = this->freeList.entries;
    char* memBlock = nullptr;
    // Walk through the free list, find the first entry that can fit our desired size
    while (freeListEntry)
    {
        LargeHeapBlockFreeListEntry* next = freeListEntry->next;
        LargeHeapBlock* heapBlock = freeListEntry->heapBlock;
        if (freeListEntry->objectSize >= size)
        {
            memBlock = heapBlock->AllocFreeListEntry(size, attributes, freeListEntry);
            if (memBlock)
            {
                // Unlink the consumed entry from the list.
                (*prev) = next;
                break;
            }
        }
        prev = &freeListEntry->next;
        freeListEntry = freeListEntry->next;
    }
    if (this->freeList.entries == nullptr)
    {
        this->bucket->UnregisterFreeList(&this->freeList);
    }
    return memBlock;
}
// Carves an object of 'size' bytes out of a free-list entry previously freed
// from this block. The entry's memory doubles as the new LargeObjectHeader.
// Fails (nullptr) if the request would overrun the block, wrap, or exceed
// the freed object's original footprint. On success the header is
// re-initialized and re-published in HeaderList().
char*
LargeHeapBlock::AllocFreeListEntry(size_t size, ObjectInfoBits attributes, LargeHeapBlockFreeListEntry* entry)
{
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    Assert(HeapInfo::IsAlignedSize(size));
    AssertMsg((attributes & TrackBit) == 0, "Large tracked object collection not implemented");
    Assert(entry->heapBlock == this);
    Assert(entry->headerIndex < this->objectCount);
    Assert(this->HeaderList()[entry->headerIndex] == nullptr);
    uint headerIndex = entry->headerIndex;
    size_t originalSize = entry->objectSize;
    LargeObjectHeader * header = (LargeObjectHeader *) entry;
    char * allocObject = ((char*) entry) + sizeof(LargeObjectHeader); // shouldn't overflow
    char * newAllocAddressEnd = allocObject + size;
    char * originalAllocEnd = allocObject + originalSize;
    // Reject out-of-block, wrapped, or larger-than-original requests.
    if (newAllocAddressEnd > addressEnd || newAllocAddressEnd < allocObject || (originalAllocEnd < newAllocAddressEnd))
    {
        return nullptr;
    }
#ifdef RECYCLER_MEMORY_VERIFY
    if (this->heapInfo->recycler->VerifyEnabled())
    {
        this->heapInfo->recycler->VerifyCheckFill(allocObject , originalSize);
    }
#endif
    // Zero the header and the whole original payload (not just 'size') so any
    // slack beyond the new object is clean.
    memset(entry, 0, sizeof(LargeObjectHeader) + originalSize);
#ifdef RECYCLER_MEMORY_VERIFY
    // If we're in recyclerVerify mode, fill the non-header part of the allocation
    // with the verification pattern
    if (this->heapInfo->recycler->VerifyEnabled())
    {
        memset(allocObject, Recycler::VerifyMemFill, originalSize);
    }
#endif
#if DBG
    LargeAllocationVerboseTrace(this->heapInfo->recycler->GetRecyclerFlagsTable(), _u("Allocated object of size 0x%x in from free list entry at address 0x%p\n"), size, allocObject);
#endif
    Assert(allocCount <= objectCount);
    // objectSize keeps the original (possibly larger) footprint so a later
    // free can return the whole extent to the free list.
    header->objectIndex = headerIndex;
    header->objectSize = originalSize;
#ifdef RECYCLER_WRITE_BARRIER
    header->hasWriteBarrier = (attributes & WithBarrierBit) == WithBarrierBit;
#endif
    header->SetAttributes(this->heapInfo->recycler->Cookie, (attributes & StoredObjectInfoBitMask));
    header->markOnOOMRescan = false;
    header->SetNext(this->heapInfo->recycler->Cookie, nullptr);
    HeaderList()[headerIndex] = header;
    finalizeCount += ((attributes & FinalizeBit) != 0);
#ifdef RECYCLER_FINALIZE_CHECK
    if (attributes & FinalizeBit)
    {
        HeapInfo * heapInfo = this->heapInfo;
        heapInfo->liveFinalizableObjectCount++;
        heapInfo->newFinalizableObjectCount++;
    }
#endif
    return allocObject;
}
// Bump-pointer allocation of 'size' bytes (plus a header) at the current end
// of allocated space; returns nullptr when the block is exhausted. The header
// memory is expected to be pre-zeroed page memory (checked under
// RECYCLER_ZERO_MEM_CHECK).
char*
LargeHeapBlock::Alloc(size_t size, ObjectInfoBits attributes)
{
    Assert(HeapInfo::IsAlignedSize(size) || InPageHeapMode());
    Assert((attributes & InternalObjectInfoBitMask) == attributes);
    AssertMsg((attributes & TrackBit) == 0, "Large tracked object collection not implemented");
    LargeObjectHeader * header = (LargeObjectHeader *)allocAddressEnd;
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
    Assert(!IsPartialSweptHeader(header));
#endif
    char * allocObject = allocAddressEnd + sizeof(LargeObjectHeader); // shouldn't overflow
    char * newAllocAddressEnd = allocObject + size;
    // Out of room, or the pointer arithmetic wrapped.
    if (newAllocAddressEnd > addressEnd || newAllocAddressEnd < allocObject)
    {
        return nullptr;
    }
    Recycler* recycler = this->heapInfo->recycler;
#if DBG
    LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("Allocated object of size 0x%x in existing heap block at address 0x%p\n"), size, allocObject);
#endif
    Assert(allocCount < objectCount);
    allocAddressEnd = newAllocAddressEnd;
#ifdef RECYCLER_ZERO_MEM_CHECK
    recycler->VerifyZeroFill(header, sizeof(LargeObjectHeader));
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    if (recycler->VerifyEnabled())
    {
        // Under verification the pages carry a fill pattern; clear the header.
        memset(header, 0, sizeof(LargeObjectHeader));
    }
#endif
    header->objectIndex = allocCount;
    header->objectSize = size;
#ifdef RECYCLER_WRITE_BARRIER
    header->hasWriteBarrier = (attributes&WithBarrierBit) == WithBarrierBit;
#endif
    header->SetAttributes(recycler->Cookie, (attributes & StoredObjectInfoBitMask));
    HeaderList()[allocCount++] = header;
    finalizeCount += ((attributes & FinalizeBit) != 0);
#ifdef RECYCLER_FINALIZE_CHECK
    if (attributes & FinalizeBit)
    {
        HeapInfo * heapInfo = this->heapInfo;
        heapInfo->liveFinalizableObjectCount++;
        heapInfo->newFinalizableObjectCount++;
    }
#endif
    return allocObject;
}
// Marks the object at objectAddress and queues its contents for scanning via
// UpdateAttributesOfMarkedObjects. Addresses that don't resolve to a live
// object are ignored. On mark-stack OOM, the block (and for multi-page
// blocks, the specific object) is flagged for a later rescan.
template <bool doSpecialMark>
_NOINLINE
void
LargeHeapBlock::Mark(void* objectAddress, MarkContext * markContext)
{
    LargeObjectHeader * header = GetHeader(objectAddress);
    unsigned char attributes = ObjectInfoBits::NoBit;
    if (!this->TryGetAttributes(header, &attributes))
    {
        return;
    }
    DUMP_OBJECT_REFERENCE(markContext->GetRecycler(), objectAddress);
    size_t objectSize = header->objectSize;
    if (this->InPageHeapMode())
    {
        // trim off the trailing part which is not a pointer
        objectSize = HeapInfo::RoundObjectSize(objectSize);
        if (objectSize == 0)
        {
            // finalizable object must bigger than a pointer size because of the vtable
            Assert((attributes & FinalizeBit) == 0);
            return;
        }
    }
    if (!UpdateAttributesOfMarkedObjects<doSpecialMark>(markContext, objectAddress, objectSize, attributes,
        [&](unsigned char attributes) { header->SetAttributes(this->heapInfo->recycler->Cookie, attributes); }))
    {
        // Couldn't mark children- bail out and come back later
        this->SetNeedOOMRescan(markContext->GetRecycler());
        // Single page large heap block rescan all marked object on oom rescan
        if (this->GetPageCount() != 1)
        {
            // Failed to mark the objects referenced by this object, so we'll
            // revisit this on rescan
            header->markOnOOMRescan = true;
        }
    }
}
// Explicit instantiations for both special-mark modes.
template void LargeHeapBlock::Mark<true>(void* objectAddress, MarkContext * markContext);
template void LargeHeapBlock::Mark<false>(void* objectAddress, MarkContext * markContext);
- bool
- LargeHeapBlock::TestObjectMarkedBit(void* objectAddress)
- {
- Assert(IsValidObject(objectAddress));
- LargeObjectHeader* pHeader = nullptr;
- if (GetObjectHeader(objectAddress, &pHeader))
- {
- Recycler* recycler = this->heapInfo->recycler;
- return recycler->heapBlockMap.IsMarked(objectAddress);
- }
- return FALSE;
- }
- void
- LargeHeapBlock::SetObjectMarkedBit(void* objectAddress)
- {
- Assert(IsValidObject(objectAddress));
- LargeObjectHeader* pHeader = nullptr;
- if (GetObjectHeader(objectAddress, &pHeader))
- {
- Recycler* recycler = this->heapInfo->recycler;
- recycler->heapBlockMap.SetMark(objectAddress);
- }
- }
// Populates heapObject for a valid object address. With header encoding on,
// a raw attributes pointer cannot be handed out (the checksum would go
// stale), so the header itself is attached and RecyclerHeapObjectInfo uses
// the checksum-aware path instead.
bool
LargeHeapBlock::FindImplicitRootObject(void* objectAddress, Recycler * recycler, RecyclerHeapObjectInfo& heapObject)
{
    if (!IsValidObject(objectAddress))
    {
        return false;
    }
    LargeObjectHeader* pHeader = nullptr;
    if (!GetObjectHeader(objectAddress, &pHeader))
    {
        return false;
    }
#ifdef LARGEHEAPBLOCK_ENCODING
    heapObject = RecyclerHeapObjectInfo(objectAddress, recycler, this, nullptr);
    heapObject.SetLargeHeapBlockHeader(pHeader);
#else
    heapObject = RecyclerHeapObjectInfo(objectAddress, recycler, this, pHeader->GetAttributesPtr());
#endif
    return true;
}
// For large blocks the lookup flags make no difference (the parameter is
// deliberately unnamed); behavior matches FindImplicitRootObject.
bool
LargeHeapBlock::FindHeapObject(void* objectAddress, Recycler * recycler, FindHeapObjectFlags, RecyclerHeapObjectInfo& heapObject)
{
    // Currently the same actual implementation (flags is ignored)
    return FindImplicitRootObject(objectAddress, recycler, heapObject);
}
- bool
- LargeHeapBlock::GetObjectHeader(void* objectAddress, LargeObjectHeader** ppHeader)
- {
- (*ppHeader) = nullptr;
- LargeObjectHeader * header = GetHeader(objectAddress);
- if ((char *)header < this->address)
- {
- return false;
- }
- uint index = header->objectIndex;
- if (this->HeaderList()[index] != header)
- {
- // header doesn't match, not a real object
- return false;
- }
- Assert(index < this->allocCount);
- (*ppHeader) = header;
- return true;
- }
// Prepares the block for a new mark phase: snapshots allocCount into
// lastCollectAllocCount (consumed by sweep) and, when implicit-root scanning
// is enabled, pre-marks every object carrying ImplicitRootBit.
void
LargeHeapBlock::ResetMarks(ResetMarkFlags flags, Recycler* recycler)
{
    Assert(!this->needOOMRescan);
    // Update the lastCollectAllocCount for sweep
    this->lastCollectAllocCount = this->allocCount;
    Assert(this->GetMarkCount() == 0);
#if ENABLE_CONCURRENT_GC
    Assert(!this->isPendingConcurrentSweep);
#endif
    if (flags & ResetMarkFlags_ScanImplicitRoot)
    {
        for (uint objectIndex = 0; objectIndex < allocCount; objectIndex++)
        {
            LargeObjectHeader * header = this->GetHeader(objectIndex);
            // Skip free (unallocated) slots.
            if (header == nullptr)
            {
                continue;
            }
            // Implicit roots are live from the start of the mark phase, so
            // mark them up front. (A previous comment here said "leaf" — the
            // bit actually tested is ImplicitRootBit.)
            if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & ImplicitRootBit) != 0)
            {
                recycler->heapBlockMap.SetMark(header->GetAddress());
            }
        }
    }
}
// Returns the (candidate) header for an address that must lie within this
// block's range; callers still validate the result against HeaderList().
LargeObjectHeader *
LargeHeapBlock::GetHeader(void * objectAddress)
{
    Assert(objectAddress >= this->address && objectAddress < this->addressEnd);
    return GetHeaderFromAddress(objectAddress);
}
- LargeObjectHeader *
- LargeHeapBlock::GetHeaderFromAddress(void * objectAddress)
- {
- return (LargeObjectHeader*)(((char *)objectAddress) - sizeof(LargeObjectHeader));
- }
// Resolves an interior pointer to the start address of its containing
// object, or nullptr if it falls in no live (non-partial-swept) object.
// Linear scan of the header list — large blocks hold few objects.
byte *
LargeHeapBlock::GetRealAddressFromInterior(void * interiorAddress)
{
    for (uint i = 0; i < allocCount; i++)
    {
        LargeObjectHeader * header = this->HeaderList()[i];
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
        if (header != nullptr && !IsPartialSweptHeader(header))
#else
        if (header != nullptr)
#endif
        {
            Assert(header->objectIndex == i);
            byte * startAddress = (byte *)header->GetAddress();
            // Interior pointer must fall in [startAddress, startAddress + objectSize).
            if (startAddress <= interiorAddress && (startAddress + header->objectSize > interiorAddress))
            {
                return startAddress;
            }
        }
    }
    return nullptr;
}
- #ifdef RECYCLER_VERIFY_MARK
// Post-mark debug validation: for every marked, non-leaf object, walks its
// pointer-sized slots and cross-checks each target via recycler->VerifyMark.
void
LargeHeapBlock::VerifyMark()
{
    Assert(!this->needOOMRescan);
    Recycler* recycler = this->heapInfo->recycler;
    for (uint i = 0; i < allocCount; i++)
    {
        LargeObjectHeader * header = this->GetHeader(i);
        if (header == nullptr)
        {
            continue;
        }
        char * objectAddress = (char *)header->GetAddress();
        if (!recycler->heapBlockMap.IsMarked(objectAddress))
        {
            continue;
        }
        unsigned char attributes = header->GetAttributes(this->heapInfo->recycler->Cookie);
        Assert((attributes & NewFinalizeBit) == 0);
        // Leaf objects contain no scannable pointers.
        if ((attributes & LeafBit) != 0)
        {
            continue;
        }
        Assert(!header->markOnOOMRescan);
        char * objectAddressEnd = objectAddress + header->objectSize;
        // Walk the object one pointer-sized slot at a time.
        while (objectAddress + sizeof(void *) <= objectAddressEnd)
        {
            void* target = *(void **)objectAddress;
            if (recycler->VerifyMark(target))
            {
                // NOTE(review): appears to cross-check the write-barrier
                // verification bit for this slot — confirm wbVerifyBits semantics.
                Assert(this->wbVerifyBits.Test((BVIndex)(objectAddress - this->address) / sizeof(void*)));
            }
            objectAddress += sizeof(void *);
        }
    }
}
// Verify-mark for a single candidate address: returns true only if
// objectAddress is the start of a currently-allocated object in this block
// and that object is marked. An unmarked real object trips an assert (DBG)
// or a debug break (verify-enabled release builds).
bool
LargeHeapBlock::VerifyMark(void * objectAddress)
{
    LargeObjectHeader * header = GetHeader(objectAddress);

    if ((char *)header < this->address)
    {
        // Too close to the block start to have a header; not an object.
        return false;
    }

    uint index = header->objectIndex;

    if (index >= this->allocCount)
    {
        // object not allocated
        return false;
    }

    if (this->HeaderList()[index] != header)
    {
        // header doesn't match, not a real object
        return false;
    }

    bool isMarked = this->heapInfo->recycler->heapBlockMap.IsMarked(objectAddress);

#if DBG
    Assert(isMarked);
#else
    // Non-DBG verify builds stop in the debugger instead of asserting.
    if (!isMarked)
    {
        DebugBreak();
    }
#endif
    return isMarked;
}
- #endif
// Scan every marked, non-leaf object in this block for pointers (interior
// scan), as part of the initial implicit-root scan of a mark phase.
void
LargeHeapBlock::ScanInitialImplicitRoots(Recycler * recycler)
{
    Assert(recycler->enableScanImplicitRoots);

    const HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
    for (uint objectIndex = 0; objectIndex < allocCount; objectIndex++)
    {
        LargeObjectHeader * header = this->GetHeader(objectIndex);

        // check if the object index is not allocated
        if (header == nullptr)
        {
            continue;
        }

        // check whether the object is a leaf and doesn't need to be scanned
        if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & LeafBit) != 0)
        {
            continue;
        }

        char * objectAddress = (char *)header->GetAddress();

        // it is not marked, don't scan implicit root
        if (!heapBlockMap.IsMarked(objectAddress))
        {
            continue;
        }

        // TODO: Assume scan interior?
        DUMP_IMPLICIT_ROOT(recycler, objectAddress);

        if (this->InPageHeapMode())
        {
            size_t objectSize = header->objectSize;
            // trim off the trailing part which is not a pointer
            objectSize = HeapInfo::RoundObjectSize(objectSize);
            if (objectSize > 0) // otherwise the object total size is less than a pointer size
            {
                recycler->ScanObjectInlineInterior((void **)objectAddress, objectSize);
            }
        }
        else
        {
            recycler->ScanObjectInlineInterior((void **)objectAddress, header->objectSize);
        }
    }
}
// Mark (and scan, unless leaf) every implicit-root object in this block that
// is not yet marked — e.g. implicit roots allocated during concurrent mark.
void
LargeHeapBlock::ScanNewImplicitRoots(Recycler * recycler)
{
    Assert(recycler->enableScanImplicitRoots);

    uint objectIndex = 0;
    HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
    while (objectIndex < allocCount)
    {
        LargeObjectHeader * header = this->GetHeader(objectIndex);
        objectIndex++;

        // check if the object index is not allocated
        if (header == nullptr)
        {
            continue;
        }

        // check whether the object is an implicit root
        if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & ImplicitRootBit) == 0)
        {
            continue;
        }

        char * objectAddress = (char *)header->GetAddress();
        // Atomically-test-and-set style: only the caller that transitions the
        // mark bit does the scan, so each object is scanned at most once here.
        bool marked = heapBlockMap.TestAndSetMark(objectAddress);
        if (!marked)
        {
            DUMP_IMPLICIT_ROOT(recycler, objectAddress);

            // check whether the object is a leaf and doesn't need to be scanned
            if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & LeafBit) != 0)
            {
                continue;
            }

            if (this->InPageHeapMode())
            {
                size_t objectSize = header->objectSize;
                // trim off the trailing part which is not a pointer
                objectSize = HeapInfo::RoundObjectSize(objectSize);
                if (objectSize > 0) // otherwise the object total size is less than a pointer size
                {
                    recycler->ScanObjectInlineInterior((void **)objectAddress, objectSize);
                }
            }
            else
            {
                // TODO: Assume scan interior
                recycler->ScanObjectInlineInterior((void **)objectAddress, header->objectSize);
            }
        }
    }
}
- #if ENABLE_CONCURRENT_GC
// Determine whether a page has been written to since the last reset, using
// either the software write barrier (per-block) or OS write-watch.
// flags may request the write-watch state be reset as it is read.
bool LargeHeapBlock::IsPageDirty(char* page, RescanFlags flags, bool isWriteBarrier)
{
#ifdef RECYCLER_WRITE_BARRIER
    // TODO: SWB, use special page allocator for large block with write barrier?
    if (CONFIG_FLAG(WriteBarrierTest))
    {
        Assert(isWriteBarrier);
    }
    if (isWriteBarrier)
    {
        return (RecyclerWriteBarrierManager::GetWriteBarrier(page) & DIRTYBIT) == DIRTYBIT;
    }
#endif

#ifdef RECYCLER_WRITE_WATCH
    if (!CONFIG_FLAG(ForceSoftwareWriteBarrier))
    {
        ULONG_PTR count = 1;
        DWORD pageSize = AutoSystemInfo::PageSize;
        DWORD const writeWatchFlags = (flags & RescanFlags_ResetWriteWatch ? WRITE_WATCH_FLAG_RESET : 0);
        void * written = nullptr;
        UINT ret = GetWriteWatch(writeWatchFlags, page, AutoSystemInfo::PageSize, &written, &count, &pageSize);
        // A failed GetWriteWatch call (ret != 0) is treated conservatively as
        // dirty so the page gets rescanned rather than skipped.
        bool isDirty = (ret != 0) || (count == 1);
        return isDirty;
    }
    else
    {
        // Software barrier is forced but this block doesn't use one: no way
        // to answer the query.
        Js::Throw::FatalInternalError();
    }
#else
    Js::Throw::FatalInternalError();
#endif
}
- #endif
#if ENABLE_CONCURRENT_GC
// Rescan a single-page large heap block: if the page is dirty (or a prior
// mark OOM'd on this block), re-queue every live, non-leaf object for
// marking. Returns true iff the page was actually rescanned.
bool
LargeHeapBlock::RescanOnePage(Recycler * recycler, RescanFlags flags)
#else
bool
LargeHeapBlock::RescanOnePage(Recycler * recycler)
#endif
{
    Assert(this->GetPageCount() == 1);
    bool const oldNeedOOMRescan = this->needOOMRescan;

    // Reset this, we'll increment this if we OOM again
    this->needOOMRescan = false;

#if ENABLE_CONCURRENT_GC
    // don't need to get the write watch bit if we already need to oom rescan
    if (!oldNeedOOMRescan)
    {
        if (recycler->inEndMarkOnLowMemory)
        {
            // we only do oom rescan if we are on low memory mark
            return false;
        }

        // Check the write watch bit to see if we need to rescan
        // REVIEW: large object size if bigger than one page, to use header index 0 here should be OK
        bool hasWriteBarrier = false;
#ifdef RECYCLER_WRITE_BARRIER
        hasWriteBarrier = this->GetHeader(0u)->hasWriteBarrier;
#endif
        if (!IsPageDirty(this->GetBeginAddress(), flags, hasWriteBarrier))
        {
            // Page untouched since the last reset; nothing to rescan.
            return false;
        }
    }
#else
    // Shouldn't be rescanning in cases other than OOM if GetWriteWatch
    Assert(oldNeedOOMRescan);
#endif

    RECYCLER_STATS_INC(recycler, markData.rescanLargePageCount);
    for (uint objectIndex = 0; objectIndex < allocCount; objectIndex++)
    {
        // object is allocated during the concurrent mark or it is marked, do rescan
        LargeObjectHeader * header = this->GetHeader(objectIndex);

        // check if the object index is not allocated
        if (header == nullptr)
        {
            continue;
        }

        char * objectAddress = (char *)header->GetAddress();

        // it is not marked, don't rescan
        if (!recycler->heapBlockMap.IsMarked(objectAddress))
        {
            continue;
        }

        unsigned char attributes = header->GetAttributes(this->heapInfo->recycler->Cookie);

#ifdef RECYCLER_STATS
        if (((attributes & FinalizeBit) != 0) && ((attributes & NewFinalizeBit) != 0))
        {
            // The concurrent thread saw a false reference to this object and marked it before the attribute was set.
            // As such, our finalizeCount is not correct. Update it now.
            RECYCLER_STATS_INC(recycler, finalizeCount);
            header->SetAttributes(this->heapInfo->recycler->Cookie, (attributes & ~NewFinalizeBit));
        }
#endif

        // check whether the object is a leaf and doesn't need to be scanned
        if ((attributes & LeafBit) != 0)
        {
            continue;
        }

        RECYCLER_STATS_INC(recycler, markData.rescanLargeObjectCount);
        RECYCLER_STATS_ADD(recycler, markData.rescanLargeByteCount, header->objectSize);

        size_t objectSize = header->objectSize;
        if (this->InPageHeapMode())
        {
            // trim off the trailing part which is not a pointer
            objectSize = HeapInfo::RoundObjectSize(objectSize);
        }
        if (objectSize > 0) // otherwise the object total size is less than a pointer size
        {
            if (!recycler->AddMark(objectAddress, objectSize))
            {
                // Mark-queue OOM: remember to come back to this block.
                this->SetNeedOOMRescan(recycler);
            }
        }
    }
    return true;
}
// Rescan entry point for this block: dispatches to the one-page or the
// multi-page rescan. Returns the count of regions queued for re-marking
// (RescanOnePage's bool converts to 0/1).
size_t
LargeHeapBlock::Rescan(Recycler * recycler, bool isPartialSwept, RescanFlags flags)
{
    // Update the lastCollectAllocCount for sweep
    this->lastCollectAllocCount = this->allocCount;

#if ENABLE_CONCURRENT_GC
    Assert(recycler->collectionState != CollectionStateConcurrentFinishMark || (flags & RescanFlags_ResetWriteWatch));
    if (this->GetPageCount() == 1)
    {
        return RescanOnePage(recycler, flags);
    }

    // Need to rescan for finish mark even if it is done on the background thread
    if (recycler->collectionState != CollectionStateConcurrentFinishMark && recycler->IsConcurrentMarkState())
    {
        // CONCURRENT-TODO: Don't do background rescan for pages with multiple pages because
        // we don't track which page we have queued up
        return 0;
    }

    return RescanMultiPage(recycler, flags);
#else
    return this->GetPageCount() == 1 ? RescanOnePage(recycler) : RescanMultiPage(recycler);
#endif
}
#if ENABLE_CONCURRENT_GC
// Rescan a large heap block spanning multiple pages. Only the dirty pages of
// each live, non-leaf object are queued for re-marking, except objects with
// markOnOOMRescan set, which are re-queued wholesale. Returns the number of
// page-sized regions queued.
size_t
LargeHeapBlock::RescanMultiPage(Recycler * recycler, RescanFlags flags)
#else
size_t
LargeHeapBlock::RescanMultiPage(Recycler * recycler)
#endif
{
    Assert(this->GetPageCount() != 1);
    DebugOnly(bool oldNeedOOMRescan = this->needOOMRescan);

    // Reset this, we'll increment this if we OOM again
    this->needOOMRescan = false;

    size_t rescanCount = 0;
    uint objectIndex = 0;

#if ENABLE_CONCURRENT_GC
    // Cache of the last page queried for write watch, so a page shared by two
    // objects isn't queried (and its watch bit reset) twice.
    char * lastPageCheckedForWriteWatch = nullptr;
    bool isLastPageCheckedForWriteWatchDirty = false;
#endif

    const HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
    while (objectIndex < allocCount)
    {
        // object is allocated during the concurrent mark or it is marked, do rescan
        LargeObjectHeader * header = this->GetHeader(objectIndex);
        objectIndex++;

        // check if the object index is not allocated
        if (header == nullptr)
        {
            continue;
        }

        char * objectAddress = (char *)header->GetAddress();

        // it is not marked, don't rescan
        if (!heapBlockMap.IsMarked(objectAddress))
        {
            continue;
        }

        unsigned char attributes = header->GetAttributes(this->heapInfo->recycler->Cookie);

#ifdef RECYCLER_STATS
        if (((attributes & FinalizeBit) != 0) && ((attributes & NewFinalizeBit) != 0))
        {
            // The concurrent thread saw a false reference to this object and marked it before the attribute was set.
            // As such, our finalizeCount is not correct. Update it now.
            RECYCLER_STATS_INC(recycler, finalizeCount);
            header->SetAttributes(this->heapInfo->recycler->Cookie, (attributes & ~NewFinalizeBit));
        }
#endif

        // check whether the object is a leaf and doesn't need to be scanned
        if ((attributes & LeafBit) != 0)
        {
            continue;
        }

#ifdef RECYCLER_STATS
        bool objectScanned = false;
#endif
        size_t objectSize = header->objectSize;
        if (this->InPageHeapMode())
        {
            // trim off the trailing part which is not a pointer
            objectSize = HeapInfo::RoundObjectSize(objectSize);
        }
        Assert(objectSize > 0);
        Assert(oldNeedOOMRescan || !header->markOnOOMRescan);

        // Avoid writing to the page unnecessary by checking first
        if (header->markOnOOMRescan)
        {
            // A previous rescan OOM'd on this object: re-queue it whole.
            if (!recycler->AddMark(objectAddress, objectSize))
            {
                this->SetNeedOOMRescan(recycler);
                header->markOnOOMRescan = true;

                // We need to bail out of rescan early only if the recycler is
                // trying to finish marking because of low memory. If this is
                // a regular rescan, we want to try and rescan all the objects
                // on the page. It's possible that the rescan OOMs but if the
                // object rescan does OOM, we'll set the right bit on the
                // object header. When we later rescan it in a low memory
                // situation, when the bit is set, we don't need to check for
                // write-watch etc. since we'd have already done that before
                // setting the bit in the non-low-memory rescan case.
                if (!recycler->inEndMarkOnLowMemory)
                {
                    continue;
                }
                return rescanCount;
            }
            header->markOnOOMRescan = false;
#ifdef RECYCLER_STATS
            objectScanned = true;
#endif
        }
#if ENABLE_CONCURRENT_GC
        else if (!recycler->inEndMarkOnLowMemory)
        {
            char * objectAddressEnd = objectAddress + objectSize;
            // Walk through the object, checking if any of its pages have been written to
            // If it has, then queue up this object for marking
            do
            {
                char * pageStart = (char *)(((size_t)objectAddress) & ~(size_t)(AutoSystemInfo::PageSize - 1));

                /*
                 * The rescan logic for large object is as follows:
                 *  - We rescan the object if it was marked during concurrent mark
                 *  - If it was marked, since the large object has multiple pages, we'll rescan only the parts that were changed
                 *  - So for each page in the large object, check if it's been written to, and if it hasn't, skip looking at that region
                 *  - If we can't get the write watch, rescan that region
                 *  - However, this logic applies only if we're not rescanning because of an OOM
                 *  - If we are rescanning this object because of OOM (i.e !rescanBecauseOfOOM = false), rescan the whole object
                 *
                 * We cache the result of the write watch and the page that it was checked on so that we don't call GetWriteWatch on the same
                 * page twice and inadvertently reset the write watch on a page where we've already scanned an object
                 */
                if (lastPageCheckedForWriteWatch != pageStart)
                {
                    lastPageCheckedForWriteWatch = pageStart;
                    isLastPageCheckedForWriteWatchDirty = true;

                    bool hasWriteBarrier = false;
#ifdef RECYCLER_WRITE_BARRIER
                    hasWriteBarrier = header->hasWriteBarrier;
#endif
                    if (!IsPageDirty(pageStart, flags, hasWriteBarrier))
                    {
                        // Fall through to the case below where we'll update objectAddress and continue
                        isLastPageCheckedForWriteWatchDirty = false;
                    }
                }

                if (!isLastPageCheckedForWriteWatchDirty)
                {
                    // Clean page: skip straight to the next page boundary.
                    objectAddress = pageStart + AutoSystemInfo::PageSize;
                    continue;
                }

                // We're interested in only rescanning the parts of the object that have changed, not the whole
                // object. So just queue that up for marking
                char * checkEnd = min(pageStart + AutoSystemInfo::PageSize, objectAddressEnd);
                if (!recycler->AddMark(objectAddress, (checkEnd - objectAddress)))
                {
                    this->SetNeedOOMRescan(recycler);
                    header->markOnOOMRescan = true;
                }

#ifdef RECYCLER_STATS
                objectScanned = true;
                recycler->collectionStats.markData.rescanLargePageCount++;
                recycler->collectionStats.markData.rescanLargeByteCount += (checkEnd - objectAddress);
#endif
                objectAddress = checkEnd;
                rescanCount++;
            }
            while (objectAddress < objectAddressEnd);
        }
#else
        else
        {
            Assert(recycler->inEndMarkOnLowMemory);
        }
#endif
        RECYCLER_STATS_ADD(recycler, markData.rescanLargeObjectCount, objectScanned);
    }

    return rescanCount;
}
/*
* Sweep the large heap block
*
* If there are no finalizable or weak referenced objects, and if nothing is marked
* that means that everything in this heap block is considered free. So the heap block
* can be released.
* In that case, return SweepStateEmpty
* If there are objects to be freed, first see if they are any finalizable objects. If there
* aren't any in this heap block, then this heap block can be swept concurrently. So return SweepStatePendingSweep
* If there are finalizable objects, sweep them in thread. They would have been added to the pendingDispose list
* during the finalize phase, so we return SweepStatePendingDispose.
* In any case, if the pendingDispose list is not empty, we return SweepStatePendingDispose.
* If the allocCount equals the max object count, or if there's no more space to allocate a large object,
* we return SweepStateFull, so that the HeapInfo can move this to the full block list. Otherwise,
* we return SweepStateSwept.
*/
SweepState
LargeHeapBlock::Sweep(RecyclerSweep& recyclerSweep, bool queuePendingSweep)
{
    Recycler * recycler = recyclerSweep.GetRecycler();
    uint markCount = GetMarkCount();

#if DBG
    Assert(this->lastCollectAllocCount == this->allocCount);
    Assert(markCount <= allocCount);
#endif
    RECYCLER_STATS_INC(recycler, heapBlockCount[HeapBlock::LargeBlockType]);
#if DBG
    this->expectedSweepCount = allocCount - markCount;
#endif
#if ENABLE_CONCURRENT_GC
    Assert(!this->isPendingConcurrentSweep);
#endif

    // Nothing is marked and nothing needs finalization: whole block is free.
    bool isAllFreed = (finalizeCount == 0 && markCount == 0);
    if (isAllFreed)
    {
        recycler->NotifyFree(this);
        Assert(this->pendingDisposeObject == nullptr);
        return SweepStateEmpty;
    }

    RECYCLER_STATS_ADD(recycler, largeHeapBlockTotalByteCount, this->pageCount * AutoSystemInfo::PageSize);
    RECYCLER_STATS_ADD(recycler, heapBlockFreeByteCount[HeapBlock::LargeBlockType],
        addressEnd - allocAddressEnd <= HeapConstants::MaxSmallObjectSize? 0 : (size_t)(addressEnd - allocAddressEnd));

    // If the number of objects marked is not equal to the number of objects
    // that have been allocated by this large heap block, that means that there
    // could be some objects that need to be swept
    if (markCount != allocCount)
    {
        Assert(this->expectedSweepCount != 0);

        // We need to sweep in thread if there are any finalizable objects so
        // that the PrepareFinalize() can be called before concurrent sweep
        // and other finalizers. This gives the object an opportunity before any
        // other script can be ran to clean up its references/states that are not
        // valid since we've determined that the object is not live any more.
        //
        // An example is the ITrackable's tracking alias. The reference to the alias
        // object needs to be clear so that the reference will not be given out again
        // in other script during concurrent sweep or finalizer called before.
        Assert(!recyclerSweep.IsBackground());
#if ENABLE_CONCURRENT_GC
        if (queuePendingSweep && finalizeCount == 0)
        {
            // No finalizers: the actual sweep can happen on the background thread.
            this->isPendingConcurrentSweep = true;
            return SweepStatePendingSweep;
        }
#else
        Assert(!queuePendingSweep);
#endif

        SweepObjects<SweepMode_InThread>(recycler);
        if (TransferSweptObjects())
        {
            return SweepStatePendingDispose;
        }
    }
#ifdef RECYCLER_STATS
    else
    {
        // Everything is marked; force-sweep only to collect statistics.
        Assert(expectedSweepCount == 0);
        isForceSweeping = true;
        SweepObjects<SweepMode_InThread>(recycler);
        isForceSweeping = false;
    }
#endif

    if (this->pendingDisposeObject != nullptr)
    {
        return SweepStatePendingDispose;
    }

    return (allocCount == objectCount || addressEnd - allocAddressEnd <= HeapConstants::MaxSmallObjectSize) && this->freeList.entries == nullptr ?
        SweepStateFull : SweepStateSwept;
}
// Release the fully-free whole pages covered by a freed large object that
// spans more than one page, shrinking this heap block in place (its address
// and pageCount are updated). Returns true if pages were released.
// inDispose indicates the call comes from dispose, which must suspend idle
// decommit around the page release to avoid reentrancy.
bool
LargeHeapBlock::TrimObject(Recycler* recycler, LargeObjectHeader* header, size_t sizeOfObject, bool inDispose)
{
    IdleDecommitPageAllocator* pageAllocator = recycler->GetRecyclerLargeBlockPageAllocator();
    uint pageSize = AutoSystemInfo::PageSize ;

    // If we have to trim an object, either we need to have more than one object in the
    // heap block or we're being called as a part of force-sweep or dispose
    Assert(this->allocCount > 1 || this->isForceSweeping || inDispose);

    // If we have more than 1 page of bytes to free
    // make sure that the number of bytes doesn't exceed the cap for a PageSegment
    // since this optimization can only be applied to heap blocks using page segments.
    // We also skip this optimization if the allocCount is 1 since that means
    // the heap block is empty and we've been called only because we're force sweeping.
    // So, skip the opt since we're going to be marking the heap block as empty soon
    if (sizeOfObject > pageSize &&
        this->segment->GetPageCount() <= pageAllocator->GetMaxAllocPageCount() &&
        this->allocCount > 1)
    {
        Assert(!this->hadTrimmed);

        // We want to decommit the free pages beyond 4K (the page size)
        // The way large allocations work is that at most we can have 4 objects in a large heap block
        // The first object can span multiple pages, the remaining 3 objects must all fit within a page
        // So if the object being freed is greater than 1 page, then it must be the first object
        // The objectIndex must be 0 and the header must be same as this->address
        // The end address is (baseAddress + objectSize) & ~(4k - 1)
        // The number of pages to free is (freePageEnd - freePageStart) / pageSize
        char* objectAddress = (char*) header;
        char* objectEndAddress = objectAddress + sizeof(LargeObjectHeader) + header->objectSize;
        uintptr_t alignmentMask = ~((uintptr_t) (AutoSystemInfo::PageSize - 1));
        uintptr_t objectFreeAddress = (uintptr_t) objectAddress;
        // Round the end down to a page boundary: only whole pages are released.
        uintptr_t objectFreeEndAddress = ((uintptr_t) objectEndAddress) & alignmentMask;
        size_t bytesToFree = (objectFreeEndAddress - objectFreeAddress);

        // Verify assumptions
        // Make sure that the object being freed is the first object since
        // the expectation in a large heap block is that the first object is the largest
        // object.
        // The amount of bytes to free is always less than the size of the object being freed including its header
        // The exception is if the original object's size + header size is a multiple of the page size
        Assert(objectAddress == this->address);
        Assert(header->objectIndex == 0);
        Assert(objectFreeEndAddress <= (uintptr_t) objectEndAddress);
        Assert(objectFreeAddress <= objectFreeEndAddress);
        Assert(bytesToFree < sizeOfObject + sizeof(LargeObjectHeader) || (uintptr_t) objectEndAddress == objectFreeEndAddress);

        // If we actually have something to free, release those pages
        // Move the heap block to start from the new start address
        // Change the heap block map to contain an entry for only the pages that haven't been freed
        // Fill up the old object's unreleased memory if we have to
        Assert(bytesToFree > 0);
        Assert((bytesToFree & (AutoSystemInfo::PageSize - 1)) == 0);

        size_t freePageCount = bytesToFree / AutoSystemInfo::PageSize;
        Assert(freePageCount > 0);
        Assert(freePageCount < this->pageCount);

        // If this call to trim needs idle decommit to be suspended (e.g. dispose case)
        // check if IdleDecommit has been suspended already. If it hasn't, suspend it
        // This is to prevent reentrant idle decommits (e.g. sometimes dispose is called with
        if (inDispose)
        {
            pageAllocator->SuspendIdleDecommit();
        }

        pageAllocator->Release((char*) objectFreeAddress, freePageCount, this->GetSegment());

        if (inDispose)
        {
            pageAllocator->ResumeIdleDecommit();
        }

        // Remove the freed pages from the heap block map
        // and move the heap block to start from after the pages that were freed
        // and update the page count
        recycler->heapBlockMap.ClearHeapBlock(this->address, freePageCount);
        this->address = (char*) objectFreeEndAddress;
        this->pageCount -= freePageCount;

        // Scrub the tail of the object that stayed on the retained page.
        FillFreeMemory(recycler, (void*) objectFreeEndAddress, (size_t) (objectEndAddress - objectFreeEndAddress));
#if DBG
        this->hadTrimmed = true;
#endif
        return true;
    }

    return false;
}
// In-thread sweep of a single dead large object. Finalizable objects are
// left alone here: their slot is cleared and memory filled during dispose.
template <>
void
LargeHeapBlock::SweepObject<SweepMode_InThread>(Recycler * recycler, LargeObjectHeader * header)
{
    Assert(this->HeaderList()[header->objectIndex] == header);

    // Set the header and object to null only if this is not a finalizable object
    // If it's finalizable, it'll be zeroed out during dispose
    if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit) != FinalizeBit)
    {
        this->HeaderList()[header->objectIndex] = nullptr;
        size_t sizeOfObject = header->objectSize;

        bool objectTrimmed = false;

        // Trimming trailing pages only applies when this block has no bucket.
        if (this->bucket == nullptr)
        {
            objectTrimmed = TrimObject(recycler, header, sizeOfObject);
        }

        if (!objectTrimmed)
        {
            FillFreeMemory(recycler, header, sizeof(LargeObjectHeader) + sizeOfObject);
        }
    }
}
//
// Call the finalizer on the heapblock object and add it to the pending dispose list
//
void
LargeHeapBlock::FinalizeObject(Recycler* recycler, LargeObjectHeader* header)
{
    // The header count can also be null if this object has already been finalized
    // but this method should never be called if the header list header is null
    Assert(this->HeaderList()[header->objectIndex] == header);
    Assert(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit);

    // Call finalize to do clean up that needs to be done immediately
    // (e.g. Clear the ITrackable alias reference, so it can't be revived during
    // other finalizers or concurrent sweep)
    // Call it only if it hasn't already been finalized
    ((FinalizableObject *)header->GetAddress())->Finalize(false);

    // Push the header onto the singly-linked pending-dispose list.
    header->SetNext(this->heapInfo->recycler->Cookie, this->pendingDisposeObject);
    this->pendingDisposeObject = header;

    // Null out the header in the header list- this means that this object has already
    // been finalized and is just pending dispose
    this->HeaderList()[header->objectIndex] = nullptr;

#ifdef RECYCLER_FINALIZE_CHECK
    recycler->autoHeap.pendingDisposableObjectCount++;
#endif
}
// Explicitly instantiate all the sweep modes
template void LargeHeapBlock::SweepObjects<SweepMode_InThread>(Recycler * recycler);

#if ENABLE_CONCURRENT_GC
// Concurrent sweep of a single dead, non-finalizable large object: clear its
// header slot and fill the freed memory.
template <>
void
LargeHeapBlock::SweepObject<SweepMode_Concurrent>(Recycler * recycler, LargeObjectHeader * header)
{
    Assert(!(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit));
    Assert(this->HeaderList()[header->objectIndex] == header);
    this->HeaderList()[header->objectIndex] = nullptr;
    FillFreeMemory(recycler, header, sizeof(LargeObjectHeader) + header->objectSize);
}

// Explicitly instantiate all the sweep modes
template void LargeHeapBlock::SweepObjects<SweepMode_Concurrent>(Recycler * recycler);

#if ENABLE_PARTIAL_GC
// Concurrent partial sweep: don't free the object yet — tag its header-list
// entry with PartialFreeBit so FinishPartialCollect can free it later.
template <>
void
LargeHeapBlock::SweepObject<SweepMode_ConcurrentPartial>(Recycler * recycler, LargeObjectHeader * header)
{
    Assert(!(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit));
    Assert(this->HeaderList()[header->objectIndex] == header);
    this->HeaderList()[header->objectIndex] = (LargeObjectHeader *)((size_t)header | PartialFreeBit);
    DebugOnly(this->hasPartialFreeObjects = true);
}

// Explicitly instantiate all the sweep modes
template void LargeHeapBlock::SweepObjects<SweepMode_ConcurrentPartial>(Recycler * recycler);
#endif
#endif
//
// Walk through the objects in this heap block and call finalize
// on them if they're not marked and finalizable.
//
// At the end of this phase, if there were any finalizable objects,
// they would be in the pendingDisposeObject list. When we later call
// sweep on this heapblock, we'd simply null out the header and zero out the memory
// and then Sweep would return PendingDispose as its state
//
void LargeHeapBlock::FinalizeObjects(Recycler* recycler)
{
    const HeapBlockMap& heapBlockMap = recycler->heapBlockMap;
    // Iterate only up to lastCollectAllocCount: objects allocated after the
    // collection snapshot are not candidates for finalization.
    for (uint i = 0; i < this->lastCollectAllocCount; i++)
    {
        LargeObjectHeader * header = this->GetHeader(i);
        if (header == nullptr)
        {
            continue;
        }
        Assert(header->objectIndex == i);

        // Skip finalization if the object is alive
        if (heapBlockMap.IsMarked(header->GetAddress()))
        {
            continue;
        }

        if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit) == FinalizeBit)
        {
            recycler->NotifyFree((char *)header->GetAddress(), header->objectSize);
            FinalizeObject(recycler, header);
        }
    }
}
// Sweep every dead object in this block using the given sweep mode.
// Marked objects are left untouched; dead ones are handed to
// SweepObject<mode> and, when this block has a bucket, free-listed.
template <SweepMode mode>
void
LargeHeapBlock::SweepObjects(Recycler * recycler)
{
#if ENABLE_CONCURRENT_GC
    Assert(mode == SweepMode_InThread || this->isPendingConcurrentSweep);
#else
    Assert(mode == SweepMode_InThread);
#endif
    const HeapBlockMap& heapBlockMap = recycler->heapBlockMap;

#if DBG
    uint markCount = GetMarkCount();
    // mark count included newly allocated objects
#if ENABLE_CONCURRENT_GC
    Assert(expectedSweepCount == allocCount - markCount || recycler->collectionState == CollectionStateConcurrentSweep);
#else
    Assert(expectedSweepCount == allocCount - markCount);
#endif
    Assert(expectedSweepCount != 0 || isForceSweeping);
    uint sweepCount = 0;
#endif

    for (uint i = 0; i < lastCollectAllocCount; i++)
    {
        RECYCLER_STATS_ADD(recycler, objectSweepScanCount, !isForceSweeping);
        LargeObjectHeader * header = this->GetHeader(i);

        if (header == nullptr)
        {
            // Slot already freed (e.g. finalized earlier); it was part of the
            // expected sweep, so account for it in DBG builds.
#if DBG
            Assert(expectedSweepCount != 0);
            expectedSweepCount--;
#endif
#if DBG
            LargeAllocationVerboseTrace(recycler->GetRecyclerFlagsTable(), _u("Index %d empty\n"), i);
#endif
            continue;
        }

        Assert(header->objectIndex == i);

        // Skip sweep if the object is alive
        if (heapBlockMap.IsMarked(header->GetAddress()))
        {
#if DBG
            Assert((header->GetAttributes(recycler->Cookie) & NewFinalizeBit) == 0);
#endif
            RECYCLER_STATS_ADD(recycler, largeHeapBlockUsedByteCount, this->GetHeader(i)->objectSize);
            continue;
        }

        size_t objectSize = header->objectSize;
        recycler->NotifyFree((char *)header->GetAddress(), objectSize);
        SweepObject<mode>(recycler, header);

        // Bucketed blocks reuse swept slots via a free list (skipped when
        // force-sweeping purely for statistics).
        if (this->bucket != nullptr
#ifdef RECYCLER_STATS
            && !isForceSweeping
#endif
            )
        {
            LargeHeapBlockFreeListEntry* head = this->freeList.entries;
            LargeHeapBlockFreeListEntry* entry = (LargeHeapBlockFreeListEntry*) header;
            entry->headerIndex = i;
            entry->heapBlock = this;
            entry->next = head;
            entry->objectSize = objectSize;
            this->freeList.entries = entry;
        }
#if DBG
        sweepCount++;
#endif
    }

    Assert(sweepCount == expectedSweepCount);
#if ENABLE_CONCURRENT_GC
    this->isPendingConcurrentSweep = false;
#endif
}
- bool
- LargeHeapBlock::TransferSweptObjects()
- {
- // TODO : Large heap block doesn't do free listing yet
- return pendingDisposeObject != nullptr;
- }
// Dispose every object on the pending-dispose list: call Dispose, update the
// finalize count, optionally trim free pages, and fill the freed memory.
void
LargeHeapBlock::DisposeObjects(Recycler * recycler)
{
    Assert(this->pendingDisposeObject != nullptr || this->hasDisposeBeenCalled);

    while (pendingDisposeObject != nullptr)
    {
#if DBG
        this->hasDisposeBeenCalled = true;
#endif
        // Pop the head of the pending-dispose list.
        LargeObjectHeader * header = pendingDisposeObject;
        pendingDisposeObject = header->GetNext(this->heapInfo->recycler->Cookie);
        Assert(header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit);
        // The header slot was nulled when the object was finalized.
        Assert(this->HeaderList()[header->objectIndex] == nullptr);

        void * objectAddress = header->GetAddress();
        ((FinalizableObject *)objectAddress)->Dispose(false);

        Assert(finalizeCount != 0);
        finalizeCount--;

        bool objectTrimmed = false;

        if (this->bucket == nullptr)
        {
            objectTrimmed = TrimObject(recycler, header, header->objectSize, true /* need suspend */);
        }

        // GCTODO: Consider free listing items after Dispose too
        // GCTODO: Consider compacting heap blocks- if the last n items are free, move the address pointer
        // back to before the nth item so we can bump allocate from this heap block
        if (!objectTrimmed)
        {
            FillFreeMemory(recycler, header, sizeof(LargeObjectHeader) + header->objectSize);
        }

        RECYCLER_STATS_INC(recycler, finalizeSweepCount);
#ifdef RECYCLER_FINALIZE_CHECK
        this->heapInfo->liveFinalizableObjectCount--;
        this->heapInfo->pendingDisposableObjectCount--;
#endif
    }
}
- #if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
- void
- LargeHeapBlock::PartialTransferSweptObjects()
- {
- // Nothing to do
- Assert(this->hasPartialFreeObjects);
- }
// After a partial collect completes, actually free the objects that were
// partial-swept: strip the PartialFreeBit tag, null the header slot, and
// fill the freed memory.
void
LargeHeapBlock::FinishPartialCollect(Recycler * recycler)
{
    Assert(this->hasPartialFreeObjects);
    for (uint i = 0; i < allocCount; i++)
    {
        LargeObjectHeader * header = this->HeaderList()[i];
        if (header != nullptr && IsPartialSweptHeader(header))
        {
            // Recover the real header pointer from the tagged entry.
            header = (LargeObjectHeader *)((size_t)header & ~PartialFreeBit);
            Assert(header->objectIndex == i);
            this->HeaderList()[i] = nullptr;
            FillFreeMemory(recycler, header, sizeof(LargeObjectHeader) + header->objectSize);
        }
    }
    DebugOnly(this->hasPartialFreeObjects = false);
}
- #endif
- void
- LargeHeapBlock::EnumerateObjects(ObjectInfoBits infoBits, void (*CallBackFunction)(void * address, size_t size))
- {
- for (uint i = 0; i < allocCount; i++)
- {
- LargeObjectHeader * header = this->GetHeader(i);
- if (header == nullptr)
- {
- continue;
- }
- if ((header->GetAttributes(this->heapInfo->recycler->Cookie) & infoBits) != 0)
- {
- CallBackFunction(header->GetAddress(), header->objectSize);
- }
- }
- }
// Upper bound on how many objects can ever live in a block of `pageCount`
// pages whose first allocation is `firstAllocationSize` bytes: the first
// object, plus one per MaxSmallObjectSize-sized chunk of remaining space
// (presumably any additional object placed in a large block exceeds
// MaxSmallObjectSize — TODO confirm against the allocation path).
uint
LargeHeapBlock::GetMaxLargeObjectCount(size_t pageCount, size_t firstAllocationSize)
{
    // Space left after the first object and its header.
    size_t freeSize = (AutoSystemInfo::PageSize * pageCount) - firstAllocationSize - sizeof(LargeObjectHeader);
    // NOTE(review): relies on large blocks being smaller than the VM
    // allocation granularity — verify where block sizes are chosen.
    Assert(freeSize < AutoSystemInfo::Data.dwAllocationGranularity);
    size_t objectCount = (freeSize / HeapConstants::MaxSmallObjectSize) + 1;
    Assert(objectCount <= UINT_MAX);
    return (uint)objectCount;
}
#ifdef RECYCLER_SLOW_CHECK_ENABLED
// Slow consistency check: every non-null header entry must point back to a
// header whose objectIndex matches its slot in the header list.
void
LargeHeapBlock::Check(bool expectFull, bool expectPending)
{
    for (uint slot = 0; slot < allocCount; slot++)
    {
        LargeObjectHeader * entry = this->HeaderList()[slot];
        if (entry == nullptr)
        {
            continue;
        }
#if ENABLE_PARTIAL_GC && ENABLE_CONCURRENT_GC
        // Strip the partial-free tag bit before dereferencing; the tag is only
        // legitimate while the block holds partial-free objects.
        LargeObjectHeader * untagged = (LargeObjectHeader *)((size_t)entry & ~PartialFreeBit);
        Assert(this->hasPartialFreeObjects || untagged == entry);
        entry = untagged;
#endif
        Assert(entry->objectIndex == slot);
    }
}
#endif
// Fill a freed range (header plus payload) with a recognizable debug pattern.
// address: start of the freed range; size: number of bytes to fill.
void LargeHeapBlock::FillFreeMemory(Recycler * recycler, __in_bcount(size) void * address, size_t size)
{
    // For now, we don't do anything in release build because we don't reuse this memory until we return
    // the pages to the allocator which will zero out the whole page
#ifdef RECYCLER_MEMORY_VERIFY
    if (recycler->VerifyEnabled())
    {
        // Verification builds expect freed memory to hold VerifyMemFill so
        // later VerifyCheckFill passes can detect writes to freed objects.
        memset(address, Recycler::VerifyMemFill, size);
        return;
    }
#endif
#ifdef RECYCLER_FREE_MEM_FILL
    // Debug fill only; skipped when memory verification already filled above.
    memset(address, DbgMemFill, size);
#endif
}
- size_t LargeHeapBlock::GetObjectSize(void* objectAddress)
- {
- LargeObjectHeader * header = GetHeader(objectAddress);
- Assert((char *)header >= this->address);
- return header->objectSize;
- }
#ifdef RECYCLER_MEMORY_VERIFY
// Walk the block from its base address and validate layout invariants:
// headers appear in ascending address order, each header's index matches its
// slot, the gaps between objects carry the verification fill pattern, and each
// object's trailing pad bytes are intact. Also recounts finalizable objects
// and cross-checks the cached finalizeCount.
void
LargeHeapBlock::Verify(Recycler * recycler)
{
    char * lastAddress = this->address;
    uint verifyFinalizeCount = 0;
    for (uint i = 0; i < allocCount; i++)
    {
        LargeObjectHeader * header = this->HeaderList()[i];
        if (header == nullptr)
        {
            // Check if the object is on the free list
            LargeHeapBlockFreeListEntry* current = this->freeList.entries;
            while (current != nullptr)
            {
                // Verify the free listed object
                if (current->headerIndex == i)
                {
                    // Free-list entries reuse the header slot; payload follows the header.
                    BYTE* objectAddress = (BYTE *)current + sizeof(LargeObjectHeader);
                    Recycler::VerifyCheck(current->heapBlock == this, _u("Invalid heap block"), this, current->heapBlock);
                    Recycler::VerifyCheck((char *)current >= lastAddress, _u("LargeHeapBlock invalid object header order"), this->address, current);
                    Recycler::VerifyCheckFill(lastAddress, (char *)current - lastAddress);
                    recycler->VerifyCheckPad(objectAddress, current->objectSize);
                    lastAddress = (char *) objectAddress + current->objectSize;
                    break;
                }
                current = current->next;
            }
            continue;
        }
        Recycler::VerifyCheck((char *)header >= lastAddress, _u("LargeHeapBlock invalid object header order"), this->address, header);
        // The gap between the previous object's end and this header must still
        // hold the verification fill written by FillFreeMemory.
        Recycler::VerifyCheckFill(lastAddress, (char *)header - lastAddress);
        Recycler::VerifyCheck(header->objectIndex == i, _u("LargeHeapBlock object index mismatch"), this->address, &header->objectIndex);
        recycler->VerifyCheckPad((BYTE *)header->GetAddress(), header->objectSize);
        // Recount finalizable objects for the cross-check below.
        verifyFinalizeCount += ((header->GetAttributes(this->heapInfo->recycler->Cookie) & FinalizeBit) != 0);
        lastAddress = (char *)header->GetAddress() + header->objectSize;
    }
    Recycler::VerifyCheck(verifyFinalizeCount == this->finalizeCount, _u("LargeHeapBlock finalize object count mismatch"), this->address, &this->finalizeCount);
}
#endif
- uint
- LargeHeapBlock::GetMarkCount()
- {
- uint markCount = 0;
- const HeapBlockMap& heapBlockMap = this->heapInfo->recycler->heapBlockMap;
- for (uint i = 0; i < allocCount; i++)
- {
- LargeObjectHeader* header = this->HeaderList()[i];
- if (header && header->objectIndex == i && heapBlockMap.IsMarked(header->GetAddress()))
- {
- markCount++;
- }
- }
- return markCount;
- }
#ifdef RECYCLER_PERF_COUNTERS
// Subtract this block's remaining recorded objects (count and bytes) from the
// live-object perf counters as the block is freed.
void
LargeHeapBlock::UpdatePerfCountersOnFree()
{
    Assert(GetMarkCount() == 0);

    size_t liveObjects = 0;
    size_t liveBytes = 0;
    for (uint slot = 0; slot < allocCount; slot++)
    {
        LargeObjectHeader * entry = this->HeaderList()[slot];
        if (entry != nullptr)
        {
            liveObjects++;
            liveBytes += entry->objectSize;
        }
    }

    const size_t blockBytes = this->GetPageCount() * AutoSystemInfo::PageSize;
    RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockLiveObject, liveObjects);
    RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockLiveObjectSize, liveBytes);
    RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockFreeObjectSize, blockBytes - liveBytes);
    RECYCLER_PERF_COUNTER_SUB(LiveObject, liveObjects);
    RECYCLER_PERF_COUNTER_SUB(LiveObjectSize, liveBytes);
    RECYCLER_PERF_COUNTER_SUB(FreeObjectSize, blockBytes - liveBytes);
}
#endif
#ifdef PROFILE_RECYCLER_ALLOC
// Return the profiler tracker entry recorded for the object at `address`.
void *
LargeHeapBlock::GetTrackerData(void * address)
{
    Assert(Recycler::DoProfileAllocTracker());
    LargeObjectHeader * objectHeader = GetHeader(address);
    Assert((char *)objectHeader >= this->address);

    uint slot = objectHeader->objectIndex;
    Assert(slot < this->allocCount);
    Assert(this->HeaderList()[slot] == objectHeader);
    return this->GetTrackerDataArray()[slot];
}

// Record profiler tracker data for the object at `address`.
void
LargeHeapBlock::SetTrackerData(void * address, void * data)
{
    Assert(Recycler::DoProfileAllocTracker());
    LargeObjectHeader * objectHeader = GetHeader(address);
    Assert((char *)objectHeader >= this->address);

    uint slot = objectHeader->objectIndex;
    Assert(slot < this->allocCount);
    Assert(this->HeaderList()[slot] == objectHeader);
    this->GetTrackerDataArray()[slot] = data;
}

// The tracker-data array occupies the last objectCount pointers of the
// allocation that holds this block's bookkeeping.
// See LargeHeapBlock::GetAllocPlusSize for layout description
void **
LargeHeapBlock::GetTrackerDataArray()
{
    char * allocEnd = (char *)(this + 1) + LargeHeapBlock::GetAllocPlusSize(this->objectCount);
    return (void **)(allocEnd - this->objectCount * sizeof(void *));
}
#endif
#ifdef RECYCLER_PAGE_HEAP
// Capture the allocation-time call stack for a page-heap block, used to
// diagnose use-after-free / overrun failures detected by page heap.
void
LargeHeapBlock::CapturePageHeapAllocStack()
{
#ifdef STACK_BACK_TRACE
    if (this->InPageHeapMode()) // pageheap can be enabled only for some of the buckets
    {
        // These asserts are true because explicit free is disallowed in
        // page heap mode. If they weren't, we'd have to modify the asserts
        Assert(this->pageHeapFreeStack == nullptr);
        Assert(this->pageHeapAllocStack == nullptr);
        // Note: NoCheckHeapAllocator will fail fast if we can't allocate the stack to capture
        // REVIEW: Should we have a flag to configure the number of frames captured?
        // NOTE(review): the assert above says the stack is null here, so this
        // reuse branch looks like release-build defensiveness — confirm a
        // release path can reach here with a live stack before relying on it.
        if (pageHeapAllocStack != nullptr && this->pageHeapAllocStack != s_StackTraceAllocFailed)
        {
            // Reuse the previously allocated trace buffer.
            this->pageHeapAllocStack->Capture(Recycler::s_numFramesToSkipForPageHeapAlloc);
        }
        else
        {
            this->pageHeapAllocStack = StackBackTrace::Capture(&NoThrowHeapAllocator::Instance,
                Recycler::s_numFramesToSkipForPageHeapAlloc, Recycler::s_numFramesToCaptureForPageHeap);
        }
        if (this->pageHeapAllocStack == nullptr)
        {
            this->pageHeapAllocStack = const_cast<StackBackTrace*>(s_StackTraceAllocFailed); // allocate failed, mark it we have tried
        }
    }
#endif
}

// Capture the free-time call stack for a page-heap block; paired with the
// allocation stack captured above.
void
LargeHeapBlock::CapturePageHeapFreeStack()
{
#ifdef STACK_BACK_TRACE
    if (this->InPageHeapMode()) // pageheap can be enabled only for some of the buckets
    {
        // These asserts are true because explicit free is disallowed in
        // page heap mode. If they weren't, we'd have to modify the asserts
        Assert(this->pageHeapFreeStack == nullptr);
        Assert(this->pageHeapAllocStack != nullptr);
        // NOTE(review): asserted null above, so the reuse branch appears to be
        // release-build defensiveness; unlike the alloc path, a Capture
        // failure here is not marked with s_StackTraceAllocFailed.
        if (this->pageHeapFreeStack != nullptr)
        {
            this->pageHeapFreeStack->Capture(Recycler::s_numFramesToSkipForPageHeapFree);
        }
        else
        {
            this->pageHeapFreeStack = StackBackTrace::Capture(&NoThrowHeapAllocator::Instance,
                Recycler::s_numFramesToSkipForPageHeapFree, Recycler::s_numFramesToCaptureForPageHeap);
        }
    }
#endif
}
#endif
#if DBG
// Mark the pointer-sized slot at addr in the write-barrier verification bits.
void LargeHeapBlock::WBSetBit(char* addr)
{
    uint bitIndex = (uint)(addr - this->address) / sizeof(void*);
    try
    {
        AUTO_NESTED_HANDLED_EXCEPTION_TYPE(static_cast<ExceptionType>(ExceptionType_DisableCheck));
        wbVerifyBits.Set(bitIndex);
    }
    catch (Js::OutOfMemoryException&)
    {
        // Best effort: growing the debug bit vector may fail under OOM.
    }
}

// Mark `length` consecutive pointer-sized slots starting at addr.
void LargeHeapBlock::WBSetBits(char* addr, uint length)
{
    uint firstBit = (uint)(addr - this->address) / sizeof(void*);
    try
    {
        AUTO_NESTED_HANDLED_EXCEPTION_TYPE(static_cast<ExceptionType>(ExceptionType_DisableCheck));
        for (uint offset = 0; offset < length; offset++)
        {
            wbVerifyBits.Set(firstBit + offset);
        }
    }
    catch (Js::OutOfMemoryException&)
    {
        // Best effort: verification bits are debug-only, so swallow OOM.
    }
}

// Clear the verification bits covering the entire object that starts at addr.
void LargeHeapBlock::WBClearBits(char* addr)
{
    uint firstBit = (uint)(addr - this->address) / sizeof(void*);
    size_t objectSize = this->GetHeader(addr)->objectSize;
    uint slotCount = (uint)objectSize / sizeof(void*);
    for (uint offset = 0; offset < slotCount; offset++)
    {
        wbVerifyBits.Clear(firstBit + offset);
    }
}
#endif
|