| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761
27712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761
77717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762
27722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762
777277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029 |
- //-------------------------------------------------------------------------------------------------------
- // Copyright (C) Microsoft. All rights reserved.
- // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
- //-------------------------------------------------------------------------------------------------------
- #include "CommonMemoryPch.h"
// Clamp dst down to src: dst becomes min(dst, src).
#define UpdateMinimum(dst, src) if (dst > src) { dst = src; }
#if ENABLE_OOP_NATIVE_CODEGEN
// Per-thread record of the last HRESULT produced by an out-of-process
// memory operation (see MemoryOperationLastError).
THREAD_LOCAL HRESULT MemoryOperationLastError::MemOpLastError = 0;
#endif
- //=============================================================================================================
- // Segment
- //=============================================================================================================
- SegmentBaseCommon::SegmentBaseCommon(PageAllocatorBaseCommon* allocator)
- : allocator(allocator)
- {
- }
- bool SegmentBaseCommon::IsInPreReservedHeapPageAllocator() const
- {
- #if ENABLE_NATIVE_CODEGEN
- return allocator->GetAllocatorType() == PageAllocatorBaseCommon::AllocatorType::PreReservedVirtualAlloc
- #if ENABLE_OOP_NATIVE_CODEGEN
- || allocator->GetAllocatorType() == PageAllocatorBaseCommon::AllocatorType::PreReservedSectionAlloc
- #endif
- #else
- return false
- #endif
- ;
- }
- template<typename T>
- SegmentBase<T>::SegmentBase(PageAllocatorBase<T> * allocator, size_t pageCount, bool enableWriteBarrier) :
- SegmentBaseCommon(allocator),
- address(nullptr),
- trailingGuardPageCount(0),
- leadingGuardPageCount(0),
- secondaryAllocPageCount(allocator->secondaryAllocPageCount),
- secondaryAllocator(nullptr)
- #if defined(TARGET_64) && defined(RECYCLER_WRITE_BARRIER)
- , isWriteBarrierAllowed(false)
- , isWriteBarrierEnabled(enableWriteBarrier)
- #endif
- #if DBG
- , isPageSegment(false)
- #endif // DBG
- {
- this->segmentPageCount = pageCount + secondaryAllocPageCount;
- }
- template<typename T>
- SegmentBase<T>::~SegmentBase()
- {
- Assert(this->allocator != nullptr);
- // Cleanup secondaryAllocator before releasing pages so the destructor
- // still has access to segment memory.
- if(this->secondaryAllocator)
- {
- this->secondaryAllocator->Delete();
- this->secondaryAllocator = nullptr;
- }
- if (this->address)
- {
- char* originalAddress = this->address - (leadingGuardPageCount * AutoSystemInfo::PageSize);
- GetAllocator()->GetVirtualAllocator()->Free(originalAddress, GetPageCount() * AutoSystemInfo::PageSize, MEM_RELEASE);
- GetAllocator()->ReportFree(this->segmentPageCount * AutoSystemInfo::PageSize); //Note: We reported the guard pages free when we decommitted them during segment initialization
- #if defined(TARGET_64) && defined(RECYCLER_WRITE_BARRIER_BYTE)
- #if ENABLE_DEBUG_CONFIG_OPTIONS
- if (CONFIG_FLAG(StrictWriteBarrierCheck) && this->isWriteBarrierEnabled)
- {
- RecyclerWriteBarrierManager::ToggleBarrier(this->address, this->segmentPageCount * AutoSystemInfo::PageSize, false);
- }
- #endif
- RecyclerWriteBarrierManager::OnSegmentFree(this->address, this->segmentPageCount);
- #endif
- }
- }
- template<typename T>
- bool
- SegmentBase<T>::Initialize(DWORD allocFlags, bool excludeGuardPages)
- {
- Assert(this->address == nullptr);
- char* originalAddress = nullptr;
- bool addGuardPages = false;
- if (!excludeGuardPages)
- {
- addGuardPages = (this->segmentPageCount * AutoSystemInfo::PageSize) > VirtualAllocThreshold;
- #if TARGET_32
- unsigned int randomNumber2 = static_cast<unsigned int>(Math::Rand());
- addGuardPages = addGuardPages && (randomNumber2 % 4 == 1);
- #endif
- #if DEBUG
- addGuardPages = addGuardPages || Js::Configuration::Global.flags.ForceGuardPages;
- #endif
- if (addGuardPages)
- {
- unsigned int randomNumber = static_cast<unsigned int>(Math::Rand());
- this->leadingGuardPageCount = randomNumber % maxGuardPages + minGuardPages;
- this->trailingGuardPageCount = minGuardPages;
- }
- }
- // We can only allocate with this granularity using VirtualAlloc
- size_t totalPages = Math::Align<size_t>(this->segmentPageCount + leadingGuardPageCount + trailingGuardPageCount, AutoSystemInfo::Data.GetAllocationGranularityPageCount());
- this->segmentPageCount = totalPages - (leadingGuardPageCount + trailingGuardPageCount);
- #ifdef FAULT_INJECTION
- if (Js::FaultInjection::Global.ShouldInjectFault(Js::FaultInjection::Global.NoThrow))
- {
- this->address = nullptr;
- return false;
- }
- #endif
- if (!this->GetAllocator()->RequestAlloc(totalPages * AutoSystemInfo::PageSize))
- {
- return false;
- }
- this->address = (char *)GetAllocator()->GetVirtualAllocator()->AllocPages(NULL, totalPages, MEM_RESERVE | allocFlags, PAGE_READWRITE, this->IsInCustomHeapAllocator());
- if (this->address == nullptr)
- {
- this->GetAllocator()->ReportFailure(totalPages * AutoSystemInfo::PageSize);
- return false;
- }
- Assert( ((ULONG_PTR)this->address % (64 * 1024)) == 0 );
- originalAddress = this->address;
- bool committed = (allocFlags & MEM_COMMIT) != 0;
- if (addGuardPages)
- {
- #if DBG_DUMP
- GUARD_PAGE_TRACE(_u("Number of Leading Guard Pages: %d\n"), leadingGuardPageCount);
- GUARD_PAGE_TRACE(_u("Starting address of Leading Guard Pages: 0x%p\n"), address);
- GUARD_PAGE_TRACE(_u("Offset of Segment Start address: 0x%p\n"), this->address + (leadingGuardPageCount*AutoSystemInfo::PageSize));
- GUARD_PAGE_TRACE(_u("Starting address of Trailing Guard Pages: 0x%p\n"), address + ((leadingGuardPageCount + this->segmentPageCount)*AutoSystemInfo::PageSize));
- #endif
- if (committed)
- {
- GetAllocator()->GetVirtualAllocator()->Free(address,
- leadingGuardPageCount * AutoSystemInfo::PageSize, MEM_DECOMMIT);
- GetAllocator()->GetVirtualAllocator()->Free(address +
- ((leadingGuardPageCount + this->segmentPageCount) * AutoSystemInfo::PageSize),
- trailingGuardPageCount * AutoSystemInfo::PageSize, MEM_DECOMMIT);
- }
- this->GetAllocator()->ReportFree((leadingGuardPageCount + trailingGuardPageCount) * AutoSystemInfo::PageSize);
- this->address = this->address + (leadingGuardPageCount*AutoSystemInfo::PageSize);
- }
- if (!GetAllocator()->CreateSecondaryAllocator(this, committed, &this->secondaryAllocator))
- {
- GetAllocator()->GetVirtualAllocator()->Free(originalAddress,
- GetPageCount() * AutoSystemInfo::PageSize, MEM_RELEASE);
- this->GetAllocator()->ReportFailure(GetPageCount() * AutoSystemInfo::PageSize);
- this->address = nullptr;
- return false;
- }
- #ifdef RECYCLER_WRITE_BARRIER
- #if defined(TARGET_64) && defined(RECYCLER_WRITE_BARRIER_BYTE)
- bool registerBarrierResult = true;
- #if ENABLE_DEBUG_CONFIG_OPTIONS
- if (CONFIG_FLAG(StrictWriteBarrierCheck))
- {
- if (this->isWriteBarrierEnabled)
- {
- // only commit card table for write barrier pages for strict check
- // we can do this in free build if all write barrier annotated struct
- // only allocate with write barrier pages
- registerBarrierResult = RecyclerWriteBarrierManager::OnSegmentAlloc(this->address, this->segmentPageCount);
- }
- }
- else
- #endif
- {
- registerBarrierResult = RecyclerWriteBarrierManager::OnSegmentAlloc(this->address, this->segmentPageCount);
- }
- if (!registerBarrierResult)
- {
- GetAllocator()->GetVirtualAllocator()->Free(originalAddress,
- GetPageCount() * AutoSystemInfo::PageSize, MEM_RELEASE);
- this->GetAllocator()->ReportFailure(GetPageCount() * AutoSystemInfo::PageSize);
- this->address = nullptr;
- return false;
- }
- #endif
- this->isWriteBarrierAllowed = true;
- #if DBG
- if (this->isWriteBarrierEnabled)
- {
- RecyclerWriteBarrierManager::ToggleBarrier(this->address,
- this->segmentPageCount * AutoSystemInfo::PageSize, true);
- }
- #endif
- #endif
- return true;
- }
- //=============================================================================================================
- // PageSegment
- //=============================================================================================================
- template<typename T>
- PageSegmentBase<T>::PageSegmentBase(PageAllocatorBase<T> * allocator, bool committed, bool allocated, bool enableWriteBarrier) :
- SegmentBase<T>(allocator, allocator->maxAllocPageCount, enableWriteBarrier), decommitPageCount(0)
- {
- #if DBG
- this->isPageSegment = true;
- #endif // DBG
- Assert(this->segmentPageCount == allocator->maxAllocPageCount + allocator->secondaryAllocPageCount);
- uint maxPageCount = GetMaxPageCount();
- if (committed)
- {
- Assert(!allocated);
- this->freePageCount = this->GetAvailablePageCount();
- this->SetRangeInFreePagesBitVector(0, this->freePageCount);
- if (this->freePageCount != maxPageCount)
- {
- this->ClearRangeInFreePagesBitVector(this->freePageCount, (maxPageCount - this->freePageCount));
- }
- Assert(this->GetCountOfFreePages() == this->freePageCount);
- }
- else
- {
- this->freePageCount = 0;
- this->ClearAllInFreePagesBitVector();
- if (!allocated)
- {
- this->decommitPageCount = this->GetAvailablePageCount();
- this->SetRangeInDecommitPagesBitVector(0, this->decommitPageCount);
- if (this->decommitPageCount != maxPageCount)
- {
- this->ClearRangeInDecommitPagesBitVector(this->decommitPageCount, (maxPageCount - this->decommitPageCount));
- }
- }
- }
- }
- template<typename T>
- PageSegmentBase<T>::PageSegmentBase(PageAllocatorBase<T> * allocator, void* address, uint pageCount, uint committedCount, bool enableWriteBarrier) :
- SegmentBase<T>(allocator, allocator->maxAllocPageCount, enableWriteBarrier), decommitPageCount(0), freePageCount(0)
- {
- #if DBG
- this->isPageSegment = true;
- #endif // DBG
- this->address = (char*)address;
- this->segmentPageCount = pageCount;
- }
#ifdef PAGEALLOCATOR_PROTECT_FREEPAGE
// Initialize the underlying segment, then protect the (still free) pages as
// PAGE_NOACCESS so stray writes to unallocated pages fault immediately.
template<typename T>
bool
PageSegmentBase<T>::Initialize(DWORD allocFlags, bool excludeGuardPages)
{
    Assert(freePageCount + this->GetAllocator()->secondaryAllocPageCount == this->segmentPageCount || freePageCount == 0);

    if (!__super::Initialize(allocFlags, excludeGuardPages))
    {
        return false;
    }

    // Protection can only be applied when the pages live in this process.
    if (freePageCount != 0 && this->GetAllocator()->processHandle == GetCurrentProcess())
    {
        DWORD previousProtect;
        BOOL protectOk = VirtualProtect(this->address, this->GetAvailablePageCount() * AutoSystemInfo::PageSize, PAGE_NOACCESS, &previousProtect);
        if (protectOk == FALSE)
        {
            Assert(UNREACHED);
            return false;
        }
        Assert(previousProtect == PAGE_READWRITE);
    }
    return true;
}
#endif
- template<typename T>
- void
- PageSegmentBase<T>::Prime()
- {
- #ifndef PAGEALLOCATOR_PROTECT_FREEPAGE
- for (uint i = 0; i < this->GetAvailablePageCount(); i++)
- {
- this->address[i * AutoSystemInfo::PageSize] = 0;
- }
- #endif
- }
- template<typename T>
- bool
- PageSegmentBase<T>::IsAllocationPageAligned(__in char* address, size_t pageCount, uint *nextIndex)
- {
- // Require that allocations are aligned at a boundary
- // corresponding to the page count
- // REVIEW: This might actually lead to additional address space fragmentation
- // because of the leading guard pages feature in the page allocator
- // We can restrict the guard pages to be an even number to improve the chances
- // of having the first allocation be aligned but that reduces the effectiveness
- // of having a random number of guard pages
- uintptr_t mask = (pageCount * AutoSystemInfo::PageSize) - 1;
- if ((reinterpret_cast<uintptr_t>(address)& mask) == 0)
- {
- return true;
- }
- if (nextIndex != nullptr)
- {
- *nextIndex = (uint) ((reinterpret_cast<uintptr_t>(address) % (mask + 1)) / AutoSystemInfo::PageSize);
- }
- return false;
- }
- template<typename T>
- template <bool notPageAligned>
- char *
- PageSegmentBase<T>::AllocPages(uint pageCount)
- {
- Assert(freePageCount != 0);
- Assert(freePageCount == (uint)this->GetCountOfFreePages());
- if (freePageCount < pageCount)
- {
- return nullptr;
- }
- Assert(!IsFull());
- uint index = this->GetNextBitInFreePagesBitVector(0);
- while (index != -1)
- {
- Assert(index < this->GetAllocator()->GetMaxAllocPageCount());
- if (GetAvailablePageCount() - index < pageCount)
- {
- break;
- }
- if (pageCount == 1 || this->TestRangeInFreePagesBitVector(index, pageCount))
- {
- char * allocAddress = this->address + index * AutoSystemInfo::PageSize;
- if (pageCount > 1 && !notPageAligned)
- {
- uint nextIndex = 0;
- if (!IsAllocationPageAligned(allocAddress, pageCount, &nextIndex))
- {
- if (index + nextIndex >= this->GetAllocator()->GetMaxAllocPageCount())
- {
- return nullptr;
- }
- index = this->freePages.GetNextBit(index + nextIndex);
- continue;
- }
- }
- this->ClearRangeInFreePagesBitVector(index, pageCount);
- freePageCount -= pageCount;
- Assert(freePageCount == (uint)this->GetCountOfFreePages());
- #ifdef PAGEALLOCATOR_PROTECT_FREEPAGE
- if (this->GetAllocator()->processHandle == GetCurrentProcess())
- {
- DWORD oldProtect;
- BOOL vpresult = VirtualProtect(allocAddress, pageCount * AutoSystemInfo::PageSize, PAGE_READWRITE, &oldProtect);
- if (vpresult == FALSE)
- {
- Assert(UNREACHED);
- return nullptr;
- }
- Assert(oldProtect == PAGE_NOACCESS);
- }
- #endif
- return allocAddress;
- }
- index = this->GetNextBitInFreePagesBitVector(index + 1);
- }
- return nullptr;
- }
- template<typename TVirtualAlloc>
- template<typename T, bool notPageAligned>
- char *
- PageSegmentBase<TVirtualAlloc>::AllocDecommitPages(uint pageCount, T freePages, T decommitPages)
- {
- Assert(freePageCount == (uint)this->GetCountOfFreePages());
- Assert(decommitPageCount == (uint)this->GetCountOfDecommitPages());
- Assert(decommitPageCount != 0);
- if (freePageCount + decommitPageCount < pageCount)
- {
- return nullptr;
- }
- Assert(this->secondaryAllocator == nullptr || this->secondaryAllocator->CanAllocate());
- T freeAndDecommitPages = freePages;
- freeAndDecommitPages.Or(&decommitPages);
- uint oldFreePageCount = freePageCount;
- uint index = freeAndDecommitPages.GetNextBit(0);
- while (index != -1)
- {
- Assert(index < this->GetAllocator()->GetMaxAllocPageCount());
- if (GetAvailablePageCount() - index < pageCount)
- {
- break;
- }
- if (pageCount == 1 || freeAndDecommitPages.TestRange(index, pageCount))
- {
- char * pages = this->address + index * AutoSystemInfo::PageSize;
- if (!notPageAligned)
- {
- uint nextIndex = 0;
- if (!IsAllocationPageAligned(pages, pageCount, &nextIndex))
- {
- if (index + nextIndex >= this->GetAllocator()->GetMaxAllocPageCount())
- {
- return nullptr;
- }
- index = freeAndDecommitPages.GetNextBit(index + nextIndex);
- continue;
- }
- }
- void * ret = this->GetAllocator()->GetVirtualAllocator()->AllocPages(pages, pageCount, MEM_COMMIT, PAGE_READWRITE, this->IsInCustomHeapAllocator());
- if (ret != nullptr)
- {
- Assert(ret == pages);
- this->ClearRangeInFreePagesBitVector(index, pageCount);
- this->ClearRangeInDecommitPagesBitVector(index, pageCount);
- uint newFreePageCount = this->GetCountOfFreePages();
- freePageCount = freePageCount - oldFreePageCount + newFreePageCount;
- decommitPageCount -= pageCount - (oldFreePageCount - newFreePageCount);
- Assert(freePageCount == (uint)this->GetCountOfFreePages());
- Assert(decommitPageCount == (uint)this->GetCountOfDecommitPages());
- return pages;
- }
- else if (pageCount == 1)
- {
- // if we failed to commit one page, we should just give up.
- return nullptr;
- }
- }
- index = freeAndDecommitPages.GetNextBit(index + 1);
- }
- return nullptr;
- }
- template<typename T>
- void
- PageSegmentBase<T>::ReleasePages(__in void * address, uint pageCount)
- {
- Assert(address >= this->address);
- Assert(pageCount <= this->GetAllocator()->maxAllocPageCount);
- Assert(((uint)(((char *)address) - this->address)) <= (this->GetAllocator()->maxAllocPageCount - pageCount) * AutoSystemInfo::PageSize);
- Assert(!IsFreeOrDecommitted(address, pageCount));
- uint base = this->GetBitRangeBase(address);
- this->SetRangeInFreePagesBitVector(base, pageCount);
- this->freePageCount += pageCount;
- Assert(freePageCount == (uint)this->GetCountOfFreePages());
- #ifdef PAGEALLOCATOR_PROTECT_FREEPAGE
- if (this->GetAllocator()->processHandle == GetCurrentProcess())
- {
- DWORD oldProtect;
- BOOL vpresult = VirtualProtect(address, pageCount * AutoSystemInfo::PageSize, PAGE_NOACCESS, &oldProtect);
- Assert(vpresult != FALSE);
- Assert(oldProtect == PAGE_READWRITE);
- }
- #endif
- }
- template<typename T>
- void
- PageSegmentBase<T>::ChangeSegmentProtection(DWORD protectFlags, DWORD expectedOldProtectFlags)
- {
- // TODO: There is a discrepancy in PageSegmentBase
- // The segment page count is initialized in PageSegmentBase::Initialize. It takes into account
- // the guard pages + any additional pages for alignment.
- // However, the free page count is calculated for the segment before initialize is called.
- // In practice, what happens is the following. The initial segment page count is 256. This
- // ends up being the free page count too. When initialize is called, we allocate the guard
- // pages and the alignment pages, which causes the total page count to be 272. The segment
- // page count is then calculated as total - guard, which means 256 <= segmentPageCount < totalPageCount
- // The code in PageSegment's constructor will mark the pages between 256 and 272 as in use,
- // which is why it generally works. However, it breaks in the case where we want to know the end
- // address of the page. It should really be address + 256 * 4k but this->GetEndAddress will return
- // a value greater than that. Need to do a pass through the counts and make sure that it's rational.
- // For now, simply calculate the end address from the allocator's page count
- Assert(this->GetAllocator()->processHandle == GetCurrentProcess());
- char* segmentEndAddress = this->address + (this->GetAllocator()->GetMaxAllocPageCount() * AutoSystemInfo::PageSize);
- for (char* address = this->address; address < segmentEndAddress; address += AutoSystemInfo::PageSize)
- {
- if (!IsFreeOrDecommitted(address))
- {
- char* endAddress = address;
- do
- {
- endAddress += AutoSystemInfo::PageSize;
- } while (endAddress < segmentEndAddress && !IsFreeOrDecommitted(endAddress));
- Assert(((uintptr_t)(endAddress - address)) < UINT_MAX);
- DWORD regionSize = (DWORD) (endAddress - address);
- DWORD oldProtect = 0;
- #if DBG
- MEMORY_BASIC_INFORMATION info = { 0 };
- VirtualQuery(address, &info, sizeof(MEMORY_BASIC_INFORMATION));
- Assert(info.Protect == expectedOldProtectFlags);
- #endif
- BOOL fSuccess = VirtualProtect(address, regionSize, protectFlags, &oldProtect);
- Assert(fSuccess == TRUE);
- Assert(oldProtect == expectedOldProtectFlags);
- address = endAddress;
- }
- }
- }
- template<typename T>
- template <bool onlyUpdateState>
- void
- PageSegmentBase<T>::DecommitPages(__in void * address, uint pageCount)
- {
- Assert(address >= this->address);
- Assert(pageCount <= this->GetAllocator()->maxAllocPageCount);
- Assert(((uint)(((char *)address) - this->address))
- <= (this->GetAllocator()->maxAllocPageCount - pageCount) * AutoSystemInfo::PageSize);
- Assert(!IsFreeOrDecommitted(address, pageCount));
- uint base = this->GetBitRangeBase(address);
- this->SetRangeInDecommitPagesBitVector(base, pageCount);
- this->decommitPageCount += pageCount;
- if (!onlyUpdateState)
- {
- #pragma warning(suppress: 6250)
- this->GetAllocator()->GetVirtualAllocator()->Free(address,
- pageCount * AutoSystemInfo::PageSize, MEM_DECOMMIT);
- }
- Assert(decommitPageCount == (uint)this->GetCountOfDecommitPages());
- }
- template<typename T>
- void
- PageSegmentBase<T>::DecommitFreePagesInternal(uint index, uint pageCount)
- {
- Assert(pageCount > 0 && (index + pageCount) <= this->GetAvailablePageCount());
- this->ClearRangeInFreePagesBitVector(index, pageCount);
- this->SetRangeInDecommitPagesBitVector(index, pageCount);
- char * currentAddress = this->address + (index * AutoSystemInfo::PageSize);
- #pragma warning(suppress: 6250)
- this->GetAllocator()->GetVirtualAllocator()->Free(currentAddress,
- pageCount * AutoSystemInfo::PageSize, MEM_DECOMMIT);
- }
- template<typename T>
- size_t
- PageSegmentBase<T>::DecommitFreePages(size_t pageToDecommit)
- {
- Assert(pageToDecommit != 0 && this->GetAvailablePageCount() > 0);
- uint startIndex = 0, index = 0, decommitCount = 0;
- do
- {
- if (!this->TestInFreePagesBitVector(index))
- {
- if (startIndex < index)
- {
- uint pageCount = index - startIndex;
- this->DecommitFreePagesInternal(startIndex, pageCount);
- }
- startIndex = index + 1;
- }
- else
- {
- decommitCount++;
- }
- }
- while (++index < this->GetAvailablePageCount() && decommitCount < pageToDecommit);
- if (startIndex < index)
- {
- uint pageCount = index - startIndex;
- this->DecommitFreePagesInternal(startIndex, pageCount);
- }
- Assert(decommitCount <= this->freePageCount);
- this->decommitPageCount += decommitCount;
- this->freePageCount -= decommitCount;
- return decommitCount;
- }
- //=============================================================================================================
- // PageAllocator
- //=============================================================================================================
#if DBG
// Debug-only guard: assert that the page allocator is only touched from the
// thread that is allowed to access it.
#define ASSERT_THREAD() AssertMsg(this->ValidThreadAccess(), "Page allocation should only be used by a single thread");
#else
#define ASSERT_THREAD()
#endif
/*
 * Global counter to keep track of the total used bytes by the page allocator
 * per process for performance tooling. This is reported through the
 * JSCRIPT_PAGE_ALLOCATOR_USED_SIZE ETW event.
 */
// Running total of bytes currently used by all page allocators in the process.
static size_t totalUsedBytes = 0;
// High-water mark of totalUsedBytes; reset by GetAndResetMaxUsedBytes().
static size_t maxUsedBytes = 0;
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- size_t PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::GetAndResetMaxUsedBytes()
- {
- size_t value = maxUsedBytes;
- maxUsedBytes = 0;
- return value;
- }
// Reports the current process-wide byte usage across all page allocators.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
size_t
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::GetProcessUsedBytes()
{
    return totalUsedBytes;
}
// Largest page count that a regular (non-large) page segment can satisfy.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
uint
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::GetMaxAllocPageCount()
{
    return maxAllocPageCount;
}
// Constructs a page allocator.
// policyManager            - memory policy callbacks (may gate allocations).
// flagTable                - configuration flags used by this allocator.
// type                     - category tag for profiling/tracking.
// maxFreePageCount         - free pages retained before releasing/decommitting.
// zeroPages                - whether freed pages must be zeroed before reuse.
// backgroundPageQueue      - optional queue for background freeing/zeroing.
// maxAllocPageCount        - largest request served from a page segment.
// secondaryAllocPageCount  - pages reserved per segment for secondary allocations.
// stopAllocationOnOutOfMemory - latch allocation failure after the first OOM.
// excludeGuardPages        - skip guard-page insertion in segments.
// processHandle            - target process for allocations (may be remote).
// enableWriteBarrier       - allocate write-barrier-enabled memory.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::PageAllocatorBase(AllocationPolicyManager * policyManager,
    Js::ConfigFlagsTable& flagTable,
    PageAllocatorType type,
    uint maxFreePageCount, bool zeroPages,
#if ENABLE_BACKGROUND_PAGE_FREEING
    BackgroundPageQueue * backgroundPageQueue,
#endif
    uint maxAllocPageCount,
    uint secondaryAllocPageCount,
    bool stopAllocationOnOutOfMemory,
    bool excludeGuardPages,
    HANDLE processHandle,
    bool enableWriteBarrier
    ) :
    policyManager(policyManager),
    pageAllocatorFlagTable(flagTable),
    maxFreePageCount(maxFreePageCount),
    freePageCount(0),
    allocFlags(0),
    zeroPages(zeroPages),
#if ENABLE_BACKGROUND_PAGE_ZEROING
    queueZeroPages(false),
    hasZeroQueuedPages(false),
    backgroundPageQueue(backgroundPageQueue),
#endif
    minFreePageCount(0),
    isUsed(false),
    waitingToEnterIdleDecommit(false),
#if DBG
    idleDecommitBackOffCount(0),
#endif
    // Starts at 1: idle decommit is considered "entered" until explicitly left.
    idleDecommitEnterCount(1),
    isClosed(false),
    stopAllocationOnOutOfMemory(stopAllocationOnOutOfMemory),
    disableAllocationOutOfMemory(false),
    secondaryAllocPageCount(secondaryAllocPageCount),
    excludeGuardPages(excludeGuardPages),
    type(type)
    , reservedBytes(0)
    , committedBytes(0)
    , usedBytes(0)
    , numberOfSegments(0)
    , processHandle(processHandle)
    , enableWriteBarrier(enableWriteBarrier)
#ifdef ENABLE_BASIC_TELEMETRY
    ,decommitStats(nullptr)
#endif
{
    // Segment page counts must be a power of two (presumably so segment bases
    // can be derived by masking — TODO confirm against segment code).
    AssertMsg(Math::IsPow2(maxAllocPageCount + secondaryAllocPageCount), "Illegal maxAllocPageCount: Why is this not a power of 2 aligned?");
    this->maxAllocPageCount = maxAllocPageCount;

#if DBG
    // By default, a page allocator is not associated with any thread context
    // Any host which wishes to associate it with a thread context must do so explicitly
    this->threadContextHandle = NULL;
    this->concurrentThreadId = (DWORD)-1;
#endif
#if DBG
    this->disableThreadAccessCheck = false;
    this->debugMinFreePageCount = 0;
#endif
#if DBG_DUMP
    this->decommitPageCount = 0;
    this->debugName = nullptr;
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    this->verifyEnabled = false;
    this->disablePageReuse = false;
#endif
#ifdef PROFILE_MEM
    this->memoryData = MemoryProfiler::GetPageMemoryData(type);
#endif

    PageTracking::PageAllocatorCreated((PageAllocator*)this);
}
// Tears down the allocator: removes this allocator's contribution from the
// process-wide counters, then releases every segment list it owns.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::~PageAllocatorBase()
{
    AssertMsg(this->ValidThreadAccess(), "Page allocator tear-down should only happen on the owning thread");
#if DBG
    Assert(!this->HasMultiThreadAccess());
#endif

    // Undo this allocator's share of the global accounting before freeing memory.
    SubUsedBytes(usedBytes);
    SubCommittedBytes(committedBytes);
    SubReservedBytes(reservedBytes);

    ReleaseSegmentList(&segments);
    ReleaseSegmentList(&fullSegments);
    ReleaseSegmentList(&emptySegments);
    ReleaseSegmentList(&decommitSegments);
    ReleaseSegmentList(&largeSegments);

    PageTracking::PageAllocatorDestroyed((PageAllocator*)this);
}
- #if ENABLE_BACKGROUND_PAGE_ZEROING
// Enters background-zeroing mode: subsequently freed pages are queued for the
// background thread to zero instead of being zeroed inline.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::StartQueueZeroPage()
{
    Assert(HasZeroPageQueue());
    Assert(!queueZeroPages);
    queueZeroPages = true;
}
// Leaves background-zeroing mode; freed pages are handled inline again.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::StopQueueZeroPage()
{
    Assert(HasZeroPageQueue());
    Assert(queueZeroPages);
    queueZeroPages = false;
}
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- bool
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::HasZeroPageQueue() const
- {
- bool hasZeroPageQueue = (ZeroPages() && this->backgroundPageQueue != nullptr);
- Assert(backgroundPageQueue == nullptr || hasZeroPageQueue == backgroundPageQueue->isZeroPageQueue);
- return hasZeroPageQueue;
- }
#if DBG
// Debug accessor for the "pages are waiting to be zeroed" flag.
// Sanity check: if the flag is clear while a zero-page queue exists, the
// queue itself must also be empty.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
bool
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::HasZeroQueuedPages() const
{
    Assert(!HasZeroPageQueue() || hasZeroQueuedPages ||
        ((ZeroPageQueue *)this->backgroundPageQueue)->QueryDepth() == 0);
    return hasZeroQueuedPages;
}
#endif
- #endif //ENABLE_BACKGROUND_PAGE_ZEROING
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- PageAllocation *
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AllocPagesForBytes(size_t requestBytes)
- {
- Assert(!isClosed);
- ASSERT_THREAD();
- uint pageSize = AutoSystemInfo::PageSize;
- uint addSize = sizeof(PageAllocation) + pageSize - 1; // this shouldn't overflow
- // overflow check
- size_t allocSize = AllocSizeMath::Add(requestBytes, addSize);
- if (allocSize == (size_t)-1)
- {
- return nullptr;
- }
- size_t pages = allocSize / pageSize;
- return this->AllocAllocation(pages);
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- TPageSegment *
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AllocPageSegment(
- DListBase<TPageSegment>& segmentList,
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment> * pageAllocator,
- bool committed, bool allocated, bool enableWriteBarrier)
- {
- TPageSegment * segment = segmentList.PrependNode(&NoThrowNoMemProtectHeapAllocator::Instance,
- pageAllocator, committed, allocated, enableWriteBarrier);
- if (segment == nullptr)
- {
- return nullptr;
- }
- if (!segment->Initialize((committed ? MEM_COMMIT : 0) | pageAllocator->allocFlags, pageAllocator->excludeGuardPages))
- {
- segmentList.RemoveHead(&NoThrowNoMemProtectHeapAllocator::Instance);
- return nullptr;
- }
- return segment;
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- TPageSegment *
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AllocPageSegment(
- DListBase<TPageSegment>& segmentList,
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment> * pageAllocator,
- void* address, uint pageCount, uint committedCount, bool enableWriteBarrier)
- {
- TPageSegment * segment = segmentList.PrependNode(&NoThrowNoMemProtectHeapAllocator::Instance,
- pageAllocator, address, pageCount, committedCount, enableWriteBarrier);
- pageAllocator->ReportExternalAlloc(pageCount * AutoSystemInfo::PageSize);
- return segment;
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- TPageSegment *
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AddPageSegment(DListBase<TPageSegment>& segmentList)
- {
- Assert(!this->HasMultiThreadAccess());
- TPageSegment * segment = AllocPageSegment(segmentList, this, true, false, this->enableWriteBarrier);
- if (segment != nullptr)
- {
- this->LogAllocSegment(segment);
- this->AddFreePageCount(this->maxAllocPageCount);
- }
- return segment;
- }
#if ENABLE_BACKGROUND_PAGE_FREEING
// Attempts to satisfy an allocation of pageCount pages from the background
// page queue. isPendingZeroList selects the "pending zero" queue (pages freed
// but not yet zeroed by the background thread) versus the already-processed
// free-page list. Entries that are popped but not handed out are drained back
// into their segments before returning. Returns nullptr if no entry of
// sufficient size was found.
// NOTE(review): the bgPageQueue parameter is not used; the member
// backgroundPageQueue is read instead — confirm they are always the same.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
char *
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::TryAllocFromZeroPagesList(
    uint pageCount, TPageSegment ** pageSegment, BackgroundPageQueue* bgPageQueue, bool isPendingZeroList)
{
    FAULTINJECT_MEMORY_NOTHROW(this->debugName, pageCount*AutoSystemInfo::PageSize);

    char * pages = nullptr;
    // Entries popped from the queue that we did not (fully) consume.
    FreePageEntry* localList = nullptr;

#if ENABLE_BACKGROUND_PAGE_ZEROING
    if (CONFIG_FLAG(EnableBGFreeZero))
    {
        while (true)
        {
            FreePageEntry * freePage = isPendingZeroList ? ((ZeroPageQueue *)backgroundPageQueue)->PopZeroPageEntry() : backgroundPageQueue->PopFreePageEntry();
            if (freePage == nullptr)
            {
                break;
            }

            if (freePage->pageCount == pageCount)
            {
                // Exact fit: hand out the entry itself. Pending-zero pages must
                // be zeroed in full here; already-zeroed pages only need the
                // FreePageEntry header scrubbed.
                *pageSegment = freePage->segment;
                pages = (char *)freePage;
                memset(pages, 0, isPendingZeroList ? (pageCount*AutoSystemInfo::PageSize) : sizeof(FreePageEntry));
                this->FillAllocPages(pages, pageCount);
                break;
            }
            else
            {
                if (isPendingZeroList)
                {
                    // Zero everything past the header now; the header itself is
                    // still needed to keep the entry linked into localList.
                    memset((char *)freePage + sizeof(FreePageEntry), 0, (freePage->pageCount*AutoSystemInfo::PageSize) - sizeof(FreePageEntry));
                }

                freePage->Next = localList;
                localList = (FreePageEntry*)freePage;

                if (freePage->pageCount > pageCount)
                {
                    // Larger entry: carve the request off the entry's tail and
                    // leave the shrunk remainder on localList for re-release.
                    *pageSegment = freePage->segment;
                    freePage->pageCount -= pageCount;
                    pages = (char *)freePage + freePage->pageCount * AutoSystemInfo::PageSize;
                    this->FillAllocPages(pages, pageCount);
                    break;
                }
            }
        }
    }
#endif

    if (localList != nullptr)
    {
        // Return all unused entries to their owning segments and move those
        // segments onto the list matching their new fill state.
        uint newFreePages = 0;
        while (localList != nullptr)
        {
            FreePageEntry* freePagesEntry = localList;
            localList = (FreePageEntry*)localList->Next;

            TPageSegment * segment = freePagesEntry->segment;
            pageCount = freePagesEntry->pageCount;

            DListBase<TPageSegment> * fromSegmentList = GetSegmentList(segment);
            Assert(fromSegmentList != nullptr);

            // Scrub the queue header before the pages become reusable.
            memset(freePagesEntry, 0, sizeof(FreePageEntry));

            segment->ReleasePages(freePagesEntry, pageCount);
            newFreePages += pageCount;

            TransferSegment(segment, fromSegmentList);
        }

        LogFreePages(newFreePages);
        PAGE_ALLOC_VERBOSE_TRACE(_u("New free pages: %d\n"), newFreePages);
        this->AddFreePageCount(newFreePages);
#if DBG
        UpdateMinimum(this->debugMinFreePageCount, this->freePageCount);
#endif
    }

    return pages;
}
#endif
// Tries to satisfy an allocation from the background-zeroed pages, preferring
// the already-processed free list over the pending-zero queue. Returns
// nullptr when background freeing/zeroing is unavailable or empty.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
char *
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::TryAllocFromZeroPages(uint pageCount, TPageSegment ** pageSegment)
{
#if ENABLE_BACKGROUND_PAGE_ZEROING
    if (CONFIG_FLAG(EnableBGFreeZero))
    {
        if (backgroundPageQueue != nullptr)
        {
            return TryAllocFromZeroPagesList(pageCount, pageSegment, backgroundPageQueue, false);
        }

        // NOTE(review): this branch is reachable only if hasZeroQueuedPages is
        // set while backgroundPageQueue is null; the __analysis_assume
        // suggests that combination is believed impossible in practice —
        // confirm against the setters of hasZeroQueuedPages.
        if (this->hasZeroQueuedPages)
        {
            __analysis_assume(backgroundPageQueue != nullptr);
            return TryAllocFromZeroPagesList(pageCount, pageSegment, backgroundPageQueue, true);
        }
    }
#endif

    return nullptr;
}
// Fast path: tries to carve pageCount pages out of a partially-used segment
// that already has committed free pages, then falls back to the background
// zero-page queue. Returns nullptr when neither source can satisfy the
// request without commit/recommit work.
// notPageAligned: when false, only allocations aligned to the page-run
// granularity are acceptable (presumably for large-object alignment — the
// constraint is enforced inside TPageSegment::AllocPages).
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
template <bool notPageAligned>
char *
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::TryAllocFreePages(uint pageCount, TPageSegment ** pageSegment)
{
    Assert(!HasMultiThreadAccess());
    char* pages = nullptr;

    if (this->freePageCount >= pageCount)
    {
        FAULTINJECT_MEMORY_NOTHROW(this->debugName, pageCount*AutoSystemInfo::PageSize);
        typename DListBase<TPageSegment>::EditingIterator i(&this->segments);
        while (i.Next())
        {
            TPageSegment * freeSegment = &i.Data();

            pages = freeSegment->template AllocPages<notPageAligned>(pageCount);
            if (pages != nullptr)
            {
                LogAllocPages(pageCount);
                if (freeSegment->GetFreePageCount() == 0)
                {
                    // Segment is exhausted; park it on the full list.
                    i.MoveCurrentTo(&this->fullSegments);
                }

                this->freePageCount -= pageCount;
                *pageSegment = freeSegment;

#if DBG
                UpdateMinimum(this->debugMinFreePageCount, this->freePageCount);
#endif
                this->FillAllocPages(pages, pageCount);
                return pages;
            }
        }
    }

    pages = TryAllocFromZeroPages(pageCount, pageSegment);

    return pages;
}
// Prepares pages that are about to be handed out to a caller.
// - DBG + RECYCLER_ZERO_MEM_CHECK: verifies the pages contain only zeros or
//   the debug free-fill byte (i.e. nobody scribbled on them while free).
// - RECYCLER_MEMORY_VERIFY: fills with the verification pattern instead.
// - DBG + zero-pages mode: zeroes the range here; in release, zeroing is
//   done in ReleasePages instead.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::FillAllocPages(__in void * address, uint pageCount)
{
    const size_t bufferSize = AutoSystemInfo::PageSize * pageCount;

#if DBG
#ifdef RECYCLER_ZERO_MEM_CHECK
    // The pages may belong to another process; AllocLocal maps them into the
    // current process for inspection.
    byte * localAddr = (byte *)this->GetVirtualAllocator()->AllocLocal(address, bufferSize);
    if (!localAddr)
    {
        MemoryOperationLastError::RecordError(E_OUTOFMEMORY);
        return;
    }
    for (size_t i = 0; i < bufferSize; i++)
    {
        // new pages are filled with zeros, old pages are filled with DbgMemFill
        Assert(localAddr[i] == 0 || localAddr[i] == DbgMemFill);
    }
    this->GetVirtualAllocator()->FreeLocal(localAddr);
#endif
#endif

#ifdef RECYCLER_MEMORY_VERIFY
    if (verifyEnabled)
    {
        Assert(this->processHandle == GetCurrentProcess());
        memset(address, Recycler::VerifyMemFill, bufferSize);
        return;
    }
#endif

#if DBG
    if (ZeroPages())
    {
        // for release build, the page is zeroed in ReleasePages
        Assert(this->processHandle == GetCurrentProcess());
        memset(address, 0, bufferSize);
    }
#endif
}
// Prepares pages that are being returned to the free pool.
// Debug builds stamp the debug free-fill pattern; release builds zero the
// pages (when zero-pages mode is on), optionally via non-temporal stores.
// Under memory-verify the pattern written at allocation time is kept.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::FillFreePages(__in void * address, uint pageCount)
{
#if DBG
    MemSetLocal(address, DbgMemFill, AutoSystemInfo::PageSize * pageCount);
#else
#ifdef RECYCLER_MEMORY_VERIFY
    if (verifyEnabled)
    {
        return;
    }
#endif
    if (ZeroPages())
    {
        //
        // Do memset via non-temporal store to avoid evicting existing processor cache.
        // This helps low-end machines with limited cache size.
        //
#if defined(_M_IX86) || defined(_M_X64)
        if (CONFIG_FLAG(ZeroMemoryWithNonTemporalStore))
        {
            js_memset_zero_nontemporal(address, AutoSystemInfo::PageSize * pageCount);
        }
        else
#endif
        {
            memset(address, 0, AutoSystemInfo::PageSize * pageCount);
        }
    }
#endif
}
// Tries to satisfy the request from segments holding decommitted pages,
// recommitting as needed. The allocation may span a mix of still-free and
// decommitted pages, so the allocator-level counters are updated from the
// before/after deltas of the segment's own counters. Returns nullptr if no
// decommit segment could satisfy the request.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
template <bool notPageAligned>
char *
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::TryAllocDecommittedPages(uint pageCount, TPageSegment ** pageSegment)
{
    Assert(!this->HasMultiThreadAccess());

    typename DListBase<TPageSegment>::EditingIterator i(&decommitSegments);

    while (i.Next())
    {
        TPageSegment * freeSegment = &i.Data();
        uint oldFreePageCount = freeSegment->GetFreePageCount();
        uint oldDecommitPageCount = freeSegment->GetDecommitPageCount();

        char * pages = freeSegment->template DoAllocDecommitPages<notPageAligned>(pageCount);
        if (pages != nullptr)
        {
            // Apply the segment's free/decommit deltas to the allocator totals.
            this->freePageCount = this->freePageCount - oldFreePageCount + freeSegment->GetFreePageCount();

#if DBG_DUMP
            this->decommitPageCount = this->decommitPageCount - oldDecommitPageCount + freeSegment->GetDecommitPageCount();
#endif
#if DBG
            UpdateMinimum(this->debugMinFreePageCount, this->freePageCount);
#endif

            // Pages that did not come from the free pool had to be recommitted.
            uint recommitPageCount = pageCount - (oldFreePageCount - freeSegment->GetFreePageCount());
            LogRecommitPages(recommitPageCount);
            LogAllocPages(pageCount);

            if (freeSegment->GetDecommitPageCount() == 0)
            {
                // No decommitted pages left: reclassify the segment.
                auto toList = GetSegmentList(freeSegment);
                i.MoveCurrentTo(toList);
            }

            *pageSegment = freeSegment;
            return pages;
        }
    }
    return nullptr;
}
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- PageAllocation *
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AllocAllocation(size_t pageCount)
- {
- PageAllocation * pageAllocation;
- TSegment * segment;
- if (pageCount > this->maxAllocPageCount)
- {
- // We need some space reserved for secondary allocations
- segment = AllocSegment(pageCount);
- if (segment == nullptr)
- {
- return nullptr;
- }
- pageAllocation = (PageAllocation *)segment->GetAddress();
- pageAllocation->pageCount = segment->GetAvailablePageCount();
- }
- else
- {
- Assert(pageCount <= UINT_MAX);
- pageAllocation = (PageAllocation *)AllocPages((uint)pageCount, (TPageSegment **)&segment);
- if (pageAllocation == nullptr)
- {
- return nullptr;
- }
- pageAllocation->pageCount = pageCount;
- }
- pageAllocation->segment = segment;
- return pageAllocation;
- }
// Allocates a dedicated large segment of pageCount pages, committed up-front.
// The segment is tracked on largeSegments; on initialization failure the
// list node is rolled back and nullptr is returned.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
TSegment *
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AllocSegment(size_t pageCount)
{
    Assert(!isClosed);
    ASSERT_THREAD();

    // Even though we don't idle decommit large segments, we still need to consider these allocations
    // as using the page allocator
    this->isUsed = true;

    TSegment * segment = largeSegments.PrependNode(&NoThrowNoMemProtectHeapAllocator::Instance,
        this, pageCount, enableWriteBarrier);
    if (segment == nullptr)
    {
        return nullptr;
    }

    if (!segment->Initialize(MEM_COMMIT | allocFlags, excludeGuardPages))
    {
        // Undo the list insertion made just above.
        largeSegments.RemoveHead(&NoThrowNoMemProtectHeapAllocator::Instance);
        return nullptr;
    }

    LogAllocSegment(segment);
    LogAllocPages(segment->GetPageCount());

    PageTracking::ReportAllocation((PageAllocator*)this, segment->GetAddress(), AutoSystemInfo::PageSize * segment->GetPageCount());
#ifdef RECYCLER_MEMORY_VERIFY
    if (verifyEnabled)
    {
        Assert(this->processHandle == GetCurrentProcess());
        memset(segment->GetAddress(), Recycler::VerifyMemFill, AutoSystemInfo::PageSize * segment->GetPageCount());
    }
#endif

    return segment;
}
// Specialization for the default in-process allocator: the virtual-alloc
// wrapper is a process-wide singleton, so nothing is stored per allocator.
template <>
void PageAllocatorBase<VirtualAllocWrapper>::InitVirtualAllocator(VirtualAllocWrapper * virtualAllocator)
{
    this->allocatorType = GetAllocatorType<VirtualAllocWrapper>();

    // default page allocator must keep virtualAllocator nullptr
    Assert(this->virtualAllocator == nullptr && virtualAllocator == nullptr);
}
// Generic case: record the (possibly still-null) virtual allocator and tag
// this allocator with the matching allocator type.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::InitVirtualAllocator(TVirtualAlloc * virtualAllocator)
{
    Assert(this->virtualAllocator == nullptr);
    this->virtualAllocator = virtualAllocator;  // Init to given virtualAllocator, may be nullptr temporarily
    this->allocatorType = GetAllocatorType<TVirtualAlloc>();
}
// Specialization: the in-process allocator always uses the global singleton
// wrapper (the member virtualAllocator stays null — see InitVirtualAllocator).
template <>
VirtualAllocWrapper* PageAllocatorBase<VirtualAllocWrapper>::GetVirtualAllocator() const
{
    Assert(this->allocatorType == GetAllocatorType<VirtualAllocWrapper>());
    return &VirtualAllocWrapper::Instance;
}
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- TVirtualAlloc*
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::GetVirtualAllocator() const
- {
- Assert(this->allocatorType == GetAllocatorType<TVirtualAlloc>());
- return reinterpret_cast<TVirtualAlloc*>(this->virtualAllocator);
- }
// Public entry point: allocate *pageCount pages with no alignment
// requirement. On success *pageCount/*segment describe what was handed out.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
char *
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::Alloc(size_t * pageCount, TSegment ** segment)
{
    Assert(this->allocatorType == GetAllocatorType<TVirtualAlloc>());
    return AllocInternal<false>(pageCount, segment);
}
// Core allocation dispatcher. Requests larger than maxAllocPageCount get a
// dedicated large segment (and *pageCount is rounded up to the segment's
// available page count); smaller requests go through the page-segment path.
// doPageAlign selects the page-aligned variant (legacy; slated for removal).
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
template <bool doPageAlign>
char *
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AllocInternal(size_t * pageCount, TSegment ** segment)
{
    char * addr = nullptr;

    if (*pageCount > this->maxAllocPageCount)
    {
        // Don't bother trying to do single chunk allocation here
        // We're allocating a new segment. If the segment size is
        // within a single chunk, great, otherwise, doesn't matter

        // We need some space reserved for secondary allocations
        TSegment * newSegment = this->AllocSegment(*pageCount);
        if (newSegment != nullptr)
        {
            addr = newSegment->GetAddress();
            *pageCount = newSegment->GetAvailablePageCount();
            *segment = newSegment;
        }
    }
    else
    {
        Assert(*pageCount <= UINT_MAX);
        TPageSegment * pageSegment;

        if (doPageAlign)
        {
            // TODO: Remove this entire codepath since doPageAlign is not being used anymore
            addr = this->AllocPagesPageAligned((uint)*pageCount, &pageSegment);
        }
        else
        {
            addr = this->AllocPages((uint) *pageCount, &pageSegment);
        }

        if (addr != nullptr)
        {
            *segment = pageSegment;
        }
    }
    return addr;
}
// Records a new low-water mark for the free page count.
// Called on page release / before decommit (not on every allocation) — see
// AddFreePageCount. The Assert cross-checks the DBG shadow counter.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::UpdateMinFreePageCount()
{
    UpdateMinimum(minFreePageCount, freePageCount);
    Assert(debugMinFreePageCount == minFreePageCount);
}
// Restarts low-water-mark tracking from the current free page count.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::ResetMinFreePageCount()
{
    minFreePageCount = freePageCount;
#if DBG
    debugMinFreePageCount = freePageCount;
#endif
}
// Zeroes the low-water mark (and its DBG shadow).
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::ClearMinFreePageCount()
{
    minFreePageCount = 0;
#if DBG
    debugMinFreePageCount = 0;
#endif
}
// Allocates pageCount pages from a page segment with no alignment constraint.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
char *
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AllocPages(uint pageCount, TPageSegment ** pageSegment)
{
    Assert(this->allocatorType == GetAllocatorType<TVirtualAlloc>());
    return AllocPagesInternal<true /* noPageAligned */>(pageCount, pageSegment);
}
// Allocates pageCount pages that must satisfy the segment's page-alignment
// requirement (see TPageSegment::IsAllocationPageAligned).
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
char *
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AllocPagesPageAligned(uint pageCount, TPageSegment ** pageSegment)
{
    return AllocPagesInternal<false /* noPageAligned */>(pageCount, pageSegment);
}
// Shared implementation behind AllocPages/AllocPagesPageAligned.
// Tries the fast path (existing free/zeroed pages) first, then the slow path
// (empty segments, recommit, new segment). Idle decommit is suspended for the
// duration so the background thread can't decommit pages out from under us.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
template <bool notPageAligned>
char *
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AllocPagesInternal(uint pageCount, TPageSegment ** pageSegment)
{
    Assert(!isClosed);
    ASSERT_THREAD();
    Assert(pageCount <= this->maxAllocPageCount);

    this->isUsed = true;

    SuspendIdleDecommit();
    char * allocation = TryAllocFreePages<notPageAligned>(pageCount, pageSegment);
    if (allocation == nullptr)
    {
        allocation = SnailAllocPages<notPageAligned>(pageCount, pageSegment);
    }
    ResumeIdleDecommit();

    // NOTE(review): ReportAllocation is invoked even when allocation is
    // nullptr — confirm PageTracking tolerates a null address.
    PageTracking::ReportAllocation((PageAllocator*)this, allocation, AutoSystemInfo::PageSize * pageCount);

    if (!notPageAligned)
    {
        Assert(TPageSegment::IsAllocationPageAligned(allocation, pageCount));
    }

    return allocation;
}
// Finishes an allocation satisfied from a brand-new (empty) segment:
// accounts for the pages, fills them for hand-out, and moves the segment off
// the empty list — to fullSegments when the request consumed the whole
// segment, otherwise to the partially-used segments list.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::OnAllocFromNewSegment(uint pageCount, __in void* pages, TSegment* newSegment)
{
    DListBase<TPageSegment>* targetSegmentList = (pageCount == maxAllocPageCount) ? &fullSegments : &segments;
    LogAllocPages(pageCount);

    this->FillAllocPages(pages, pageCount);
    this->freePageCount -= pageCount;
#if DBG
    UpdateMinimum(this->debugMinFreePageCount, this->freePageCount);
#endif

    Assert(targetSegmentList != nullptr);
    // The new segment is at the head of emptySegments (it was just added).
    emptySegments.MoveHeadTo(targetSegmentList);
}
// Slow allocation path, tried in order:
//   1. an existing empty segment (scanning for an aligned one if required),
//   2. recommitting decommitted pages,
//   3. a fresh fully-decommitted segment when adding the pages to an ordinary
//      segment would overshoot maxFreePageCount,
//   4. a brand-new committed segment.
// Returns nullptr only when no segment can be obtained.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
template <bool notPageAligned>
char *
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::SnailAllocPages(uint pageCount, TPageSegment ** pageSegment)
{
    Assert(!this->HasMultiThreadAccess());

    char * pages = nullptr;
    TPageSegment * newSegment = nullptr;

    if (!emptySegments.Empty())
    {
        newSegment = &emptySegments.Head();

        if (!notPageAligned && !TPageSegment::IsAllocationPageAligned(newSegment->GetAddress(), pageCount))
        {
            newSegment = nullptr;

            // Scan through the empty segments for a segment that can fit this allocation
            FOREACH_DLISTBASE_ENTRY_EDITING(TPageSegment, emptySegment, &this->emptySegments, iter)
            {
                if (TPageSegment::IsAllocationPageAligned(emptySegment.GetAddress(), pageCount))
                {
                    // Move the match to the head of the same list so the
                    // MoveHeadTo in OnAllocFromNewSegment picks it up.
                    iter.MoveCurrentTo(&this->emptySegments);
                    newSegment = &emptySegment;
                    break;
                }
            }
            NEXT_DLISTBASE_ENTRY_EDITING
        }

        if (newSegment != nullptr)
        {
            pages = newSegment->template AllocPages<notPageAligned>(pageCount);
            if (pages != nullptr)
            {
                OnAllocFromNewSegment(pageCount, pages, newSegment);
                *pageSegment = newSegment;
                return pages;
            }
        }
    }

    pages = TryAllocDecommittedPages<notPageAligned>(pageCount, pageSegment);
    if (pages != nullptr)
    {
        // TryAllocDecommittedPages may give out a mix of free pages and decommitted pages.
        // Free pages are filled with 0xFE in debug build, so we need to zero them
        // out before giving it out. In release build, free page is already zeroed
        // in ReleasePages
        this->FillAllocPages(pages, pageCount);
        return pages;
    }

    Assert(pages == nullptr);
    Assert(maxAllocPageCount >= pageCount);
    if (maxAllocPageCount != pageCount && (maxFreePageCount < maxAllocPageCount - pageCount + freePageCount))
    {
        // If we exceed the number of max free page count, allocate from a new fully decommit block
        TPageSegment * decommitSegment = AllocPageSegment(
            this->decommitSegments, this, false, false, this->enableWriteBarrier);
        if (decommitSegment == nullptr)
        {
            return nullptr;
        }

        pages = decommitSegment->template DoAllocDecommitPages<notPageAligned>(pageCount);
        if (pages != nullptr)
        {
#if DBG_DUMP
            this->decommitPageCount = this->decommitPageCount + decommitSegment->GetDecommitPageCount();
#endif
            this->FillAllocPages(pages, pageCount);

            LogRecommitPages(pageCount);
            LogAllocPages(pageCount);

            *pageSegment = decommitSegment;
        }
        return pages;
    }

    // At this point, we haven't been able to allocate either from the
    // decommitted pages, or from the empty segment list, so we'll
    // try allocating a segment. In a page allocator with a pre-reserved segment,
    // we're not allowed to allocate additional segments so return here.
    // Otherwise, add a new segment and allocate from it
    newSegment = AddPageSegment(emptySegments);
    if (newSegment == nullptr)
    {
        return nullptr;
    }

    pages = newSegment->template AllocPages<notPageAligned>(pageCount);
    if (notPageAligned)
    {
        // REVIEW: Is this true for single-chunk allocations too? Are new segments guaranteed to
        // allow for single-chunk allocations to succeed?
        Assert(pages != nullptr);
    }

    if (pages != nullptr)
    {
        OnAllocFromNewSegment(pageCount, pages, newSegment);
        *pageSegment = newSegment;
    }

    return pages;
}
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- DListBase<TPageSegment> *
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::GetSegmentList(TPageSegment * segment)
- {
- Assert(!this->HasMultiThreadAccess());
- return
- (segment->IsAllDecommitted()) ? nullptr :
- (segment->IsFull()) ? &fullSegments :
- (segment->ShouldBeInDecommittedList()) ? &decommitSegments :
- (segment->IsEmpty()) ? &emptySegments :
- &segments;
- }
// Releases a PageAllocation (from AllocAllocation/AllocPagesForBytes),
// bracketing the work with idle-decommit suspend/resume.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::ReleaseAllocation(PageAllocation * allocation)
{
    SuspendIdleDecommit();
    ReleaseAllocationNoSuspend(allocation);
    ResumeIdleDecommit();
}
// Releases a PageAllocation without touching idle-decommit state; the header
// itself sits at the start of the released range.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::ReleaseAllocationNoSuspend(PageAllocation * allocation)
{
    this->Release((char *)allocation, allocation->pageCount, allocation->segment);
}
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::Release(void * address, size_t pageCount, void * segmentParam)
- {
- TSegment * segment = (TSegment*)segmentParam;
- Assert(!this->HasMultiThreadAccess());
- Assert(segment->GetAllocator() == this);
- if (pageCount > this->maxAllocPageCount)
- {
- Assert(address == segment->GetAddress());
- Assert(pageCount == segment->GetAvailablePageCount());
- this->ReleaseSegment(segment);
- }
- else
- {
- Assert(pageCount <= UINT_MAX);
- this->ReleasePages(address, static_cast<uint>(pageCount), (TPageSegment *)segment);
- }
- }
// Releases a large segment back to the system. Under RECYCLER_NO_PAGE_REUSE
// with page reuse disabled, the memory is only decommitted (kept reserved) so
// stale pointers fault rather than aliasing new allocations.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::ReleaseSegment(TSegment * segment)
{
    ASSERT_THREAD();
#ifdef RECYCLER_NO_PAGE_REUSE
    if (disablePageReuse)
    {
        Assert(this->processHandle == GetCurrentProcess());
#pragma prefast(suppress:6250, "Calling 'VirtualFree' without the MEM_RELEASE flag might free memory but not address descriptors (VADs).")
        VirtualFree(segment->GetAddress(), segment->GetPageCount() * AutoSystemInfo::PageSize, MEM_DECOMMIT);
        return;
    }
#endif
    PageTracking::ReportFree((PageAllocator*)this, segment->GetAddress(), AutoSystemInfo::PageSize * segment->GetPageCount());
    LogFreePages(segment->GetPageCount());
    LogFreeSegment(segment);
    // Removing the node runs the segment's destructor, which frees the memory.
    largeSegments.RemoveElement(&NoThrowNoMemProtectHeapAllocator::Instance, segment);
}
// Credits pageCount pages to the free pool, snapshotting the low-water mark
// first.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AddFreePageCount(uint pageCount)
{
    // minFreePageCount is only updated on release of a page or before decommit
    // so that we don't have to update it on every page allocation.
    UpdateMinFreePageCount();
    this->freePageCount += pageCount;
}
// Returns pageCount pages at address to their owning page segment, applying
// the allocator's free-page retention policy (see the block comment below),
// and finally moves the segment to whichever list matches its new state.
template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
void
PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::ReleasePages(__in void * address, uint pageCount, __in void * segmentParam)
{
    Assert(pageCount <= this->maxAllocPageCount);
    TPageSegment * segment = (TPageSegment*) segmentParam;
    ASSERT_THREAD();
    Assert(!this->HasMultiThreadAccess());

#ifdef RECYCLER_NO_PAGE_REUSE
    if (disablePageReuse)
    {
        // Decommit-only so stale pointers fault instead of aliasing reuse.
        Assert(this->processHandle == GetCurrentProcess());
#pragma prefast(suppress:6250, "Calling 'VirtualFree' without the MEM_RELEASE flag might free memory but not address descriptors (VADs).")
        VirtualFree(address, pageCount * AutoSystemInfo::PageSize, MEM_DECOMMIT);
        return;
    }
#endif

    PageTracking::ReportFree((PageAllocator*)this, address, AutoSystemInfo::PageSize * pageCount);

    DListBase<TPageSegment> * fromSegmentList = GetSegmentList(segment);
    Assert(fromSegmentList != nullptr);

    /**
     * The logic here is as follows:
     * - If we have sufficient pages already, such that the newly free pages are going
     *   to cause us to exceed the threshold of free pages we want:
     *     - First check and see if we have empty segments. If we do, just release that
     *       entire segment back to the operating system, and add the current segments
     *       free pages to our free page pool
     *     - Otherwise, if there are no empty segments (i.e our memory is fragmented),
     *       decommit the pages that are being released so that they don't count towards
     *       our working set
     * - If we don't have enough pages:
     *    - If we're in the free page queuing mode where we have a "pages to zero out" queue
     *      put it in that queue and we're done
     *    - Otherwise, zero it out, and add it to the free page pool
     *  Now that we've either decommitted or freed the pages in the segment,
     *  move the segment to the right segment list
     */
    if (this->freePageCount + pageCount > maxFreePageCount)
    {
        // Release a whole segment if possible to reduce the number of VirtualFree and fragmentation
        if (!ZeroPages() && !emptySegments.Empty())
        {
            Assert(emptySegments.Head().GetDecommitPageCount() == 0);
            LogFreeSegment(&emptySegments.Head());
            emptySegments.RemoveHead(&NoThrowNoMemProtectHeapAllocator::Instance);

            // The removed empty segment's pages leave the pool; the released
            // pages below take their place.
            this->freePageCount -= maxAllocPageCount;

#if DBG
            UpdateMinimum(this->debugMinFreePageCount, this->freePageCount);
            MemSetLocal(address, DbgMemFill, AutoSystemInfo::PageSize * pageCount);
#endif
            segment->ReleasePages(address, pageCount);
            LogFreePages(pageCount);
            this->AddFreePageCount(pageCount);
        }
        else
        {
            segment->template DecommitPages<false>(address, pageCount);
            LogFreePages(pageCount);
            LogDecommitPages(pageCount);
#if DBG_DUMP
            this->decommitPageCount += pageCount;
#endif
        }
    }
    else
    {
#if ENABLE_BACKGROUND_PAGE_ZEROING
        if (CONFIG_FLAG(EnableBGFreeZero))
        {
            if (QueueZeroPages())
            {
                // Defer zeroing (and the segment-list transfer) to the
                // background thread.
                Assert(HasZeroPageQueue());
                AddPageToZeroQueue(address, pageCount, segment);
                return;
            }
        }
#endif

        this->FillFreePages((char *)address, pageCount);
        segment->ReleasePages(address, pageCount);
        LogFreePages(pageCount);
        this->AddFreePageCount(pageCount);
    }

    TransferSegment(segment, fromSegmentList);
}
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::MemSetLocal(_In_ void *dst, int val, size_t sizeInBytes)
- {
- memset(dst, val, sizeInBytes);
- }
- #if ENABLE_OOP_NATIVE_CODEGEN
- template<>
- void
- PageAllocatorBase<SectionAllocWrapper>::MemSetLocal(_In_ void *dst, int val, size_t sizeInBytes)
- {
- LPVOID localAddr = this->GetVirtualAllocator()->AllocLocal(dst, sizeInBytes);
- if (localAddr == nullptr)
- {
- MemoryOperationLastError::RecordError(JSERR_FatalMemoryExhaustion);
- }
- else
- {
- memset(localAddr, val, sizeInBytes);
- this->GetVirtualAllocator()->FreeLocal(localAddr);
- }
- }
- template<>
- void
- PageAllocatorBase<PreReservedSectionAllocWrapper>::MemSetLocal(_In_ void *dst, int val, size_t sizeInBytes)
- {
- LPVOID localAddr = this->GetVirtualAllocator()->AllocLocal(dst, sizeInBytes);
- if (localAddr == nullptr)
- {
- MemoryOperationLastError::RecordError(JSERR_FatalMemoryExhaustion);
- }
- else
- {
- memset(localAddr, val, sizeInBytes);
- this->GetVirtualAllocator()->FreeLocal(localAddr);
- }
- }
- #endif
- #if ENABLE_BACKGROUND_PAGE_ZEROING
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- typename PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::FreePageEntry *
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::PopPendingZeroPage()
- {
- Assert(HasZeroPageQueue());
- return ((ZeroPageQueue *) backgroundPageQueue)->PopZeroPageEntry();
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AddPageToZeroQueue(__in void * address, uint pageCount, __in TPageSegment * pageSegment)
- {
- Assert(HasZeroPageQueue());
- Assert(pageSegment->GetAllocator() == this);
- FreePageEntry * entry = (FreePageEntry *)address;
- entry->segment = pageSegment;
- entry->pageCount = pageCount;
- ((ZeroPageQueue *)backgroundPageQueue)->PushZeroPageEntry(entry);
- this->hasZeroQueuedPages = true;
- }
- #endif
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::TransferSegment(TPageSegment * segment, DListBase<TPageSegment> * fromSegmentList)
- {
- DListBase<TPageSegment> * toSegmentList = GetSegmentList(segment);
- if (fromSegmentList != toSegmentList)
- {
- if (toSegmentList)
- {
- AssertMsg(segment->GetSecondaryAllocator() == nullptr || fromSegmentList != &fullSegments || segment->GetSecondaryAllocator()->CanAllocate(),
- "If it's being moved from a full segment it should be able to do secondary allocations");
- fromSegmentList->MoveElementTo(segment, toSegmentList);
- }
- else
- {
- LogFreePartiallyDecommittedPageSegment(segment);
- fromSegmentList->RemoveElement(&NoThrowNoMemProtectHeapAllocator::Instance, segment);
- #if DBG_DUMP
- this->decommitPageCount -= maxAllocPageCount;
- #endif
- }
- }
- }
- #if ENABLE_BACKGROUND_PAGE_ZEROING
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::BackgroundZeroQueuedPages()
- {
- Assert(HasZeroPageQueue());
- AutoCriticalSection autocs(&backgroundPageQueue->backgroundPageQueueCriticalSection);
- ZeroQueuedPages();
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::ZeroQueuedPages()
- {
- Assert(HasZeroPageQueue());
- while (true)
- {
- FreePageEntry * freePageEntry = PopPendingZeroPage();
- if (freePageEntry == nullptr)
- {
- break;
- }
- TPageSegment * segment = freePageEntry->segment;
- uint pageCount = freePageEntry->pageCount;
- //
- // Do memset via non-temporal store to avoid evicting existing processor cache.
- // This helps low-end machines with limited cache size.
- //
- Assert(this->processHandle == GetCurrentProcess());
- #if defined(_M_IX86) || defined(_M_X64)
- if (CONFIG_FLAG(ZeroMemoryWithNonTemporalStore))
- {
- js_memset_zero_nontemporal(freePageEntry, AutoSystemInfo::PageSize * pageCount);
- }
- else
- #endif
- {
- memset(freePageEntry, 0, pageCount * AutoSystemInfo::PageSize);
- }
- QueuePages(freePageEntry, pageCount, segment);
- }
- this->hasZeroQueuedPages = false;
- }
- #endif
- #if ENABLE_BACKGROUND_PAGE_FREEING
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::BackgroundReleasePages(void * address, uint pageCount, TPageSegment * segment)
- {
- // TODO: no need to zero the pages if it's going to be decommitted
- FillFreePages(address, pageCount);
- QueuePages(address, pageCount, segment);
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::QueuePages(void * address, uint pageCount, TPageSegment * segment)
- {
- Assert(backgroundPageQueue);
- FreePageEntry * freePageEntry = (FreePageEntry *)address;
- freePageEntry->segment = segment;
- freePageEntry->pageCount = pageCount;
- backgroundPageQueue->PushFreePageEntry(freePageEntry);
- }
- #endif
- #if ENABLE_BACKGROUND_PAGE_FREEING
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::FlushBackgroundPages()
- {
- Assert(!this->HasMultiThreadAccess());
- Assert(backgroundPageQueue);
- // We can have additional pages queued up to be zeroed out here
- // and that's okay since they'll eventually be zeroed out before being flushed
- uint newFreePages = 0;
- while (true)
- {
- FreePageEntry * freePageEntry = backgroundPageQueue->PopFreePageEntry();
- if (freePageEntry == nullptr)
- {
- break;
- }
- TPageSegment * segment = freePageEntry->segment;
- uint pageCount = freePageEntry->pageCount;
- DListBase<TPageSegment> * fromSegmentList = GetSegmentList(segment);
- Assert(fromSegmentList != nullptr);
- Assert(this->processHandle == GetCurrentProcess());
- memset(freePageEntry, 0, sizeof(FreePageEntry));
- segment->ReleasePages(freePageEntry, pageCount);
- newFreePages += pageCount;
- TransferSegment(segment, fromSegmentList);
- }
- LogFreePages(newFreePages);
- PAGE_ALLOC_VERBOSE_TRACE(_u("New free pages: %d\n"), newFreePages);
- this->AddFreePageCount(newFreePages);
- }
- #endif
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::SuspendIdleDecommit()
- {
- #ifdef IDLE_DECOMMIT_ENABLED
- if (this->idleDecommitEnterCount != 0)
- {
- return;
- }
- Assert(this->IsIdleDecommitPageAllocator());
- ((IdleDecommitPageAllocator *)this)->cs.Enter();
- PAGE_ALLOC_VERBOSE_TRACE_0(_u("SuspendIdleDecommit"));
- #endif
- }
- #if ENABLE_OOP_NATIVE_CODEGEN
- template<>
- void
- PageAllocatorBase<SectionAllocWrapper>::SuspendIdleDecommit()
- {
- Assert(!this->IsIdleDecommitPageAllocator());
- }
- template<>
- void
- PageAllocatorBase<PreReservedSectionAllocWrapper>::SuspendIdleDecommit()
- {
- Assert(!this->IsIdleDecommitPageAllocator());
- }
- #endif
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::ResumeIdleDecommit()
- {
- #ifdef IDLE_DECOMMIT_ENABLED
- if (this->idleDecommitEnterCount != 0)
- {
- return;
- }
- Assert(this->IsIdleDecommitPageAllocator());
- PAGE_ALLOC_VERBOSE_TRACE(_u("ResumeIdleDecommit"));
- ((IdleDecommitPageAllocator *)this)->cs.Leave();
- #endif
- }
- #if ENABLE_OOP_NATIVE_CODEGEN
- template<>
- void
- PageAllocatorBase<SectionAllocWrapper>::ResumeIdleDecommit()
- {
- Assert(!this->IsIdleDecommitPageAllocator());
- }
- template<>
- void
- PageAllocatorBase<PreReservedSectionAllocWrapper>::ResumeIdleDecommit()
- {
- Assert(!this->IsIdleDecommitPageAllocator());
- }
- #endif
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::DecommitNow(bool all)
- {
- Assert(!this->HasMultiThreadAccess());
- #ifdef ENABLE_BASIC_TELEMETRY
- if (this->decommitStats != nullptr)
- {
- this->decommitStats->numDecommitCalls++;
- if (this->decommitStats->lastLeaveDecommitRegion.ToMicroseconds() > 0)
- {
- Js::TickDelta delta = Js::Tick::Now() - this->decommitStats->lastLeaveDecommitRegion;
- if (delta > this->decommitStats->maxDeltaBetweenDecommitRegionLeaveAndDecommit)
- {
- this->decommitStats->maxDeltaBetweenDecommitRegionLeaveAndDecommit = delta;
- }
- }
- }
- #endif
- size_t deleteCount = 0;
- #if ENABLE_BACKGROUND_PAGE_ZEROING
- if (CONFIG_FLAG(EnableBGFreeZero))
- {
- // First, drain the zero page queue.
- // This will cause the free page count to be accurate
- if (HasZeroPageQueue())
- {
- int numZeroPagesFreed = 0;
- // There might be queued zero pages. Drain them first
- bool zeroPageQueueEmpty = false;
- while (true)
- {
- FreePageEntry * freePageEntry = PopPendingZeroPage();
- if (freePageEntry == nullptr)
- {
- zeroPageQueueEmpty = true;
- break;
- }
- // Back-off from decommit if we are trying to enter IdleDecommit again.
- if (this->waitingToEnterIdleDecommit)
- {
- break;
- }
- PAGE_ALLOC_TRACE_AND_STATS_0(_u("Freeing page from zero queue"));
- TPageSegment * segment = freePageEntry->segment;
- uint pageCount = freePageEntry->pageCount;
- numZeroPagesFreed += pageCount;
- DListBase<TPageSegment> * fromSegmentList = GetSegmentList(segment);
- Assert(fromSegmentList != nullptr);
- // Check for all here, since the actual free page count can't be determined
- // until we've flushed the zeroed page queue
- if (all)
- {
- // Decommit them immediately if we are decommitting all pages.
- segment->template DecommitPages<false>(freePageEntry, pageCount);
- LogFreePages(pageCount);
- LogDecommitPages(pageCount);
- if (segment->IsAllDecommitted())
- {
- LogFreePartiallyDecommittedPageSegment(segment);
- fromSegmentList->RemoveElement(&NoThrowNoMemProtectHeapAllocator::Instance, segment);
- deleteCount += maxAllocPageCount;
- continue;
- }
- }
- else
- {
- // Zero them and release them in case we don't decommit them.
- Assert(this->processHandle == GetCurrentProcess());
- memset(freePageEntry, 0, pageCount * AutoSystemInfo::PageSize);
- segment->ReleasePages(freePageEntry, pageCount);
- LogFreePages(pageCount);
- }
- TransferSegment(segment, fromSegmentList);
- }
- // Take the lock to make sure the recycler thread has finished zeroing out the pages after
- // we drained the queue
- if(zeroPageQueueEmpty)
- {
- AutoCriticalSection autoCS(&backgroundPageQueue->backgroundPageQueueCriticalSection);
- this->hasZeroQueuedPages = false;
- Assert(!this->HasZeroQueuedPages());
- }
- FlushBackgroundPages();
- }
- }
- #endif
- if (this->freePageCount == 0)
- {
- Assert(debugMinFreePageCount == 0);
- return;
- }
- PAGE_ALLOC_TRACE_AND_STATS_0(_u("Decommit now"));
- // minFreePageCount is not updated on every page allocate,
- // so we have to do a final update here.
- UpdateMinFreePageCount();
- size_t newFreePageCount;
- if (all)
- {
- newFreePageCount = this->GetFreePageLimit();
- PAGE_ALLOC_TRACE_AND_STATS_0(_u("Full decommit"));
- }
- else
- {
- // Decommit half the min free page count since last partial decommit
- Assert(this->minFreePageCount <= this->freePageCount);
- newFreePageCount = this->freePageCount - (this->minFreePageCount / 2);
- // Ensure we don't decommit down to fewer than our partial decommit minimum
- newFreePageCount = max(newFreePageCount, static_cast<size_t>(MinPartialDecommitFreePageCount));
- PAGE_ALLOC_TRACE_AND_STATS_0(_u("Partial decommit"));
- }
- if (newFreePageCount >= this->freePageCount)
- {
- PAGE_ALLOC_TRACE_AND_STATS_0(_u("No pages to decommit"));
- return;
- }
- size_t pageToDecommit = this->freePageCount - newFreePageCount;
- PAGE_ALLOC_TRACE_AND_STATS(_u("Decommit page count = %d"), pageToDecommit);
- PAGE_ALLOC_TRACE_AND_STATS(_u("Free page count = %d"), this->freePageCount);
- PAGE_ALLOC_TRACE_AND_STATS(_u("New free page count = %d"), newFreePageCount);
- size_t decommitCount = 0;
- // decommit from page that already has other decommitted page already
- {
- typename DListBase<TPageSegment>::EditingIterator i(&decommitSegments);
- while (pageToDecommit > 0 && i.Next())
- {
- size_t pageDecommitted = i.Data().DecommitFreePages(pageToDecommit);
- LogDecommitPages(pageDecommitted);
- decommitCount += pageDecommitted;
- if (i.Data().GetDecommitPageCount() == maxAllocPageCount)
- {
- LogFreePartiallyDecommittedPageSegment(&i.Data());
- i.RemoveCurrent(&NoThrowNoMemProtectHeapAllocator::Instance);
- deleteCount += maxAllocPageCount;
- }
- pageToDecommit -= pageDecommitted;
- // Back-off from decommit if we are trying to enter IdleDecommit again.
- if (this->waitingToEnterIdleDecommit)
- {
- break;
- }
- }
- }
- // decommit pages that are empty.
- // back-off from decommit if we are trying to enter IdleDecommit again.
- while (!this->waitingToEnterIdleDecommit && pageToDecommit > 0 && !emptySegments.Empty())
- {
- if (pageToDecommit >= maxAllocPageCount)
- {
- Assert(emptySegments.Head().GetDecommitPageCount() == 0);
- LogFreeSegment(&emptySegments.Head());
- emptySegments.RemoveHead(&NoThrowNoMemProtectHeapAllocator::Instance);
- pageToDecommit -= maxAllocPageCount;
- decommitCount += maxAllocPageCount;
- deleteCount += maxAllocPageCount;
- }
- else
- {
- size_t pageDecommitted = emptySegments.Head().DecommitFreePages(pageToDecommit);
- LogDecommitPages(pageDecommitted);
- decommitCount += pageDecommitted;
- Assert(pageDecommitted == pageToDecommit);
- emptySegments.MoveHeadTo(&decommitSegments);
- pageToDecommit = 0;
- }
- }
- if(!this->waitingToEnterIdleDecommit)
- {
- typename DListBase<TPageSegment>::EditingIterator i(&segments);
- while (pageToDecommit > 0 && i.Next())
- {
- size_t pageDecommitted = i.Data().DecommitFreePages(pageToDecommit);
- LogDecommitPages(pageDecommitted);
- decommitCount += pageDecommitted;
- Assert(i.Data().GetDecommitPageCount() != 0);
- Assert(i.Data().GetDecommitPageCount() <= maxAllocPageCount);
- i.MoveCurrentTo(&decommitSegments);
- pageToDecommit -= pageDecommitted;
- // Back-off from decommit if we are trying to enter IdleDecommit again.
- if (this->waitingToEnterIdleDecommit)
- {
- break;
- }
- }
- }
- Assert(pageToDecommit == 0 || this->waitingToEnterIdleDecommit);
- #if DBG
- if (pageToDecommit != 0 && this->waitingToEnterIdleDecommit)
- {
- this->idleDecommitBackOffCount++;
- }
- #endif
- #if DBG_DUMP
- Assert(this->freePageCount == newFreePageCount + decommitCount + pageToDecommit);
- #endif
- // If we had to back-off from decommiting then we may still have some free pages left to decommit.
- this->freePageCount = newFreePageCount + pageToDecommit;
- #ifdef ENABLE_BASIC_TELEMETRY
- if (this->decommitStats != nullptr)
- {
- this->decommitStats->numPagesDecommitted += decommitCount;
- this->decommitStats->numFreePageCount += newFreePageCount + pageToDecommit;
- }
- #endif
- #if DBG
- UpdateMinimum(this->debugMinFreePageCount, this->freePageCount);
- Check();
- #endif
- #if DBG_DUMP
- this->decommitPageCount += (decommitCount - deleteCount);
- if (CUSTOM_PHASE_TRACE1(this->pageAllocatorFlagTable, Js::PageAllocatorPhase))
- {
- if (CUSTOM_PHASE_STATS1(this->pageAllocatorFlagTable, Js::PageAllocatorPhase))
- {
- Output::Print(_u(" After decommit now:\n"));
- this->DumpStats();
- }
- Output::Flush();
- }
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AddReservedBytes(size_t bytes)
- {
- reservedBytes += bytes;
- #ifdef PERF_COUNTERS
- GetReservedSizeCounter() += bytes;
- GetTotalReservedSizeCounter() += bytes;
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::SubReservedBytes(size_t bytes)
- {
- reservedBytes -= bytes;
- #ifdef PERF_COUNTERS
- GetReservedSizeCounter() -= bytes;
- GetTotalReservedSizeCounter() -= bytes;
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AddCommittedBytes(size_t bytes)
- {
- committedBytes += bytes;
- #ifdef PERF_COUNTERS
- GetCommittedSizeCounter() += bytes;
- GetTotalCommittedSizeCounter() += bytes;
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::SubCommittedBytes(size_t bytes)
- {
- committedBytes -= bytes;
- #ifdef PERF_COUNTERS
- GetCommittedSizeCounter() -= bytes;
- GetTotalCommittedSizeCounter() -= bytes;
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AddUsedBytes(size_t bytes)
- {
- usedBytes += bytes;
- #if defined(TARGET_64)
- size_t lastTotalUsedBytes = ::InterlockedExchangeAdd64((volatile LONG64 *)&totalUsedBytes, bytes);
- #else
- DWORD lastTotalUsedBytes = ::InterlockedExchangeAdd(&totalUsedBytes, bytes);
- #endif
- if (totalUsedBytes > maxUsedBytes)
- {
- maxUsedBytes = totalUsedBytes;
- }
- // ETW events from different threads may be reported out of order, producing an
- // incorrect representation of current used bytes in the process. We've determined that this is an
- // acceptable issue, which will be mitigated at the level of the application consuming the event.
- JS_ETW(EventWriteJSCRIPT_PAGE_ALLOCATOR_USED_SIZE(lastTotalUsedBytes + bytes));
- #ifndef ENABLE_JS_ETW
- Unused(lastTotalUsedBytes);
- #endif
- #ifdef PERF_COUNTERS
- GetUsedSizeCounter() += bytes;
- GetTotalUsedSizeCounter() += bytes;
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::SubUsedBytes(size_t bytes)
- {
- Assert(bytes <= usedBytes);
- Assert(bytes <= totalUsedBytes);
- usedBytes -= bytes;
- #if defined(TARGET_64)
- size_t lastTotalUsedBytes = ::InterlockedExchangeAdd64((volatile LONG64 *)&totalUsedBytes, -(LONG64)bytes);
- #else
- DWORD lastTotalUsedBytes = ::InterlockedExchangeSubtract(&totalUsedBytes, bytes);
- #endif
- // ETW events from different threads may be reported out of order, producing an
- // incorrect representation of current used bytes in the process. We've determined that this is an
- // acceptable issue, which will be mitigated at the level of the application consuming the event.
- JS_ETW(EventWriteJSCRIPT_PAGE_ALLOCATOR_USED_SIZE(lastTotalUsedBytes - bytes));
- #ifndef ENABLE_JS_ETW
- Unused(lastTotalUsedBytes);
- #endif
- #ifdef PERF_COUNTERS
- GetUsedSizeCounter() -= bytes;
- GetTotalUsedSizeCounter() -= bytes;
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AddNumberOfSegments(size_t segmentCount)
- {
- numberOfSegments += segmentCount;
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::SubNumberOfSegments(size_t segmentCount)
- {
- numberOfSegments -= segmentCount;
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::IntegrateSegments(DListBase<TPageSegment>& segmentList, uint segmentCount, size_t pageCount)
- {
- #if DBG
- size_t debugPageCount = 0;
- uint debugSegmentCount = 0;
- typename DListBase<TPageSegment>::Iterator i(&segmentList);
- while (i.Next())
- {
- Assert(i.Data().GetAllocator() == this);
- debugSegmentCount++;
- debugPageCount += i.Data().GetPageCount();
- }
- Assert(debugSegmentCount == segmentCount);
- Assert(debugPageCount == pageCount);
- #endif
- LogAllocSegment(segmentCount, pageCount);
- LogAllocPages(pageCount);
- this->SuspendIdleDecommit();
- segmentList.MoveTo(&this->fullSegments);
- this->ResumeIdleDecommit();
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::LogAllocSegment(TSegment * segment)
- {
- LogAllocSegment(1, segment->GetPageCount());
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::LogAllocSegment(uint segmentCount, size_t pageCount)
- {
- size_t bytes = pageCount * AutoSystemInfo::PageSize;
- AddReservedBytes(bytes);
- AddCommittedBytes(bytes);
- AddNumberOfSegments(segmentCount);
- #ifdef PROFILE_MEM
- if (this->memoryData)
- {
- this->memoryData->allocSegmentCount += segmentCount;
- this->memoryData->allocSegmentBytes += pageCount * AutoSystemInfo::PageSize;
- this->memoryData->currentCommittedPageCount += pageCount;
- this->memoryData->peakCommittedPageCount = max(this->memoryData->peakCommittedPageCount, this->memoryData->currentCommittedPageCount);
- }
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::LogFreeSegment(TSegment * segment)
- {
- size_t bytes = segment->GetPageCount() * AutoSystemInfo::PageSize;
- SubCommittedBytes(bytes);
- SubReservedBytes(bytes);
- SubNumberOfSegments(1);
- #ifdef PROFILE_MEM
- if (this->memoryData)
- {
- this->memoryData->releaseSegmentCount++;
- this->memoryData->releaseSegmentBytes += segment->GetPageCount() * AutoSystemInfo::PageSize;
- this->memoryData->currentCommittedPageCount -= segment->GetPageCount();
- }
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::LogFreeDecommittedSegment(TSegment * segment)
- {
- SubReservedBytes(segment->GetPageCount() * AutoSystemInfo::PageSize);
- SubNumberOfSegments(1);
- #ifdef PROFILE_MEM
- if (this->memoryData)
- {
- this->memoryData->releaseSegmentCount++;
- this->memoryData->releaseSegmentBytes += segment->GetPageCount() * AutoSystemInfo::PageSize;
- }
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::LogFreePages(size_t pageCount)
- {
- SubUsedBytes(pageCount * AutoSystemInfo::PageSize);
- #ifdef PROFILE_MEM
- if (this->memoryData)
- {
- this->memoryData->releasePageCount += pageCount;
- }
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::LogFreePartiallyDecommittedPageSegment(TPageSegment * pageSegment)
- {
- AddCommittedBytes(pageSegment->GetDecommitPageCount() * AutoSystemInfo::PageSize);
- #ifdef PROFILE_MEM
- if (this->memoryData)
- {
- this->memoryData->currentCommittedPageCount += pageSegment->GetDecommitPageCount();
- }
- #endif
- LogFreeSegment(pageSegment);
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::LogAllocPages(size_t pageCount)
- {
- AddUsedBytes(pageCount * AutoSystemInfo::PageSize);
- #ifdef PROFILE_MEM
- if (this->memoryData)
- {
- this->memoryData->allocPageCount += pageCount;
- }
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::LogRecommitPages(size_t pageCount)
- {
- #ifdef PROFILE_MEM
- if (this->memoryData)
- {
- this->memoryData->recommitPageCount += pageCount;
- }
- #endif
- LogCommitPages(pageCount);
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::LogCommitPages(size_t pageCount)
- {
- AddCommittedBytes(pageCount * AutoSystemInfo::PageSize);
- #ifdef PROFILE_MEM
- if (this->memoryData)
- {
- this->memoryData->currentCommittedPageCount += pageCount;
- this->memoryData->peakCommittedPageCount = max(this->memoryData->peakCommittedPageCount, this->memoryData->currentCommittedPageCount);
- }
- #endif
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::LogDecommitPages(size_t pageCount)
- {
- SubCommittedBytes(pageCount * AutoSystemInfo::PageSize);
- #ifdef PROFILE_MEM
- if (this->memoryData)
- {
- this->memoryData->decommitPageCount += pageCount;
- this->memoryData->currentCommittedPageCount -= pageCount;
- }
- #endif
- }
- #if DBG_DUMP
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::DumpStats() const
- {
- Output::Print(_u(" Full/Partial/Empty/Decommit/Large Segments: %4d %4d %4d %4d %4d\n"),
- fullSegments.Count(), segments.Count(), emptySegments.Count(), decommitSegments.Count(), largeSegments.Count());
- Output::Print(_u(" Free/Decommit/Min Free Pages : %4d %4d %4d\n"),
- this->freePageCount, this->decommitPageCount, this->minFreePageCount);
- }
- #endif
- #if DBG
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- void
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::Check()
- {
- #if ENABLE_BACKGROUND_PAGE_ZEROING
- if (CONFIG_FLAG(EnableBGFreeZero))
- {
- // We may have backed-off from the idle decommit on the background thread.
- Assert(!this->HasZeroQueuedPages() || this->waitingToEnterIdleDecommit);
- }
- #endif
- size_t currentFreePageCount = 0;
- typename DListBase<TPageSegment>::Iterator segmentsIterator(&segments);
- while (segmentsIterator.Next())
- {
- currentFreePageCount += segmentsIterator.Data().GetFreePageCount();
- }
- typename DListBase<TPageSegment>::Iterator fullSegmentsIterator(&fullSegments);
- while (fullSegmentsIterator.Next())
- {
- currentFreePageCount += fullSegmentsIterator.Data().GetFreePageCount();
- }
- typename DListBase<TPageSegment>::Iterator emptySegmentsIterator(&emptySegments);
- while (emptySegmentsIterator.Next())
- {
- currentFreePageCount += emptySegmentsIterator.Data().GetFreePageCount();
- }
- typename DListBase<TPageSegment>::Iterator decommitSegmentsIterator(&decommitSegments);
- while (decommitSegmentsIterator.Next())
- {
- currentFreePageCount += decommitSegmentsIterator.Data().GetFreePageCount();
- }
- Assert(freePageCount == currentFreePageCount);
- }
- #endif
- template<typename T>
- HeapPageAllocator<T>::HeapPageAllocator(AllocationPolicyManager * policyManager, bool allocXdata, bool excludeGuardPages, T * virtualAllocator, HANDLE processHandle) :
- PageAllocatorBase<T>(policyManager,
- Js::Configuration::Global.flags,
- PageAllocatorType_CustomHeap,
- /*maxFreePageCount*/ 0,
- /*zeroPages*/ false,
- #if ENABLE_BACKGROUND_PAGE_FREEING || ENABLE_BACKGROUND_PAGE_ZEROING
- /*zeroPageQueue*/ nullptr,
- #endif
- /*maxAllocPageCount*/ allocXdata ? (Base::DefaultMaxAllocPageCount - XDATA_RESERVE_PAGE_COUNT) : Base::DefaultMaxAllocPageCount,
- /*secondaryAllocPageCount=*/ allocXdata ? XDATA_RESERVE_PAGE_COUNT : 0,
- /*stopAllocationOnOutOfMemory*/ false,
- excludeGuardPages,
- processHandle),
- allocXdata(allocXdata)
- {
- this->InitVirtualAllocator(virtualAllocator);
- }
- template<typename T>
- void
- HeapPageAllocator<T>::ReleaseDecommitted(void * address, size_t pageCount, __in void * segmentParam)
- {
- SegmentBase<T> * segment = (SegmentBase<T>*) segmentParam;
- if (pageCount > this->maxAllocPageCount)
- {
- Assert(address == segment->GetAddress());
- Assert(pageCount == segment->GetAvailablePageCount());
- this->ReleaseDecommittedSegment(segment);
- }
- else
- {
- Assert(pageCount <= UINT_MAX);
- this->TrackDecommittedPages(address, (uint)pageCount, (PageSegment *)segment);
- }
- }
- template<typename T>
- void
- HeapPageAllocator<T>::ReleaseDecommittedSegment(__in SegmentBase<T>* segment)
- {
- ASSERT_THREAD();
- this->LogFreeDecommittedSegment(segment);
- this->largeSegments.RemoveElement(&NoThrowNoMemProtectHeapAllocator::Instance, segment);
- }
- // decommit the page but don't release it
- template<typename T>
- void
- HeapPageAllocator<T>::DecommitPages(__in char* address, size_t pageCount /* = 1 */)
- {
- Assert(pageCount <= MAXUINT32);
- #pragma prefast(suppress:__WARNING_WIN32UNRELEASEDVADS, "The remainder of the clean-up is done later.");
- this->GetVirtualAllocator()->Free(address, pageCount * AutoSystemInfo::PageSize, MEM_DECOMMIT);
- this->LogFreePages(pageCount);
- this->LogDecommitPages(pageCount);
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- template <typename T>
- void PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::ReleaseSegmentList(DListBase<T> * segmentList)
- {
- segmentList->Clear(&NoThrowNoMemProtectHeapAllocator::Instance);
- }
- template<typename T>
- BOOL
- HeapPageAllocator<T>::ProtectPages(__in char* address, size_t pageCount, __in void* segmentParam, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag)
- {
- SegmentBase<T> * segment = (SegmentBase<T>*)segmentParam;
- #if DBG
- Assert(address >= segment->GetAddress());
- Assert(((uint)(((char *)address) - segment->GetAddress()) <= (segment->GetPageCount() - pageCount) * AutoSystemInfo::PageSize));
- if (this->IsPageSegment(segment))
- {
- PageSegmentBase<T> * pageSegment = static_cast<PageSegmentBase<T>*>(segment);
- AssertMsg(pageCount <= MAXUINT32, "PageSegment should always be smaller than 4G pages");
- Assert(!pageSegment->IsFreeOrDecommitted(address, static_cast<uint>(pageCount)));
- }
- #endif
- // check address alignment, and that the address is in correct range
- if (((uintptr_t)address & (AutoSystemInfo::PageSize - 1)) != 0
- || address < segment->GetAddress()
- || ((uint)(((char *)address) - segment->GetAddress()) > (segment->GetPageCount() - pageCount) * AutoSystemInfo::PageSize))
- {
- // OOPJIT TODO: don't bring down the whole JIT process
- CustomHeap_BadPageState_unrecoverable_error((ULONG_PTR)this);
- return FALSE;
- }
- // OOP JIT page protection is immutable
- if (this->processHandle != GetCurrentProcess())
- {
- return TRUE;
- }
- MEMORY_BASIC_INFORMATION memBasicInfo;
- // check old protection on all pages about to change, ensure the fidelity
- size_t bytes = VirtualQuery(address, &memBasicInfo, sizeof(memBasicInfo));
- if (bytes == 0)
- {
- MemoryOperationLastError::RecordLastError();
- }
- if (bytes == 0
- || memBasicInfo.RegionSize < pageCount * AutoSystemInfo::PageSize
- || desiredOldProtectFlag != memBasicInfo.Protect)
- {
- CustomHeap_BadPageState_unrecoverable_error((ULONG_PTR)this);
- return FALSE;
- }
- /*Verify if we always pass the PAGE_TARGETS_NO_UPDATE flag, if the protect flag is EXECUTE*/
- #if defined(_CONTROL_FLOW_GUARD)
- if (AutoSystemInfo::Data.IsCFGEnabled() &&
- (dwVirtualProtectFlags & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE)) &&
- ((dwVirtualProtectFlags & PAGE_TARGETS_NO_UPDATE) == 0))
- {
- CustomHeap_BadPageState_unrecoverable_error((ULONG_PTR)this);
- return FALSE;
- }
- #endif
- #if defined(ENABLE_JIT_CLAMP)
- bool makeExecutable = (dwVirtualProtectFlags & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE)) ? true : false;
- AutoEnableDynamicCodeGen enableCodeGen(makeExecutable);
- #endif
- #if DBG_DUMP || defined(RECYCLER_TRACE)
- if (this->pageAllocatorFlagTable.IsEnabled(Js::TraceProtectPagesFlag))
- {
- Output::Print(_u("VirtualProtect(0x%p, %d, %d, %d)\n"), address, pageCount, pageCount * AutoSystemInfo::PageSize, dwVirtualProtectFlags);
- }
- #endif
- DWORD oldProtect; // this is only for first page
- BOOL retVal = VirtualProtect(address, pageCount * AutoSystemInfo::PageSize, dwVirtualProtectFlags, &oldProtect);
- if (retVal == FALSE)
- {
- CustomHeap_BadPageState_unrecoverable_error((ULONG_PTR)this);
- }
- else
- {
- Assert(oldProtect == desiredOldProtectFlag);
- }
- return retVal;
- }
- template<typename T>
- void
- HeapPageAllocator<T>::TrackDecommittedPages(void * address, uint pageCount, __in void* segmentParam)
- {
- PageSegmentBase<T> * segment = (PageSegmentBase<T>*)segmentParam;
- ASSERT_THREAD();
- Assert(!this->HasMultiThreadAccess());
- Assert(pageCount <= this->maxAllocPageCount);
- DListBase<PageSegmentBase<T>> * fromSegmentList = this->GetSegmentList(segment);
- // Update the state of the segment with the decommitted pages
- segment->template DecommitPages<true>(address, pageCount);
- // Move the segment to its appropriate list
- this->TransferSegment(segment, fromSegmentList);
- }
template<typename T>
// Allocates secondary (xdata/pdata unwind info) space for a function from the
// given segment's secondary allocator. Returns true on success. For page
// segments, if the allocation exhausts the secondary allocator, the segment is
// moved to the full list and its free pages are removed from the allocator's
// free-page count (those pages become unusable for primary allocation).
bool HeapPageAllocator<T>::AllocSecondary(void* segmentParam, ULONG_PTR functionStart, DWORD functionSize, ushort pdataCount, ushort xdataSize, SecondaryAllocation* allocation)
{
    SegmentBase<T> * segment = (SegmentBase<T> *)segmentParam;
    Assert(segment->GetSecondaryAllocator());
    bool success;
    if (this->IsPageSegment(segment))
    {
        PageSegmentBase<T>* pageSegment = static_cast<PageSegmentBase<T>*>(segment);
        // We should get the segment list BEFORE xdata allocation happens,
        // because a successful Alloc can change which list GetSegmentList reports.
        DListBase<PageSegmentBase<T>> * fromSegmentList = this->GetSegmentList(pageSegment);
        success = segment->GetSecondaryAllocator()->Alloc(functionStart, functionSize, pdataCount, xdataSize, allocation);
        // If no more XDATA allocations can take place, retire the segment to the
        // full list (unless it was already there) and write off its free pages.
        if (success && !pageSegment->CanAllocSecondary() && fromSegmentList != &this->fullSegments)
        {
            AssertMsg(this->GetSegmentList(pageSegment) == &this->fullSegments, "This segment should now be in the full list if it can't allocate secondary");
            OUTPUT_TRACE(Js::EmitterPhase, _u("XDATA Wasted pages:%u\n"), pageSegment->GetFreePageCount());
            // The remaining free pages are stranded: subtract them from the
            // allocator-wide free count so they are never handed out.
            this->freePageCount -= pageSegment->GetFreePageCount();
            fromSegmentList->MoveElementTo(pageSegment, &this->fullSegments);
#if DBG
            UpdateMinimum(this->debugMinFreePageCount, this->freePageCount);
#endif
        }
    }
    else
    {
        // A large segment should always be able to do secondary allocations
        Assert(segment->CanAllocSecondary());
        success = segment->GetSecondaryAllocator()->Alloc(functionStart, functionSize, pdataCount, xdataSize, allocation);
    }
#ifdef _M_X64
    // In ARM it's OK to have xdata size be 0, so the null check only applies on x64.
    // NOTE(review): this assert assumes Alloc populated allocation->address even
    // when success is false here — confirm Alloc cannot fail on this path on x64.
    AssertMsg(allocation->address != nullptr, "All segments that cannot allocate xdata should have been already moved to full segments list");
#endif
    return success;
}
template<typename T>
// Releases a secondary (xdata) allocation back to its segment's secondary
// allocator. Returns true only when the release caused a page segment to move
// off the full list (i.e. its free pages were added back to the allocator's
// free-page pool); returns false otherwise.
bool HeapPageAllocator<T>::ReleaseSecondary(const SecondaryAllocation& allocation, void* segmentParam)
{
    SegmentBase<T> * segment = (SegmentBase<T>*)segmentParam;
    Assert(allocation.address != nullptr);
    Assert(segment->GetSecondaryAllocator());
    if (this->IsPageSegment(segment))
    {
        PageSegmentBase<T>* pageSegment = static_cast<PageSegmentBase<T>*>(segment);
        // Sample the segment's list BEFORE and AFTER the release; a difference
        // means the release changed which list the segment belongs on.
        auto fromList = this->GetSegmentList(pageSegment);
        pageSegment->GetSecondaryAllocator()->Release(allocation);
        auto toList = this->GetSegmentList(pageSegment);
        if (fromList != toList)
        {
            OUTPUT_TRACE(Js::EmitterPhase, _u("XDATA reclaimed pages:%u\n"), pageSegment->GetFreePageCount());
            fromList->MoveElementTo(pageSegment, toList);
            AssertMsg(fromList == &this->fullSegments, "Releasing a secondary allocator should make a state change only if the segment was originally in the full list");
            AssertMsg(pageSegment->CanAllocSecondary(), "It should be allocate secondary now");
            // Free pages that were written off when the segment went full are
            // usable again; return them to the allocator-wide count.
            this->AddFreePageCount(pageSegment->GetFreePageCount());
            return true;
        }
    }
    else
    {
        // Large segments never transition lists on secondary release.
        Assert(segment->CanAllocSecondary());
        segment->GetSecondaryAllocator()->Release(allocation);
    }
    return false;
}
- template<typename T>
- bool
- HeapPageAllocator<T>::IsAddressFromAllocator(__in void* address)
- {
- typename DListBase<PageSegmentBase<T>>::Iterator segmentsIterator(&this->segments);
- while (segmentsIterator.Next())
- {
- if (this->IsAddressInSegment(address, segmentsIterator.Data()))
- {
- return true;
- }
- }
- typename DListBase<PageSegmentBase<T>>::Iterator fullSegmentsIterator(&this->fullSegments);
- while (fullSegmentsIterator.Next())
- {
- if (this->IsAddressInSegment(address, fullSegmentsIterator.Data()))
- {
- return true;
- }
- }
- typename DListBase<SegmentBase<T>>::Iterator largeSegmentsIterator(&this->largeSegments);
- while (largeSegmentsIterator.Next())
- {
- if (this->IsAddressInSegment(address, largeSegmentsIterator.Data()))
- {
- return true;
- }
- }
- typename DListBase<PageSegmentBase<T>>::Iterator decommitSegmentsIterator(&this->decommitSegments);
- while (decommitSegmentsIterator.Next())
- {
- if (this->IsAddressInSegment(address, decommitSegmentsIterator.Data()))
- {
- return true;
- }
- }
- return false;
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- bool
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::IsAddressInSegment(__in void* address, const TPageSegment& segment)
- {
- bool inSegment = this->IsAddressInSegment(address, static_cast<const TSegment&>(segment));
- if (inSegment)
- {
- return !segment.IsFreeOrDecommitted(address);
- }
- return inSegment;
- }
- template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
- bool
- PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::IsAddressInSegment(__in void* address, const TSegment& segment)
- {
- return segment.IsInSegment(address);
- }
- #if PDATA_ENABLED
- #include "Memory/XDataAllocator.h"
- template<typename T>
- bool HeapPageAllocator<T>::CreateSecondaryAllocator(SegmentBase<T>* segment, bool committed, SecondaryAllocator** allocator)
- {
- Assert(segment->GetAllocator() == this);
- Assert(segment->IsInCustomHeapAllocator());
- // If we are not allocating xdata there is nothing to do
- // ARM might allocate XDATA but not have a reserved region for it (no secondary alloc reserved space)
- if (!allocXdata)
- {
- Assert(segment->GetSecondaryAllocSize() == 0);
- *allocator = nullptr;
- return true;
- }
- if (!committed && segment->GetSecondaryAllocSize() != 0 &&
- !this->GetVirtualAllocator()->AllocPages(segment->GetSecondaryAllocStartAddress(), segment->GetSecondaryAllocPageCount(), MEM_COMMIT, PAGE_READWRITE, true))
- {
- *allocator = nullptr;
- return false;
- }
- XDataAllocator* secondaryAllocator = HeapNewNoThrow(XDataAllocator, (BYTE*)segment->GetSecondaryAllocStartAddress(), segment->GetSecondaryAllocSize());
- bool success = false;
- if (secondaryAllocator)
- {
- if (secondaryAllocator->Initialize((BYTE*)segment->GetAddress(), (BYTE*)segment->GetEndAddress()))
- {
- success = true;
- }
- else
- {
- HeapDelete(secondaryAllocator);
- secondaryAllocator = nullptr;
- }
- }
- *allocator = secondaryAllocator;
- return success;
- }
- #endif
- template<typename T>
- uint PageSegmentBase<T>::GetCountOfFreePages() const
- {
- return this->freePages.Count();
- }
- template<typename T>
- uint PageSegmentBase<T>::GetNextBitInFreePagesBitVector(uint index) const
- {
- return this->freePages.GetNextBit(index);
- }
- template<typename T>
- BOOLEAN PageSegmentBase<T>::TestRangeInFreePagesBitVector(uint index, uint pageCount) const
- {
- return this->freePages.TestRange(index, pageCount);
- }
- template<typename T>
- BOOLEAN PageSegmentBase<T>::TestInFreePagesBitVector(uint index) const
- {
- return this->freePages.Test(index);
- }
- template<typename T>
- void PageSegmentBase<T>::ClearAllInFreePagesBitVector()
- {
- return this->freePages.ClearAll();
- }
- template<typename T>
- void PageSegmentBase<T>::ClearRangeInFreePagesBitVector(uint index, uint pageCount)
- {
- return this->freePages.ClearRange(index, pageCount);
- }
- template<typename T>
- void PageSegmentBase<T>::SetRangeInFreePagesBitVector(uint index, uint pageCount)
- {
- return this->freePages.SetRange(index, pageCount);
- }
- template<typename T>
- void PageSegmentBase<T>::ClearBitInFreePagesBitVector(uint index)
- {
- return this->freePages.Clear(index);
- }
- template<typename T>
- BOOLEAN PageSegmentBase<T>::TestInDecommitPagesBitVector(uint index) const
- {
- return this->decommitPages.Test(index);
- }
- template<typename T>
- BOOLEAN PageSegmentBase<T>::TestRangeInDecommitPagesBitVector(uint index, uint pageCount) const
- {
- return this->decommitPages.TestRange(index, pageCount);
- }
- template<typename T>
- void PageSegmentBase<T>::SetRangeInDecommitPagesBitVector(uint index, uint pageCount)
- {
- return this->decommitPages.SetRange(index, pageCount);
- }
- template<typename T>
- void PageSegmentBase<T>::ClearRangeInDecommitPagesBitVector(uint index, uint pageCount)
- {
- return this->decommitPages.ClearRange(index, pageCount);
- }
- template<typename T>
- uint PageSegmentBase<T>::GetCountOfDecommitPages() const
- {
- return this->decommitPages.Count();
- }
template<typename T>
// Set a single bit in the decommit-pages bit vector.
void PageSegmentBase<T>::SetBitInDecommitPagesBitVector(uint index)
{
    this->decommitPages.Set(index);
}
template<typename T>
template <bool noPageAligned>
// Thin wrapper that forwards to AllocDecommitPages with this segment's own
// free-pages and decommit-pages bit vectors. noPageAligned is forwarded as the
// second template argument.
char * PageSegmentBase<T>::DoAllocDecommitPages(uint pageCount)
{
    return this->AllocDecommitPages<PageSegmentBase<T>::PageBitVector, noPageAligned>(pageCount, this->freePages, this->decommitPages);
}
- template<typename T>
- uint PageSegmentBase<T>::GetMaxPageCount()
- {
- return MaxPageCount;
- }
namespace Memory
{
    // Instantiate all the Templates in this class below.
    // Explicit instantiation keeps these template definitions usable from other
    // translation units without exposing the definitions in a header.

    // In-process allocators (regular VirtualAlloc-backed memory).
    template class PageAllocatorBase < VirtualAllocWrapper >;
    template class HeapPageAllocator < VirtualAllocWrapper >;
    template class SegmentBase < VirtualAllocWrapper >;
    template class PageSegmentBase < VirtualAllocWrapper >;
#if ENABLE_NATIVE_CODEGEN
    // JIT allocators over a pre-reserved address range.
    template class PageAllocatorBase < PreReservedVirtualAllocWrapper >;
    template class HeapPageAllocator < PreReservedVirtualAllocWrapper >;
    template class SegmentBase < PreReservedVirtualAllocWrapper >;
    template class PageSegmentBase < PreReservedVirtualAllocWrapper >;
#endif

#if ENABLE_OOP_NATIVE_CODEGEN
    // Out-of-process JIT allocators (section-backed memory shared with the JIT process).
    template class PageAllocatorBase < SectionAllocWrapper >;
    template class PageAllocatorBase < PreReservedSectionAllocWrapper >;
    template class HeapPageAllocator < SectionAllocWrapper >;
    template class HeapPageAllocator < PreReservedSectionAllocWrapper >;
    template class SegmentBase < SectionAllocWrapper >;
    template class SegmentBase < PreReservedSectionAllocWrapper >;
    template class PageSegmentBase < SectionAllocWrapper >;
    template class PageSegmentBase < PreReservedSectionAllocWrapper >;
#endif
}
|