فهرست منبع

[MERGE #368] Improve JIT code page allocator virtual address use and reduce standing free pages.

Merge pull request #368 from curtisman:page
Share CodePageAllocator within a thread by moving it into ThreadContext.
This reduces the amount of virtual address space we reserve, and tightens usage of the prereserved segment.

Also commit only the pages requested, instead of the whole segment, if the maxFreePageCount would be exceeded.
When we don't have a lot of pages used within a page allocator, we waste about ~100K per page allocator,
because we commit the whole segment (128K) and only use a couple of pages. So in general, we waste
about 500K-1M per thread, depending on how many script contexts are in the thread context.

Refactor the code that manages HeapPageAllocators in CustomHeap into CodePageAllocator.
Rationalize the locking and IsNativeAddress checks
Reimplement IsInRange as IsInHeap to walk the heap list instead of relying on the page allocator,
which is now shared.
Curtis Man 10 سال پیش
والد
کامیت
783d034e10

+ 9 - 6
lib/Backend/CodeGenAllocators.cpp

@@ -10,18 +10,21 @@ CodeGenAllocators::CodeGenAllocators(AllocationPolicyManager * policyManager, Js
                  PageAllocator::DefaultLowMaxFreePageCount :
                  PageAllocator::DefaultMaxFreePageCount))
 , allocator(L"NativeCode", &pageAllocator, Js::Throw::OutOfMemory)
-, emitBufferManager(policyManager, &allocator, scriptContext, L"JIT code buffer", ALLOC_XDATA)
+, emitBufferManager(&allocator, scriptContext->GetThreadContext()->GetCodePageAllocators(), scriptContext, L"JIT code buffer")
 #if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
 , canCreatePreReservedSegment(false)
 #endif
-#ifdef PERF_COUNTERS
-, staticNativeCodeData(0)
-#endif
 {
 }
 
 CodeGenAllocators::~CodeGenAllocators()
 {
-    PERF_COUNTER_SUB(Code, StaticNativeCodeDataSize, staticNativeCodeData);
-    PERF_COUNTER_SUB(Code, TotalNativeCodeDataSize, staticNativeCodeData);
 }
+
+#if DBG
+void
+CodeGenAllocators::ClearConcurrentThreadId()
+{    
+    this->pageAllocator.ClearConcurrentThreadId();
+}
+#endif

+ 9 - 14
lib/Backend/CodeGenAllocators.h

@@ -4,27 +4,22 @@
 //-------------------------------------------------------------------------------------------------------
 #pragma once
 
-#if PDATA_ENABLED
-#define ALLOC_XDATA (true)
-#else
-#define ALLOC_XDATA (false)
-#endif
-
-struct CodeGenAllocators
+class CodeGenAllocators
 {
     // emitBufferManager depends on allocator which in turn depends on pageAllocator, make sure the sequence is right
+private:
     PageAllocator pageAllocator;
-    NoRecoverMemoryArenaAllocator    allocator;
+    NoRecoverMemoryArenaAllocator  allocator;
+public:
     EmitBufferManager<CriticalSection> emitBufferManager;
 #if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
     bool canCreatePreReservedSegment;
 #endif
 
-#ifdef PERF_COUNTERS
-    size_t staticNativeCodeData;
-#endif
-
-    CodeGenAllocators(AllocationPolicyManager * policyManager, Js::ScriptContext * scriptContext);
-    PageAllocator *GetPageAllocator() { return &pageAllocator; };
+    CodeGenAllocators(AllocationPolicyManager * policyManager, Js::ScriptContext * scriptContext);    
     ~CodeGenAllocators();
+
+#if DBG
+    void ClearConcurrentThreadId();
+#endif
 };

+ 2 - 2
lib/Backend/CodeGenNumberAllocator.cpp

@@ -106,7 +106,7 @@ CodeGenNumberThreadAllocator::AllocNewNumberBlock()
     {
         Assert(cs.IsLocked());
         // Reserve the segment, but not committing it
-        currentNumberSegment = PageAllocator::AllocPageSegment(pendingIntegrationNumberSegment, this->recycler->GetRecyclerLeafPageAllocator(), true);
+        currentNumberSegment = PageAllocator::AllocPageSegment(pendingIntegrationNumberSegment, this->recycler->GetRecyclerLeafPageAllocator(), false, true);
         if (currentNumberSegment == nullptr)
         {
             currentNumberBlockEnd = nullptr;
@@ -153,7 +153,7 @@ CodeGenNumberThreadAllocator::AllocNewChunkBlock()
     {
         Assert(cs.IsLocked());
         // Reserve the segment, but not committing it
-        currentChunkSegment = PageAllocator::AllocPageSegment(pendingIntegrationChunkSegment, this->recycler->GetRecyclerPageAllocator(), true);
+        currentChunkSegment = PageAllocator::AllocPageSegment(pendingIntegrationChunkSegment, this->recycler->GetRecyclerPageAllocator(), false, true);
         if (currentChunkSegment == nullptr)
         {
             currentChunkBlockEnd = nullptr;

+ 10 - 12
lib/Backend/EmitBuffer.cpp

@@ -12,9 +12,9 @@ template class EmitBufferManager<CriticalSection>;
 //      Constructor
 //----------------------------------------------------------------------------
 template <typename SyncObject>
-EmitBufferManager<SyncObject>::EmitBufferManager(AllocationPolicyManager * policyManager, ArenaAllocator * allocator,
-    Js::ScriptContext * scriptContext, LPCWSTR name, bool allocXdata) :
-    allocationHeap(policyManager, allocator, allocXdata),
+EmitBufferManager<SyncObject>::EmitBufferManager(ArenaAllocator * allocator, CustomHeap::CodePageAllocators * codePageAllocators,
+    Js::ScriptContext * scriptContext, LPCWSTR name) :
+    allocationHeap(allocator, codePageAllocators),
     allocator(allocator),
     allocations(nullptr),
     scriptContext(scriptContext)
@@ -100,6 +100,13 @@ EmitBufferManager<SyncObject>::FreeAllocations(bool release)
 
 }
 
+template <typename SyncObject>
+bool EmitBufferManager<SyncObject>::IsInHeap(__in void* address)
+{
+    AutoRealOrFakeCriticalSection<SyncObject> autocs(&this->criticalSection);
+    return this->allocationHeap.IsInHeap(address);
+}
+
 class AutoCustomHeapPointer
 {
 public:
@@ -142,15 +149,6 @@ EmitBufferManager<SyncObject>::NewAllocation(size_t bytes, ushort pdataCount, us
 
     Assert(this->criticalSection.IsLocked());
 
-    PreReservedVirtualAllocWrapper  * preReservedVirtualAllocator = nullptr;
-
-    if (canAllocInPreReservedHeapPageSegment)
-    {
-        Assert(scriptContext && scriptContext->GetThreadContext());
-        preReservedVirtualAllocator = this->scriptContext->GetThreadContext()->GetPreReservedVirtualAllocator();
-        this->EnsurePreReservedPageAllocation(preReservedVirtualAllocator);
-    }
-
     bool isAllJITCodeInPreReservedRegion = true;
     CustomHeap::Allocation* heapAllocation = this->allocationHeap.Alloc(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, &isAllJITCodeInPreReservedRegion);
 

+ 2 - 53
lib/Backend/EmitBuffer.h

@@ -30,7 +30,7 @@ template <class SyncObject = FakeCriticalSection>
 class EmitBufferManager
 {
 public:
-    EmitBufferManager(AllocationPolicyManager * policyManager, ArenaAllocator * allocator, Js::ScriptContext * scriptContext, LPCWSTR name, bool allocXdata);
+    EmitBufferManager(ArenaAllocator * allocator, CustomHeap::CodePageAllocators * codePageAllocators, Js::ScriptContext * scriptContext, LPCWSTR name);
     ~EmitBufferManager();
 
     // All the following methods are guarded with the SyncObject
@@ -45,58 +45,7 @@ public:
     bool FreeAllocation(void* address);
     //Ends here
 
-    bool IsInRange(void* address)
-    {
-        return this->allocationHeap.IsInRange(address);
-    }
-
-    HeapPageAllocator<VirtualAllocWrapper>* GetHeapPageAllocator()
-    {
-        return this->allocationHeap.GetHeapPageAllocator();
-    }
-
-    HeapPageAllocator<PreReservedVirtualAllocWrapper>* GetPreReservedHeapPageAllocator()
-    {
-        return this->allocationHeap.GetPreReservedHeapPageAllocator();
-    }
-
-    char * EnsurePreReservedPageAllocation(PreReservedVirtualAllocWrapper * preReservedVirtualAllocator)
-    {
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
-        bool canPreReserveSegmentForCustomHeap = scriptContext && scriptContext->GetThreadContext()->CanPreReserveSegmentForCustomHeap();
-#endif
-        AssertMsg(preReservedVirtualAllocator, "Virtual Allocator for pre reserved Segment should not be null when EnsurePreReservedPageAllocation is called");
-
-        if (this->GetPreReservedHeapPageAllocator()->GetVirtualAllocator() == nullptr)
-        {
-            this->GetPreReservedHeapPageAllocator()->SetVirtualAllocator(preReservedVirtualAllocator);
-        }
-
-        if (preReservedVirtualAllocator->IsPreReservedRegionPresent())
-        {
-            return (char*) preReservedVirtualAllocator->GetPreReservedStartAddress();
-        }
-
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
-        if (!canPreReserveSegmentForCustomHeap)
-        {
-            VerboseHeapTrace(L"PRE-RESERVE: Upper Cap for PreReservedSegment reached.\n");
-            return nullptr;
-        }
-#endif
-        char * startAddressOfPreReservedRegion = this->allocationHeap.EnsurePreReservedPageAllocation(preReservedVirtualAllocator);
-
-        //We have newly reserved a segment at this point
-        if (startAddressOfPreReservedRegion != nullptr)
-        {
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
-            Assert(canPreReserveSegmentForCustomHeap);
-            this->scriptContext->GetThreadContext()->IncrementThreadContextsWithPreReservedSegment();
-#endif
-        }
-
-        return startAddressOfPreReservedRegion;
-    }
+    bool IsInHeap(void* address);
 
 #if DBG_DUMP
     void DumpAndResetStats(wchar_t const * source);

+ 0 - 1
lib/Backend/Func.h

@@ -75,7 +75,6 @@ public:
         Js::RegSlot returnValueRegSlot = Js::Constants::NoRegister, const bool isInlinedConstructor = false,
         Js::ProfileId callSiteIdInParentFunc = UINT16_MAX, bool isGetterSetter = false);
 public:
-    ArenaAllocator *GetCodeGenAllocator() const { return &this->m_codeGenAllocators->allocator; }
     CodeGenAllocators * const GetCodeGenAllocators()
     {
         return this->GetTopFunc()->m_codeGenAllocators;

+ 2 - 2
lib/Backend/InterpreterThunkEmitter.cpp

@@ -189,8 +189,8 @@ const BYTE InterpreterThunkEmitter::HeaderSize = sizeof(InterpreterThunk);
 const BYTE InterpreterThunkEmitter::ThunkSize = sizeof(Call);
 const uint InterpreterThunkEmitter::ThunksPerBlock = (BlockSize - HeaderSize) / ThunkSize;
 
-InterpreterThunkEmitter::InterpreterThunkEmitter(AllocationPolicyManager * policyManager, ArenaAllocator* allocator, void * interpreterThunk) :
-    emitBufferManager(policyManager, allocator, /*scriptContext*/ nullptr, L"Interpreter thunk buffer", /*allocXdata*/ false),
+InterpreterThunkEmitter::InterpreterThunkEmitter(ArenaAllocator* allocator, CustomHeap::CodePageAllocators * codePageAllocators, void * interpreterThunk) :
+    emitBufferManager(allocator, codePageAllocators, /*scriptContext*/ nullptr, L"Interpreter thunk buffer"),
     allocation(nullptr),
     allocator(allocator),
     thunkCount(0),

+ 4 - 3
lib/Backend/InterpreterThunkEmitter.h

@@ -116,7 +116,7 @@ public:
     static const uint BlockSize;
     static void* ConvertToEntryPoint(PVOID dynamicInterpreterThunk);
 
-    InterpreterThunkEmitter(AllocationPolicyManager * policyManager, ArenaAllocator* allocator, void * interpreterThunk);
+    InterpreterThunkEmitter(ArenaAllocator* allocator, CustomHeap::CodePageAllocators * codePageAllocators, void * interpreterThunk);
 
     BYTE* GetNextThunk(PVOID* ppDynamicInterpreterThunk);
 
@@ -124,10 +124,11 @@ public:
 
     void Close();
     void Release(BYTE* thunkAddress, bool addtoFreeList);
+
     // Returns true if the argument falls within the range managed by this buffer.
-    inline bool IsInRange(void* address)
+    inline bool IsInHeap(void* address)
     {
-        return emitBufferManager.IsInRange(address);
+        return emitBufferManager.IsInHeap(address);
     }
     const EmitBufferManager<>* GetEmitBufferManager() const
     {

+ 1 - 1
lib/Backend/LowerMDShared.cpp

@@ -5820,7 +5820,7 @@ LowererMD::GenerateCFGCheck(IR::Opnd * entryPointOpnd, IR::Instr * insertBeforeI
     if (m_func->CanAllocInPreReservedHeapPageSegment())
     {
         PreReservedVirtualAllocWrapper * preReservedVirtualAllocator = m_func->GetScriptContext()->GetThreadContext()->GetPreReservedVirtualAllocator();
-        preReservedRegionStartAddress = m_func->GetEmitBufferManager()->EnsurePreReservedPageAllocation(preReservedVirtualAllocator);
+        preReservedRegionStartAddress = (char *)preReservedVirtualAllocator->EnsurePreReservedRegion();
         if (preReservedRegionStartAddress != nullptr)
         {
             Assert(preReservedVirtualAllocator);

+ 1 - 1
lib/Backend/NativeCodeData.h

@@ -9,7 +9,7 @@
 #define NativeCodeDataNewArray(alloc, T, count) AllocatorNewArray(NativeCodeData::Allocator, alloc, T, count)
 #define NativeCodeDataNewArrayZ(alloc, T, count) AllocatorNewArrayZ(NativeCodeData::Allocator, alloc, T, count)
 
-struct CodeGenAllocators;
+class CodeGenAllocators;
 
 class NativeCodeData
 {

+ 1 - 24
lib/Backend/NativeCodeGenerator.cpp

@@ -109,9 +109,7 @@ NativeCodeGenerator::~NativeCodeGenerator()
         // We have already removed this manager from the job queue and hence its fine to set the threadId to -1.
         // We can't DissociatePageAllocator here as its allocated ui thread.
         //this->Processor()->DissociatePageAllocator(allocator->GetPageAllocator());
-        this->backgroundAllocators->emitBufferManager.GetHeapPageAllocator()->ClearConcurrentThreadId();
-        this->backgroundAllocators->emitBufferManager.GetPreReservedHeapPageAllocator()->ClearConcurrentThreadId();
-        this->backgroundAllocators->GetPageAllocator()->ClearConcurrentThreadId();
+        this->backgroundAllocators->ClearConcurrentThreadId();
 #endif
         // The native code generator may be deleted after Close was called on the job processor. In that case, the
         // background thread is no longer running, so clean things up in the foreground.
@@ -2661,18 +2659,6 @@ NativeCodeGenerator::EnterScriptStart()
     Processor()->PrioritizeManagerAndWait(this, CONFIG_FLAG(BgJitDelay) - CONFIG_FLAG(BgJitDelayFgBuffer));
 }
 
-// Is the given address within one of our JIT'd frame?
-bool
-IsNativeFunctionAddr(Js::ScriptContext *scriptContext, void * address)
-{
-    if (!scriptContext->GetNativeCodeGenerator())
-    {
-        return false;
-    }
-
-    return scriptContext->GetNativeCodeGenerator()->IsNativeFunctionAddr(address);
-}
-
 void
 FreeNativeCodeGenAllocation(Js::ScriptContext *scriptContext, void * address)
 {
@@ -2711,15 +2697,6 @@ bool NativeCodeGenerator::TryReleaseNonHiPriWorkItem(CodeGenWorkItem* workItem)
     return true;
 }
 
-// Called on the same thread that did the allocation
-bool
-NativeCodeGenerator::IsNativeFunctionAddr(void * address)
-{
-    return
-        (this->backgroundAllocators && this->backgroundAllocators->emitBufferManager.IsInRange(address)) ||
-        (this->foregroundAllocators && this->foregroundAllocators->emitBufferManager.IsInRange(address));
-}
-
 void
 NativeCodeGenerator::FreeNativeCodeGenAllocation(void* address)
 {

+ 0 - 1
lib/Backend/NativeCodeGenerator.h

@@ -97,7 +97,6 @@ public:
     void UpdateQueueForDebugMode();
     bool IsBackgroundJIT() const;
     void EnterScriptStart();
-    bool IsNativeFunctionAddr(void * address);
     void FreeNativeCodeGenAllocation(void* address);
     bool TryReleaseNonHiPriWorkItem(CodeGenWorkItem* workItem);
 

+ 1 - 2
lib/Backend/Opnd.cpp

@@ -3356,8 +3356,7 @@ Opnd::GetAddrDescription(__out_ecount(count) wchar_t *const description, const s
                 WriteToBuffer(&buffer, &n, L" (&StackLimit)");
             }
             else if (func->CanAllocInPreReservedHeapPageSegment() &&
-                func->GetScriptContext()->GetThreadContext()->GetPreReservedVirtualAllocator()->IsPreReservedRegionPresent() &&
-                address == func->GetScriptContext()->GetThreadContext()->GetPreReservedVirtualAllocator()->GetPreReservedEndAddress())
+                func->GetScriptContext()->GetThreadContext()->GetPreReservedVirtualAllocator()->IsPreReservedEndAddress(address))
             {
                 WriteToBuffer(&buffer, &n, L" (PreReservedCodeSegmentEnd)");
             }

+ 0 - 1
lib/Common/BackendApi.h

@@ -57,7 +57,6 @@ void UpdateNativeCodeGeneratorForDebugMode(NativeCodeGenerator* nativeCodeGen);
 CriticalSection *GetNativeCodeGenCriticalSection(NativeCodeGenerator *pNativeCodeGen);
 bool TryReleaseNonHiPriWorkItem(Js::ScriptContext* scriptContext, CodeGenWorkItem* workItem);
 void NativeCodeGenEnterScriptStart(NativeCodeGenerator * nativeCodeGen);
-bool IsNativeFunctionAddr(Js::ScriptContext *scriptContext, void * address);
 void FreeNativeCodeGenAllocation(Js::ScriptContext* scriptContext, void* address);
 CodeGenAllocators* GetForegroundAllocator(NativeCodeGenerator * nativeCodeGen, PageAllocator* pageallocator);
 void GenerateFunction(NativeCodeGenerator * nativeCodeGen, Js::FunctionBody * functionBody, Js::ScriptFunction * function = NULL);

+ 91 - 111
lib/Common/Memory/CustomHeap.cpp

@@ -20,21 +20,18 @@ namespace CustomHeap
 
 #pragma region "Constructor and Destructor"
 
-Heap::Heap(AllocationPolicyManager * policyManager, ArenaAllocator * alloc, bool allocXdata):
+Heap::Heap(ArenaAllocator * alloc, CodePageAllocators * codePageAllocators):
     auxiliaryAllocator(alloc),
-    allocXdata(allocXdata),
+    codePageAllocators(codePageAllocators)
 #if DBG_DUMP
-    freeObjectSize(0),
-    totalAllocationSize(0),
-    allocationsSinceLastCompact(0),
-    freesSinceLastCompact(0),
+    , freeObjectSize(0)
+    , totalAllocationSize(0)
+    , allocationsSinceLastCompact(0)
+    , freesSinceLastCompact(0)
 #endif
 #if DBG
-    inDtor(false),
+    , inDtor(false)
 #endif
-    pageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/),
-    preReservedHeapPageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/),
-    cs(4000)
 {
     for (int i = 0; i < NumBuckets; i++)
     {
@@ -55,6 +52,7 @@ Heap::~Heap()
 #pragma region "Public routines"
 void Heap::FreeAll()
 {
+    CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
     FreeBuckets(false);
 
     FreeLargeObjects();
@@ -132,7 +130,7 @@ bool Heap::Decommit(__in Allocation* object)
                 FreeXdata(&object->xdata, object->largeObjectAllocation.segment);
             }
 #endif
-            this->DecommitPages(object->address, object->GetPageCount(), object->largeObjectAllocation.segment);
+            this->codePageAllocators->DecommitPages(object->address, object->GetPageCount(), object->largeObjectAllocation.segment);
             this->largeObjectAllocations.MoveElementTo(object, &this->decommittedLargeObjects);
             object->largeObjectAllocation.isDecommitted = true;
             return true;
@@ -153,7 +151,7 @@ bool Heap::Decommit(__in Allocation* object)
 #endif
         bucket = object->page->currentBucket;
 
-        this->DecommitPages(object->page->address, 1, object->page->segment);
+        this->codePageAllocators->DecommitPages(object->page->address, 1, object->page->segment);
 
         if (this->ShouldBeInFullList(object->page))
         {
@@ -169,11 +167,50 @@ bool Heap::Decommit(__in Allocation* object)
     return true;
 }
 
-bool Heap::IsInRange(__in void* address)
+bool Heap::IsInHeap(DListBase<Page> const& bucket, __in void * address)
 {
-    AutoCriticalSection autocs(&this->cs);
+    DListBase<Page>::Iterator i(&bucket);
+    while (i.Next())
+    {
+        Page& page = i.Data();
+        if (page.address <= address && address < page.address + AutoSystemInfo::PageSize)
+        {
+            return true;
+        }
+    }
+    return false;
+}
 
-    return (this->preReservedHeapPageAllocator.GetVirtualAllocator()->IsInRange(address) || this->pageAllocator.IsAddressFromAllocator(address));
+bool Heap::IsInHeap(DListBase<Page> const buckets[NumBuckets], __in void * address)
+{
+    for (uint i = 0; i < NumBuckets; i++)
+    {
+        if (this->IsInHeap(buckets[i], address))
+        {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool Heap::IsInHeap(DListBase<Allocation> const& allocations, __in void *address)
+{
+    DListBase<Allocation>::Iterator i(&allocations);
+    while (i.Next())
+    {
+        Allocation& allocation = i.Data();
+        if (allocation.address <= address && address < allocation.address + allocation.size)
+        {
+            return true;
+        }
+    }
+    return false;
+}
+
+
+bool Heap::IsInHeap(__in void* address)
+{
+    return IsInHeap(buckets, address) || IsInHeap(fullPages, address) || IsInHeap(largeObjectAllocations, address);
 }
 
 /*
@@ -186,7 +223,7 @@ bool Heap::IsInRange(__in void* address)
 Allocation* Heap::Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
 {
     Assert(bytes > 0);
-    Assert((allocXdata || pdataCount == 0) && (!allocXdata || pdataCount > 0));
+    Assert((codePageAllocators->AllocXdata() || pdataCount == 0) && (!codePageAllocators->AllocXdata() || pdataCount > 0));
     Assert(pdataCount > 0 || (pdataCount == 0 && xdataSize == 0));
 
     // Round up to power of two to allocate, and figure out which bucket to allocate in
@@ -310,7 +347,7 @@ BOOL Heap::ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtect
         }
 
         VerboseHeapTrace(L"Protecting 0x%p with 0x%x\n", address, dwVirtualProtectFlags);
-        return this->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
+        return this->codePageAllocators->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
     }
     else
     {
@@ -325,7 +362,7 @@ BOOL Heap::ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtect
         pageCount = 1;
 
         VerboseHeapTrace(L"Protecting 0x%p with 0x%x\n", address, dwVirtualProtectFlags);
-        return this->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
+        return this->codePageAllocators->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
     }
 }
 
@@ -349,20 +386,8 @@ Allocation* Heap::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdata
 #endif
 
     {
-        AutoCriticalSection autocs(&this->cs);
-        if (canAllocInPreReservedHeapPageSegment)
-        {
-            address = this->preReservedHeapPageAllocator.Alloc(&pages, (SegmentBase<PreReservedVirtualAllocWrapper>**)(&segment));
-        }
-
-        if (address == nullptr)
-        {
-            if (isAnyJittedCode)
-            {
-                *isAllJITCodeInPreReservedRegion = false;
-            }
-            address = this->pageAllocator.Alloc(&pages, (Segment**)&segment);
-        }
+        CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
+        address = this->codePageAllocators->Alloc(&pages, &segment, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
 
         // Out of memory
         if (address == nullptr)
@@ -380,15 +405,14 @@ Allocation* Heap::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdata
         {
             protectFlags = PAGE_EXECUTE;
         }
-        this->ProtectPages(address, pages, segment, protectFlags /*dwVirtualProtectFlags*/, PAGE_READWRITE /*desiredOldProtectFlags*/);
+        this->codePageAllocators->ProtectPages(address, pages, segment, protectFlags /*dwVirtualProtectFlags*/, PAGE_READWRITE /*desiredOldProtectFlags*/);
 
 #if PDATA_ENABLED
         if(pdataCount > 0)
         {
-            if (!this->AllocSecondary(segment, (ULONG_PTR) address, bytes, pdataCount, xdataSize, &xdata))
+            if (!this->codePageAllocators->AllocSecondary(segment, (ULONG_PTR) address, bytes, pdataCount, xdataSize, &xdata))
             {
-                AutoCriticalSection autocs(&this->cs);
-                this->Release(address, pages, segment);
+                this->codePageAllocators->Release(address, pages, segment);
                 return nullptr;
             }
         }
@@ -399,13 +423,13 @@ Allocation* Heap::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdata
     Allocation* allocation = this->largeObjectAllocations.PrependNode(this->auxiliaryAllocator);
     if (allocation == nullptr)
     {
-        AutoCriticalSection autocs(&this->cs);
-        this->Release(address, pages, segment);
+        CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
+        this->codePageAllocators->Release(address, pages, segment);
 
 #if PDATA_ENABLED
         if(pdataCount > 0)
         {
-            this->ReleaseSecondary(xdata, segment);
+            this->codePageAllocators->ReleaseSecondary(xdata, segment);
         }
 #endif
         return nullptr;
@@ -438,13 +462,13 @@ Allocation* Heap::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdata
 
 void Heap::FreeDecommittedLargeObjects()
 {
-    // This is only call when the heap is being destroy, so don't need to sync with the background thread.
+    // CodePageAllocators is locked in FreeAll
     Assert(inDtor);
     FOREACH_DLISTBASE_ENTRY_EDITING(Allocation, allocation, &this->decommittedLargeObjects, largeObjectIter)
     {
         VerboseHeapTrace(L"Decommitting large object at address 0x%p of size %u\n", allocation.address, allocation.size);
 
-        this->ReleaseDecommitted(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
+        this->codePageAllocators->ReleaseDecommitted(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
 
         largeObjectIter.RemoveCurrent(this->auxiliaryAllocator);
     }
@@ -479,7 +503,7 @@ DWORD Heap::EnsureAllocationExecuteWriteable(Allocation* allocation)
 template <bool freeAll>
 bool Heap::FreeLargeObject(Allocation* address)
 {
-    AutoCriticalSection autocs(&this->cs);
+    CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
     FOREACH_DLISTBASE_ENTRY_EDITING(Allocation, allocation, &this->largeObjectAllocations, largeObjectIter)
     {
         if (address == (&allocation) || freeAll)
@@ -488,7 +512,7 @@ bool Heap::FreeLargeObject(Allocation* address)
 #if PDATA_ENABLED
             Assert(allocation.xdata.IsFreed());
 #endif
-            this->Release(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
+            this->codePageAllocators->Release(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
 
             largeObjectIter.RemoveCurrent(this->auxiliaryAllocator);
             if (!freeAll) return true;
@@ -520,12 +544,10 @@ Allocation* Heap::AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushor
     XDataAllocation xdata;
     if(pdataCount > 0)
     {
-        AutoCriticalSection autocs(&this->cs);
+        CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
+        if(!this->codePageAllocators->AllocSecondary(page->segment, (ULONG_PTR)address, bytes, pdataCount, xdataSize, &xdata))
         {
-            if(!this->AllocSecondary(page->segment, (ULONG_PTR)address, bytes, pdataCount, xdataSize, &xdata))
-            {
-                return nullptr;
-            }
+            return nullptr;
         }
     }
 #endif
@@ -536,8 +558,8 @@ Allocation* Heap::AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushor
 #if PDATA_ENABLED
         if(pdataCount > 0)
         {
-            AutoCriticalSection autocs(&this->cs);
-            this->ReleaseSecondary(xdata, page->segment);
+            CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
+            this->codePageAllocators->ReleaseSecondary(xdata, page->segment);
         }
 #endif
         return nullptr;
@@ -595,56 +617,14 @@ Allocation* Heap::AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushor
     return allocation;
 }
 
-char *
-Heap::EnsurePreReservedPageAllocation(PreReservedVirtualAllocWrapper * preReservedVirtualAllocator)
-{
-        AutoCriticalSection autocs(&this->cs);
-        Assert(preReservedVirtualAllocator != nullptr);
-        Assert(preReservedHeapPageAllocator.GetVirtualAllocator() == preReservedVirtualAllocator);
-
-        char * preReservedRegionStartAddress = (char*)preReservedVirtualAllocator->GetPreReservedStartAddress();
-        if (preReservedRegionStartAddress == nullptr)
-        {
-            preReservedRegionStartAddress = preReservedHeapPageAllocator.InitPageSegment();
-        }
-
-        if (preReservedRegionStartAddress == nullptr)
-        {
-            VerboseHeapTrace(L"PRE-RESERVE: PreReserved Segment CANNOT be allocated \n");
-        }
-        return preReservedRegionStartAddress;
-}
-
 Page* Heap::AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
 {
     void* pageSegment = nullptr;
 
     char* address = nullptr;
     {
-        AutoCriticalSection autocs(&this->cs);
-
-        if (canAllocInPreReservedHeapPageSegment)
-        {
-            address = this->preReservedHeapPageAllocator.AllocPages(1, (PageSegmentBase<PreReservedVirtualAllocWrapper>**)&pageSegment);
-
-            if (address == nullptr)
-            {
-                VerboseHeapTrace(L"PRE-RESERVE: PreReserved Segment CANNOT be allocated \n");
-            }
-        }
-
-        if (address == nullptr)    // if no space in Pre-reserved Page Segment, then allocate in regular ones.
-        {
-            if (isAnyJittedCode)
-            {
-                *isAllJITCodeInPreReservedRegion = false;
-            }
-            address = this->pageAllocator.AllocPages(1, (PageSegmentBase<VirtualAllocWrapper>**)&pageSegment);
-        }
-        else
-        {
-            VerboseHeapTrace(L"PRE-RESERVE: Allocing new page in PreReserved Segment \n");
-        }
+        CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
+        address = this->codePageAllocators->AllocPages(1, &pageSegment, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
     }
 
     if (address == nullptr)
@@ -666,7 +646,7 @@ Page* Heap::AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegm
     }
 
     //Change the protection of the page to Read-Only Execute, before adding it to the bucket list.
-    ProtectPages(address, 1, pageSegment, protectFlags, PAGE_READWRITE);
+    this->codePageAllocators->ProtectPages(address, 1, pageSegment, protectFlags, PAGE_READWRITE);
 
     // Switch to allocating on a list of pages so we can do leak tracking later
     VerboseHeapTrace(L"Allocing new page in bucket %d\n", bucket);
@@ -674,8 +654,8 @@ Page* Heap::AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegm
 
     if (page == nullptr)
     {
-        AutoCriticalSection autocs(&this->cs);
-        this->ReleasePages(address, 1, pageSegment);
+        CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
+        this->codePageAllocators->ReleasePages(address, 1, pageSegment);
         return nullptr;
     }
 
@@ -729,7 +709,7 @@ Page* Heap::FindPageToSplit(BucketId targetBucket, bool findPreReservedHeapPages
         #pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
         FOREACH_DLISTBASE_ENTRY_EDITING(Page, pageInBucket, &this->buckets[b], bucketIter)
         {
-            if (findPreReservedHeapPages && !IsPreReservedSegment(pageInBucket.segment))
+            if (findPreReservedHeapPages && !this->codePageAllocators->IsPreReservedSegment(pageInBucket.segment))
             {
                 //Find only pages that are pre-reserved using preReservedHeapPageAllocator
                 continue;
@@ -827,8 +807,8 @@ bool Heap::FreeAllocation(Allocation* object)
 #endif
             this->auxiliaryAllocator->Free(object, sizeof(Allocation));
             {
-                AutoCriticalSection autocs(&this->cs);
-                this->ReleasePages(pageAddress, 1, segment);
+                CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
+                this->codePageAllocators->ReleasePages(pageAddress, 1, segment);
             }
             VerboseHeapTrace(L"FastPath: freeing page-sized object directly\n");
             return true;
@@ -880,8 +860,8 @@ bool Heap::FreeAllocation(Allocation* object)
             {
                 VerboseHeapTrace(L"Removing page in bucket %d\n", page->currentBucket);
                 {
-                    AutoCriticalSection autocs(&this->cs);
-                    this->ReleasePages(page->address, 1, page->segment);
+                    CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
+                    this->codePageAllocators->ReleasePages(page->address, 1, page->segment);
                 }
                 pageIter.RemoveCurrent(this->auxiliaryAllocator);
 
@@ -908,18 +888,18 @@ bool Heap::FreeAllocation(Allocation* object)
         {
             protectFlags = PAGE_EXECUTE;
         }
-        this->ProtectPages(page->address, 1, segment, protectFlags, PAGE_EXECUTE_READWRITE);
+        this->codePageAllocators->ProtectPages(page->address, 1, segment, protectFlags, PAGE_EXECUTE_READWRITE);
         return true;
     }
 }
 
 void Heap::FreeDecommittedBuckets()
 {
-    // This is only call when the heap is being destroy, so don't need to sync with the background thread.
+    // CodePageAllocators is locked in FreeAll
     Assert(inDtor);
     FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &this->decommittedPages, iter)
     {
-        this->TrackDecommittedPages(page.address, 1, page.segment);
+        this->codePageAllocators->TrackDecommittedPages(page.address, 1, page.segment);
         iter.RemoveCurrent(this->auxiliaryAllocator);
     }
     NEXT_DLISTBASE_ENTRY_EDITING;
@@ -927,14 +907,14 @@ void Heap::FreeDecommittedBuckets()
 
 void Heap::FreePage(Page* page)
 {
-    // This is only call when the heap is being destroy, so don't need to sync with the background thread.
+    // CodePageAllocators is locked in FreeAll
     Assert(inDtor);
     DWORD pageSize = AutoSystemInfo::PageSize;
     EnsurePageWriteable(page);
     size_t freeSpace = page->freeBitVector.Count() * Page::Alignment;
 
     VerboseHeapTrace(L"Removing page in bucket %d, freeSpace: %d\n", page->currentBucket, freeSpace);
-    this->ReleasePages(page->address, 1, page->segment);
+    this->codePageAllocators->ReleasePages(page->address, 1, page->segment);
 
 #if DBG_DUMP
     this->freeObjectSize -= freeSpace;
@@ -944,7 +924,7 @@ void Heap::FreePage(Page* page)
 
 void Heap::FreeBucket(DListBase<Page>* bucket, bool freeOnlyEmptyPages)
 {
-    // This is only call when the heap is being destroy, so don't need to sync with the background thread.
+    // CodePageAllocators is locked in FreeAll
     Assert(inDtor);
     FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, bucket, pageIter)
     {
@@ -960,7 +940,7 @@ void Heap::FreeBucket(DListBase<Page>* bucket, bool freeOnlyEmptyPages)
 
 void Heap::FreeBuckets(bool freeOnlyEmptyPages)
 {
-    // This is only call when the heap is being destroy, so don't need to sync with the background thread.
+    // CodePageAllocators is locked in FreeAll
     Assert(inDtor);
     for (int i = 0; i < NumBuckets; i++)
     {
@@ -992,8 +972,8 @@ void Heap::FreeXdata(XDataAllocation* xdata, void* segment)
         }, this->fullPages, this->buckets);
     }
     {
-        AutoCriticalSection autocs(&this->cs);
-        this->ReleaseSecondary(*xdata, segment);
+        CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
+        this->codePageAllocators->ReleaseSecondary(*xdata, segment);
         xdata->Free();
     }
 }

+ 143 - 42
lib/Common/Memory/CustomHeap.h

@@ -131,63 +131,100 @@ struct Allocation
 
 };
 
-/*
- * Simple free-listing based heap allocator
- *
- * Each allocation is tracked using a "HeapAllocation" record
- * Once we alloc, we start assigning chunks sliced from the end of a HeapAllocation
- * If we don't have enough to slice off, we push a new heap allocation record to the record stack, and try and assign from that
- */
-class Heap
+// Wrapper for the two HeapPageAllocator with and without the prereserved segment.
+// Supports multiple thread access. Require explicit locking (via CodePageAllocator::AutoLock)
+class CodePageAllocators
 {
 public:
-    Heap(AllocationPolicyManager * policyManager, ArenaAllocator * alloc, bool allocXdata);
-
-    Allocation* Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
-    bool Free(__in Allocation* allocation);
-    bool Decommit(__in Allocation* allocation);
-    void FreeAll();
-    bool IsInRange(__in void* address);
+    class AutoLock : public AutoCriticalSection
+    {
+    public:
+        AutoLock(CodePageAllocators * codePageAllocators) : AutoCriticalSection(&codePageAllocators->cs) {};
+    };
 
-    template<typename T>
-    HeapPageAllocator<T>* GetPageAllocator(Page * page)
+    CodePageAllocators(AllocationPolicyManager * policyManager, bool allocXdata, PreReservedVirtualAllocWrapper * virtualAllocator) :
+        pageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, nullptr),
+        preReservedHeapPageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, virtualAllocator),
+        cs(4000)
     {
-        AssertMsg(page, "Why is page null?");
-        return GetPageAllocator<T>(page->segment);
+#if DBG
+        this->preReservedHeapPageAllocator.ClearConcurrentThreadId();
+        this->pageAllocator.ClearConcurrentThreadId();
+#endif
     }
 
-    template<typename T>
-    HeapPageAllocator<T>* GetPageAllocator(void * segmentParam)
+    bool AllocXdata()
     {
-        SegmentBase<T> * segment = (SegmentBase<T>*)segmentParam;
-        AssertMsg(segment, "Why is segment null?");
-        AssertMsg(segment->GetAllocator(), "Segment doesn't have an allocator?");
-
-        Assert((HeapPageAllocator<VirtualAllocWrapper>*)(segment->GetAllocator()) == &this->pageAllocator ||
-            (HeapPageAllocator<PreReservedVirtualAllocWrapper>*)(segment->GetAllocator()) == &this->preReservedHeapPageAllocator);
-
-        return (HeapPageAllocator<T> *)(segment->GetAllocator());
+        // Simple immutable data access, no need for lock
+        return preReservedHeapPageAllocator.AllocXdata();
     }
 
     bool IsPreReservedSegment(void * segment)
     {
+        // Simple immutable data access, no need for lock
         Assert(segment);
         return (((Segment*)(segment))->IsInPreReservedHeapPageAllocator());
     }
 
-    HeapPageAllocator<PreReservedVirtualAllocWrapper> * GetPreReservedHeapPageAllocator()
+    bool IsInNonPreReservedPageAllocator(__in void *address)
+    {
+        Assert(this->cs.IsLocked());
+        return this->pageAllocator.IsAddressFromAllocator(address);
+    }
+
+    char * Alloc(size_t * pages, void ** segment, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, bool * isAllJITCodeInPreReservedRegion)
     {
-        return &preReservedHeapPageAllocator;
+        Assert(this->cs.IsLocked());
+        char* address = nullptr;
+        if (canAllocInPreReservedHeapPageSegment)
+        {
+            address = this->preReservedHeapPageAllocator.Alloc(pages, (SegmentBase<PreReservedVirtualAllocWrapper>**)(segment));
+        }
+
+        if (address == nullptr)
+        {
+            if (isAnyJittedCode)
+            {
+                *isAllJITCodeInPreReservedRegion = false;
+            }
+            address = this->pageAllocator.Alloc(pages, (Segment**)segment);
+        }
+        return address;
     }
 
-    HeapPageAllocator<VirtualAllocWrapper>* GetHeapPageAllocator()
+    char * AllocPages(size_t pages, void ** pageSegment, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, bool * isAllJITCodeInPreReservedRegion)
     {
-        Assert(!pageAllocator.GetVirtualAllocator()->IsPreReservedRegionPresent());
-        return &pageAllocator;
+        Assert(this->cs.IsLocked());
+        char * address = nullptr;
+        if (canAllocInPreReservedHeapPageSegment)
+        {
+            address = this->preReservedHeapPageAllocator.AllocPages(1, (PageSegmentBase<PreReservedVirtualAllocWrapper>**)pageSegment);
+
+            if (address == nullptr)
+            {
+                VerboseHeapTrace(L"PRE-RESERVE: PreReserved Segment CANNOT be allocated \n");
+            }
+        }
+
+        if (address == nullptr)    // if no space in Pre-reserved Page Segment, then allocate in regular ones.
+        {
+            if (isAnyJittedCode)
+            {
+                *isAllJITCodeInPreReservedRegion = false;
+            }
+            address = this->pageAllocator.AllocPages(1, (PageSegmentBase<VirtualAllocWrapper>**)pageSegment);
+        }
+        else
+        {
+            VerboseHeapTrace(L"PRE-RESERVE: Allocing new page in PreReserved Segment \n");
+        }
+
+        return address;
     }
 
     void ReleasePages(void* pageAddress, uint pageCount, __in void* segment)
     {
+        Assert(this->cs.IsLocked());
         Assert(segment);
         if (IsPreReservedSegment(segment))
         {
@@ -201,6 +238,8 @@ public:
 
     BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag)
     {
+        // This is merely a wrapper for VirtualProtect, no need to synchornize, and doesn't touch any data.
+        // No need to assert locked.
         Assert(segment);
         if (IsPreReservedSegment(segment))
         {
@@ -214,6 +253,7 @@ public:
 
     void TrackDecommittedPages(void * address, uint pageCount, __in void* segment)
     {
+        Assert(this->cs.IsLocked());
         Assert(segment);
         if (IsPreReservedSegment(segment))
         {
@@ -227,6 +267,7 @@ public:
 
     void ReleaseSecondary(const SecondaryAllocation& allocation, void* segment)
     {
+        Assert(this->cs.IsLocked());
         Assert(segment);
         if (IsPreReservedSegment(segment))
         {
@@ -240,6 +281,8 @@ public:
 
     void DecommitPages(__in char* address, size_t pageCount, void* segment)
     {
+        // This is merely a wrapper for VirtualFree, no need to synchornize, and doesn't touch any data.
+        // No need to assert locked.
         Assert(segment);
         if (IsPreReservedSegment(segment))
         {
@@ -253,6 +296,7 @@ public:
 
     bool AllocSecondary(void* segment, ULONG_PTR functionStart, size_t functionSize_t, ushort pdataCount, ushort xdataSize, SecondaryAllocation* allocation)
     {
+        Assert(this->cs.IsLocked());
         Assert(functionSize_t <= MAXUINT32);
         DWORD functionSize = static_cast<DWORD>(functionSize_t);
         Assert(segment);
@@ -268,6 +312,7 @@ public:
 
     void Release(void * address, size_t pageCount, void * segment)
     {
+        Assert(this->cs.IsLocked());
         Assert(segment);
         if (IsPreReservedSegment(segment))
         {
@@ -281,6 +326,7 @@ public:
 
     void ReleaseDecommitted(void * address, size_t pageCount, __in void *  segment)
     {
+        Assert(this->cs.IsLocked());
         Assert(segment);
         if (IsPreReservedSegment(segment))
         {
@@ -291,14 +337,68 @@ public:
             this->GetPageAllocator<VirtualAllocWrapper>(segment)->ReleaseDecommitted(address, pageCount, segment);
         }
     }
+private:
+
+    template<typename T>
+    HeapPageAllocator<T>* GetPageAllocator(Page * page)
+    {
+        AssertMsg(page, "Why is page null?");
+        return GetPageAllocator<T>(page->segment);
+    }
+
+    template<typename T>
+    HeapPageAllocator<T>* GetPageAllocator(void * segmentParam);
+
+    template <>
+    HeapPageAllocator<VirtualAllocWrapper>* GetPageAllocator(void * segmentParam)
+    {
+        SegmentBase<VirtualAllocWrapper> * segment = (SegmentBase<VirtualAllocWrapper>*)segmentParam;
+        AssertMsg(segment, "Why is segment null?");
+        Assert((HeapPageAllocator<VirtualAllocWrapper>*)(segment->GetAllocator()) == &this->pageAllocator);
+        return (HeapPageAllocator<VirtualAllocWrapper> *)(segment->GetAllocator());
+    }
+
+
+    template<>
+    HeapPageAllocator<PreReservedVirtualAllocWrapper>* GetPageAllocator(void * segmentParam)
+    {
+        SegmentBase<PreReservedVirtualAllocWrapper> * segment = (SegmentBase<PreReservedVirtualAllocWrapper>*)segmentParam;
+        AssertMsg(segment, "Why is segment null?");
+        Assert((HeapPageAllocator<PreReservedVirtualAllocWrapper>*)(segment->GetAllocator()) == &this->preReservedHeapPageAllocator);
+        return (HeapPageAllocator<PreReservedVirtualAllocWrapper> *)(segment->GetAllocator());
+    }
+
+    HeapPageAllocator<VirtualAllocWrapper>               pageAllocator;
+    HeapPageAllocator<PreReservedVirtualAllocWrapper>    preReservedHeapPageAllocator;
+    CriticalSection cs;
+};
 
-    char * EnsurePreReservedPageAllocation(PreReservedVirtualAllocWrapper * preReservedVirtualAllocator);
+/*
+ * Simple free-listing based heap allocator
+ *
+ * Each allocation is tracked using a "HeapAllocation" record
+ * Once we alloc, we start assigning chunks sliced from the end of a HeapAllocation
+ * If we don't have enough to slice off, we push a new heap allocation record to the record stack, and try and assign from that
+ *
+ * Single thread only. Require external locking.  (Currently, EmitBufferManager manage the locking)
+ */
+class Heap
+{
+public:
+    Heap(ArenaAllocator * alloc, CodePageAllocators * codePageAllocators);
+
+    Allocation* Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
+    bool Free(__in Allocation* allocation);
+    bool Decommit(__in Allocation* allocation);
+    void FreeAll();
+    bool IsInHeap(__in void* address);
+   
     // A page should be in full list if:
     // 1. It does not have any space
     // 2. Parent segment cannot allocate any more XDATA
     bool ShouldBeInFullList(Page* page)
     {
-        return page->HasNoSpace() || (allocXdata && !((Segment*)(page->segment))->CanAllocSecondary());
+        return page->HasNoSpace() || (codePageAllocators->AllocXdata() && !((Segment*)(page->segment))->CanAllocSecondary());
     }
 
     BOOL ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag, __in_opt char* addressInPage = nullptr);
@@ -367,7 +467,7 @@ private:
     {
         Assert(!page->isDecommitted);
 
-        BOOL result = this->ProtectPages(page->address, 1, page->segment, readWriteFlags, PAGE_EXECUTE);
+        BOOL result = this->codePageAllocators->ProtectPages(page->address, 1, page->segment, readWriteFlags, PAGE_EXECUTE);
         Assert(result && (PAGE_EXECUTE & readWriteFlags) == 0);
         return PAGE_EXECUTE;
     }
@@ -431,6 +531,11 @@ private:
     BVIndex     GetIndexInPage(__in Page* page, __in char* address);
     void        RemovePageFromFullList(Page* page);
 
+
+    bool IsInHeap(DListBase<Page> const buckets[NumBuckets], __in void *address);
+    bool IsInHeap(DListBase<Page> const& buckets, __in void *address);
+    bool IsInHeap(DListBase<Allocation> const& allocations, __in void *address);
+
     /**
      * Stats
      */
@@ -444,8 +549,7 @@ private:
     /**
      * Allocator stuff
      */
-    HeapPageAllocator<VirtualAllocWrapper>               pageAllocator;
-    HeapPageAllocator<PreReservedVirtualAllocWrapper>    preReservedHeapPageAllocator;
+    CodePageAllocators *                              codePageAllocators;
     ArenaAllocator*                                   auxiliaryAllocator;
 
     /*
@@ -458,9 +562,6 @@ private:
     DListBase<Page>        decommittedPages;
     DListBase<Allocation>  decommittedLargeObjects;
 
-    // Critical section synchronize in the BGJIT thread and IsInRange in the main thread
-    CriticalSection        cs;
-    bool                   allocXdata;
 #if DBG
     bool inDtor;
 #endif

+ 75 - 77
lib/Common/Memory/PageAllocator.cpp

@@ -86,13 +86,13 @@ SegmentBase<T>::Initialize(DWORD allocFlags, bool excludeGuardPages)
     if(Js::FaultInjection::Global.ShouldInjectFault(Js::FaultInjection::Global.NoThrow))
     {
         this->address = nullptr;
-        return(address != nullptr);
+        return false;
     }
 #endif
 
     if (!this->allocator->RequestAlloc(totalPages * AutoSystemInfo::PageSize))
     {
-        return nullptr;
+        return false;
     }
 
     this->address = (char *) GetAllocator()->GetVirtualAllocator()->Alloc(NULL, totalPages * AutoSystemInfo::PageSize, MEM_RESERVE | allocFlags, PAGE_READWRITE, this->IsInCustomHeapAllocator());
@@ -101,6 +101,7 @@ SegmentBase<T>::Initialize(DWORD allocFlags, bool excludeGuardPages)
 
     if (originalAddress != nullptr)
     {
+        bool committed = (allocFlags & MEM_COMMIT) != 0;
         if (addGuardPages)
         {
 #if DBG_DUMP
@@ -109,15 +110,19 @@ SegmentBase<T>::Initialize(DWORD allocFlags, bool excludeGuardPages)
             GUARD_PAGE_TRACE(L"Offset of Segment Start address: 0x%p\n", this->address + (leadingGuardPageCount*AutoSystemInfo::PageSize));
             GUARD_PAGE_TRACE(L"Starting address of Trailing Guard Pages: 0x%p\n", address + ((leadingGuardPageCount + this->segmentPageCount)*AutoSystemInfo::PageSize));
 #endif
+            if (committed)
+            {
 #pragma warning(suppress: 6250)
-            GetAllocator()->GetVirtualAllocator()->Free(address, leadingGuardPageCount*AutoSystemInfo::PageSize, MEM_DECOMMIT);
+                GetAllocator()->GetVirtualAllocator()->Free(address, leadingGuardPageCount * AutoSystemInfo::PageSize, MEM_DECOMMIT);
 #pragma warning(suppress: 6250)
-            GetAllocator()->GetVirtualAllocator()->Free(address + ((leadingGuardPageCount + this->segmentPageCount)*AutoSystemInfo::PageSize), trailingGuardPageCount*AutoSystemInfo::PageSize, MEM_DECOMMIT);
-            this->allocator->ReportFree((leadingGuardPageCount + trailingGuardPageCount)*AutoSystemInfo::PageSize);
+                GetAllocator()->GetVirtualAllocator()->Free(address + ((leadingGuardPageCount + this->segmentPageCount)*AutoSystemInfo::PageSize), trailingGuardPageCount*AutoSystemInfo::PageSize, MEM_DECOMMIT);
+            }
+            this->allocator->ReportFree((leadingGuardPageCount + trailingGuardPageCount) * AutoSystemInfo::PageSize);
+
             this->address = this->address + (leadingGuardPageCount*AutoSystemInfo::PageSize);
         }
 
-        if (!allocator->CreateSecondaryAllocator(this, &this->secondaryAllocator))
+        if (!allocator->CreateSecondaryAllocator(this, committed, &this->secondaryAllocator))
         {
             GetAllocator()->GetVirtualAllocator()->Free(originalAddress, GetPageCount() * AutoSystemInfo::PageSize, MEM_RELEASE);
             this->allocator->ReportFree(totalPages * AutoSystemInfo::PageSize);
@@ -140,9 +145,10 @@ SegmentBase<T>::Initialize(DWORD allocFlags, bool excludeGuardPages)
     if (this->address == nullptr)
     {
         this->allocator->ReportFailure(totalPages * AutoSystemInfo::PageSize);
+        return false;
     }
 
-    return (this->address != nullptr);
+    return true;
 }
 
 //=============================================================================================================
@@ -150,22 +156,17 @@ SegmentBase<T>::Initialize(DWORD allocFlags, bool excludeGuardPages)
 //=============================================================================================================
 
 template<typename T>
-PageSegmentBase<T>::PageSegmentBase(PageAllocatorBase<T> * allocator, bool external) :
+PageSegmentBase<T>::PageSegmentBase(PageAllocatorBase<T> * allocator, bool committed, bool allocated) :
     SegmentBase(allocator, allocator->maxAllocPageCount), decommitPageCount(0)
 {
     Assert(this->segmentPageCount == allocator->maxAllocPageCount + allocator->secondaryAllocPageCount);
 
-    if (external)
-    {
-        this->freePageCount = 0;
-        this->ClearAllInFreePagesBitVector();
-    }
-    else
-    {
+    uint maxPageCount = GetMaxPageCount();
 
+    if (committed)
+    {    
+        Assert(!allocated);
         this->freePageCount = this->GetAvailablePageCount();
-        uint maxPageCount = GetMaxPageCount();
-
         this->SetRangeInFreePagesBitVector(0, this->freePageCount);
         if (this->freePageCount != maxPageCount)
         {
@@ -174,6 +175,21 @@ PageSegmentBase<T>::PageSegmentBase(PageAllocatorBase<T> * allocator, bool exter
 
         Assert(this->GetCountOfFreePages() == this->freePageCount);
     }
+    else
+    {
+        this->freePageCount = 0;
+        this->ClearAllInFreePagesBitVector();
+        if (!allocated)
+        {
+            this->decommitPageCount = this->GetAvailablePageCount();
+            this->SetRangeInDecommitPagesBitVector(0, this->decommitPageCount);
+
+            if (this->decommitPageCount != maxPageCount)
+            {
+                this->ClearRangeInDecommitPagesBitVector(this->decommitPageCount, (maxPageCount - this->decommitPageCount));
+            }
+        }
+    }
 }
 
 #ifdef PAGEALLOCATOR_PROTECT_FREEPAGE
@@ -699,16 +715,16 @@ PageAllocatorBase<T>::AllocPagesForBytes(size_t requestBytes)
 
 template<typename T>
 PageSegmentBase<T> *
-PageAllocatorBase<T>::AllocPageSegment(DListBase<PageSegmentBase<T>>& segmentList, PageAllocatorBase<T> * pageAllocator, bool external)
+PageAllocatorBase<T>::AllocPageSegment(DListBase<PageSegmentBase<T>>& segmentList, PageAllocatorBase<T> * pageAllocator, bool committed, bool allocated)
 {
-    PageSegmentBase<T> * segment = segmentList.PrependNode(&NoThrowNoMemProtectHeapAllocator::Instance, pageAllocator, external);
+    PageSegmentBase<T> * segment = segmentList.PrependNode(&NoThrowNoMemProtectHeapAllocator::Instance, pageAllocator, committed, allocated);
 
     if (segment == nullptr)
     {
         return nullptr;
     }
 
-    if (!segment->Initialize((external ? 0 : MEM_COMMIT) | pageAllocator->allocFlags, pageAllocator->excludeGuardPages))
+    if (!segment->Initialize((committed ? MEM_COMMIT : 0) | pageAllocator->allocFlags, pageAllocator->excludeGuardPages))
     {
         segmentList.RemoveHead(&NoThrowNoMemProtectHeapAllocator::Instance);
         return nullptr;
@@ -722,44 +738,7 @@ PageAllocatorBase<T>::AddPageSegment(DListBase<PageSegmentBase<T>>& segmentList)
 {
     Assert(!HasMultiThreadAccess());
 
-    PageSegmentBase<T> * segment = AllocPageSegment(segmentList, this, false);
-
-    if (segment != nullptr)
-    {
-        LogAllocSegment(segment);
-        this->AddFreePageCount(maxAllocPageCount);
-    }
-    return segment;
-}
-
-template<>
-char *
-HeapPageAllocator<PreReservedVirtualAllocWrapper>::InitPageSegment()
-{
-    Assert(virtualAllocator);
-    PageSegmentBase<PreReservedVirtualAllocWrapper> * firstPreReservedSegment = AddPageSegment(emptySegments);
-    if (firstPreReservedSegment == nullptr)
-    {
-        return nullptr;
-    }
-    return firstPreReservedSegment->GetAddress();
-}
-
-template<>
-char *
-HeapPageAllocator<VirtualAllocWrapper>::InitPageSegment()
-{
-    Assert(false);
-    return nullptr;
-}
-
-template<typename T>
-PageSegmentBase<T> *
-HeapPageAllocator<T>::AddPageSegment(DListBase<PageSegmentBase<T>>& segmentList)
-{
-    Assert(!HasMultiThreadAccess());
-
-    PageSegmentBase<T> * segment = AllocPageSegment(segmentList, this, false);
+    PageSegmentBase<T> * segment = AllocPageSegment(segmentList, this, true, false);
 
     if (segment != nullptr)
     {
@@ -1010,14 +989,7 @@ char *
 PageAllocatorBase<PreReservedVirtualAllocWrapper>::Alloc(size_t * pageCount, SegmentBase<PreReservedVirtualAllocWrapper> ** segment)
 {
     Assert(virtualAllocator);
-    if (virtualAllocator->IsPreReservedRegionPresent())
-    {
-        return AllocInternal<false>(pageCount, segment);
-    }
-    else
-    {
-        return nullptr;
-    }
+    return AllocInternal<false>(pageCount, segment);
 }
 
 template<typename T>
@@ -1103,14 +1075,7 @@ char *
 PageAllocatorBase<PreReservedVirtualAllocWrapper>::AllocPages(uint pageCount, PageSegmentBase<PreReservedVirtualAllocWrapper> ** pageSegment)
 {
     Assert(virtualAllocator);
-    if (virtualAllocator->IsPreReservedRegionPresent())
-    {
-        return AllocPagesInternal<true /* noPageAligned */>(pageCount, pageSegment);
-    }
-    else
-    {
-        return nullptr;
-    }
+    return AllocPagesInternal<true /* noPageAligned */>(pageCount, pageSegment);
 }
 
 template<typename T>
@@ -1216,6 +1181,31 @@ PageAllocatorBase<T>::SnailAllocPages(uint pageCount, PageSegmentBase<T> ** page
     }
 
     Assert(pages == nullptr);
+    Assert(maxAllocPageCount >= pageCount);
+    if (maxAllocPageCount != pageCount && (maxFreePageCount < maxAllocPageCount - pageCount + freePageCount))
+    {
+        // If we exceed the number of max free page count, allocate from a new fully decommit block
+        PageSegmentBase<T> * decommitSegment = AllocPageSegment(this->decommitSegments, this, false, false);
+        if (decommitSegment == nullptr)
+        {
+            return nullptr;
+        }
+
+        pages = decommitSegment->DoAllocDecommitPages<notPageAligned>(pageCount, pageHeapFlags);
+        if (pages != nullptr)
+        {
+#if DBG_DUMP
+            this->decommitPageCount = this->decommitPageCount + decommitSegment->GetDecommitPageCount();
+#endif
+            this->FillAllocPages(pages, pageCount);
+
+            LogRecommitPages(pageCount);
+            LogAllocPages(pageCount);
+
+            *pageSegment = decommitSegment;
+        }
+        return pages;
+    }
 
     // At this point, we haven't been able to allocate either from the
     // decommitted pages, or from the empty segment list, so we'll
@@ -2129,7 +2119,7 @@ PageAllocatorBase<T>::Check()
 #endif
 
 template<typename T>
-HeapPageAllocator<T>::HeapPageAllocator(AllocationPolicyManager * policyManager, bool allocXdata, bool excludeGuardPages) :
+HeapPageAllocator<T>::HeapPageAllocator(AllocationPolicyManager * policyManager, bool allocXdata, bool excludeGuardPages, T * virtualAllocator) :
     PageAllocatorBase(policyManager,
         Js::Configuration::Global.flags,
         PageAllocatorType_CustomHeap,
@@ -2142,6 +2132,7 @@ HeapPageAllocator<T>::HeapPageAllocator(AllocationPolicyManager * policyManager,
         excludeGuardPages),
     allocXdata(allocXdata)
 {
+    this->virtualAllocator = virtualAllocator;
 }
 
 template<typename T>
@@ -2273,7 +2264,6 @@ HeapPageAllocator<T>::TrackDecommittedPages(void * address, uint pageCount, __in
 
     // Update the state of the segment with the decommitted pages
     segment->DecommitPages<true>(address, pageCount);
-
     // Move the segment to its appropriate list
     TransferSegment(segment, fromSegmentList);
 }
@@ -2421,9 +2411,10 @@ PageAllocatorBase<T>::IsAddressInSegment(__in void* address, const SegmentBase<T
 #if PDATA_ENABLED
 #include "Memory/XDataAllocator.h"
 template<typename T>
-bool HeapPageAllocator<T>::CreateSecondaryAllocator(SegmentBase<T>* segment, SecondaryAllocator** allocator)
+bool HeapPageAllocator<T>::CreateSecondaryAllocator(SegmentBase<T>* segment, bool committed, SecondaryAllocator** allocator)
 {
     Assert(segment->GetAllocator() == this);
+    Assert(segment->IsInCustomHeapAllocator());
 
     // If we are not allocating xdata there is nothing to do
 
@@ -2435,6 +2426,13 @@ bool HeapPageAllocator<T>::CreateSecondaryAllocator(SegmentBase<T>* segment, Sec
         return true;
     }
 
+    if (!committed && !this->GetVirtualAllocator()->Alloc(segment->GetSecondaryAllocStartAddress(), segment->GetSecondaryAllocSize(),
+        MEM_COMMIT, PAGE_READWRITE, true))
+    {
+        *allocator = nullptr;
+        return false;
+    }
+
     XDataAllocator* secondaryAllocator = HeapNewNoThrow(XDataAllocator, (BYTE*)segment->GetSecondaryAllocStartAddress(), segment->GetSecondaryAllocSize());
     bool success = false;
     if(secondaryAllocator)

+ 10 - 44
lib/Common/Memory/PageAllocator.h

@@ -188,7 +188,7 @@ template<typename TVirtualAlloc>
 class PageSegmentBase : public SegmentBase<TVirtualAlloc>
 {
 public:
-    PageSegmentBase(PageAllocatorBase<TVirtualAlloc> * allocator, bool external);
+    PageSegmentBase(PageAllocatorBase<TVirtualAlloc> * allocator, bool committed, bool allocated);
     // Maximum possible size of a PageSegment; may be smaller.
     static const uint MaxDataPageCount = 256;     // 1 MB
     static const uint MaxGuardPageCount = 16;
@@ -368,33 +368,6 @@ public:
 
     static size_t GetAndResetMaxUsedBytes();
 
-    virtual BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag)
-    {
-        Assert(false);
-        return false;
-    }
-
-    virtual bool AllocSecondary(void* segment, ULONG_PTR functionStart, DWORD functionSize, ushort pdataCount, ushort xdataSize, SecondaryAllocation* allocation)
-    {
-        Assert(false);
-        return false;
-    }
-
-    virtual void ReleaseSecondary(const SecondaryAllocation& allocation, void* segment)
-    {
-        Assert(false);
-    }
-
-    virtual void DecommitPages(__in char* address, int pageCount)
-    {
-        Assert(false);
-    }
-
-    virtual void TrackDecommittedPages(void * address, uint pageCount, __in void* segment)
-    {
-        Assert(false);
-    }
-
     struct BackgroundPageQueue
     {
         BackgroundPageQueue();
@@ -437,12 +410,7 @@ public:
 
     //VirtualAllocator APIs
     TVirtualAlloc * GetVirtualAllocator() { return virtualAllocator; }
-    void SetVirtualAllocator(TVirtualAlloc * virtualAllocator)
-    {
-        Assert(virtualAllocator != nullptr);
-        PVOID oldVirtualAllocator = InterlockedCompareExchangePointer((PVOID*) &(this->virtualAllocator), virtualAllocator, NULL);
-        AssertMsg(oldVirtualAllocator == nullptr || oldVirtualAllocator == (PVOID)virtualAllocator, "Trying to set a new value for VirtualAllocWrapper ? - INVALID");
-    }
+
     bool IsPreReservedPageAllocator() { return virtualAllocator != nullptr; }
 
 
@@ -548,7 +516,8 @@ protected:
     virtual void DumpStats() const;
 #endif
     virtual PageSegmentBase<TVirtualAlloc> * AddPageSegment(DListBase<PageSegmentBase<TVirtualAlloc>>& segmentList);
-    static PageSegmentBase<TVirtualAlloc> * AllocPageSegment(DListBase<PageSegmentBase<TVirtualAlloc>>& segmentList, PageAllocatorBase<TVirtualAlloc> * pageAllocator, bool external);
+    static PageSegmentBase<TVirtualAlloc> * AllocPageSegment(DListBase<PageSegmentBase<TVirtualAlloc>>& segmentList, 
+        PageAllocatorBase<TVirtualAlloc> * pageAllocator, bool committed, bool allocated);
 
     // Zero Pages
     void AddPageToZeroQueue(__in void * address, uint pageCount, __in PageSegmentBase<TVirtualAlloc> * pageSegment);
@@ -625,7 +594,7 @@ protected:
     friend class IdleDecommit;
 
 protected:
-    virtual bool CreateSecondaryAllocator(SegmentBase<TVirtualAlloc>* segment, SecondaryAllocator** allocator)
+    virtual bool CreateSecondaryAllocator(SegmentBase<TVirtualAlloc>* segment, bool committed, SecondaryAllocator** allocator)
     {
         *allocator = nullptr;
         return true;
@@ -746,7 +715,7 @@ template<typename TVirtualAlloc>
 class HeapPageAllocator : public PageAllocatorBase<TVirtualAlloc>
 {
 public:
-    HeapPageAllocator(AllocationPolicyManager * policyManager, bool allocXdata, bool excludeGuardPages);
+    HeapPageAllocator(AllocationPolicyManager * policyManager, bool allocXdata, bool excludeGuardPages, TVirtualAlloc * virtualAllocator);
 
     BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag);
     bool AllocSecondary(void* segment, ULONG_PTR functionStart, DWORD functionSize, ushort pdataCount, ushort xdataSize, SecondaryAllocation* allocation);
@@ -755,18 +724,15 @@ public:
     void DecommitPages(__in char* address, size_t pageCount = 1);
 
     // Release pages that has already been decommitted
-    void    ReleaseDecommitted(void * address, size_t pageCount, __in void * segment);
-    bool    IsAddressFromAllocator(__in void* address);
-    char *  InitPageSegment();
-
-    PageSegmentBase<TVirtualAlloc> * AddPageSegment(DListBase<PageSegmentBase<TVirtualAlloc>>& segmentList);
-
+    void ReleaseDecommitted(void * address, size_t pageCount, __in void * segment);
+    bool IsAddressFromAllocator(__in void* address);    
 
+    bool AllocXdata() { return allocXdata; }
 private:
     bool         allocXdata;
     void         ReleaseDecommittedSegment(__in SegmentBase<TVirtualAlloc>* segment);
 #if PDATA_ENABLED
-    virtual bool CreateSecondaryAllocator(SegmentBase<TVirtualAlloc>* segment, SecondaryAllocator** allocator) override;
+    virtual bool CreateSecondaryAllocator(SegmentBase<TVirtualAlloc>* segment, bool committed, SecondaryAllocator** allocator) override;
 #endif
 
 };

+ 122 - 86
lib/Common/Memory/VirtualAllocWrapper.cpp

@@ -64,61 +64,39 @@ BOOL VirtualAllocWrapper::Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType
     return VirtualFree(lpAddress, bytes, dwFreeType);
 }
 
-bool
-VirtualAllocWrapper::IsPreReservedRegionPresent()
-{
-    return false;
-}
-
-LPVOID
-VirtualAllocWrapper::GetPreReservedStartAddress()
-{
-    Assert(false);
-    return nullptr;
-}
-
-LPVOID
-VirtualAllocWrapper::GetPreReservedEndAddress()
-{
-    Assert(false);
-    return nullptr;
-}
-
-bool
-VirtualAllocWrapper::IsInRange(void * address)
-{
-    Assert(this == nullptr);
-    return false;
-}
-
-
 /*
 * class PreReservedVirtualAllocWrapper
 */
+#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+uint PreReservedVirtualAllocWrapper::numPreReservedSegment = 0;
+#endif
 
 PreReservedVirtualAllocWrapper::PreReservedVirtualAllocWrapper() :
-preReservedStartAddress(nullptr),
-cs(4000)
+    preReservedStartAddress(nullptr),
+    cs(4000)
 {
     freeSegments.SetAll();
 }
 
-BOOL
+void
 PreReservedVirtualAllocWrapper::Shutdown()
 {
     Assert(this);
-    BOOL success = FALSE;
     if (IsPreReservedRegionPresent())
     {
-        success = VirtualFree(preReservedStartAddress, 0, MEM_RELEASE);
+        BOOL success = VirtualFree(preReservedStartAddress, 0, MEM_RELEASE);
         PreReservedHeapTrace(L"MEM_RELEASE the PreReservedSegment. Start Address: 0x%p, Size: 0x%x * 0x%x bytes", preReservedStartAddress, PreReservedAllocationSegmentCount,
             AutoSystemInfo::Data.GetAllocationGranularityPageSize());
         if (!success)
         {
             Assert(false);
         }
+
+#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+        Assert(numPreReservedSegment > 0);
+        InterlockedDecrement(&PreReservedVirtualAllocWrapper::numPreReservedSegment);
+#endif
     }
-    return success;
 }
 
 bool
@@ -131,7 +109,7 @@ PreReservedVirtualAllocWrapper::IsPreReservedRegionPresent()
 bool
 PreReservedVirtualAllocWrapper::IsInRange(void * address)
 {
-    if (this == nullptr)
+    if (this == nullptr || !this->IsPreReservedRegionPresent())
     {
         return false;
     }
@@ -145,7 +123,7 @@ PreReservedVirtualAllocWrapper::IsInRange(void * address)
     }
 #endif
 
-    return IsPreReservedRegionPresent() && address >= GetPreReservedStartAddress() && address < GetPreReservedEndAddress();
+    return address >= GetPreReservedStartAddress() && address < GetPreReservedEndAddress();
 }
 
 LPVOID
@@ -158,10 +136,81 @@ PreReservedVirtualAllocWrapper::GetPreReservedStartAddress()
 LPVOID
 PreReservedVirtualAllocWrapper::GetPreReservedEndAddress()
 {
-    Assert(this);
+    Assert(this && IsPreReservedRegionPresent());
     return (char*)preReservedStartAddress + (PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageCount() * AutoSystemInfo::PageSize);
 }
 
+LPVOID PreReservedVirtualAllocWrapper::EnsurePreReservedRegion()
+{
+    LPVOID startAddress = preReservedStartAddress;
+    if (startAddress != nullptr)
+    {
+        return startAddress;
+    }
+
+    {
+        AutoCriticalSection autocs(&this->cs);
+        return EnsurePreReservedRegionInternal();
+    }
+}
+
+LPVOID PreReservedVirtualAllocWrapper::EnsurePreReservedRegionInternal()
+{
+    LPVOID startAddress = preReservedStartAddress;
+    if (startAddress != nullptr)
+    {
+        return startAddress;
+    }
+
+    //PreReserve a (bigger) segment
+    size_t bytes = PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageSize();
+    if (PHASE_FORCE1(Js::PreReservedHeapAllocPhase))
+    {
+        //This code is used where CFG is not available, but still PreReserve optimization for CFG can be tested
+        startAddress = VirtualAlloc(NULL, bytes, MEM_RESERVE, PAGE_READWRITE);
+        PreReservedHeapTrace(L"Reserving PreReservedSegment For the first time(CFG Non-Enabled). Address: 0x%p\n", preReservedStartAddress);
+        preReservedStartAddress = startAddress;
+        return startAddress;
+    }
+
+#if defined(_CONTROL_FLOW_GUARD)
+#if !_M_X64_OR_ARM64
+#if _M_IX86
+    // We want to restrict the number of prereserved segments for a 32-bit process so that we don't use up the address space
+   
+    // Note: numPreReservedSegment is for the whole process, and access and update to it is not protected by a global lock.
+    // So we may allocate more than the maximum some of the time if multiple threads check it simultaneously and allocate past the limit.
+    // It doesn't affect functionality, and it should be OK if we exceed the limit.
+
+    if (PreReservedVirtualAllocWrapper::numPreReservedSegment > PreReservedVirtualAllocWrapper::MaxPreReserveSegment)
+    {
+        return nullptr;
+    }
+#else
+    // TODO: fast check for the prereserved segment is not implemented on ARM yet, so it is only enabled for x86
+    return nullptr;
+#endif // _M_IX86
+#endif
+
+    if (AutoSystemInfo::Data.IsCFGEnabled())
+    {
+        startAddress = VirtualAlloc(NULL, bytes, MEM_RESERVE, PAGE_READWRITE);
+        PreReservedHeapTrace(L"Reserving PreReservedSegment For the first time(CFG Enabled). Address: 0x%p\n", preReservedStartAddress);
+        preReservedStartAddress = startAddress;
+
+#if !_M_X64_OR_ARM64
+        if (startAddress)
+        {
+            InterlockedIncrement(&PreReservedVirtualAllocWrapper::numPreReservedSegment);
+        }
+#endif
+    }
+#endif
+    
+
+    return startAddress;
+}
+
 /*
 *   LPVOID PreReservedVirtualAllocWrapper::Alloc
 *   -   Reserves only one big memory region.
@@ -174,39 +223,18 @@ LPVOID PreReservedVirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DW
     Assert(this);
     AssertMsg(isCustomHeapAllocation, "PreReservation used for allocations other than CustomHeap?");
     AssertMsg(AutoSystemInfo::Data.IsCFGEnabled() || PHASE_FORCE1(Js::PreReservedHeapAllocPhase), "PreReservation without CFG ?");
-    Assert((allocationType & MEM_COMMIT) != 0);
     Assert(dwSize != 0);
 
     {
         AutoCriticalSection autocs(&this->cs);
-        if (preReservedStartAddress == NULL)
-        {
-            //PreReserve a (bigger) segment
-            size_t bytes = PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageSize();
-#if defined(_CONTROL_FLOW_GUARD)
-            if (AutoSystemInfo::Data.IsCFGEnabled())
-            {
-                preReservedStartAddress = VirtualAlloc(NULL, bytes, MEM_RESERVE, PAGE_READWRITE);
-                PreReservedHeapTrace(L"Reserving PreReservedSegment For the first time(CFG Enabled). Address: 0x%p\n", preReservedStartAddress);
-            }
-            else
-#endif
-            if (PHASE_FORCE1(Js::PreReservedHeapAllocPhase))
-            {
-                //This code is used where CFG is not available, but still PreReserve optimization for CFG can be tested
-                preReservedStartAddress = VirtualAlloc(NULL, bytes, MEM_RESERVE, protectFlags);
-                PreReservedHeapTrace(L"Reserving PreReservedSegment For the first time(CFG Non-Enabled). Address: 0x%p\n", preReservedStartAddress);
-            }
-        }
-
         //Return nullptr, if no space to Reserve
-        if (preReservedStartAddress == NULL)
+        if (EnsurePreReservedRegionInternal() == nullptr)
         {
             PreReservedHeapTrace(L"No space to pre-reserve memory with %d pages. Returning NULL\n", PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageCount());
             return nullptr;
         }
 
-        char * addressToCommit = nullptr;
+        char * addressToReserve = nullptr;
 
         uint freeSegmentsBVIndex = BVInvalidIndex;
         size_t requestedNumOfSegments = dwSize / (AutoSystemInfo::Data.GetAllocationGranularityPageSize());
@@ -230,11 +258,11 @@ LPVOID PreReservedVirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DW
             } while (!freeSegments.TestRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments)));
 
             uint offset = freeSegmentsBVIndex * AutoSystemInfo::Data.GetAllocationGranularityPageSize();
-            addressToCommit = (char*) preReservedStartAddress + offset;
+            addressToReserve = (char*) preReservedStartAddress + offset;
 
             //Check if the region is not already in MEM_COMMIT state.
             MEMORY_BASIC_INFORMATION memBasicInfo;
-            size_t bytes = VirtualQuery(addressToCommit, &memBasicInfo, sizeof(memBasicInfo));
+            size_t bytes = VirtualQuery(addressToReserve, &memBasicInfo, sizeof(memBasicInfo));
             if (bytes == 0
                 || memBasicInfo.RegionSize < requestedNumOfSegments * AutoSystemInfo::Data.GetAllocationGranularityPageSize()
                 || memBasicInfo.State == MEM_COMMIT
@@ -249,8 +277,8 @@ LPVOID PreReservedVirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DW
             //Check If the lpAddress is within the range of the preReserved Memory Region
             Assert(((char*) lpAddress) >= (char*) preReservedStartAddress || ((char*) lpAddress + dwSize) < GetPreReservedEndAddress());
 
-            addressToCommit = (char*) lpAddress;
-            freeSegmentsBVIndex = (uint) ((addressToCommit - (char*) preReservedStartAddress) / AutoSystemInfo::Data.GetAllocationGranularityPageSize());
+            addressToReserve = (char*) lpAddress;
+            freeSegmentsBVIndex = (uint) ((addressToReserve - (char*) preReservedStartAddress) / AutoSystemInfo::Data.GetAllocationGranularityPageSize());
 #if DBG
             uint numOfSegments = (uint)ceil((double)dwSize / (double)AutoSystemInfo::Data.GetAllocationGranularityPageSize());
             Assert(numOfSegments != 0);
@@ -262,49 +290,57 @@ LPVOID PreReservedVirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DW
         AssertMsg(freeSegmentsBVIndex < PreReservedAllocationSegmentCount, "Invalid BitVector index calculation?");
         AssertMsg(dwSize % AutoSystemInfo::PageSize == 0, "COMMIT is managed at AutoSystemInfo::PageSize granularity");
 
-        char * committedAddress = nullptr;
+        char * allocatedAddress = nullptr;
 
+        if ((allocationType & MEM_COMMIT) != 0)
+        {
 #if defined(ENABLE_JIT_CLAMP)
-        AutoEnableDynamicCodeGen enableCodeGen;
+            AutoEnableDynamicCodeGen enableCodeGen;
 #endif
 
 #if defined(_CONTROL_FLOW_GUARD)
-        if (AutoSystemInfo::Data.IsCFGEnabled())
-        {
-            DWORD oldProtect = 0;
-            DWORD allocProtectFlags = 0;
-
             if (AutoSystemInfo::Data.IsCFGEnabled())
             {
-                allocProtectFlags = PAGE_EXECUTE_RW_TARGETS_INVALID;
+                DWORD oldProtect = 0;
+                DWORD allocProtectFlags = 0;
+
+                if (AutoSystemInfo::Data.IsCFGEnabled())
+                {
+                    allocProtectFlags = PAGE_EXECUTE_RW_TARGETS_INVALID;
+                }
+                else
+                {
+                    allocProtectFlags = PAGE_EXECUTE_READWRITE;
+                }
+
+                allocatedAddress = (char *)VirtualAlloc(addressToReserve, dwSize, MEM_COMMIT, allocProtectFlags);
+
+                AssertMsg(allocatedAddress != nullptr, "If no space to allocate, then how did we fetch this address from the tracking bit vector?");
+                VirtualProtect(allocatedAddress, dwSize, protectFlags, &oldProtect);
+                AssertMsg(oldProtect == (PAGE_EXECUTE_READWRITE), "CFG Bitmap gets allocated and bits will be set to invalid only upon passing these flags.");
             }
             else
+#endif
             {
-                allocProtectFlags = PAGE_EXECUTE_READWRITE;
+                allocatedAddress = (char *)VirtualAlloc(addressToReserve, dwSize, MEM_COMMIT, protectFlags);
             }
-
-            committedAddress = (char *)VirtualAlloc(addressToCommit, dwSize, MEM_COMMIT, allocProtectFlags);
-
-            AssertMsg(committedAddress != nullptr, "If no space to allocate, then how did we fetch this address from the tracking bit vector?");
-            VirtualProtect(committedAddress, dwSize, protectFlags, &oldProtect);
-            AssertMsg(oldProtect == (PAGE_EXECUTE_READWRITE), "CFG Bitmap gets allocated and bits will be set to invalid only upon passing these flags.");
         }
         else
-#endif
         {
-            committedAddress = (char *) VirtualAlloc(addressToCommit, dwSize, MEM_COMMIT, protectFlags);
+            // Just return the uncommitted address if we didn't ask to commit it.
+            allocatedAddress = addressToReserve;
         }
 
-        //Keep track of the committed pages within the preReserved Memory Region
-        if (lpAddress == nullptr && committedAddress != nullptr)
+        // Keep track of the committed pages within the preReserved Memory Region
+        if (lpAddress == nullptr && allocatedAddress != nullptr)
         {
-            Assert(committedAddress == addressToCommit);
+            Assert(allocatedAddress == addressToReserve);
             Assert(requestedNumOfSegments != 0);
             freeSegments.ClearRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments));
         }
 
-        PreReservedHeapTrace(L"MEM_COMMIT: StartAddress: 0x%p of size: 0x%x * 0x%x bytes \n", committedAddress, requestedNumOfSegments, AutoSystemInfo::Data.GetAllocationGranularityPageSize());
-        return committedAddress;
+        PreReservedHeapTrace(L"MEM_COMMIT: StartAddress: 0x%p of size: 0x%x * 0x%x bytes \n", allocatedAddress, requestedNumOfSegments, AutoSystemInfo::Data.GetAllocationGranularityPageSize());
+        return allocatedAddress;
     }
 }
 

+ 26 - 9
lib/Common/Memory/VirtualAllocWrapper.h

@@ -18,10 +18,6 @@ class VirtualAllocWrapper
 public:
     LPVOID  Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation = false);
     BOOL    Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType);
-    bool        IsPreReservedRegionPresent();
-    bool        IsInRange(void * address);
-    LPVOID      GetPreReservedStartAddress();
-    LPVOID      GetPreReservedEndAddress();
 };
 
 /*
@@ -39,20 +35,41 @@ public:
 #else // _M_X64_OR_ARM64
     static const uint PreReservedAllocationSegmentCount = 4096; //(4096 * 64K) == 256MB, if 64k is the AllocationGranularity
 #endif
-
+#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+    static const unsigned MaxPreReserveSegment = 6;
+#endif
 public:
     PreReservedVirtualAllocWrapper();
-    BOOL Shutdown();
+    void        Shutdown();
     LPVOID      Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation = false);
-    BOOL        Free(LPVOID lpAddress,  size_t dwSize, DWORD dwFreeType);
-    bool        IsPreReservedRegionPresent();
+    BOOL        Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType);
+
     bool        IsInRange(void * address);
-    LPVOID      GetPreReservedStartAddress();
+    LPVOID      EnsurePreReservedRegion();
+
     LPVOID      GetPreReservedEndAddress();
+
+#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+    static int  NumPreReservedSegment() { return numPreReservedSegment; }
+#endif
+
+#if DBG_DUMP || defined(ENABLE_IR_VIEWER)
+    bool        IsPreReservedEndAddress(LPVOID address)
+    {
+        return IsPreReservedRegionPresent() && address == GetPreReservedEndAddress();
+    }
+#endif
 private:
+    LPVOID      EnsurePreReservedRegionInternal();
+    bool        IsPreReservedRegionPresent();
+    LPVOID      GetPreReservedStartAddress();
     BVStatic<PreReservedAllocationSegmentCount>     freeSegments;
     LPVOID                                          preReservedStartAddress;
     CriticalSection                                 cs;
+
+#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+    static uint  numPreReservedSegment;
+#endif
 };
 
 #if defined(ENABLE_JIT_CLAMP)

+ 0 - 4
lib/Runtime/Base/Constants.h

@@ -111,10 +111,6 @@ namespace Js
         static const unsigned MaxProcessJITCodeHeapSize = 1024 * 1024 * 1024;
 #endif
 
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
-        static const unsigned MaxThreadContextsWithPreReserveSegment = 6;
-#endif
-
         static const PBYTE StackLimitForScriptInterrupt;
 
 

+ 1 - 1
lib/Runtime/Base/FunctionBody.cpp

@@ -2867,7 +2867,7 @@ namespace Js
     BOOL FunctionBody::IsNativeOriginalEntryPoint() const
     {
 #if ENABLE_NATIVE_CODEGEN
-        return IsNativeFunctionAddr(this->GetScriptContext(), this->originalEntryPoint);
+        return this->GetScriptContext()->IsNativeAddress(this->originalEntryPoint);
 #else
         return false;
 #endif

+ 7 - 23
lib/Runtime/Base/ScriptContext.cpp

@@ -1184,13 +1184,13 @@ namespace Js
         }
 
 #if DYNAMIC_INTERPRETER_THUNK
-        interpreterThunkEmitter = HeapNew(InterpreterThunkEmitter, this->GetThreadContext()->GetAllocationPolicyManager(),
-            SourceCodeAllocator(), Js::InterpreterStackFrame::InterpreterThunk);
+        interpreterThunkEmitter = HeapNew(InterpreterThunkEmitter, SourceCodeAllocator(), this->GetThreadContext()->GetThunkPageAllocators(), 
+            Js::InterpreterStackFrame::InterpreterThunk);
 #endif
 
 #ifdef ASMJS_PLAT
-        asmJsInterpreterThunkEmitter = HeapNew(InterpreterThunkEmitter, this->GetThreadContext()->GetAllocationPolicyManager(),
-            SourceCodeAllocator(), Js::InterpreterStackFrame::InterpreterAsmThunk);
+        asmJsInterpreterThunkEmitter = HeapNew(InterpreterThunkEmitter, SourceCodeAllocator(), this->GetThreadContext()->GetThunkPageAllocators(),
+            Js::InterpreterStackFrame::InterpreterAsmThunk);
 #endif
 
         JS_ETW(EtwTrace::LogScriptContextLoadEvent(this));
@@ -4291,7 +4291,7 @@ void ScriptContext::RegisterPrototypeChainEnsuredToHaveOnlyWritableDataPropertie
     }
 
     void
-        ScriptContext::SetLastUtcTimeFromStr(JavascriptString * str, double value)
+    ScriptContext::SetLastUtcTimeFromStr(JavascriptString * str, double value)
     {
             lastUtcTimeFromStr = value;
             cache->lastUtcTimeFromStrString = str;
@@ -4300,23 +4300,7 @@ void ScriptContext::RegisterPrototypeChainEnsuredToHaveOnlyWritableDataPropertie
 #if ENABLE_NATIVE_CODEGEN
     BOOL ScriptContext::IsNativeAddress(void * codeAddr)
     {
-        PreReservedVirtualAllocWrapper *preReservedVirtualAllocWrapper = this->threadContext->GetPreReservedVirtualAllocator();
-        if (preReservedVirtualAllocWrapper->IsPreReservedRegionPresent())
-        {
-            if (preReservedVirtualAllocWrapper->IsInRange(codeAddr))
-            {
-                Assert(!this->IsDynamicInterpreterThunk(codeAddr));
-                return true;
-            }
-            else if (this->threadContext->IsAllJITCodeInPreReservedRegion())
-            {
-                return false;
-            }
-        }
-
-        // Try locally first and then all script context on the thread
-        //Slow path
-        return IsNativeFunctionAddr(this, codeAddr) || this->threadContext->IsNativeAddress(codeAddr);
+        return this->GetThreadContext()->IsNativeAddress(codeAddr);
     }
 #endif
 
@@ -4534,7 +4518,7 @@ void ScriptContext::RegisterPrototypeChainEnsuredToHaveOnlyWritableDataPropertie
 
     BOOL ScriptContext::IsDynamicInterpreterThunk(void* address)
     {
-        return this->interpreterThunkEmitter->IsInRange(address);
+        return this->interpreterThunkEmitter->IsInHeap(address);
     }
 
     void ScriptContext::ReleaseDynamicInterpreterThunk(BYTE* address, bool addtoFreeList)

+ 2 - 1
lib/Runtime/Base/ScriptContext.h

@@ -715,8 +715,9 @@ private:
 
         // DisableJIT-TODO: Switch this to Dynamic thunk ifdef instead
 #if ENABLE_NATIVE_CODEGEN
+#if DYNAMIC_INTERPRETER_THUNK
         InterpreterThunkEmitter* interpreterThunkEmitter;
-
+#endif
         BackgroundParser *backgroundParser;
 #ifdef ASMJS_PLAT
         InterpreterThunkEmitter* asmJsInterpreterThunkEmitter;

+ 26 - 42
lib/Runtime/Base/ThreadContext.cpp

@@ -70,10 +70,6 @@ ThreadContext * ThreadContext::globalListFirst = nullptr;
 ThreadContext * ThreadContext::globalListLast = nullptr;
 uint ThreadContext::activeScriptSiteCount = 0;
 
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
-uint ThreadContext::numOfThreadContextsWithPreReserveSegment = 0;
-#endif
-
 const Js::PropertyRecord * const ThreadContext::builtInPropertyRecords[] =
 {
     Js::BuiltInPropertyRecords::EMPTY,
@@ -95,6 +91,12 @@ ThreadContext::RecyclableData::RecyclableData(Recycler *const recycler) :
 {
 }
 
+#if PDATA_ENABLED
+#define ALLOC_XDATA (true)
+#else
+#define ALLOC_XDATA (false)
+#endif
+
 ThreadContext::ThreadContext(AllocationPolicyManager * allocationPolicyManager, JsUtil::ThreadService::ThreadServiceCallback threadServiceCallback, bool enableExperimentalFeatures) :
     currentThreadId(::GetCurrentThreadId()),
     stackLimitForCurrentThread(0),
@@ -164,6 +166,10 @@ ThreadContext::ThreadContext(AllocationPolicyManager * allocationPolicyManager,
     entryPointToBuiltInOperationIdCache(&threadAlloc, 0),
 #if ENABLE_NATIVE_CODEGEN
     codeGenNumberThreadAllocator(nullptr),
+#if DYNAMIC_INTERPRETER_THUNK || defined(ASMJS_PLAT)
+    thunkPageAllocators(allocationPolicyManager, /* allocXData */ false, /* virtualAllocator */ nullptr),
+#endif
+    codePageAllocators(allocationPolicyManager, ALLOC_XDATA, GetPreReservedVirtualAllocator()),
 #endif
     dynamicObjectEnumeratorCacheMap(&HeapAllocator::Instance, 16),
     //threadContextFlags(ThreadContextFlagNoFlag),
@@ -312,24 +318,12 @@ void ThreadContext::GlobalInitialize()
     }
 }
 
-void ThreadContext::IncrementThreadContextsWithPreReservedSegment()
-{
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
-    InterlockedIncrement(&ThreadContext::numOfThreadContextsWithPreReserveSegment);
-#endif
-}
-
+#if ENABLE_NATIVE_CODEGEN
 void ThreadContext::ReleasePreReservedSegment()
 {
-    BOOL success = preReservedVirtualAllocator.Shutdown();
-    if (success)
-    {
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
-        Assert(numOfThreadContextsWithPreReserveSegment > 0);
-        InterlockedDecrement(&numOfThreadContextsWithPreReserveSegment);
-#endif
-    }
+    preReservedVirtualAllocator.Shutdown();
 }
+#endif
 
 ThreadContext::~ThreadContext()
 {
@@ -512,7 +506,9 @@ ThreadContext::~ThreadContext()
 #endif
 #endif
 
+#if ENABLE_NATIVE_CODEGEN
     ReleasePreReservedSegment();
+#endif
 }
 
 void
@@ -1470,19 +1466,6 @@ ThreadContext::SetStackLimitForCurrentThread(PBYTE limit)
     this->stackLimitForCurrentThread = limit;
 }
 
-bool
-ThreadContext::CanPreReserveSegmentForCustomHeap()
-{
-#if _M_IX86 && _CONTROL_FLOW_GUARD
-    return numOfThreadContextsWithPreReserveSegment <= Js::Constants::MaxThreadContextsWithPreReserveSegment;
-#elif _M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
-    return true;
-#else
-    return false;
-#endif
-}
-
-
 __declspec(noinline) //Win8 947081: might use wrong _AddressOfReturnAddress() if this and caller are inlined
 bool
 ThreadContext::IsStackAvailable(size_t size)
@@ -3477,18 +3460,19 @@ void DumpRecyclerObjectGraph()
 #endif
 
 #if ENABLE_NATIVE_CODEGEN
-BOOL ThreadContext::IsNativeAddress(void *pCodeAddr)
+BOOL ThreadContext::IsNativeAddress(void * pCodeAddr)
 {
-    for (Js::ScriptContext *scriptContext = this->scriptContextList;
-        scriptContext;
-        scriptContext = scriptContext->next)
+    PreReservedVirtualAllocWrapper *preReservedVirtualAllocWrapper = this->GetPreReservedVirtualAllocator();
+    if (preReservedVirtualAllocWrapper->IsInRange(pCodeAddr))
     {
-        if (IsNativeFunctionAddr(scriptContext, pCodeAddr))
-        {
-            return TRUE;
-        }
-    };
-
+        return TRUE;
+    }
+    
+    if (!this->IsAllJITCodeInPreReservedRegion())
+    {
+        CustomHeap::CodePageAllocators::AutoLock autoLock(&this->codePageAllocators);
+        return this->codePageAllocators.IsInNonPreReservedPageAllocator(pCodeAddr);
+    }
     return FALSE;
 }
 #endif

+ 15 - 11
lib/Runtime/Base/ThreadContext.h

@@ -372,8 +372,9 @@ public:
         WorkerThread(HANDLE handle = nullptr) :threadHandle(handle){};
     };
 
+#if ENABLE_NATIVE_CODEGEN
     void ReleasePreReservedSegment();
-    void IncrementThreadContextsWithPreReservedSegment();
+#endif
 
     void SetCurrentThreadId(DWORD threadId) { this->currentThreadId = threadId; }
     DWORD GetCurrentThreadId() const { return this->currentThreadId; }
@@ -416,8 +417,6 @@ public:
     }
 #endif
 
-    bool CanPreReserveSegmentForCustomHeap();
-
 #if ENABLE_NATIVE_CODEGEN
     // used by inliner. Maps Simd FuncInfo (library func) to equivalent opcode.
     typedef JsUtil::BaseDictionary<Js::FunctionInfo *, Js::OpCode, ArenaAllocator> FuncInfoToOpcodeMap;
@@ -452,10 +451,6 @@ private:
     PBYTE GetStackLimitForCurrentThread() const;
     void SetStackLimitForCurrentThread(PBYTE limit);
 
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
-    static uint numOfThreadContextsWithPreReserveSegment;
-#endif
-
     // The current heap enumeration object being used during enumeration.
     IActiveScriptProfilerHeapEnum* heapEnum;
 
@@ -591,8 +586,6 @@ private:
     AllocationPolicyManager * allocationPolicyManager;
 
     JsUtil::ThreadService threadService;
-    PreReservedVirtualAllocWrapper preReservedVirtualAllocator;
-
     uint callRootLevel;
 
     // The thread page allocator is used by the recycler and need the background page queue
@@ -641,6 +634,11 @@ private:
     JsUtil::JobProcessor *jobProcessor;
     Js::Var * bailOutRegisterSaveSpace;
     CodeGenNumberThreadAllocator * codeGenNumberThreadAllocator;
+    PreReservedVirtualAllocWrapper preReservedVirtualAllocator;
+#if DYNAMIC_INTERPRETER_THUNK || defined(ASMJS_PLAT)
+    CustomHeap::CodePageAllocators thunkPageAllocators;
+#endif
+    CustomHeap::CodePageAllocators codePageAllocators;
 #endif
 
     RecyclerRootPtr<RecyclableData> recyclableData;
@@ -783,7 +781,14 @@ public:
     PageAllocator * GetPageAllocator() { return &pageAllocator; }
 
     AllocationPolicyManager * GetAllocationPolicyManager() { return allocationPolicyManager; }
+
+#if ENABLE_NATIVE_CODEGEN
     PreReservedVirtualAllocWrapper * GetPreReservedVirtualAllocator() { return &preReservedVirtualAllocator; }
+#if DYNAMIC_INTERPRETER_THUNK || defined(ASMJS_PLAT)
+    CustomHeap::CodePageAllocators * GetThunkPageAllocators() { return &thunkPageAllocators; }
+#endif
+    CustomHeap::CodePageAllocators * GetCodePageAllocators() { return &codePageAllocators; }
+#endif // ENABLE_NATIVE_CODEGEN
 
     void ResetIsAllJITCodeInPreReservedRegion() { isAllJITCodeInPreReservedRegion = false; }
     bool IsAllJITCodeInPreReservedRegion() { return isAllJITCodeInPreReservedRegion; }
@@ -1101,9 +1106,8 @@ public:
     void RegisterCodeGenRecyclableData(Js::CodeGenRecyclableData *const codeGenRecyclableData);
     void UnregisterCodeGenRecyclableData(Js::CodeGenRecyclableData *const codeGenRecyclableData);
 #if ENABLE_NATIVE_CODEGEN
-    BOOL IsNativeAddress(void *pCodeAddr);
+    BOOL IsNativeAddress(void * pCodeAddr);
     JsUtil::JobProcessor *GetJobProcessor();
-    static void CloseSharedJobProcessor(const bool waitForThread);
     Js::Var * GetBailOutRegisterSaveSpace() const { return bailOutRegisterSaveSpace; }
     CodeGenNumberThreadAllocator * GetCodeGenNumberThreadAllocator() const
     {

+ 1 - 1
lib/Runtime/Language/DynamicProfileInfo.cpp

@@ -336,7 +336,7 @@ namespace Js
         }
         else
         {
-            Assert(directEntryPoint == ProfileEntryThunk || IsNativeFunctionAddr(functionBody->GetScriptContext(), directEntryPoint));
+            Assert(directEntryPoint == ProfileEntryThunk || functionBody->GetScriptContext()->IsNativeAddress(directEntryPoint));
             Assert(functionBody->HasExecutionDynamicProfileInfo());
         }