Kaynağa Gözat

Change CustomHeap to update on demand whether a page should be in the full list or not.

Now that the page allocator for the code pages is shared amongst multiple CustomHeaps, the state of the secondary allocator may be changed by other CustomHeaps, making it impossible to maintain the invariant that the full list contains only blocks that are really full (no more allocatable space in the page and/or no more secondary space for the segment). So, instead of walking all the blocks and moving pages between the full list and the bucket list of the current CustomHeap whenever the secondary allocator state changes, do it on demand when we allocate.

In PreReservedVirtualAllocWrapper::IsInRange, only check whether the address is committed when it is in the pre-reserved region. We don't care otherwise.
Curtis Man 10 yıl önce
ebeveyn
işleme
4d3e524636

+ 9 - 12
lib/Backend/EmitBuffer.cpp

@@ -68,13 +68,6 @@ EmitBufferManager<SyncObject>::FreeAllocations(bool release)
     EmitBufferAllocation * allocation = this->allocations;
     while (allocation != nullptr)
     {
-        BOOL isFreed;
-        // In case of ThunkEmitter the script context would be null and we don't want to track that as code size.
-        if (!release && (scriptContext != nullptr) && allocation->recorded)
-        {
-            this->scriptContext->GetThreadContext()->SubCodeSize(allocation->bytesCommitted);
-            allocation->recorded = false;
-        }
 #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
         if(CONFIG_FLAG(CheckEmitBufferPermissions))
         {
@@ -83,21 +76,25 @@ EmitBufferManager<SyncObject>::FreeAllocations(bool release)
 #endif
         if (release)
         {
-            isFreed = this->allocationHeap.Free(allocation->allocation);
+            this->allocationHeap.Free(allocation->allocation);
         }
-        else
+        else if ((scriptContext != nullptr) && allocation->recorded)
         {
-            isFreed = this->allocationHeap.Decommit(allocation->allocation);
+            // In case of ThunkEmitter the script context would be null and we don't want to track that as code size.
+            this->scriptContext->GetThreadContext()->SubCodeSize(allocation->bytesCommitted);
+            allocation->recorded = false;
         }
 
-        Assert(isFreed);
         allocation = allocation->nextAllocation;
     }
     if (release)
     {
         this->allocations = nullptr;
     }
-
+    else
+    {
+        this->allocationHeap.DecommitAll();
+    }
 }
 
 template <typename SyncObject>

+ 146 - 196
lib/Common/Memory/CustomHeap.cpp

@@ -22,7 +22,8 @@ namespace CustomHeap
 
 Heap::Heap(ArenaAllocator * alloc, CodePageAllocators * codePageAllocators):
     auxiliaryAllocator(alloc),
-    codePageAllocators(codePageAllocators)
+    codePageAllocators(codePageAllocators),
+    lastSecondaryAllocStateChangedCount(0)
 #if DBG_DUMP
     , freeObjectSize(0)
     , totalAllocationSize(0)
@@ -61,13 +62,13 @@ void Heap::FreeAll()
     FreeDecommittedLargeObjects();
 }
 
-bool Heap::Free(__in Allocation* object)
+void Heap::Free(__in Allocation* object)
 {
     Assert(object != nullptr);
 
     if (object == nullptr)
     {
-        return false;
+        return;
     }
 
     BucketId bucket = (BucketId) GetBucketForSize(object->size);
@@ -80,13 +81,13 @@ bool Heap::Free(__in Allocation* object)
             FreeXdata(&object->xdata, object->largeObjectAllocation.segment);
         }
 #endif
-        if (object->largeObjectAllocation.isDecommitted)
+        if (!object->largeObjectAllocation.isDecommitted)
         {
-            return true;
+            FreeLargeObject(object);
         }
-
-        return FreeLargeObject<false>(object);
+        return;
     }
+
 #if PDATA_ENABLED
     if(!object->xdata.IsFreed())
     {
@@ -94,77 +95,49 @@ bool Heap::Free(__in Allocation* object)
     }
 #endif
 
-    if (object->page->isDecommitted)
+    if (!object->page->isDecommitted)
     {
-        return true;
+        FreeAllocation(object);
     }
-
-    return FreeAllocation(object);
 }
 
-bool Heap::Decommit(__in Allocation* object)
+void Heap::DecommitAll()
 {
     // This function doesn't really touch the page allocator data structure.
     // DecommitPages is merely a wrapper for VirtualFree
     // So no need to take the critical section to synchronize
-    Assert(object != nullptr);
 
-    if (object == nullptr)
-    {
-        return false;
-    }
-
-    Assert(object->isAllocationUsed);
-
-    BucketId bucket = (BucketId) GetBucketForSize(object->size);
-
-    if (bucket == BucketId::LargeObjectList)
-    {
-        Assert(!object->largeObjectAllocation.isDecommitted);
+    DListBase<Allocation>::EditingIterator i(&this->largeObjectAllocations);
+    while (i.Next())
+    { 
+        Allocation& allocation = i.Data();
+        Assert(!allocation.largeObjectAllocation.isDecommitted);
 
-        if (!object->largeObjectAllocation.isDecommitted)
-        {
-#if PDATA_ENABLED
-            if(!object->xdata.IsFreed())
-            {
-                FreeXdata(&object->xdata, object->largeObjectAllocation.segment);
-            }
-#endif
-            this->codePageAllocators->DecommitPages(object->address, object->GetPageCount(), object->largeObjectAllocation.segment);
-            this->largeObjectAllocations.MoveElementTo(object, &this->decommittedLargeObjects);
-            object->largeObjectAllocation.isDecommitted = true;
-            return true;
-        }
+        this->codePageAllocators->DecommitPages(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
+        i.MoveCurrentTo(&this->decommittedLargeObjects);
+        allocation.largeObjectAllocation.isDecommitted = true;
     }
 
-    // Skip asserting here- multiple objects could be on the same page
-    // Review: should we really decommit here or decommit only when all objects
-    // on the page have been decommitted?
-
-    if (!object->page->isDecommitted)
+    for (int bucket = 0; bucket < BucketId::NumBuckets; bucket++)
     {
-#if PDATA_ENABLED
-        if(!object->xdata.IsFreed())
+        FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(this->fullPages[bucket]), bucketIter1)
         {
-            FreeXdata(&object->xdata, object->page->segment);
+            Assert(page.inFullList);
+            this->codePageAllocators->DecommitPages(page.address, 1 /* pageCount */, page.segment);
+            bucketIter1.MoveCurrentTo(&(this->decommittedPages));
+            page.isDecommitted = true;
         }
-#endif
-        bucket = object->page->currentBucket;
-
-        this->codePageAllocators->DecommitPages(object->page->address, 1, object->page->segment);
+        NEXT_DLISTBASE_ENTRY_EDITING;
 
-        if (this->ShouldBeInFullList(object->page))
-        {
-            this->fullPages[bucket].MoveElementTo(object->page, &this->decommittedPages);
-        }
-        else
+        FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(this->buckets[bucket]), bucketIter2)
         {
-            this->buckets[bucket].MoveElementTo(object->page, &this->decommittedPages);
+            Assert(!page.inFullList);
+            this->codePageAllocators->DecommitPages(page.address, 1 /* pageCount */, page.segment);
+            bucketIter2.MoveCurrentTo(&(this->decommittedPages));
+            page.isDecommitted = true;
         }
-        object->page->isDecommitted = true;
+        NEXT_DLISTBASE_ENTRY_EDITING;
     }
-
-    return true;
 }
 
 bool Heap::IsInHeap(DListBase<Page> const& bucket, __in void * address)
@@ -213,6 +186,17 @@ bool Heap::IsInHeap(__in void* address)
     return IsInHeap(buckets, address) || IsInHeap(fullPages, address) || IsInHeap(largeObjectAllocations, address);
 }
 
+Page * Heap::GetExistingPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment)
+{
+    if (!this->buckets[bucket].Empty())
+    {
+        Assert(!this->buckets[bucket].Head().inFullList);
+        return &this->buckets[bucket].Head();
+    }
+
+    return FindPageToSplit(bucket, canAllocInPreReservedHeapPageSegment);
+}
+
 /*
  * Algorithm:
  *   - Find bucket
@@ -229,15 +213,17 @@ Allocation* Heap::Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool
     // Round up to power of two to allocate, and figure out which bucket to allocate in
     size_t bytesToAllocate = PowerOf2Policy::GetSize(bytes);
     BucketId bucket = (BucketId) GetBucketForSize(bytesToAllocate);
-    Allocation* allocation;
 
     if (bucket == BucketId::LargeObjectList)
     {
-        allocation = AllocLargeObject(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
+        Allocation * allocation = AllocLargeObject(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
 #if defined(DBG)
-        MEMORY_BASIC_INFORMATION memBasicInfo;
-        size_t resultBytes = VirtualQuery(allocation->address, &memBasicInfo, sizeof(memBasicInfo));
-        Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
+        if (allocation)
+        {
+            MEMORY_BASIC_INFORMATION memBasicInfo;
+            size_t resultBytes = VirtualQuery(allocation->address, &memBasicInfo, sizeof(memBasicInfo));
+            Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
+        }
 #endif
         return allocation;
     }
@@ -245,35 +231,37 @@ Allocation* Heap::Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool
     VerboseHeapTrace(L"Bucket is %d\n", bucket);
     VerboseHeapTrace(L"Requested: %d bytes. Allocated: %d bytes\n", bytes, bytesToAllocate);
 
-    Page* page = nullptr;
-    if(!this->buckets[bucket].Empty())
+    do
     {
-        page = &this->buckets[bucket].Head();
-    }
-    else
-    {
-        page = FindPageToSplit(bucket, canAllocInPreReservedHeapPageSegment);
-    }
+        Page* page = GetExistingPage(bucket, canAllocInPreReservedHeapPageSegment);
+        if (page == nullptr && UpdateFullPages())
+        {
+            page = GetExistingPage(bucket, canAllocInPreReservedHeapPageSegment);
+        }
 
-    if(page == nullptr)
-    {
-        page = AllocNewPage(bucket, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
-    }
+        if (page == nullptr)
+        {
+            page = AllocNewPage(bucket, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
+        }
 
-    // Out of memory
-    if (page == nullptr)
-    {
-        return nullptr;
-    }
+        // Out of memory
+        if (page == nullptr)
+        {
+            return nullptr;
+        }
 
 #if defined(DBG)
-    MEMORY_BASIC_INFORMATION memBasicInfo;
-    size_t resultBytes = VirtualQuery(page->address, &memBasicInfo, sizeof(memBasicInfo));
-    Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
+        MEMORY_BASIC_INFORMATION memBasicInfo;
+        size_t resultBytes = VirtualQuery(page->address, &memBasicInfo, sizeof(memBasicInfo));
+        Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
 #endif
 
-    allocation = AllocInPage(page, bytesToAllocate, pdataCount, xdataSize);
-    return allocation;
+        Allocation* allocation = nullptr;
+        if (AllocInPage(page, bytesToAllocate, pdataCount, xdataSize, &allocation))
+        {
+            return allocation;
+        }
+    } while (true);
 }
 
 BOOL Heap::ProtectAllocationWithExecuteReadWrite(Allocation *allocation, char* addressInPage)
@@ -442,20 +430,6 @@ Allocation* Heap::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdata
 
 #if PDATA_ENABLED
     allocation->xdata = xdata;
-
-    if (((Segment*)segment)->GetSecondaryAllocator() != nullptr && !((Segment*)segment)->CanAllocSecondary())
-    {
-        TransferPages(
-            [&](Page* currentPage) -> bool
-        {
-            bool transfer = currentPage->segment == segment;
-            if(transfer)
-            {
-                VerboseHeapTrace(L"Moving page from bucket %d to full list because no XDATA allocations can be made\n", currentPage->currentBucket);
-            }
-            return transfer;
-        } , this->buckets, this->fullPages);
-    }
 #endif
     return allocation;
 }
@@ -500,39 +474,47 @@ DWORD Heap::EnsureAllocationExecuteWriteable(Allocation* allocation)
     }   
 }
 
-template <bool freeAll>
-bool Heap::FreeLargeObject(Allocation* address)
+void Heap::FreeLargeObjects()
 {
     CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
     FOREACH_DLISTBASE_ENTRY_EDITING(Allocation, allocation, &this->largeObjectAllocations, largeObjectIter)
     {
-        if (address == (&allocation) || freeAll)
-        {
-            EnsureAllocationWriteable(&allocation);
+        EnsureAllocationWriteable(&allocation);
 #if PDATA_ENABLED
-            Assert(allocation.xdata.IsFreed());
+        Assert(allocation.xdata.IsFreed());
 #endif
-            this->codePageAllocators->Release(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
+        this->codePageAllocators->Release(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
 
-            largeObjectIter.RemoveCurrent(this->auxiliaryAllocator);
-            if (!freeAll) return true;
-        }
+        largeObjectIter.RemoveCurrent(this->auxiliaryAllocator);
     }
     NEXT_DLISTBASE_ENTRY_EDITING;
+}
 
-    // If we're not freeing everything, and we hit this point, that means that
-    // something that wasn't in the large object list was asked to be free.
-    // So, assert that we're freeing everything if we get to this point.
-    Assert(freeAll);
-    return false;
+void Heap::FreeLargeObject(Allocation* allocation)
+{
+    CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
+
+    EnsureAllocationWriteable(allocation);
+#if PDATA_ENABLED
+    Assert(allocation->xdata.IsFreed());
+#endif
+    this->codePageAllocators->Release(allocation->address, allocation->GetPageCount(), allocation->largeObjectAllocation.segment);
+
+    this->largeObjectAllocations.RemoveElement(this->auxiliaryAllocator, allocation);
 }
 
 #pragma endregion
 
 #pragma region "Page methods"
 
-Allocation* Heap::AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushort xdataSize)
+bool Heap::AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushort xdataSize, Allocation ** allocationOut)
 {
+    Allocation * allocation = AnewNoThrowStruct(this->auxiliaryAllocator, Allocation);
+    if (allocation == nullptr)
+    {
+        return true;
+    }
+
     Assert(Math::IsPow2((int32)bytes));
 
     uint length = GetChunkSizeForBytes(bytes);
@@ -545,25 +527,26 @@ Allocation* Heap::AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushor
     if(pdataCount > 0)
     {
         CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
-        if(!this->codePageAllocators->AllocSecondary(page->segment, (ULONG_PTR)address, bytes, pdataCount, xdataSize, &xdata))
+        if (this->ShouldBeInFullList(page))
         {
-            return nullptr;
+            Adelete(this->auxiliaryAllocator, allocation);
+            // If we run out of XData space with the segment, move the page to the full page list, and return false to try the next page.
+            BucketId bucket = page->currentBucket;
+            VerboseHeapTrace(L"Moving page from bucket %d to full list\n", bucket);
+
+            Assert(!page->inFullList);
+            this->buckets[bucket].MoveElementTo(page, &this->fullPages[bucket]);
+            page->inFullList = true;
+            return false;
         }
-    }
-#endif
 
-    Allocation* allocation = AnewNoThrowStruct(this->auxiliaryAllocator, Allocation);
-    if (allocation == nullptr)
-    {
-#if PDATA_ENABLED
-        if(pdataCount > 0)
+        if (!this->codePageAllocators->AllocSecondary(page->segment, (ULONG_PTR)address, bytes, pdataCount, xdataSize, &xdata))
         {
-            CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
-            this->codePageAllocators->ReleaseSecondary(xdata, page->segment);
+            Adelete(this->auxiliaryAllocator, allocation);
+            return true;
         }
-#endif
-        return nullptr;
     }
+#endif
 
 #if DBG
     allocation->isAllocationUsed = false;
@@ -587,34 +570,22 @@ Allocation* Heap::AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushor
 #endif
     VerboseHeapTrace(L"\n");
 
-
     if (this->ShouldBeInFullList(page))
     {
         BucketId bucket = page->currentBucket;
         VerboseHeapTrace(L"Moving page from bucket %d to full list\n", bucket);
 
+        Assert(!page->inFullList);
         this->buckets[bucket].MoveElementTo(page, &this->fullPages[bucket]);
+        page->inFullList = true;
     }
 
 #if PDATA_ENABLED
     allocation->xdata = xdata;
-
-    if(((Segment*)page->segment)->GetSecondaryAllocator() != nullptr && !((Segment*)page->segment)->CanAllocSecondary())
-    {
-        TransferPages(
-            [&](Page* currentPage) -> bool
-        {
-            bool transfer = currentPage->segment == page->segment;
-            if(transfer)
-            {
-                VerboseHeapTrace(L"Moving page from bucket %d to full list because no XDATA allocations can be made\n", page->currentBucket);
-            }
-            return transfer;
-        } , this->buckets, this->fullPages);
-    }
 #endif
 
-    return allocation;
+    *allocationOut = allocation;
+    return true;
 }
 
 Page* Heap::AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
@@ -677,10 +648,13 @@ Page* Heap::AddPageToBucket(Page* page, BucketId bucket, bool wasFull)
     if (wasFull)
     {
         #pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
+        Assert(page->inFullList);
         this->fullPages[oldBucket].MoveElementTo(page, &this->buckets[bucket]);
+        page->inFullList = false;
     }
     else
     {
+        Assert(!page->inFullList);
         #pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
         this->buckets[oldBucket].MoveElementTo(page, &this->buckets[bucket]);
     }
@@ -709,6 +683,7 @@ Page* Heap::FindPageToSplit(BucketId targetBucket, bool findPreReservedHeapPages
         #pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
         FOREACH_DLISTBASE_ENTRY_EDITING(Page, pageInBucket, &this->buckets[b], bucketIter)
         {
+            Assert(!pageInBucket.inFullList);
             if (findPreReservedHeapPages && !this->codePageAllocators->IsPreReservedSegment(pageInBucket.segment))
             {
                 //Find only pages that are pre-reserved using preReservedHeapPageAllocator
@@ -732,22 +707,6 @@ Page* Heap::FindPageToSplit(BucketId targetBucket, bool findPreReservedHeapPages
     return nullptr;
 }
 
-void Heap::RemovePageFromFullList(Page* pageToRemove)
-{
-    FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &this->fullPages[pageToRemove->currentBucket], pageIter)
-    {
-        if (&page == pageToRemove)
-        {
-            pageIter.RemoveCurrent(this->auxiliaryAllocator);
-            return;
-        }
-    }
-    NEXT_DLISTBASE_ENTRY_EDITING;
-
-    // Page not found- why?
-    Assert(false);
-}
-
 BVIndex Heap::GetIndexInPage(__in Page* page, __in char* address)
 {
     Assert(page->address <= address && address < page->address + AutoSystemInfo::PageSize);
@@ -778,7 +737,7 @@ bool Heap::FreeAllocation(Allocation* object)
     }
 #endif
 
-    if (this->ShouldBeInFullList(page))
+    if (page->inFullList)
     {
         VerboseHeapTrace(L"Recycling page 0x%p because address 0x%p of size %d was freed\n", page->address, object->address, object->size);
 
@@ -797,7 +756,7 @@ bool Heap::FreeAllocation(Allocation* object)
 
             void* pageAddress = page->address;
 
-            RemovePageFromFullList(page);
+            this->fullPages[page->currentBucket].RemoveElement(this->auxiliaryAllocator, page);            
 
             // The page is not in any bucket- just update the stats, free the allocation
             // and dump the page- we don't need to update free object size since the object
@@ -852,28 +811,7 @@ bool Heap::FreeAllocation(Allocation* object)
 
     if (page->IsEmpty())
     {
-        // Find the page and remove it from the buckets- the page is going to be freed anyway
-        FOREACH_DLISTBASE_ENTRY_EDITING(Page, pageInBucket, &this->buckets[page->currentBucket], pageIter)
-        {
-            // Templatize this to remove branches/make code more compact?
-            if (&pageInBucket == page)
-            {
-                VerboseHeapTrace(L"Removing page in bucket %d\n", page->currentBucket);
-                {
-                    CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
-                    this->codePageAllocators->ReleasePages(page->address, 1, page->segment);
-                }
-                pageIter.RemoveCurrent(this->auxiliaryAllocator);
-
-#if DBG_DUMP
-                this->freeObjectSize -= pageSize;
-                this->totalAllocationSize -= pageSize;
-#endif
-                return true;
-            }
-        }
-        NEXT_DLISTBASE_ENTRY_EDITING;
-
+        this->buckets[page->currentBucket].RemoveElement(this->auxiliaryAllocator, page);
         return false;
     }
     else // after freeing part of the page, the page should be in PAGE_EXECUTE_READWRITE protection, and turning to PAGE_EXECUTE (always with TARGETS_NO_UPDATE state)
@@ -954,23 +892,35 @@ void Heap::FreeBuckets(bool freeOnlyEmptyPages)
 #endif
 }
 
-#if PDATA_ENABLED
-void Heap::FreeXdata(XDataAllocation* xdata, void* segment)
+bool Heap::UpdateFullPages()
 {
-    Assert(!xdata->IsFreed());
-
-    if(!((Segment*)segment)->CanAllocSecondary())
+    bool updated = false;
+    if (this->codePageAllocators->HasSecondaryAllocStateChanged(&lastSecondaryAllocStateChangedCount))
     {
-        this->TransferPages([&](Page* currentPage) -> bool
+        CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
+        for (int bucket = 0; bucket < BucketId::NumBuckets; bucket++)
         {
-            bool transfer = currentPage->segment == segment && !currentPage->HasNoSpace();
-            if(transfer)
+            FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(this->fullPages[bucket]), bucketIter)
             {
-                VerboseHeapTrace(L"Recycling page 0x%p because XDATA was freed\n", currentPage->address);
+                Assert(page.inFullList);
+                if (!this->ShouldBeInFullList(&page))
+                {
+                    VerboseHeapTrace(L"Recycling page 0x%p because XDATA was freed\n", page.address);
+                    bucketIter.MoveCurrentTo(&(this->buckets[bucket]));
+                    page.inFullList = false;
+                    updated = true;
+                }
             }
-            return transfer;
-        }, this->fullPages, this->buckets);
+            NEXT_DLISTBASE_ENTRY_EDITING;
+        }
     }
+    return updated;
+}
+
+#if PDATA_ENABLED
+void Heap::FreeXdata(XDataAllocation* xdata, void* segment)
+{
+    Assert(!xdata->IsFreed()); 
     {
         CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
         this->codePageAllocators->ReleaseSecondary(*xdata, segment);

+ 43 - 47
lib/Common/Memory/CustomHeap.h

@@ -35,14 +35,11 @@ enum BucketId
 
 BucketId GetBucketForSize(size_t bytes);
 
-struct PageAllocatorAllocation
+struct Page
 {
-    bool isDecommitted;
-};
-
-struct Page: public PageAllocatorAllocation
-{
-    void* segment;
+    bool         inFullList;
+    bool         isDecommitted;
+    void*        segment;
     BVUnit       freeBitVector;
     char*        address;
     BucketId     currentBucket;
@@ -62,14 +59,14 @@ struct Page: public PageAllocatorAllocation
         return freeBitVector.FirstStringOfOnes(targetBucket + 1) != BVInvalidIndex;
     }
 
-    Page(__in char* address, void* segment, BucketId bucket):
-      address(address),
-      segment(segment),
-      currentBucket(bucket),
-      freeBitVector(0xFFFFFFFF)
+    Page(__in char* address, void* segment, BucketId bucket) :
+        address(address),
+        segment(segment),
+        currentBucket(bucket),
+        freeBitVector(0xFFFFFFFF),
+        isDecommitted(false),
+        inFullList(false)
     {
-        // Initialize PageAllocatorAllocation fields
-        this->isDecommitted = false;
     }
 
     // Each bit in the bit vector corresponds to 128 bytes of memory
@@ -83,9 +80,10 @@ struct Allocation
     union
     {
         Page*  page;
-        struct: PageAllocatorAllocation
+        struct
         {
             void* segment;
+            bool isDecommitted;
         } largeObjectAllocation;
     };
 
@@ -145,7 +143,8 @@ public:
     CodePageAllocators(AllocationPolicyManager * policyManager, bool allocXdata, PreReservedVirtualAllocWrapper * virtualAllocator) :
         pageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, nullptr),
         preReservedHeapPageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, virtualAllocator),
-        cs(4000)
+        cs(4000),
+        secondaryAllocStateChangedCount(0)
     {
 #if DBG
         this->preReservedHeapPageAllocator.ClearConcurrentThreadId();
@@ -271,12 +270,22 @@ public:
         Assert(segment);
         if (IsPreReservedSegment(segment))
         {
-            this->GetPageAllocator<PreReservedVirtualAllocWrapper>(segment)->ReleaseSecondary(allocation, segment);
+            secondaryAllocStateChangedCount += (uint)this->GetPageAllocator<PreReservedVirtualAllocWrapper>(segment)->ReleaseSecondary(allocation, segment);
         }
         else
         {
-            this->GetPageAllocator<VirtualAllocWrapper>(segment)->ReleaseSecondary(allocation, segment);
+            secondaryAllocStateChangedCount += (uint)this->GetPageAllocator<VirtualAllocWrapper>(segment)->ReleaseSecondary(allocation, segment);
+        }
+    }
+
+    bool HasSecondaryAllocStateChanged(uint * lastSecondaryAllocStateChangedCount)
+    {
+        if (secondaryAllocStateChangedCount != *lastSecondaryAllocStateChangedCount)
+        {
+            *lastSecondaryAllocStateChangedCount = secondaryAllocStateChangedCount;
+            return true;
         }
+        return false;
     }
 
     void DecommitPages(__in char* address, size_t pageCount, void* segment)
@@ -371,6 +380,13 @@ private:
     HeapPageAllocator<VirtualAllocWrapper>               pageAllocator;
     HeapPageAllocator<PreReservedVirtualAllocWrapper>    preReservedHeapPageAllocator;
     CriticalSection cs;
+
+    // Track the number of times a segment's secondary allocator changes from full to available to allocate,
+    // so that each CustomHeap knows when to update its "full pages" list.
+    // It is ok to overflow this variable.  All we care about is whether the state has changed.
+    // In the unlikely scenario that we do overflow, we merely delay the full pages in CustomHeap from
+    // being made available.
+    uint secondaryAllocStateChangedCount;
 };
 
 /*
@@ -388,8 +404,8 @@ public:
     Heap(ArenaAllocator * alloc, CodePageAllocators * codePageAllocators);
 
     Allocation* Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
-    bool Free(__in Allocation* allocation);
-    bool Decommit(__in Allocation* allocation);
+    void Free(__in Allocation* allocation);
+    void DecommitAll();
     void FreeAll();
     bool IsInHeap(__in void* address);
    
@@ -444,14 +460,10 @@ private:
      * Large object methods
      */
     Allocation* AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
+    
+    void FreeLargeObject(Allocation* header);
 
-    template<bool freeAll>
-    bool FreeLargeObject(Allocation* header);
-
-    void FreeLargeObjects()
-    {
-        FreeLargeObject<true>(nullptr);
-    }
+    void FreeLargeObjects();
 
     //Called during Free
     DWORD EnsurePageWriteable(Page* page);
@@ -506,30 +518,13 @@ private:
      * Page methods
      */
     Page*       AddPageToBucket(Page* page, BucketId bucket, bool wasFull = false);
-    Allocation* AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushort xdataSize);
+    bool        AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushort xdataSize, Allocation ** allocation);
     Page*       AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
     Page*       FindPageToSplit(BucketId targetBucket, bool findPreReservedHeapPages = false);
-
-    template<class Fn>
-    void TransferPages(Fn predicate, DListBase<Page>* fromList, DListBase<Page>* toList)
-    {
-        Assert(fromList != toList);
-
-        for(int bucket = 0; bucket < BucketId::NumBuckets; bucket++)
-        {
-            FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(fromList[bucket]), bucketIter)
-            {
-                if(predicate(&page))
-                {
-                    bucketIter.MoveCurrentTo(&(toList[bucket]));
-                }
-            }
-            NEXT_DLISTBASE_ENTRY_EDITING;
-        }
-    }
+    bool        UpdateFullPages();
+    Page *      GetExistingPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment);
 
     BVIndex     GetIndexInPage(__in Page* page, __in char* address);
-    void        RemovePageFromFullList(Page* page);
 
 
     bool IsInHeap(DListBase<Page> const buckets[NumBuckets], __in void *address);
@@ -562,6 +557,7 @@ private:
     DListBase<Page>        decommittedPages;
     DListBase<Allocation>  decommittedLargeObjects;
 
+    uint lastSecondaryAllocStateChangedCount;
 #if DBG
     bool inDtor;
 #endif

+ 3 - 1
lib/Common/Memory/PageAllocator.cpp

@@ -2312,7 +2312,7 @@ bool HeapPageAllocator<T>::AllocSecondary(void* segmentParam, ULONG_PTR function
 }
 
 template<typename T>
-void HeapPageAllocator<T>::ReleaseSecondary(const SecondaryAllocation& allocation, void* segmentParam)
+bool HeapPageAllocator<T>::ReleaseSecondary(const SecondaryAllocation& allocation, void* segmentParam)
 {
     SegmentBase<T> * segment = (SegmentBase<T>*)segmentParam;
     Assert(allocation.address != nullptr);
@@ -2335,6 +2335,7 @@ void HeapPageAllocator<T>::ReleaseSecondary(const SecondaryAllocation& allocatio
             AssertMsg(fromList == &fullSegments, "Releasing a secondary allocator should make a state change only if the segment was originally in the full list");
             AssertMsg(pageSegment->CanAllocSecondary(), "It should be allocate secondary now");
             this->AddFreePageCount(pageSegment->GetFreePageCount());
+            return true;
         }
     }
     else
@@ -2342,6 +2343,7 @@ void HeapPageAllocator<T>::ReleaseSecondary(const SecondaryAllocation& allocatio
         Assert(segment->CanAllocSecondary());
         segment->GetSecondaryAllocator()->Release(allocation);
     }
+    return false;
 }
 
 template<typename T>

+ 1 - 1
lib/Common/Memory/PageAllocator.h

@@ -719,7 +719,7 @@ public:
 
     BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag);
     bool AllocSecondary(void* segment, ULONG_PTR functionStart, DWORD functionSize, ushort pdataCount, ushort xdataSize, SecondaryAllocation* allocation);
-    void ReleaseSecondary(const SecondaryAllocation& allocation, void* segment);
+    bool ReleaseSecondary(const SecondaryAllocation& allocation, void* segment);
     void TrackDecommittedPages(void * address, uint pageCount, __in void* segment);
     void DecommitPages(__in char* address, size_t pageCount = 1);
 

+ 13 - 8
lib/Common/Memory/VirtualAllocWrapper.cpp

@@ -113,17 +113,22 @@ PreReservedVirtualAllocWrapper::IsInRange(void * address)
     {
         return false;
     }
-#if DBG
-    //Check if the region is in MEM_COMMIT state.
-    MEMORY_BASIC_INFORMATION memBasicInfo;
-    size_t bytes = VirtualQuery(address, &memBasicInfo, sizeof(memBasicInfo));
-    if (bytes == 0 || memBasicInfo.State != MEM_COMMIT)
+
+    if (address >= GetPreReservedStartAddress() && address < GetPreReservedEndAddress())
     {
-        AssertMsg(false, "Memory not committed? Checking for uncommitted address region?");
-    }
+#if DBG
+        //Check if the region is in MEM_COMMIT state.
+        MEMORY_BASIC_INFORMATION memBasicInfo;
+        size_t bytes = VirtualQuery(address, &memBasicInfo, sizeof(memBasicInfo));
+        if (bytes == 0 || memBasicInfo.State != MEM_COMMIT)
+        {
+            AssertMsg(false, "Memory not committed? Checking for uncommitted address region?");
+        }
 #endif
+        return true;
+    }
 
-    return address >= GetPreReservedStartAddress() && address < GetPreReservedEndAddress();
+    return false;
 }
 
 LPVOID