2
0
View source

[MERGE #985] decommit unusable page(s) in medium heap blocks (bug 5822302)

Merge pull request #985 from leirocks:mediumdecommit1
In medium heap blocks, if the object size is bigger than 1 page, the last 1–3 page(s) of the block may never be allocated. When such a case is hit, decommit those pages to save memory as well as to help capture corruption. When returning the pages back to the page allocator, we should commit those pages again for reuse; in case of OOM here, just decommit all pages in the heap block and let the page allocator manage the decommitted pages.
In the rescan code, assert that we never scan those unallocatable pages.
Lei Shi 9 years ago
parent
commit
4bad4430e1

+ 72 - 5
lib/Common/Memory/HeapBlock.cpp

@@ -154,6 +154,63 @@ SmallHeapBlockT<TBlockAttributes>::GetPageCount() const
     return TBlockAttributes::PageCount;
 }
 
+// Small heap blocks: the object size never exceeds one page, so every page in
+// the block is usable — there is never an unallocatable tail to decommit.
+template <>
+uint
+SmallHeapBlockT<SmallAllocationBlockAttributes>::GetUnusablePageCount()
+{
+    return 0;
+}
+
+// Small heap blocks have no unusable tail pages (see GetUnusablePageCount),
+// so there is nothing to protect.
+template <>
+void
+SmallHeapBlockT<SmallAllocationBlockAttributes>::ProtectUnusablePages()
+{
+}
+
+// Small heap blocks have no unusable tail pages, so there is nothing to
+// restore before the pages are returned to the page allocator.
+template <>
+void
+SmallHeapBlockT<SmallAllocationBlockAttributes>::RestoreUnusablePages()
+{
+}
+
+// Medium heap blocks: when objectSize does not evenly divide the block's byte
+// capacity, the remainder at the end of the block can never hold an object.
+// Returns how many whole pages fall inside that unusable tail (the commit
+// message states this is 0 to 3 pages for medium blocks).
+template <>
+uint
+SmallHeapBlockT<MediumAllocationBlockAttributes>::GetUnusablePageCount()
+{
+    // bytes left over after packing whole objects, expressed in whole pages
+    return ((MediumAllocationBlockAttributes::PageCount * AutoSystemInfo::PageSize) % this->objectSize) / AutoSystemInfo::PageSize;
+}
+
+// Marks the unusable tail pages of a medium heap block PAGE_READONLY so any
+// stray write to them faults immediately (corruption capture), and clears the
+// write-watch state so the rescan code never sees these pages as dirty.
+// NOTE(review): the commit title says "decommit", but this code path protects
+// rather than decommits — presumably the decommit happens in the page
+// allocator once the pages are released; confirm against the full change.
+template <>
+void
+SmallHeapBlockT<MediumAllocationBlockAttributes>::ProtectUnusablePages()
+{
+    size_t count = this->GetUnusablePageCount();
+    if (count > 0)
+    {
+        // tail pages live at the end of the block's page run
+        char* startPage = this->address + (MediumAllocationBlockAttributes::PageCount - count) * AutoSystemInfo::PageSize;
+        DWORD oldProtect;
+        BOOL ret = ::VirtualProtect(startPage, count * AutoSystemInfo::PageSize, PAGE_READONLY, &oldProtect);
+        // pages must previously have been ordinary read-write heap pages
+        Assert(ret && oldProtect == PAGE_READWRITE);
+
+        // forget any writes recorded before protection, so rescan skips them
+        ::ResetWriteWatch(startPage, count*AutoSystemInfo::PageSize);
+    }
+}
+
+// Reverts the unusable tail pages to PAGE_READWRITE before the block's pages
+// are handed back to the page allocator, so they can be reused for future
+// allocations. Counterpart of ProtectUnusablePages.
+template <>
+void
+SmallHeapBlockT<MediumAllocationBlockAttributes>::RestoreUnusablePages()
+{
+    size_t count = this->GetUnusablePageCount();
+    if (count > 0)
+    {
+        char* startPage = (char*)this->address + (MediumAllocationBlockAttributes::PageCount - count) * AutoSystemInfo::PageSize;
+        DWORD oldProtect;
+        BOOL ret = ::VirtualProtect(startPage, count * AutoSystemInfo::PageSize, PAGE_READWRITE, &oldProtect);
+        // pages must still carry the PAGE_READONLY protection set earlier
+        Assert(ret && oldProtect == PAGE_READONLY);
+    }
+}
+
+
 template <class TBlockAttributes>
 void
 SmallHeapBlockT<TBlockAttributes>::ClearObjectInfoList()
@@ -295,7 +352,7 @@ SmallHeapBlockT<TBlockAttributes>::SetPage(__in_ecount_pagesize char * baseAddre
 
     // We use the block type directly here, without the getter so that we can tell on the heap block map,
     // whether the block is a medium block or not
-    if (!recycler->heapBlockMap.SetHeapBlock(this->address, this->GetPageCount(), this, this->heapBlockType, (byte)this->bucketIndex))
+    if (!recycler->heapBlockMap.SetHeapBlock(this->address, this->GetPageCount() - this->GetUnusablePageCount(), this, this->heapBlockType, (byte)this->bucketIndex))
     {
         return FALSE;
     }
@@ -313,6 +370,8 @@ SmallHeapBlockT<TBlockAttributes>::SetPage(__in_ecount_pagesize char * baseAddre
     MemoryBarrier();
 #endif
 
+    this->ProtectUnusablePages();
+
     return TRUE;
 }
 
@@ -334,23 +393,31 @@ SmallHeapBlockT<TBlockAttributes>::ReleasePages(Recycler * recycler)
     char* address = this->address;
 
 #ifdef RECYCLER_FREE_MEM_FILL
-    memset(address, DbgMemFill, AutoSystemInfo::PageSize * this->GetPageCount());
+    memset(address, DbgMemFill, AutoSystemInfo::PageSize * (this->GetPageCount()-this->GetUnusablePageCount()));
 #endif
 
+    if (this->GetUnusablePageCount() > 0)
+    {
+        this->RestoreUnusablePages();
+    }
+
     this->GetPageAllocator(recycler)->ReleasePages(address, this->GetPageCount(), this->GetPageSegment());
 
     this->segment = nullptr;
     this->address = nullptr;
-
 }
 
 template <class TBlockAttributes>
 void
 SmallHeapBlockT<TBlockAttributes>::BackgroundReleasePagesSweep(Recycler* recycler)
 {
-    recycler->heapBlockMap.ClearHeapBlock(address, this->GetPageCount());
+    recycler->heapBlockMap.ClearHeapBlock(address, this->GetPageCount() - this->GetUnusablePageCount());
     char* address = this->address;
 
+    if (this->GetUnusablePageCount() > 0)
+    {
+        this->RestoreUnusablePages();
+    }
     this->GetPageAllocator(recycler)->BackgroundReleasePages(address, this->GetPageCount(), this->GetPageSegment());
 
     this->address = nullptr;
@@ -381,7 +448,7 @@ template <class TBlockAttributes>
 void
 SmallHeapBlockT<TBlockAttributes>::RemoveFromHeapBlockMap(Recycler* recycler)
 {
-    recycler->heapBlockMap.ClearHeapBlock(address, this->GetPageCount());
+    recycler->heapBlockMap.ClearHeapBlock(address, this->GetPageCount() - this->GetUnusablePageCount());
 }
 
 template <class TBlockAttributes>

+ 3 - 0
lib/Common/Memory/HeapBlock.h

@@ -444,6 +444,9 @@ public:
 public:
     ~SmallHeapBlockT();
 
+    void ProtectUnusablePages();
+    void RestoreUnusablePages();
+    uint GetUnusablePageCount();
 
 #ifdef RECYCLER_WRITE_BARRIER
     bool IsWithBarrier() const;

+ 2 - 1
lib/Common/Memory/HeapBlockMap.cpp

@@ -1067,7 +1067,8 @@ HeapBlockMap32::RescanHeapBlockOnOOM(TBlockType* heapBlock, char* pageAddress, H
     // The following assert makes sure that this method is called only once per heap block
     Assert(blockStartAddress == pageAddress);
 
-    for (int i = 0; i < TBlockType::HeapBlockAttributes::PageCount; i++)
+    int inUsePageCount = heapBlock->GetPageCount() - heapBlock->GetUnusablePageCount();
+    for (int i = 0; i < inUsePageCount; i++)
     {
         char* pageAddressToScan = blockStartAddress + (i * AutoSystemInfo::PageSize);
 

+ 3 - 11
lib/Common/Memory/LargeHeapBlock.cpp

@@ -344,17 +344,9 @@ LargeHeapBlock::ReleasePages(Recycler * recycler)
             realPageCount = this->actualPageCount;
             size_t guardPageCount = this->actualPageCount - this->pageCount;
 
-            MEMORY_BASIC_INFORMATION memInfo;
-            VirtualQuery(this->address, &memInfo, sizeof(memInfo));
-            if (::VirtualAlloc(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, MEM_COMMIT, memInfo.Protect) == NULL)
-            {
-                // failed to commit the guard page again. decommit all pages
-                RecyclerPageAllocator* pageAllocator = (RecyclerPageAllocator*)this->GetPageAllocator(recycler);
-                pageAllocator->PartialDecommitPages(blockStartAddress, actualPageCount, this->address, this->pageCount, this->segment);
-                RECYCLER_PERF_COUNTER_SUB(LargeHeapBlockPageSize, this->pageCount * AutoSystemInfo::PageSize);
-                this->segment = nullptr;
-                return;
-            }
+            DWORD oldProtect;
+            BOOL ret = ::VirtualProtect(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, PAGE_READWRITE, &oldProtect);
+            Assert(ret && oldProtect == PAGE_NOACCESS);
         }
     }
 #endif

+ 5 - 11
lib/Common/Memory/LargeHeapBucket.cpp

@@ -158,8 +158,6 @@ LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size,
         AnalysisAssert(false);
     }
 
-
-
     LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, 1, nullptr);
     if (!heapBlock)
     {
@@ -172,7 +170,11 @@ LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size,
     heapBlock->heapInfo = this->heapInfo;
     heapBlock->actualPageCount = actualPageCount;
     heapBlock->guardPageAddress = guardPageAddress;
-
+    
+    DWORD oldProtect;
+    BOOL ret = ::VirtualProtect(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, PAGE_NOACCESS, &oldProtect);
+    Assert(ret && oldProtect == PAGE_READWRITE);
+    
     // fill pattern before set pageHeapMode, so background scan stack may verify the pattern
     size_t usedSpace = sizeof(LargeObjectHeader) + size;
     memset(address + usedSpace, 0xF0, pageCount * AutoSystemInfo::PageSize - usedSpace);
@@ -193,14 +195,6 @@ LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size,
     Assert(memBlock != nullptr);
 
 
-#pragma prefast(suppress:6250, "This method decommits memory")
-    if (::VirtualFree(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, MEM_DECOMMIT) == FALSE)
-    {
-        AssertMsg(false, "Unable to decommit guard page.");
-        ReportFatalException(NULL, E_FAIL, Fatal_Internal_Error, 2);
-        return nullptr;
-    }
-
     if (this->largePageHeapBlockList)
     {
         HeapBlockList::Tail(this->largePageHeapBlockList)->SetNextBlock(heapBlock);

+ 0 - 52
lib/Common/Memory/PageAllocator.cpp

@@ -461,26 +461,6 @@ PageSegmentBase<T>::DecommitPages(__in void * address, uint pageCount)
     Assert(decommitPageCount == (uint)this->GetCountOfDecommitPages());
 }
 
-template<typename T>
-void
-PageSegmentBase<T>::PartialDecommitPages(__in void * address, size_t totalPageCount, __in void* addressToDecommit, size_t pageCountToDecommit)
-{
-
-    Assert(address >= this->address && address < this->GetEndAddress());
-    Assert(addressToDecommit >= this->address && addressToDecommit < this->GetEndAddress());
-    Assert(totalPageCount <= allocator->maxAllocPageCount);
-    Assert(((uintptr_t)(((char *)address) - this->address)) <= (allocator->maxAllocPageCount - totalPageCount) * AutoSystemInfo::PageSize);
-
-    Assert(!IsFreeOrDecommitted(address, (uint)totalPageCount));
-    uint base = this->GetBitRangeBase(address);
-
-    this->SetRangeInDecommitPagesBitVector(base, (uint)totalPageCount);
-    this->decommitPageCount += (uint)totalPageCount;
-    GetAllocator()->GetVirtualAllocator()->Free(addressToDecommit, pageCountToDecommit * AutoSystemInfo::PageSize, MEM_DECOMMIT);
-
-    Assert(this->decommitPageCount == (uint)this->GetCountOfDecommitPages());
-}
-
 template<typename T>
 size_t
 PageSegmentBase<T>::DecommitFreePages(size_t pageToDecommit)
@@ -1393,38 +1373,6 @@ PageAllocatorBase<T>::AddFreePageCount(uint pageCount)
     this->freePageCount += pageCount;
 }
 
-template<typename T>
-void
-PageAllocatorBase<T>::PartialDecommitPages(__in void * address, size_t pageCountTotal, __in void* decommitAddress, size_t pageCountToDecommit, __in void * segmentParam)
-{
-    // TODO: use a specialized PageHeapPageAllocator to simplify the page allocating logic for pageheap
-    if (pageCountTotal > this->maxAllocPageCount)
-    {
-        SegmentBase<T> * segment = (SegmentBase<T>*)segmentParam;
-        Assert(pageCountTotal == segment->GetPageCount());
-        PageTracking::ReportFree((PageAllocator*)this, segment->GetAddress(), AutoSystemInfo::PageSize * segment->GetPageCount());
-        LogFreePages(segment->GetPageCount());
-        LogFreeSegment(segment);
-
-        // when deleting segement, it call VirtualFree with MEM_RELEASE, so it should be OK
-        // even we have partial decommited pages in the segment
-        largeSegments.RemoveElement(&NoThrowNoMemProtectHeapAllocator::Instance, segment);
-    }
-    else
-    {
-        PageSegmentBase<T> * pageSegment = (PageSegmentBase<T>*) segmentParam;
-        DListBase<PageSegmentBase<T>> * fromSegmentList = GetSegmentList(pageSegment);
-
-        pageSegment->PartialDecommitPages(address, pageCountTotal, decommitAddress, pageCountToDecommit);
-        LogFreePages(pageCountTotal);
-        LogDecommitPages(pageCountTotal);
-#if DBG_DUMP
-        this->decommitPageCount += pageCountTotal;
-#endif
-        TransferSegment(pageSegment, fromSegmentList);
-    }
-}
-
 template<typename T>
 void
 PageAllocatorBase<T>::ReleasePages(__in void * address, uint pageCount, __in void * segmentParam)

+ 0 - 2
lib/Common/Memory/PageAllocator.h

@@ -221,7 +221,6 @@ public:
     void ReleasePages(__in void * address, uint pageCount);
     template <bool onlyUpdateState>
     void DecommitPages(__in void * address, uint pageCount);
-    void PartialDecommitPages(__in void * address, size_t totalPageCount, __in void* addressToDecommit, size_t pageCountToDecommit);
 
     uint GetCountOfFreePages() const;
     uint GetNextBitInFreePagesBitVector(uint index) const;
@@ -428,7 +427,6 @@ public:
     char * AllocPages(uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
     char * AllocPagesPageAligned(uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
 
-    void PartialDecommitPages(__in void * address, size_t pageCountTotal, __in void* decommitAddress, size_t pageCountToDecommit,  __in void * pageSegment);
     void ReleasePages(__in void * address, uint pageCount, __in void * pageSegment);
     void BackgroundReleasePages(void * address, uint pageCount, PageSegmentBase<TVirtualAlloc> * pageSegment);
 

+ 8 - 2
lib/Common/Memory/SmallNormalHeapBucket.cpp

@@ -6,7 +6,7 @@
 
 
 template <typename TBlockType>
-SmallNormalHeapBucketBase<TBlockType>::SmallNormalHeapBucketBase() 
+SmallNormalHeapBucketBase<TBlockType>::SmallNormalHeapBucketBase()
 #if ENABLE_PARTIAL_GC
     : partialHeapBlockList(nullptr)
 #if ENABLE_CONCURRENT_GC
@@ -114,6 +114,12 @@ SmallNormalHeapBucketBase<TBlockType>::RescanObjectsOnPage(TBlockType * block, c
     const uint pageObjectCount = blockInfoForPage.pageObjectCount;
     const uint localObjectCount = (TBlockAttributes::PageCount * AutoSystemInfo::PageSize) / localObjectSize;
 
+    // With protected unallocatable ending pages and reset writewatch, we should never be scanning on these pages.
+    if (firstObjectOnPageIndex >= localObjectCount)
+    {
+        ReportFatalException(NULL, E_FAIL, Fatal_Recycler_MemoryCorruption, 3);
+    }
+
     // If all objects are marked, rescan whole block at once
     if (TBlockType::CanRescanFullBlock() && rescanMarkCount == pageObjectCount)
     {
@@ -588,7 +594,7 @@ template class SmallNormalHeapBucketBase<MediumFinalizableHeapBlock>;
 template class SmallNormalHeapBucketBase<SmallFinalizableWithBarrierHeapBlock>;
 template class SmallNormalHeapBucketBase<MediumFinalizableWithBarrierHeapBlock>;
 #endif
-   
+
 template void SmallNormalHeapBucketBase<SmallNormalHeapBlock>::Sweep(RecyclerSweep& recyclerSweep);
 template void SmallNormalHeapBucketBase<MediumNormalHeapBlock>::Sweep(RecyclerSweep& recyclerSweep);