Przeglądaj źródła

Enable the allocations feature based on a heap block count threshold per bucket. The feature will be enabled for all buckets or none of them.

Atul Katti 8 lat temu
rodzic
commit
94f100e9be

+ 9 - 3
lib/Common/Memory/CollectionState.h

@@ -39,9 +39,15 @@ enum CollectionState
     Collection_PostSweepRedeferralCallback = 0x00040000,
     Collection_WrapperCallback             = 0x00080000,
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+    // Please look at the documentation for PrepareForAllocationsDuringConcurrentSweep method for details on how the concurrent
+    // sweep progresses when allocations are allowed during sweep.
+    /* In Pass1 of the concurrent sweep we determine if a block is full or empty or needs to be swept. Leaf blocks will get
    swept immediately whereas non-leaf blocks may get put onto the pendingSweepList. */
     Collection_ConcurrentSweepPass1 = 0x00100000,
     Collection_ConcurrentSweepPass1Wait = 0x00200000,
+    /* In Pass2, all blocks will go through SweepPartialReusePages to determine if the page can be reused. Also, any blocks
+    that were put in the pendingSweepList will be swept during this stage. */
     Collection_ConcurrentSweepPass2 = 0x00400000,
     Collection_ConcurrentSweepPass2Wait = 0x00800000,
 #endif
@@ -71,9 +77,9 @@ enum CollectionState
     CollectionStateConcurrentSweep        = Collection_ConcurrentSweep | Collection_ExecutingConcurrent,      // concurrent sweep
 #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     CollectionStateConcurrentSweepPass1 = Collection_ConcurrentSweep | Collection_ConcurrentSweepPass1 | Collection_ExecutingConcurrent,          // concurrent sweep Pass 1
-    CollectionStateConcurrentSweepPass1Wait = Collection_ConcurrentSweep | Collection_ConcurrentSweepPass1Wait /*| Collection_ExecutingConcurrent*/,  // concurrent sweep wait state after Pass 1 has finished
+    CollectionStateConcurrentSweepPass1Wait = Collection_ConcurrentSweep | Collection_ConcurrentSweepPass1Wait,                                   // concurrent sweep wait state after Pass 1 has finished
     CollectionStateConcurrentSweepPass2 = Collection_ConcurrentSweep | Collection_ConcurrentSweepPass2 | Collection_ExecutingConcurrent,          // concurrent sweep Pass 2
-    CollectionStateConcurrentSweepPass2Wait = Collection_ConcurrentSweep | Collection_ConcurrentSweepPass2Wait | Collection_ExecutingConcurrent,  // concurrent sweep wait state after Pass 2 has finished
+    CollectionStateConcurrentSweepPass2Wait = Collection_ConcurrentSweep | Collection_ConcurrentSweepPass2Wait,                                   // concurrent sweep wait state after Pass 2 has finished
 #endif
     CollectionStateTransferSweptWait      = Collection_ConcurrentSweep | Collection_FinishConcurrent,         // transfer swept objects (after concurrent sweep)
 #endif

+ 34 - 43
lib/Common/Memory/HeapBlock.cpp

@@ -187,7 +187,7 @@ SmallHeapBlockT<TBlockAttributes>::ConstructorCommon(HeapBucket * bucket, ushort
     this->Init(objectSize, objectCount);
     Assert(heapBlockType < HeapBlock::HeapBlockType::SmallAllocBlockTypeCount + HeapBlock::HeapBlockType::MediumAllocBlockTypeCount);
     Assert(objectCount > 1 && objectCount == (this->GetPageCount() * AutoSystemInfo::PageSize) / objectSize);
-#ifdef RECYCLER_SLOW_CHECK_ENABLED
+#if defined(RECYCLER_SLOW_CHECK_ENABLED)
     heapBucket->heapInfo->heapBlockCount[heapBlockType]++;
 #endif
 
@@ -230,8 +230,11 @@ SmallHeapBlockT<TBlockAttributes>::~SmallHeapBlockT()
         (this->IsLeafBlock()) ||
         this->GetPageAllocator(heapBucket->heapInfo->recycler)->IsClosed());
 
-#ifdef RECYCLER_SLOW_CHECK_ENABLED
+#if defined(RECYCLER_SLOW_CHECK_ENABLED)
     heapBucket->heapInfo->heapBlockCount[this->GetHeapBlockType()]--;
+#endif
+
+#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     heapBucket->heapBlockCount--;
 #endif
 }
@@ -379,6 +382,7 @@ SmallHeapBlockT<TBlockAttributes>::Init(ushort objectSize, ushort objectCount)
 #if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
         this->hasFinishedSweepObjects = false;
         this->wasAllocatedFromDuringSweep = false;
+        this->lastObjectsAllocatedDuringConcurrentSweepCount = 0;
 #endif
     }
 #endif
@@ -581,7 +585,7 @@ SmallHeapBlockT<TBlockAttributes>::Reset()
 
     this->freeCount = 0;
     this->markCount = 0;
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
     {
 #if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
@@ -591,6 +595,7 @@ SmallHeapBlockT<TBlockAttributes>::Reset()
         this->isPendingConcurrentSweepPrep = false;
         DebugOnly(this->objectsMarkedDuringSweep = 0);
         this->objectsAllocatedDuringConcurrentSweepCount = 0;
+        DebugOnly(this->lastObjectsAllocatedDuringConcurrentSweepCount = 0);
     }
 #endif
 
@@ -793,11 +798,11 @@ template <class TBlockAttributes>
 Recycler *
 SmallHeapBlockT<TBlockAttributes>::GetRecycler() const
 {
-//#if DBG
+#if DBG
     return this->heapBucket->heapInfo->recycler;
-//#else
-//    return nullptr;
-//#endif
+#else
+    return nullptr;
+#endif
 }
 
 #if DBG
@@ -1216,6 +1221,7 @@ SmallHeapBlockT<TBlockAttributes>::GetAndClearUnaccountedAllocBytes()
 {
     Assert(this->lastFreeCount >= this->freeCount);
     const ushort currentFreeCount = this->freeCount;
+
     uint unaccountedAllocBytes = (this->lastFreeCount - currentFreeCount) * this->objectSize;
     this->lastFreeCount = currentFreeCount;
     return unaccountedAllocBytes;
@@ -1280,8 +1286,8 @@ SmallHeapBlockT<TBlockAttributes>::GetMarkCountForSweep()
     temp.Minus(this->GetInvalidBitVector());
 
     // Remove the mark bit for things that are still free
-    //TODO: akatti: Can this change be removed? We should already have set the mark bits for anything that was allcoated during the concurrent sweep.
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
+    //TODO: akatti: Can this change be removed? We should already have set the mark bits for anything that was allocated during the concurrent sweep.
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
     if (this->freeCount != 0 || this->objectsAllocatedDuringConcurrentSweepCount != 0)
 #else
     if (this->freeCount != 0)
@@ -1303,10 +1309,10 @@ SmallHeapBlockT<TBlockAttributes>::Sweep(RecyclerSweep& recyclerSweep, bool queu
     Assert(!this->isPendingConcurrentSweep);
 #endif
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if DBG && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     // In concurrent sweep pass1, we mark the object directly in the mark bit vector for objects allocated during the sweep to prevent them from getting swept during the ongoing sweep itself.
     // This will make the mark bit vector on the HeapBlockMap out-of-date w.r.t. these newly allocated objects.
-    if (this->objectsAllocatedDuringConcurrentSweepCount == 0)
+    if (!this->wasAllocatedFromDuringSweep)
 #endif
     {
         DebugOnly(VerifyMarkBitVector());
@@ -1317,7 +1323,7 @@ SmallHeapBlockT<TBlockAttributes>::Sweep(RecyclerSweep& recyclerSweep, bool queu
         // This block has been allocated from since the last GC.
         // We need to update its free bit vector so we can use it below.
         DebugOnly(ushort currentFreeCount = (ushort)this->GetFreeBitVector()->Count());
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
         Assert(freeCount - this->objectsAllocatedDuringConcurrentSweepCount == currentFreeCount);
 #else
         Assert(freeCount == currentFreeCount);
@@ -1348,17 +1354,6 @@ SmallHeapBlockT<TBlockAttributes>::Sweep(RecyclerSweep& recyclerSweep, bool queu
     Assert(expectFreeCount >= this->freeCount);
 
     uint expectSweepCount = expectFreeCount - this->freeCount;
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
-    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
-    {
-        //TODO:akatti: Revisit. Check if we still need this.
-        //if (this->objectsAllocatedDuringConcurrentSweepCount > 0)
-        //{
-        //    Assert(!this->IsAnyFinalizableBlock());
-        //    expectSweepCount -= this->objectsAllocatedDuringConcurrentSweepCount;
-        //}
-    }
-#endif
 
     Assert(!this->IsLeafBlock() || finalizeCount == 0);
 
@@ -1391,7 +1386,7 @@ SmallHeapBlockT<TBlockAttributes>::Sweep(RecyclerSweep& recyclerSweep, bool queu
     const bool isAllFreed = (finalizeCount == 0 && noRealObjectsMarked && !hasPendingDispose);
     if (isAllFreed)
     {
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
         if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || this->objectsAllocatedDuringConcurrentSweepCount == 0)
 #endif
         {
@@ -1418,11 +1413,9 @@ SmallHeapBlockT<TBlockAttributes>::Sweep(RecyclerSweep& recyclerSweep, bool queu
 
     if (expectSweepCount == 0)
     {
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
         if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
         {
-            //TODO:akatti:PERF Do we need to do this for non-debug builds? We might be able to skip
-            // this if this is only needed for passing debug asserts.
             // If we allocated from this block during concurrent sweep, we must recalculate the
             // mark and free bits for these blocks.
             if (this->objectsAllocatedDuringConcurrentSweepCount > 0)
@@ -1500,7 +1493,7 @@ SmallHeapBlockT<TBlockAttributes>::Sweep(RecyclerSweep& recyclerSweep, bool queu
     }
 
     // Already swept, no more work to be done. Put it back to the queue.
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBlock())
     {
         // We always need to check the free count as we may have allocated from this block during concurrent sweep.
@@ -1549,7 +1542,7 @@ SmallHeapBlockT<TBlockAttributes>::SweepObjects(Recycler * recycler, bool onlyRe
     Assert(mode == SweepMode_InThread);
 #endif
     Assert(this->IsFreeBitsValid());
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     AssertMsg(!hasFinishedSweepObjects, "Block in SweepObjects more than once during the ongoing sweep.");
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
     {
@@ -1563,10 +1556,10 @@ SmallHeapBlockT<TBlockAttributes>::SweepObjects(Recycler * recycler, bool onlyRe
 
     Assert(this->markCount == this->GetMarkCountForSweep());
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if DBG && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     // In concurrent sweep pass1, we mark the object directly in the mark bit vector for objects allocated during the sweep to prevent them from getting swept during the ongoing sweep itself.
     // This will make the mark bit vector on the HeapBlockMap out-of-date w.r.t. these newly allocated objects.
-    if (this->objectsAllocatedDuringConcurrentSweepCount == 0)
+    if (!this->wasAllocatedFromDuringSweep)
 #endif
     {
         DebugOnly(VerifyMarkBitVector());
@@ -1575,7 +1568,7 @@ SmallHeapBlockT<TBlockAttributes>::SweepObjects(Recycler * recycler, bool onlyRe
     SmallHeapBlockBitVector * marked = this->GetMarkedBitVector();
 
     DebugOnly(uint expectedSweepCount = objectCount - freeCount - markCount);
-#if DBG && ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if DBG && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
     {
         Assert(expectedSweepCount != 0 || this->isForceSweeping || this->objectsAllocatedDuringConcurrentSweepCount != 0);
@@ -1586,7 +1579,7 @@ SmallHeapBlockT<TBlockAttributes>::SweepObjects(Recycler * recycler, bool onlyRe
         Assert(expectedSweepCount != 0 || this->isForceSweeping);
     }
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
     {
         if (this->objectsAllocatedDuringConcurrentSweepCount > 0)
@@ -1611,7 +1604,7 @@ SmallHeapBlockT<TBlockAttributes>::SweepObjects(Recycler * recycler, bool onlyRe
 
         if (!marked->Test(bitIndex))
         {
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
             // Skip this if we are only clearing the bit set to prevent object from getting swept as it was allocated during the ongoing concurrent sweep.
             if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || !onlyRecalculateMarkCountAndFreeBits)
 #endif
@@ -1648,12 +1641,12 @@ SmallHeapBlockT<TBlockAttributes>::SweepObjects(Recycler * recycler, bool onlyRe
         objectAddress += localSize;
     }
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     // Skip this if we are only clearing the bit set to prevent object from getting swept as it was allocated during the ongoing concurrent sweep.
     if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || !onlyRecalculateMarkCountAndFreeBits)
 #endif
     {
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
        // If allocations happened during concurrent sweep then we will not have an accurate expected sweep count as the mark/free information
         // wasn't valid anymore.
         Assert(sweepCount == expectedSweepCount || this->objectsAllocatedDuringConcurrentSweepCount > 0);
@@ -1693,7 +1686,7 @@ SmallHeapBlockT<TBlockAttributes>::SweepObjects(Recycler * recycler, bool onlyRe
 
         // While allocations are allowed during concurrent sweep into still unswept blocks the
         // free bit vectors are not valid yet.
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
         if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->objectsAllocatedDuringConcurrentSweepCount == 0)
 #endif
         {
@@ -1701,7 +1694,7 @@ SmallHeapBlockT<TBlockAttributes>::SweepObjects(Recycler * recycler, bool onlyRe
         }
     }
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->objectsAllocatedDuringConcurrentSweepCount > 0)
     {
         Assert(!this->IsAnyFinalizableBlock());
@@ -1710,9 +1703,6 @@ SmallHeapBlockT<TBlockAttributes>::SweepObjects(Recycler * recycler, bool onlyRe
         this->markCount = (ushort)this->GetMarkCountForSweep();
         this->EnsureFreeBitVector(true /*isCollecting*/);
 #if ENABLE_PARTIAL_GC
-        // Accounting for partial heuristics
-        this->GetRecycler()->recyclerSweep->AddUnaccountedNewObjectAllocBytes(this);
-
         this->oldFreeCount = this->lastFreeCount = this->freeCount;
 #else
         this->lastFreeCount = this->freeCount;
@@ -1720,6 +1710,7 @@ SmallHeapBlockT<TBlockAttributes>::SweepObjects(Recycler * recycler, bool onlyRe
 
         // Reset the count of objects allocated during this concurrent sweep; so we will start afresh the next time around.
         Assert(this->objectsAllocatedDuringConcurrentSweepCount == this->objectsMarkedDuringSweep);
+        DebugOnly(this->lastObjectsAllocatedDuringConcurrentSweepCount = this->objectsAllocatedDuringConcurrentSweepCount);
         this->objectsAllocatedDuringConcurrentSweepCount = 0;
         DebugOnly(this->objectsMarkedDuringSweep = 0);
     }
@@ -1730,7 +1721,7 @@ SmallHeapBlockT<TBlockAttributes>::SweepObjects(Recycler * recycler, bool onlyRe
         Assert(this->markCount == this->GetMarkCountForSweep());
     }
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     DebugOnly(this->hasFinishedSweepObjects = true);
 #endif
 
@@ -1849,7 +1840,7 @@ SmallHeapBlockT<TBlockAttributes>::Check(bool expectFull, bool expectPending)
     Assert(expectPending == HasAnyDisposeObjects());
 
     // As the blocks are added to the SLIST and used from there during concurrent sweep, the expectFull assertion doesn't hold anymore.
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
 #endif
     {

+ 21 - 40
lib/Common/Memory/HeapBlock.h

@@ -107,7 +107,7 @@ enum ObjectInfoBits : unsigned short
     EnumClass_1_Bit             = 0x01,    // This can be extended to add more enumerable classes (if we still have bits left)
 
     // Mask for above bits
-    StoredObjectInfoBitMask = 0xFF,
+    StoredObjectInfoBitMask     = 0xFF,
 
     // Bits that implied by the block type, and thus don't need to be stored (for small blocks)
     // Note, LeafBit is used in finalizable blocks, thus is not always implied by the block type
@@ -115,7 +115,7 @@ enum ObjectInfoBits : unsigned short
     // We can move it the upper byte.
 
 #ifdef RECYCLER_WRITE_BARRIER
-    WithBarrierBit = 0x0100,
+    WithBarrierBit              = 0x0100,
 #endif
 
 #ifdef RECYCLER_VISITED_HOST
@@ -133,34 +133,34 @@ enum ObjectInfoBits : unsigned short
     // Additional definitions based on above
 
 #ifdef RECYCLER_STATS
-    NewFinalizeBit = NewTrackBit,  // Use to detect if the background thread has counted the finalizable object in stats
+    NewFinalizeBit              = NewTrackBit,  // Use to detect if the background thread has counted the finalizable object in stats
 #else
-    NewFinalizeBit = 0x00,
+    NewFinalizeBit              = 0x00,
 #endif
 
 #ifdef RECYCLER_WRITE_BARRIER
-    FinalizableWithBarrierBit = WithBarrierBit | FinalizeBit,
+    FinalizableWithBarrierBit   = WithBarrierBit | FinalizeBit,
 #endif
 
     // Allocation bits
-    FinalizableLeafBits = NewFinalizeBit | FinalizeBit | LeafBit,
-    FinalizableObjectBits = NewFinalizeBit | FinalizeBit,
+    FinalizableLeafBits         = NewFinalizeBit | FinalizeBit | LeafBit,
+    FinalizableObjectBits       = NewFinalizeBit | FinalizeBit,
 #ifdef RECYCLER_WRITE_BARRIER
     FinalizableWithBarrierObjectBits = NewFinalizeBit | FinalizableWithBarrierBit,
 #endif
     ClientFinalizableObjectBits = NewFinalizeBit | ClientTrackedBit | FinalizeBit,
 
-    ClientTrackableLeafBits = NewTrackBit | ClientTrackedBit | TrackBit | FinalizeBit | LeafBit,
-    ClientTrackableObjectBits = NewTrackBit | ClientTrackedBit | TrackBit | FinalizeBit,
+    ClientTrackableLeafBits     = NewTrackBit | ClientTrackedBit | TrackBit | FinalizeBit | LeafBit,
+    ClientTrackableObjectBits   = NewTrackBit | ClientTrackedBit | TrackBit | FinalizeBit,
 
 #ifdef RECYCLER_WRITE_BARRIER
     ClientTrackableObjectWithBarrierBits = ClientTrackableObjectBits | WithBarrierBit,
     ClientFinalizableObjectWithBarrierBits = ClientFinalizableObjectBits | WithBarrierBit,
 #endif
 
-    WeakReferenceEntryBits = LeafBit,
+    WeakReferenceEntryBits      = LeafBit,
 
-    ImplicitRootLeafBits = LeafBit | ImplicitRootBit,
+    ImplicitRootLeafBits        = LeafBit | ImplicitRootBit,
 
     // Pending dispose objects should have LeafBit set and no others
     PendingDisposeObjectBits    = PendingDisposeBit | LeafBit,
@@ -413,6 +413,13 @@ public:
         return (heapBlockType);
     }
 
+#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
+    bool WasAllocatedFromDuringSweep()
+    {
+        return this->wasAllocatedFromDuringSweep;
+    }
+#endif
+
     IdleDecommitPageAllocator* GetPageAllocator(Recycler* recycler);
 
     bool GetAndClearNeedOOMRescan()
@@ -463,7 +470,7 @@ public:
 #endif
 };
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
 template <typename TBlockType>
 struct HeapBlockSListItem {
     // SLIST_ENTRY needs to be the first element in the structure to avoid calculating offset with the SList API calls.
@@ -560,10 +567,11 @@ public:
     ushort freeCount;
     ushort lastFreeCount;
     ushort markCount;
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     ushort objectsAllocatedDuringConcurrentSweepCount;
 #if DBG
     ushort objectsMarkedDuringSweep;
+    ushort lastObjectsAllocatedDuringConcurrentSweepCount;
     bool blockNotReusedInPartialHeapBlockList;
     bool blockNotReusedInPendingList;
 #endif
@@ -810,7 +818,6 @@ public:
 protected:
     static size_t GetAllocPlusSize(uint objectCount);
     inline void SetAttributes(void * address, unsigned char attributes);
-    inline void UpdateAttributes(void * address, unsigned char attributes);
     ushort GetAddressIndex(void * objectAddress);
 
     SmallHeapBlockT(HeapBucket * bucket, ushort objectSize, ushort objectCount, HeapBlockType heapBlockType);
@@ -965,32 +972,6 @@ public:
         return tail;
     }
 
-    template <typename TBlockType>
-    static TBlockType * FindPreviousBlock(TBlockType * list, TBlockType * heapBlockToFind)
-    {
-        if (list == nullptr || heapBlockToFind == nullptr)
-        {
-            return nullptr;
-        }
-
-        TBlockType * previousBlock = nullptr;
-        TBlockType * heapBlock = list;
-        bool found = false;
-        while (heapBlock != nullptr)
-        {
-            if (heapBlock == heapBlockToFind)
-            {
-                found = true;
-                break;
-            }
-
-            previousBlock = heapBlock;
-            heapBlock = heapBlock->GetNextBlock();
-        }
-
-        return found ? previousBlock : nullptr;
-    }
-
 #if DBG
     template <typename TBlockType>
     static bool Contains(TBlockType * block, TBlockType * list, TBlockType * tail = nullptr)

+ 0 - 11
lib/Common/Memory/HeapBlock.inl

@@ -17,17 +17,6 @@ SmallHeapBlockT<TBlockAttributes>::SetAttributes(void * address, unsigned char a
     ObjectInfo(index) = attributes;
 }
 
-template <class TBlockAttributes>
-void
-SmallHeapBlockT<TBlockAttributes>::UpdateAttributes(void * address, unsigned char attributes)
-{
-    Assert(this->address != nullptr);
-    Assert(this->segment != nullptr);
-    ushort index = GetAddressIndex(address);
-    Assert(index != SmallHeapBlockT<TBlockAttributes>::InvalidAddressBit);
-    ObjectInfo(index) = attributes;
-}
-
 inline
 IdleDecommitPageAllocator*
 HeapBlock::GetPageAllocator(Recycler* recycler)

+ 5 - 0
lib/Common/Memory/HeapBlockMap.cpp

@@ -327,7 +327,12 @@ HeapBlockMap32::SetPageMarkCount(void * address, ushort markCount)
     // Callers should already have updated the mark bits by the time they call this,
     // so check that the new count is correct for the current mark bits.
     // Not true right now, will be true...
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+    DebugOnly(HeapBlock * heapBlock = this->GetHeapBlock(address));
+    Assert(l2map->GetPageMarkBitVector(id2)->Count() == markCount || heapBlock->WasAllocatedFromDuringSweep());
+#else
     Assert(l2map->GetPageMarkBitVector(id2)->Count() == markCount);
+#endif
 
     l2map->pageMarkCount[id2] = markCount;
 }

+ 106 - 61
lib/Common/Memory/HeapBucket.cpp

@@ -9,14 +9,15 @@ HeapBucket::HeapBucket() :
     heapInfo(nullptr),
     sizeCat(0)
 {
-#ifdef RECYCLER_SLOW_CHECK_ENABLED
+#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     heapBlockCount = 0;
     newHeapBlockCount = 0;
     emptyHeapBlockCount = 0;
 #endif
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     this->allocationsStartedDuringConcurrentSweep = false;
+    this->concurrentSweepAllocationsThresholdExceeded = false;
 #endif
 
 #ifdef RECYCLER_PAGE_HEAP
@@ -51,7 +52,7 @@ HeapBucketT<TBlockType>::HeapBucketT() :
     emptyBlockList(nullptr),
     fullBlockList(nullptr),
     heapBlockList(nullptr),
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
 #if SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     lastKnownNextAllocableBlockHead(nullptr),
     allocableHeapBlockListHead((PSLIST_HEADER)_aligned_malloc(sizeof(SLIST_HEADER), MEMORY_ALLOCATION_ALIGNMENT)),
@@ -76,7 +77,7 @@ HeapBucketT<TBlockType>::~HeapBucketT()
     DeleteHeapBlockList(this->heapBlockList);
     DeleteHeapBlockList(this->fullBlockList);
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
 #if SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     if (allocableHeapBlockListHead != nullptr)
     {
@@ -130,7 +131,7 @@ HeapBucketT<TBlockType>::DeleteHeapBlockList(TBlockType * list)
     DeleteHeapBlockList(list, this->heapInfo->recycler);
 }
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
 template<typename TBlockType>
 bool
 HeapBucketT<TBlockType>::PushHeapBlockToSList(PSLIST_HEADER list, TBlockType * heapBlock)
@@ -212,7 +213,7 @@ HeapBucketT<TBlockType>::Initialize(HeapInfo * heapInfo, uint sizeCat)
 #endif
     this->lastExplicitFreeListAllocator = &allocatorHead;
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
     {
         if (allocableHeapBlockListHead == nullptr)
@@ -348,7 +349,10 @@ HeapBucketT<TBlockType>::IntegrateBlock(char * blockAddress, PageSegment * segme
 
     heapBlock->SetNextBlock(this->fullBlockList);
     this->fullBlockList = heapBlock;
-    RECYCLER_SLOW_CHECK(this->heapBlockCount++);
+
+#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+    this->heapBlockCount++;
+#endif
 
     this->heapInfo->uncollectedAllocBytes += heapBlock->GetAndClearLastFreeCount() * heapBlock->GetObjectSize();
     RecyclerMemoryTracking::ReportAllocation(recycler, blockAddress, heapBlock->GetObjectSize() * heapBlock->GetObjectCount());
@@ -413,7 +417,7 @@ HeapBucketT<TBlockType>::AssertCheckHeapBlockNotInAnyList(TBlockType * heapBlock
     AssertMsg(!HeapBlockList::Contains(heapBlock, heapBlockList), "The heap block already exists in the heapBlockList.");
     AssertMsg(!HeapBlockList::Contains(heapBlock, fullBlockList), "The heap block already exists in the fullBlockList.");
     AssertMsg(!HeapBlockList::Contains(heapBlock, emptyBlockList), "The heap block already exists in the emptyBlockList.");
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     AssertMsg(!HeapBlockList::Contains(heapBlock, sweepableHeapBlockList), "The heap block already exists in the sweepableHeapBlockList.");
     AssertMsg(!HeapBlockList::Contains(heapBlock, pendingSweepPrepHeapBlockList), "The heap block already exists in the pendingSweepPrepHeapBlockList.");
 #endif
@@ -479,12 +483,12 @@ HeapBucketT<TBlockType>::TryAlloc(Recycler * recycler, TBlockAllocatorType * all
     ClearAllocator(allocator);
 
     TBlockType * heapBlock = this->nextAllocableBlockHead;
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
 #if SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     bool heapBlockFromAllocableHeapBlockList = false;
     DebugOnly(bool heapBlockInPendingSweepPrepList = false);
 
-    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && heapBlock == nullptr && !this->IsAnyFinalizableBucket())
+    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && heapBlock == nullptr && recycler->AllowAllocationsDuringConcurrentSweep())
     {
 #if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
         // This lock is needed only in the debug mode while we verify block counts. Not needed otherwise, as this list is never accessed concurrently.
@@ -495,6 +499,7 @@ HeapBucketT<TBlockType>::TryAlloc(Recycler * recycler, TBlockAllocatorType * all
         heapBlock = PopHeapBlockFromSList(this->allocableHeapBlockListHead);
         if (heapBlock != nullptr)
         {
+            Assert(!this->IsAnyFinalizableBucket());
             DebugOnly(AssertCheckHeapBlockNotInAnyList(heapBlock));
             if (heapBlock->isPendingConcurrentSweepPrep)
             {
@@ -536,7 +541,7 @@ HeapBucketT<TBlockType>::TryAlloc(Recycler * recycler, TBlockAllocatorType * all
    {
         Assert(!this->IsAllocationStopped());
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
         // When allocations are allowed during concurrent sweep we set nextAllocableBlockHead to NULL as the allocator will pick heap blocks from the
         // interlocked SLIST. During that time, the heap block at the top of the SLIST is always the nextAllocableBlockHead.
         // If the heapBlock was just picked from the SLIST and nextAllocableBlockHead is not NULL then we just resumed normal allocations on the background thread
@@ -600,12 +605,28 @@ HeapBucket::GetRecycler() const
     return this->heapInfo->recycler;
 }
 
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
 bool
 HeapBucket::AllocationsStartedDuringConcurrentSweep() const
 {
     return this->allocationsStartedDuringConcurrentSweep;
 }
 
+bool
+HeapBucket::ConcurrentSweepAllocationsThresholdExceeded() const
+{
+    return this->concurrentSweepAllocationsThresholdExceeded;
+}
+
+bool
+HeapBucket::DoTwoPassConcurrentSweepPreCheck()
+{
+    this->concurrentSweepAllocationsThresholdExceeded = ((this->heapBlockCount + this->newHeapBlockCount) > RecyclerHeuristic::AllocDuringConcurrentSweepHeapBlockThreshold);
+
+    return this->concurrentSweepAllocationsThresholdExceeded;
+}
+#endif
+
 #ifdef RECYCLER_PAGE_HEAP
 template <typename TBlockType>
 char *
@@ -662,29 +683,6 @@ HeapBucketT<TBlockType>::SnailAlloc(Recycler * recycler, TBlockAllocatorType * a
 
     if (!collected)
     {
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
-        //if (recycler->IsConcurrentSweepExecutingState())
-        //{
-        //    recycler->FinishConcurrent<FinishConcurrentOnAllocation>();
-        //    memBlock = this->TryAlloc(recycler, allocator, sizeCat, attributes);
-        //    if (memBlock != nullptr)
-        //    {
-        //        return memBlock;
-        //    }
-
-            //if (recycler->IsConcurrentSweepExecutingState())
-            //{
-            //    // Concurrent Sweep takes 2 passes now. Try again if we didn't finish sweeping yet.
-            //    recycler->FinishConcurrent<FinishConcurrentOnAllocation>();
-            //    memBlock = this->TryAlloc(recycler, allocator, sizeCat, attributes);
-            //    if (memBlock != nullptr)
-            //    {
-            //        return memBlock;
-            //    }
-            //}
-        //}
-#endif
-
 #if ENABLE_CONCURRENT_GC
 #if ENABLE_PARTIAL_GC
         // wait for background sweeping finish if there are too many pages allocated during background sweeping
@@ -793,7 +791,7 @@ HeapBucketT<TBlockType>::CreateHeapBlock(Recycler * recycler)
 
     // Add it to head of heap block list so we will keep track of the block
     recycler->autoHeap.AppendNewHeapBlock(heapBlock, this);
-#ifdef RECYCLER_SLOW_CHECK_ENABLED
+#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
 #if ENABLE_CONCURRENT_GC
     ::InterlockedIncrement(&this->newHeapBlockCount);
 #else
@@ -840,7 +838,7 @@ HeapBucketT<TBlockType>::ResetMarks(ResetMarkFlags flags)
             Assert(!heapBlock->HasFreeObject());
         });
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
         if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
         {
             HeapBlockList::ForEach(sweepableHeapBlockList, [flags](TBlockType * heapBlock)
@@ -864,7 +862,7 @@ HeapBucketT<TBlockType>::ResetMarks(ResetMarkFlags flags)
     {
         // When allocations are enabled for buckets during oncurrent sweep we don't keep track of the nextAllocableBlockHead as it directly
         // comes out of the SLIST. As a result, the below validations can't be performed reliably on a heap block.
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
         if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || this->IsAnyFinalizableBucket())
 #endif
         {
@@ -896,7 +894,7 @@ HeapBucketT<TBlockType>::ScanNewImplicitRoots(Recycler * recycler)
         heapBlock->ScanNewImplicitRoots(recycler);
     });
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
     {
         HeapBlockList::ForEach(sweepableHeapBlockList, [recycler](TBlockType * heapBlock)
@@ -947,7 +945,7 @@ HeapBucketT<TBlockType>::VerifyBlockConsistencyInList(TBlockType * heapBlock, Re
     }
     if (heapBlock->IsClearedFromAllocator())
     {
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
         // As the blocks are added to a SLIST and used from there during concurrent sweep, the expectFull assertion doesn't hold anymore.
         // We could do some work to make this work again but there may be perf hit and it may be fragile.
         if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
@@ -972,7 +970,7 @@ HeapBucketT<TBlockType>::VerifyBlockConsistencyInList(TBlockType * heapBlock, Re
         // blocks before nextAllocableBlockHead that are not being bump allocated from must be considered "full".
         // However, the exception is if this is the only heap block in this bucket, in which case nextAllocableBlockHead
         // would be null
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
         // As the blocks are added to the SLIST and used from there during concurrent sweep, the expectFull assertion doesn't hold anymore.
         if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
 #endif
@@ -1075,7 +1073,7 @@ HeapBucketT<TBlockType>::SweepHeapBlockList(RecyclerSweep& recyclerSweep, TBlock
     bool const queuePendingSweep = false;
 #endif
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     Assert(this->IsAllocationStopped() || this->AllocationsStartedDuringConcurrentSweep());
 #else
     Assert(this->IsAllocationStopped());
@@ -1222,7 +1220,9 @@ HeapBucketT<TBlockType>::SweepHeapBlockList(RecyclerSweep& recyclerSweep, TBlock
                 // Just free the page in thread (and zero the page)
                 heapBlock->ReleasePagesSweep(recycler);
                 FreeHeapBlock(heapBlock);
-                RECYCLER_SLOW_CHECK(this->heapBlockCount--);
+#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+                this->heapBlockCount--;
+#endif
 #ifdef RECYCLER_TRACE
                 recyclerSweep.GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**9**] finished Sweep Pass1, heapblock EMPTY, was FREED in-thread."));
 #endif
@@ -1262,7 +1262,7 @@ HeapBucketT<TBlockType>::SweepBucket(RecyclerSweep& recyclerSweep)
     Assert(!recyclerSweep.IsBackground());
 #endif
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->sweepableHeapBlockList != nullptr)
     {
         Assert(!this->IsAnyFinalizableBucket());
@@ -1315,7 +1315,7 @@ HeapBucketT<TBlockType>::SweepBucket(RecyclerSweep& recyclerSweep)
     this->heapBlockList = nullptr;
     this->fullBlockList = nullptr;
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     if (this->AllowAllocationsDuringConcurrentSweep())
     {
         Assert(!this->IsAnyFinalizableBucket());
@@ -1354,13 +1354,13 @@ template <typename TBlockType>
 bool
 HeapBucketT<TBlockType>::AllowAllocationsDuringConcurrentSweep()
 {
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
-    if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+    Recycler * recycler = this->GetRecycler();
+    if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || !recycler->AllowAllocationsDuringConcurrentSweep())
     {
         return false;
     }
 
-    Recycler * recycler = this->GetRecycler();
 #if ENABLE_PARTIAL_GC
     bool isPartialGC = (recycler->recyclerSweep != nullptr) && recycler->recyclerSweep->InPartialCollect();
 #else
@@ -1378,7 +1378,7 @@ template <typename TBlockType>
 void
 HeapBucketT<TBlockType>::StopAllocationBeforeSweep()
 {
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     this->allocationsStartedDuringConcurrentSweep = false;
 #if SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     this->lastKnownNextAllocableBlockHead = this->nextAllocableBlockHead;
@@ -1399,7 +1399,7 @@ HeapBucketT<TBlockType>::StartAllocationAfterSweep()
     this->nextAllocableBlockHead = this->heapBlockList;
 }
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
 template <typename TBlockType>
 void
 HeapBucketT<TBlockType>::StartAllocationDuringConcurrentSweep()
@@ -1431,7 +1431,38 @@ HeapBucketT<TBlockType>::ResumeNormalAllocationAfterConcurrentSweep(TBlockType *
     this->nextAllocableBlockHead = newNextAllocableBlockHead;
 }
 
-//TODO:akatti: Add comments here about the notion of Pass-1 and Pass-2 of the sweep and how blocks are managed during the concurrent sweep.
+/*//////////////////////////////////////////////////////////////////////////////////////////////////////
+If allocations are to be allowed to existing heap blocks during concurrent sweep, we set aside a few
+heap blocks from the heapBlockList prior to beginning sweep. However, we need to then go back and make
+sure these blocks are also swept before this sweep finishes. In order to do this we clearly define concurrent
+sweep as having 2 passes now. These passes existed before but were not distinguished as they would always start
+and finish in one go on the background thread. However, whenever allocations are allowed during concurrent
+sweep, the concurrent sweep will start Pass1 on the background thread, wait to finish Pass1 of the blocks
+we set aside to allocate from on the main thread and then go back to finish Pass2 for all heap blocks on
+the background thread. Note that, due to this need to finish Pass1 on the foreground thread, the overall
+background sweep will now appear to take longer whenever we choose to do such a two-pass sweep.
+
+The sequence of things we do to allow allocations during concurrent sweep is described below:
+1. At the beginning of concurrent sweep we decide if we will benefit from allowing allocations during concurrent
+sweep for any of the buckets. If there is at least one bucket for which we think we will benefit, we will turn on
+allocations during concurrent sweep. Once turned on, we will attempt to enable allocations during concurrent sweep
+for all supported buckets (i.e. small/medium, normal/leaf, non-finalizable buckets; write barrier buckets are supported
+as well).
+2. If allocations are turned on during concurrent sweep, we will see if there are any allocable blocks in the
+heapBlockList after the nextAllocableBlockHead. If we find any such blocks, we move them to a SLIST that the
+allocator can pick these blocks from during sweep.
+3. CollectionStateConcurrentSweepPass1: We will finish Pass1 of the sweep for all the remaining blocks (other than the
+ones we put in the SLIST in step 2 above). This will generally happen on the background thread unless we are forcing
+in-thread sweep. This state is now specifically identified as CollectionStateConcurrentSweepPass1.
+4. CollectionStateConcurrentSweepPass1Wait: At this point we need to wait for all the blocks that we put in the SLIST
+to also finish the Pass1 of the sweep. This needs to happen on the foreground thread so we prevent the allocator from
+picking up the blocks from SLIST while we do this. This state is now identified as CollectionStateConcurrentSweepPass1Wait.
+5. CollectionStateConcurrentSweepPass2: At this point we will do the actual sweeping of all the blocks that are not yet swept,
+for example, any blocks that were put onto the pendingSweepList. As these blocks get swept we keep adding them to the
+SLIST again to allow allocators to allocate from them as soon as they are swept.
+6. Before we can call this concurrent sweep done, we need to move all the blocks off of the SLIST so
+that normal allocations can begin after the sweep. This is the last step of the concurrent sweep.
+//////////////////////////////////////////////////////////////////////////////////////////////////////*/
 template<typename TBlockType>
 void
 HeapBucketT<TBlockType>::PrepareForAllocationsDuringConcurrentSweep(TBlockType * &currentHeapBlockList)
@@ -1444,7 +1475,6 @@ HeapBucketT<TBlockType>::PrepareForAllocationsDuringConcurrentSweep(TBlockType *
         Assert(HeapBlockList::Count(this->sweepableHeapBlockList) == 0);
         Assert(HeapBlockList::Count(this->pendingSweepPrepHeapBlockList) == 0);
 
-        // TODO:akatti: What if lastKnownNextAllocableBlockHead is NULL?
         TBlockType* startingNextAllocableBlockHead = this->lastKnownNextAllocableBlockHead;
         bool allocationsStarted = false;
         if (startingNextAllocableBlockHead != nullptr)
@@ -1475,7 +1505,8 @@ HeapBucketT<TBlockType>::PrepareForAllocationsDuringConcurrentSweep(TBlockType *
 #ifdef RECYCLER_TRACE
                         this->GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**5**] was being moved to SLIST, but OOM while adding to the SLIST."));
 #endif
-                        this->GetRecycler()->OutOfMemory();
+                        //TODO: akatti: We should handle this gracefully and try to recover from this state.
+                        AssertOrFailFastMsg(false, "OOM while adding a heap block to the SLIST during concurrent sweep.");
                     }
                 });
 #ifdef RECYCLER_TRACE
@@ -1497,7 +1528,7 @@ HeapBucketT<TBlockType>::PrepareForAllocationsDuringConcurrentSweep(TBlockType *
         }
 
         Assert(!this->IsAllocationStopped());
-    }
+}
 #endif
 }
 #endif
@@ -1546,8 +1577,10 @@ HeapBucketT<TBlockType>::MergeNewHeapBlock(TBlockType * heapBlock)
     Assert(heapBlock->GetObjectSize() == this->sizeCat);
     heapBlock->SetNextBlock(this->heapBlockList);
     this->heapBlockList = heapBlock;
-    RECYCLER_SLOW_CHECK(::InterlockedDecrement(&this->newHeapBlockCount));
-    RECYCLER_SLOW_CHECK(this->heapBlockCount++);
+#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+    ::InterlockedDecrement(&this->newHeapBlockCount);
+    this->heapBlockCount++;
+#endif
 }
 
 template <typename TBlockType>
@@ -1719,7 +1752,7 @@ HeapBucketT<TBlockType>::EnumerateObjects(ObjectInfoBits infoBits, void (*CallBa
 {
     UpdateAllocators();
     HeapBucket::EnumerateObjects(fullBlockList, infoBits, CallBackFunction);
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
     {
         HeapBucket::EnumerateObjects(sweepableHeapBlockList, infoBits, CallBackFunction);
@@ -1750,7 +1783,7 @@ HeapBucketT<TBlockType>::Check(bool checkCount)
     UpdateAllocators();
     size_t smallHeapBlockCount = HeapInfo::Check(true, false, this->fullBlockList);
     bool allocatingDuringConcurrentSweep = false;
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
     {
         allocatingDuringConcurrentSweep = true;
@@ -1795,7 +1828,7 @@ HeapBucketT<TBlockType>::AggregateBucketStats()
     };
 
     HeapBlockList::ForEach(fullBlockList, blockStatsAggregator);
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
     {
         HeapBlockList::ForEach(sweepableHeapBlockList, blockStatsAggregator);
@@ -1835,7 +1868,7 @@ HeapBucketT<TBlockType>::Verify()
         heapBlock->Verify();
     });
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
     {
 #if DBG
@@ -1931,7 +1964,7 @@ HeapBucketT<TBlockType>::VerifyMark()
         heapBlock->VerifyMark();
     });
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
     {
         HeapBlockList::ForEach(this->sweepableHeapBlockList, [](TBlockType * heapBlock)
@@ -2303,7 +2336,7 @@ HeapBucketGroup<TBlockAttributes>::VerifyMark()
 }
 #endif
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
 template <class TBlockAttributes>
 void
 HeapBucketGroup<TBlockAttributes>::StartAllocationDuringConcurrentSweep()
@@ -2326,6 +2359,18 @@ HeapBucketGroup<TBlockAttributes>::StartAllocationDuringConcurrentSweep()
 #endif
 }
 
+template <class TBlockAttributes>
+bool
+HeapBucketGroup<TBlockAttributes>::DoTwoPassConcurrentSweepPreCheck()
+{
+    // Returns true if any bucket in this group exceeds the heap block count threshold
+    // for allowing allocations during concurrent sweep. Deliberately avoid ||
+    // short-circuiting so that every bucket runs its pre-check and caches its own
+    // concurrentSweepAllocationsThresholdExceeded flag.
+    // NOTE: the previous form placed the statement-terminating semicolon inside
+    // #ifdef RECYCLER_WRITE_BARRIER, which failed to compile when that macro was
+    // not defined.
+    bool doTwoPassSweep = heapBucket.DoTwoPassConcurrentSweepPreCheck();
+    doTwoPassSweep = leafHeapBucket.DoTwoPassConcurrentSweepPreCheck() || doTwoPassSweep;
+#ifdef RECYCLER_WRITE_BARRIER
+    doTwoPassSweep = smallNormalWithBarrierHeapBucket.DoTwoPassConcurrentSweepPreCheck() || doTwoPassSweep;
+#endif
+    return doTwoPassSweep;
+}
+
 template <class TBlockAttributes>
 void
 HeapBucketGroup<TBlockAttributes>::FinishSweepPrep(RecyclerSweep& recyclerSweep)

+ 11 - 4
lib/Common/Memory/HeapBucket.h

@@ -75,9 +75,12 @@ public:
 protected:
     HeapInfo * heapInfo;
     uint sizeCat;
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     bool allocationsStartedDuringConcurrentSweep;
+    bool concurrentSweepAllocationsThresholdExceeded;
+#endif
 
-#ifdef RECYCLER_SLOW_CHECK_ENABLED
+#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     size_t heapBlockCount;
     size_t newHeapBlockCount;       // count of heap bock that is in the heap info and not in the heap bucket yet
     size_t emptyHeapBlockCount;
@@ -115,7 +118,11 @@ public:
 #endif
 
     Recycler * GetRecycler() const;
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     bool AllocationsStartedDuringConcurrentSweep() const;
+    bool ConcurrentSweepAllocationsThresholdExceeded() const;
+    bool DoTwoPassConcurrentSweepPreCheck();
+#endif
 
     template <typename TBlockType>
     friend class SmallHeapBlockAllocator;
@@ -209,14 +216,14 @@ protected:
 
     void Initialize(HeapInfo * heapInfo, DECLSPEC_GUARD_OVERFLOW uint sizeCat);
     void AppendAllocableHeapBlockList(TBlockType * list);
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     void FinishSweepPrep(RecyclerSweep& recyclerSweep);
     void FinishConcurrentSweep();
 #endif
     void DeleteHeapBlockList(TBlockType * list);
     static void DeleteEmptyHeapBlockList(TBlockType * list);
     static void DeleteHeapBlockList(TBlockType * list, Recycler * recycler);
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     static bool PushHeapBlockToSList(PSLIST_HEADER list, TBlockType * heapBlock);
     static TBlockType * PopHeapBlockFromSList(PSLIST_HEADER list);
     static ushort QueryDepthInterlockedSList(PSLIST_HEADER list);
@@ -295,7 +302,7 @@ protected:
     TBlockType * fullBlockList;      // list of blocks that are fully allocated
     TBlockType * heapBlockList;      // list of blocks that has free objects
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
 #if SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     PSLIST_HEADER allocableHeapBlockListHead;
     TBlockType * lastKnownNextAllocableBlockHead;

+ 28 - 4
lib/Common/Memory/HeapInfo.cpp

@@ -1498,7 +1498,7 @@ HeapInfo::SweepPendingObjects(RecyclerSweep& recyclerSweep)
 }
 #endif
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
 void HeapInfo::StartAllocationsDuringConcurrentSweep()
 {
     for (uint i = 0; i < HeapConstants::BucketCount; i++)
@@ -1514,7 +1514,31 @@ void HeapInfo::StartAllocationsDuringConcurrentSweep()
 #endif
 }
 
-void 
+bool
+HeapInfo::DoTwoPassConcurrentSweepPreCheck()
+{
+    for (uint i = 0; i < HeapConstants::BucketCount; i++)
+    {
+        if (heapBuckets[i].DoTwoPassConcurrentSweepPreCheck())
+        {
+            return true;
+        }
+    }
+
+#if defined(BUCKETIZE_MEDIUM_ALLOCATIONS) && SMALLBLOCK_MEDIUM_ALLOC
+    for (uint i = 0; i < HeapConstants::MediumBucketCount; i++)
+    {
+        if (mediumHeapBuckets[i].DoTwoPassConcurrentSweepPreCheck())
+        {
+            return true;
+        }
+    }
+#endif
+
+    return false;
+}
+
+void
 HeapInfo::FinishSweepPrep(RecyclerSweep& recyclerSweep)
 {
     for (uint i = 0; i < HeapConstants::BucketCount; i++)
@@ -1649,7 +1673,7 @@ HeapInfo::DisposeObjects()
 #if ENABLE_CONCURRENT_GC
 #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
     // As during concurrent sweep we start/stop allocations it is safer to prevent transferring disposed objects altogether.
-    if (!recycler->IsConcurrentExecutingState() /*&& !recycler->IsConcurrentSweepState()*/)
+    if (!recycler->IsConcurrentExecutingState() && !recycler->IsConcurrentSweepState())
 #else
     if (!recycler->IsConcurrentExecutingState())
 #endif
@@ -1675,7 +1699,7 @@ HeapInfo::TransferDisposedObjects()
     Assert(recycler->hasPendingTransferDisposedObjects);
 #if ENABLE_CONCURRENT_GC
 #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
-    Assert(!recycler->IsConcurrentExecutingState() /*&& !recycler->IsConcurrentSweepState()*/);
+    Assert(!recycler->IsConcurrentExecutingState() && !recycler->IsConcurrentSweepState());
 #else
     Assert(!recycler->IsConcurrentExecutingState());
 #endif

+ 2 - 1
lib/Common/Memory/HeapInfo.h

@@ -89,6 +89,7 @@ public:
     void PrepareSweep();
 #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     void StartAllocationsDuringConcurrentSweep();
+    bool DoTwoPassConcurrentSweepPreCheck();
     void FinishSweepPrep(RecyclerSweep& recyclerSweep);
     void FinishConcurrentSweep();
 #endif
@@ -463,7 +464,7 @@ private:
     LargeHeapBucket largeObjectBucket;
 
     static const size_t ObjectAlignmentMask = HeapConstants::ObjectGranularity - 1;         // 0xF
-#ifdef RECYCLER_SLOW_CHECK_ENABLED
+#if defined(RECYCLER_SLOW_CHECK_ENABLED)
     size_t heapBlockCount[HeapBlock::BlockTypeCount];
 #endif
 #ifdef RECYCLER_FINALIZE_CHECK

+ 52 - 51
lib/Common/Memory/Recycler.cpp

@@ -47,8 +47,9 @@ enum ETWEventGCActivationKind : unsigned
     ETWEvent_ConcurrentSweep                       = 13,
     ETWEvent_ConcurrentTransferSwept               = 14,
     ETWEvent_ConcurrentFinishMark                  = 15,
-    ETWEvent_ConcurrentSweep_FinishSweepPrep       = 16,
-    ETWEvent_ConcurrentSweep_FinishConcurrentSweep = 17,
+    ETWEvent_ConcurrentSweep_TwoPassConcurrentSweepPreCheck = 16,
+    ETWEvent_ConcurrentSweep_FinishSweepPrep       = 17,
+    ETWEvent_ConcurrentSweep_FinishConcurrentSweep = 18,
 };
 
 DefaultRecyclerCollectionWrapper DefaultRecyclerCollectionWrapper::Instance;
@@ -162,6 +163,9 @@ Recycler::Recycler(AllocationPolicyManager * policyManager, IdleDecommitPageAllo
     enableConcurrentMark(false),  // Default to non-concurrent
     enableParallelMark(false),
     enableConcurrentSweep(false),
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+    allowAllocationsDuringConcurrentSweepForCollection(false),
+#endif
     concurrentThread(NULL),
     concurrentWorkReadyEvent(NULL),
     concurrentWorkDoneEvent(NULL),
@@ -3089,7 +3093,7 @@ Recycler::Sweep(bool concurrent)
     {
         bool needForceForground = false;
 #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
-        if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
+        if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->AllowAllocationsDuringConcurrentSweep())
         {
             needForceForground = !StartConcurrent(CollectionStateConcurrentSweepPass1);
         }
@@ -3106,13 +3110,10 @@ Recycler::Sweep(bool concurrent)
 #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
             if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
             {
-                this->collectionState = CollectionStateConcurrentSweepPass1;
+                this->allowAllocationsDuringConcurrentSweepForCollection = false;
             }
-            else
 #endif
-            {
-                this->collectionState = CollectionStateConcurrentSweep;
-            }
+            this->collectionState = CollectionStateConcurrentSweep;
 
             DoBackgroundWork(true);
             // Continue as if the concurrent sweep were executing
@@ -3250,6 +3251,10 @@ Recycler::SweepHeap(bool concurrent, RecyclerSweep& recyclerSweep)
 #endif
 
         GCETW(GC_SETUPBACKGROUNDSWEEP_STOP, (this));
+
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+        this->DoTwoPassConcurrentSweepPreCheck();
+#endif
     }
     else
     {
@@ -3780,13 +3785,8 @@ Recycler::DoCollectWrapped(CollectionFlags flags)
     this->allowDispose = (flags & CollectOverride_AllowDispose) == CollectOverride_AllowDispose;
     BOOL collected = collectionWrapper->ExecuteRecyclerCollectionFunction(this, &Recycler::DoCollect, flags);
 
-    //TODO:akatti: Remove this.
 #if ENABLE_CONCURRENT_GC
-#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
-    Assert(IsConcurrentExecutingState() || IsConcurrentFinishedState() /*|| IsConcurrentSweepState()*/ || !CollectionInProgress());
-#else
-    Assert(IsConcurrentExecutingState() || IsConcurrentFinishedState() || !CollectionInProgress());
-#endif
+    Assert(IsConcurrentExecutingState() || IsConcurrentSweepState() || IsConcurrentFinishedState() || !CollectionInProgress());
 #else
     Assert(!CollectionInProgress());
 #endif
@@ -4399,11 +4399,7 @@ BOOL
 Recycler::RequestConcurrentWrapperCallback()
 {
 #if ENABLE_CONCURRENT_GC
-#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     Assert(!IsConcurrentExecutingState() && !IsConcurrentSweepState());
-#else
-    Assert(!IsConcurrentExecutingState());
-#endif
 
     // Save the original collection state
     CollectionState oldState = this->collectionState;
@@ -4509,12 +4505,7 @@ Recycler::FinishConcurrent()
 
         const BOOL forceFinish = flags & CollectOverride_ForceFinish;
 
-#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
-        // TODO:akatti: Should the concurrent wait states be considered executing states??
-        if (forceFinish || !(IsConcurrentExecutingState() /*|| IsConcurrentSweepState()*/))
-#else
         if (forceFinish || !IsConcurrentExecutingState())
-#endif
         {
 #if ENABLE_BACKGROUND_PAGE_FREEING
             if (CONFIG_FLAG(EnableBGFreeZero))
@@ -4572,12 +4563,7 @@ Recycler::TryFinishConcurrentCollect()
     Assert(!concurrent || !forceInThread);
     if (concurrent && concurrentThread != NULL)
     {
-#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
-        // TODO:akatti: Should the concurrent wait states be considered executing states??
-        if (IsConcurrentExecutingState() /*|| IsConcurrentSweepState()*/)
-#else
         if (IsConcurrentExecutingState())
-#endif
         {
             if (!this->priorityBoost)
             {
@@ -4684,7 +4670,7 @@ Recycler::IsConcurrentSweepSetupState() const
 BOOL
 Recycler::IsConcurrentSweepState() const
 {
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
     {
         return this->collectionState == CollectionStateConcurrentSweepPass1 ||
@@ -4802,7 +4788,7 @@ bool Recycler::AbortConcurrent(bool restoreState)
             {
                 this->ResetMarkCollectionState();
             }
-            //TODO:akatti: Do we need to handle the Pass1Wait state and finish ConcurrentSweep here??
+            //TODO:akatti: Do we need to handle the CollectionStateConcurrentSweepPass1Wait state and finish ConcurrentSweep here??
             else if (collectionState == CollectionStateTransferSweptWait)
             {
                 // Make sure we don't do another GC after finishing this one.
@@ -5744,14 +5730,14 @@ Recycler::FinishConcurrentCollect(CollectionFlags flags)
 #endif
         GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentRescan));
     }
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     else if (collectionState == CollectionStateConcurrentSweepPass1Wait)
     {
         this->FinishSweepPrep();
-        this->collectionState = CollectionStateConcurrentSweepPass2;
 
         if (forceInThread)
         {
+            this->collectionState = CollectionStateConcurrentSweepPass2;
 #ifdef RECYCLER_TRACE
             if (this->GetRecyclerFlagsTable().Trace.IsEnabled(Js::ConcurrentSweepPhase))
             {
@@ -5761,7 +5747,6 @@ Recycler::FinishConcurrentCollect(CollectionFlags flags)
             this->recyclerSweep->FinishSweep();
             this->FinishConcurrentSweep();
             this->recyclerSweep->EndBackground();
-            //this->collectionState = CollectionStateConcurrentSweepPass2Wait;
 
             uint sweptBytes = 0;
 #ifdef RECYCLER_STATS
@@ -5778,19 +5763,11 @@ Recycler::FinishConcurrentCollect(CollectionFlags flags)
         }
         else
         {
-            // Signal the background thread to finish concurrent sweep Pass2 for all the buckets.
-            SetEvent(this->concurrentWorkReadyEvent);
             needConcurrentSweep = true;
+            // Signal the background thread to finish concurrent sweep Pass2 for all the buckets.
+            StartConcurrent(CollectionStateConcurrentSweepPass2);
         }
     }
-    //else if (collectionState == CollectionStateConcurrentSweepPass2Wait)
-    //{
-    //    // This needs to happen in-thread as we will return the swept blocks from the SLIST to the heapBlockList.
-    //    this->FinishConcurrentSweep();
-
-    //    collectionState = CollectionStateTransferSweptWait;
-    //    FinishTransferSwept(flags);
-    //}
 #endif
     else
     {
@@ -5994,7 +5971,7 @@ Recycler::DoBackgroundWork(bool forceForeground)
         Assert(this->enableConcurrentSweep);
 
 #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
-        if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
+        if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->AllowAllocationsDuringConcurrentSweep())
         {
             Assert(this->collectionState == CollectionStateConcurrentSweepPass1 || this->collectionState == CollectionStateConcurrentSweepPass2);
         }
@@ -6006,7 +5983,7 @@ Recycler::DoBackgroundWork(bool forceForeground)
 
 #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
         if (this->collectionState == CollectionStateConcurrentSweepPass1 ||
-            (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->collectionState == CollectionStateConcurrentSweep))
+            ((!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) ||!this->AllowAllocationsDuringConcurrentSweep()) && this->collectionState == CollectionStateConcurrentSweep))
 #endif
         {
             RECYCLER_PROFILE_EXEC_BACKGROUND_BEGIN(this, Js::ConcurrentSweepPhase);
@@ -6034,7 +6011,7 @@ Recycler::DoBackgroundWork(bool forceForeground)
             // If allocations were allowed during concurrent sweep then the allocableHeapBlock lists still needs to be swept so we
             // will remain in CollectionStateConcurrentSweepPass1Wait state.
 #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
-            if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
+            if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->AllowAllocationsDuringConcurrentSweep())
             {
                 this->collectionState = CollectionStateConcurrentSweepPass1Wait;
             }
@@ -6075,7 +6052,7 @@ Recycler::DoBackgroundWork(bool forceForeground)
 #endif
 #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
         if (this->collectionState == CollectionStateConcurrentSweepPass2Wait ||
-            (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc)))
+            (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || !this->AllowAllocationsDuringConcurrentSweep()))
 #endif
         {
             uint sweptBytes = 0;
@@ -6102,7 +6079,7 @@ Recycler::DoBackgroundWork(bool forceForeground)
 
             GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep));
 #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
-            if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
+            if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && this->AllowAllocationsDuringConcurrentSweep())
             {
                 Assert(this->collectionState == CollectionStateConcurrentSweepPass2Wait);
             }
@@ -6258,12 +6235,28 @@ Recycler::ThreadProc()
 
 #endif //ENABLE_CONCURRENT_GC
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+
+void
+Recycler::DoTwoPassConcurrentSweepPreCheck()
+{
+    GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_TwoPassConcurrentSweepPreCheck));
+    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
+    {
+        this->allowAllocationsDuringConcurrentSweepForCollection = this->autoHeap.DoTwoPassConcurrentSweepPreCheck();
+    }
+    GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_TwoPassConcurrentSweepPreCheck));
+}
+
 void
 Recycler::FinishSweepPrep()
 {
     GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_FinishSweepPrep));
-    this->autoHeap.FinishSweepPrep(this->recyclerSweepInstance);
+    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
+    {
+        AssertMsg(this->allowAllocationsDuringConcurrentSweepForCollection, "Two pass concurrent sweep must be turned on.");
+        this->autoHeap.FinishSweepPrep(this->recyclerSweepInstance);
+    }
     GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_FinishSweepPrep));
 }
 
@@ -6272,7 +6265,11 @@ Recycler::FinishConcurrentSweep()
 {
 #if SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     GCETW_INTERNAL(GC_START, (this, ETWEvent_ConcurrentSweep_FinishConcurrentSweep));
-    this->autoHeap.FinishConcurrentSweep();
+    if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
+    {
+        AssertMsg(this->allowAllocationsDuringConcurrentSweepForCollection, "Two pass concurrent sweep must be turned on.");
+        this->autoHeap.FinishConcurrentSweep();
+    }
     GCETW_INTERNAL(GC_STOP, (this, ETWEvent_ConcurrentSweep_FinishConcurrentSweep));
 #endif
 }
@@ -6364,6 +6361,10 @@ Recycler::FinishCollection()
     }
 #endif
 
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+    this->allowAllocationsDuringConcurrentSweepForCollection = false;
+#endif
+
 #if ENABLE_MEM_STATS
     autoHeap.ReportMemStats();
 #endif

+ 14 - 3
lib/Common/Memory/Recycler.h

@@ -728,6 +728,9 @@ private:
 
     CollectionState collectionState;
     JsUtil::ThreadService *threadService;
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+    bool allowAllocationsDuringConcurrentSweepForCollection;
+#endif
 
     HeapBlockMap heapBlockMap;
 
@@ -783,9 +786,8 @@ private:
     DListBase<GuestArenaAllocator> guestArenaList;
     DListBase<ArenaData*> externalGuestArenaList;    // guest arenas are scanned for roots
 
-    bool isPageHeapEnabled;
-
 #ifdef RECYCLER_PAGE_HEAP
+    bool isPageHeapEnabled;
     bool capturePageHeapAllocStack;
     bool capturePageHeapFreeStack;
 
@@ -1600,7 +1602,8 @@ private:
     void SweepHeap(bool concurrent, RecyclerSweep& recyclerSweep);
     void FinishSweep(RecyclerSweep& recyclerSweep);
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+    void DoTwoPassConcurrentSweepPreCheck();
     void FinishSweepPrep();
     void FinishConcurrentSweep();
 #endif
@@ -1665,6 +1668,14 @@ private:
     {
         return ((collectionState & Collection_ConcurrentSweep) == Collection_ConcurrentSweep);
     }
+
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+    bool AllowAllocationsDuringConcurrentSweep()
+    {
+        return this->allowAllocationsDuringConcurrentSweepForCollection;
+    }
+#endif
+
 #if DBG
     BOOL IsConcurrentFinishedState() const;
 #endif // DBG

+ 3 - 0
lib/Common/Memory/RecyclerHeuristic.h

@@ -61,6 +61,9 @@ public:
     // If we rescan at least 128 pages in the first background repeat mark,
     // then trigger a second repeat mark pass.
     static const uint BackgroundSecondRepeatMarkThreshold = 128;
+
+    // Number of heap blocks a bucket needs to have before the allocations-during-concurrent-sweep feature kicks in.
+    static const uint AllocDuringConcurrentSweepHeapBlockThreshold = 5000;
 #endif
 private:
 

+ 1 - 1
lib/Common/Memory/RecyclerSweep.cpp

@@ -290,7 +290,7 @@ RecyclerSweep::BackgroundSweep()
     this->recycler->autoHeap.SweepSmallNonFinalizable(*this);
 
 #if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
-    if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
+    if (!CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) || !this->recycler->AllowAllocationsDuringConcurrentSweep())
 #endif
     {
         // Finish the rest of the sweep

+ 5 - 3
lib/Common/Memory/RecyclerSweep.h

@@ -243,7 +243,7 @@ RecyclerSweep::TransferPendingEmptyHeapBlocks(HeapBucketT<TBlockType> * heapBuck
     if (list)
     {
         TBlockType * tail = bucketData.pendingEmptyBlockListTail;
-#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED)
+#if DBG || defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
         size_t count = 0;
         HeapBlockList::ForEach(list, [tail, &count](TBlockType * heapBlock)
         {
@@ -252,8 +252,10 @@ RecyclerSweep::TransferPendingEmptyHeapBlocks(HeapBucketT<TBlockType> * heapBuck
             Assert(heapBlock->GetNextBlock() != nullptr || heapBlock == tail);
             count++;
         });
-        RECYCLER_SLOW_CHECK(heapBucket->emptyHeapBlockCount += count);
-        RECYCLER_SLOW_CHECK(heapBucket->heapBlockCount -= count);
+#if defined(RECYCLER_SLOW_CHECK_ENABLED) || ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+        heapBucket->emptyHeapBlockCount += count;
+        heapBucket->heapBlockCount -= count;
+#endif
 #endif
 
 

+ 2 - 1
lib/Common/Memory/SmallFinalizableHeapBucket.h

@@ -414,8 +414,9 @@ public:
 #ifdef RECYCLER_VERIFY_MARK
     void VerifyMark();
 #endif
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     void StartAllocationDuringConcurrentSweep();
+    bool DoTwoPassConcurrentSweepPreCheck();
     void FinishSweepPrep(RecyclerSweep& recyclerSweep);
     void FinishConcurrentSweep();
 #endif

+ 4 - 4
lib/Common/Memory/SmallHeapBlockAllocator.cpp

@@ -28,7 +28,7 @@ SmallHeapBlockAllocator<TBlockType>::Initialize()
     this->prev = this;
     this->next = this;
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
     DebugOnly(this->isAllocatingFromNewBlock = false);
 #endif
 }
@@ -138,7 +138,7 @@ SmallHeapBlockAllocator<TBlockType>::Clear()
 #endif
         this->freeObjectList = nullptr;
     }
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
     DebugOnly(this->isAllocatingFromNewBlock = false);
 #endif
 }
@@ -162,7 +162,7 @@ SmallHeapBlockAllocator<TBlockType>::SetNew(BlockType * heapBlock)
     this->freeObjectList = (FreeObject *)heapBlock->GetAddress();
     this->endAddress = heapBlock->GetEndAddress();
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
     DebugOnly(this->isAllocatingFromNewBlock = true);
 #endif
 }
@@ -186,7 +186,7 @@ SmallHeapBlockAllocator<TBlockType>::Set(BlockType * heapBlock)
     RECYCLER_SLOW_CHECK(this->heapBlock->CheckDebugFreeBitVector(true));
     this->freeObjectList = this->heapBlock->freeObjectList;
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
     DebugOnly(this->isAllocatingFromNewBlock = false);
 #endif
 }

+ 10 - 4
lib/Common/Memory/SmallHeapBlockAllocator.h

@@ -66,6 +66,12 @@ public:
     {
         return !IsBumpAllocMode() && !IsExplicitFreeObjectListAllocMode();
     }
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+    bool IsAllocatingDuringConcurrentSweepMode(Recycler * recycler) const
+    {
+        return IsFreeListAllocMode() && recycler->IsConcurrentSweepState();
+    }
+#endif
 private:
     static bool NeedSetAttributes(ObjectInfoBits attributes)
     {
@@ -75,7 +81,7 @@ private:
     char * endAddress;
     FreeObject * freeObjectList;
     TBlockType * heapBlock;
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP 
 #if DBG
     bool isAllocatingFromNewBlock;
 #endif
@@ -199,14 +205,14 @@ SmallHeapBlockAllocator<TBlockType>::InlinedAllocImpl(Recycler * recycler, DECLS
             Assert(isSet);
         }
 #endif
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
-        if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc))
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
+        if (recycler->AllowAllocationsDuringConcurrentSweep())
         {
             // If we are allocating during concurrent sweep we must mark the object to prevent it from being swept
             // in the ongoing sweep.
             if (heapBlock != nullptr && heapBlock->isPendingConcurrentSweepPrep)
             {
-                AssertMsg(!this->isAllocatingFromNewBlock, "We shouldn't be tracking allocation to a new block; i.e. bump allcoation; during concurrent sweep.");
+                AssertMsg(!this->isAllocatingFromNewBlock, "We shouldn't be tracking allocation to a new block; i.e. bump allocation; during concurrent sweep.");
                 AssertMsg(!heapBlock->IsAnyFinalizableBlock(), "Allocations are not allowed to finalizable blocks during concurrent sweep.");
                 AssertMsg(heapBlock->heapBucket->AllocationsStartedDuringConcurrentSweep(), "We shouldn't be allocating from this block while allocations are disabled.");
 

+ 18 - 9
lib/Common/Memory/SmallNormalHeapBucket.cpp

@@ -41,7 +41,7 @@ SmallNormalHeapBucketBase<TBlockType>::ScanInitialImplicitRoots(Recycler * recyc
         heapBlock->ScanInitialImplicitRoots(recycler);
     });
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
     if (CONFIG_FLAG_RELEASE(EnableConcurrentSweepAlloc) && !this->IsAnyFinalizableBucket())
     {
         HeapBlockList::ForEach(this->sweepableHeapBlockList, [recycler](TBlockType * heapBlock)
@@ -327,7 +327,7 @@ SmallNormalHeapBucketBase<TBlockType>::SweepPendingObjects(Recycler * recycler,
         heapBlock->template SweepObjects<mode>(recycler, false /*onlyRecalculateMarkCountAndFreeBits*/);
         tail = heapBlock;
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
         if (this->AllowAllocationsDuringConcurrentSweep())
         {
             Assert(!this->IsAnyFinalizableBucket());
@@ -343,7 +343,8 @@ SmallNormalHeapBucketBase<TBlockType>::SweepPendingObjects(Recycler * recycler,
 #ifdef RECYCLER_TRACE
                     recycler->PrintBlockStatus(this, heapBlock, _u("[**23**] finished SweepPendingObjects, OOM while adding to the SLIST."));
 #endif
-                    recycler->OutOfMemory();
+                    //TODO: akatti: We should handle this gracefully and try to recover from this state.
+                    AssertOrFailFastMsg(false, "OOM while adding a heap block to the SLIST during concurrent sweep.");
                 }
 #ifdef RECYCLER_TRACE
                 else
@@ -393,7 +394,7 @@ SmallNormalHeapBucketBase<TBlockType>::SweepPartialReusePages(RecyclerSweep& rec
             callback(heapBlock, true);
 
 
-#if ENABLE_CONCURRENT_GC && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP && SUPPORT_WIN32_SLIST && ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP_USE_SLIST
             // During concurrent sweep if allocations were allowed, the heap blocks directly go into the SLIST of
             // allocable heap blocks. They will be returned to the heapBlockList at the end of the sweep.
             if(!allocationsAllowedDuringConcurrentSweep)
@@ -464,7 +465,8 @@ SmallNormalHeapBucketBase<TBlockType>::SweepPartialReusePages(RecyclerSweep& rec
 #ifdef RECYCLER_TRACE
                         this->GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**10**] finished SweepPartialReusePages, heapblock REUSED but OOM while adding to the SLIST."));
 #endif
-                        this->GetRecycler()->OutOfMemory();
+                        //TODO: akatti: We should handle this gracefully and try to recover from this state.
+                        AssertOrFailFastMsg(false, "OOM while adding a heap block to the SLIST during concurrent sweep.");
                     }
 #ifdef RECYCLER_TRACE
                     else
@@ -490,8 +492,6 @@ SmallNormalHeapBucketBase<TBlockType>::SweepPartialReusePages(RecyclerSweep& rec
             this->GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**12**] finished SweepPartialReusePages, heapblock NOT REUSED, added to partialHeapBlockList."));
 #endif
 
-            //TODO:akatti:PERF Do we need to do this for non-debug builds? We might be able to skip
-            // this if this is only needed for passing debug asserts.
             // If we allocated from this block during concurrent sweep, the block may now have become
             // full (or almost full) and hence not reusable. If we allocated from this block during 
             // concurrent sweep, we must recalculate the mark count and rebuild free bits for this block.
@@ -547,7 +547,8 @@ SmallNormalHeapBucketBase<TBlockType>::SweepPartialReusePages(RecyclerSweep& rec
 #ifdef RECYCLER_TRACE
                             this->GetRecycler()->PrintBlockStatus(this, heapBlock, _u("[**13**] finished SweepPartialReusePages, heapblock REUSED but OOM while adding to the SLIST."));
 #endif
-                            this->GetRecycler()->OutOfMemory();
+                            //TODO: akatti: We should handle this gracefully and try to recover from this state.
+                            AssertOrFailFastMsg(false, "OOM while adding a heap block to the SLIST during concurrent sweep.");
                         }
 #ifdef RECYCLER_TRACE
                         else
@@ -604,7 +605,12 @@ SmallNormalHeapBucketBase<TBlockType>::FinishPartialCollect(RecyclerSweep * recy
     RECYCLER_SLOW_CHECK(this->VerifyHeapBlockCount(recyclerSweep != nullptr && recyclerSweep->IsBackground()));
 
     Assert(this->GetRecycler()->inPartialCollectMode);
+
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
     Assert(recyclerSweep == nullptr || this->IsAllocationStopped() || this->AllocationsStartedDuringConcurrentSweep());
+#else
+    Assert(recyclerSweep == nullptr || this->IsAllocationStopped());
+#endif
 
 #if ENABLE_CONCURRENT_GC
     // Process the partial Swept block and move it to the partial heap block list
@@ -652,7 +658,11 @@ SmallNormalHeapBucketBase<TBlockType>::FinishPartialCollect(RecyclerSweep * recy
             }
         }
 #if ENABLE_CONCURRENT_GC
+#if ENABLE_ALLOCATIONS_DURING_CONCURRENT_SWEEP
         if (recyclerSweep->GetPendingSweepBlockList(this) == nullptr && !this->AllocationsStartedDuringConcurrentSweep())
+#else
+        if (recyclerSweep->GetPendingSweepBlockList(this) == nullptr)
+#endif
 #endif
         {
             // nothing else to sweep now,  we can start allocating now.
@@ -741,7 +751,6 @@ SmallNormalHeapBucketBase<TBlockType>::Check(bool checkCount)
     smallHeapBlockCount += HeapInfo::Check(false, false, this->partialSweptHeapBlockList);
 #endif
 
-    //TODO:akatti:Can this assert fail because blocks are in the SLIST and are > 65535 blocks?
     Assert(!checkCount || this->heapBlockCount == smallHeapBlockCount);
     return smallHeapBlockCount;
 }