Browse Source

Fixing Custom Heap Protect Pages API

Issue:
There was a code path where we protected pages with PAGE_EXECUTE without
the PAGE_TARGETS_NO_UPDATE flag, which clears all the CFG (Control Flow
Guard) bits in the page.

Fix:
Ensured that PAGE_EXECUTE is never passed directly to the virtual
protect API; named #defines that carry the correct CFG flags are used
instead.
Cleaned up a complex code path in the CustomHeap's ProtectAllocation
mechanism.
Removed isReadWrite flag from PageAllocation class, as it is not required.

Earlier, we were also allowing RW pages to be added to the bucket list of
pages in the custom heap, which could let an attacker add their own RW page
to the list. It is now ensured that only RX pages go into the bucket list.

Tests:
Unit tests passed.
Exprgen, Web crawler - reran the tests and they look fine.
Satheesh Ravindranath 10 years ago
parent
commit
99fbe1d4e7

+ 7 - 1
lib/Backend/CodeGenWorkItem.cpp

@@ -205,7 +205,13 @@ void CodeGenWorkItem::RecordNativeCodeSize(Func *func, size_t bytes, ushort pdat
 #else
     bool canAllocInPreReservedHeapPageSegment = func->CanAllocInPreReservedHeapPageSegment();
 #endif
-    EmitBufferAllocation *allocation = func->GetEmitBufferManager()->AllocateBuffer(bytes, &buffer, false, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, true);
+    EmitBufferAllocation *allocation = func->GetEmitBufferManager()->AllocateBuffer(bytes, &buffer, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, true);
+
+#if DBG
+    MEMORY_BASIC_INFORMATION memBasicInfo;
+    size_t resultBytes = VirtualQuery(allocation->allocation->address, &memBasicInfo, sizeof(memBasicInfo));
+    Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
+#endif
 
     Assert(allocation != nullptr);
     if (buffer == nullptr)

+ 24 - 16
lib/Backend/EmitBuffer.cpp

@@ -246,8 +246,8 @@ bool EmitBufferManager<SyncObject>::FinalizeAllocation(EmitBufferAllocation *all
     DWORD bytes = allocation->BytesFree();
     if(bytes > 0)
     {
-        BYTE* buffer;
-        this->GetBuffer(allocation, bytes, &buffer, false /*readWrite*/);
+        BYTE* buffer = nullptr;
+        this->GetBuffer(allocation, bytes, &buffer);
         if (!this->CommitBuffer(allocation, buffer, 0, /*sourceBuffer=*/ nullptr, /*alignPad=*/ bytes))
         {
             return false;
@@ -262,11 +262,10 @@ bool EmitBufferManager<SyncObject>::FinalizeAllocation(EmitBufferAllocation *all
 }
 
 template <typename SyncObject>
-EmitBufferAllocation* EmitBufferManager<SyncObject>::GetBuffer(EmitBufferAllocation *allocation, __in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, bool readWrite)
+EmitBufferAllocation* EmitBufferManager<SyncObject>::GetBuffer(EmitBufferAllocation *allocation, __in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer)
 {
     Assert(this->criticalSection.IsLocked());
 
-    this->allocationHeap.EnsureAllocationProtection(allocation->allocation, readWrite);
     Assert(allocation->BytesFree() >= bytes);
 
     // In case of ThunkEmitter the script context would be null and we don't want to track that as code size.
@@ -288,7 +287,7 @@ EmitBufferAllocation* EmitBufferManager<SyncObject>::GetBuffer(EmitBufferAllocat
 //      to modify this buffer one page at a time.
 //----------------------------------------------------------------------------
 template <typename SyncObject>
-EmitBufferAllocation* EmitBufferManager<SyncObject>::AllocateBuffer(__in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, bool readWrite /*= false*/, ushort pdataCount /*=0*/, ushort xdataSize  /*=0*/, bool canAllocInPreReservedHeapPageSegment /*=false*/,
+EmitBufferAllocation* EmitBufferManager<SyncObject>::AllocateBuffer(__in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, ushort pdataCount /*=0*/, ushort xdataSize  /*=0*/, bool canAllocInPreReservedHeapPageSegment /*=false*/,
     bool isAnyJittedCode /* = false*/)
 {
     AutoRealOrFakeCriticalSection<SyncObject> autoCs(&this->criticalSection);
@@ -297,7 +296,13 @@ EmitBufferAllocation* EmitBufferManager<SyncObject>::AllocateBuffer(__in size_t
 
     EmitBufferAllocation * allocation = this->NewAllocation(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode);
 
-    GetBuffer(allocation, bytes, ppBuffer, readWrite);
+    GetBuffer(allocation, bytes, ppBuffer);
+
+#if DBG
+    MEMORY_BASIC_INFORMATION memBasicInfo;
+    size_t resultBytes = VirtualQuery(allocation->allocation->address, &memBasicInfo, sizeof(memBasicInfo));
+    Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
+#endif
 
     return allocation;
 }
@@ -327,6 +332,14 @@ bool EmitBufferManager<SyncObject>::CheckCommitFaultInjection()
 
 #endif
 
+template <typename SyncObject>
+bool EmitBufferManager<SyncObject>::ProtectBufferWithExecuteReadWriteForInterpreter(EmitBufferAllocation* allocation)
+{
+    Assert(this->criticalSection.IsLocked());
+    Assert(allocation != nullptr);
+    return (this->allocationHeap.ProtectAllocationWithExecuteReadWrite(allocation->allocation) == TRUE);
+}
+
 // Returns true if we successfully commit the buffer
 // Returns false if we OOM
 template <typename SyncObject>
@@ -340,8 +353,6 @@ bool EmitBufferManager<SyncObject>::CommitReadWriteBufferForInterpreter(EmitBuff
     this->totalBytesCode += bufferSize;
 #endif
 
-    DWORD oldProtect;
-
     VerboseHeapTrace(L"Setting execute permissions on 0x%p, allocation: 0x%p\n", pBuffer, allocation->allocation->address);
 
 #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
@@ -351,14 +362,13 @@ bool EmitBufferManager<SyncObject>::CommitReadWriteBufferForInterpreter(EmitBuff
     }
 #endif
 
-    if (!this->allocationHeap.ProtectAllocation(allocation->allocation, PAGE_EXECUTE, &oldProtect, PAGE_READWRITE))
+    if (!this->allocationHeap.ProtectAllocationWithExecuteReadOnly(allocation->allocation))
     {
         return false;
     }
 
     FlushInstructionCache(AutoSystemInfo::Data.GetProcessHandle(), pBuffer, bufferSize);
 
-    Assert(oldProtect == PAGE_READWRITE);
     return true;
 }
 
@@ -378,8 +388,6 @@ EmitBufferManager<SyncObject>::CommitBuffer(EmitBufferAllocation* allocation, __
     Assert(destBuffer != nullptr);
     Assert(allocation != nullptr);
 
-    DWORD oldProtect;
-
     BYTE *currentDestBuffer = allocation->GetUnused();
     BYTE *bufferToFlush = currentDestBuffer;
     Assert(allocation->BytesFree() >= bytes + alignPad);
@@ -404,11 +412,11 @@ EmitBufferManager<SyncObject>::CommitBuffer(EmitBufferAllocation* allocation, __
             return false;
         }
 #endif
-        if (!this->allocationHeap.ProtectAllocationPage(allocation->allocation, (char*)readWriteBuffer, PAGE_EXECUTE_READWRITE, &oldProtect, PAGE_EXECUTE))
+
+        if (!this->allocationHeap.ProtectAllocationWithExecuteReadWrite(allocation->allocation, (char*)readWriteBuffer))
         {
             return false;
         }
-        Assert(oldProtect == PAGE_EXECUTE);
 
         if (alignPad != 0)
         {
@@ -440,11 +448,11 @@ EmitBufferManager<SyncObject>::CommitBuffer(EmitBufferAllocation* allocation, __
         }
 
         Assert(readWriteBuffer + readWriteBytes == currentDestBuffer);
-        if (!this->allocationHeap.ProtectAllocationPage(allocation->allocation, (char*)readWriteBuffer, PAGE_EXECUTE, &oldProtect, PAGE_EXECUTE_READWRITE))
+
+        if (!this->allocationHeap.ProtectAllocationWithExecuteReadOnly(allocation->allocation, (char*)readWriteBuffer))
         {
             return false;
         }
-        Assert(oldProtect == PAGE_EXECUTE_READWRITE);
     }
 
     FlushInstructionCache(AutoSystemInfo::Data.GetProcessHandle(), bufferToFlush, sizeToFlush);

+ 3 - 2
lib/Backend/EmitBuffer.h

@@ -37,8 +37,9 @@ public:
     void Decommit();
     void Clear();
 
-    EmitBufferAllocation* AllocateBuffer(__in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, bool readWrite = false, ushort pdataCount = 0, ushort xdataSize = 0, bool canAllocInPreReservedHeapPageSegment = false, bool isAnyJittedCode = false);
+    EmitBufferAllocation* AllocateBuffer(__in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, ushort pdataCount = 0, ushort xdataSize = 0, bool canAllocInPreReservedHeapPageSegment = false, bool isAnyJittedCode = false);
     bool CommitBuffer(EmitBufferAllocation* allocation, __out_bcount(bytes) BYTE* destBuffer, __in size_t bytes, __in_bcount(bytes) const BYTE* sourceBuffer, __in DWORD alignPad = 0);
+    bool ProtectBufferWithExecuteReadWriteForInterpreter(EmitBufferAllocation* allocation);
     bool CommitReadWriteBufferForInterpreter(EmitBufferAllocation* allocation, _In_reads_bytes_(bufferSize) BYTE* pBuffer, _In_ size_t bufferSize);
     void CompletePreviousAllocation(EmitBufferAllocation* allocation);
     bool FreeAllocation(void* address);
@@ -119,7 +120,7 @@ private:
     Js::ScriptContext * scriptContext;
 
     EmitBufferAllocation * NewAllocation(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode);
-    EmitBufferAllocation* GetBuffer(EmitBufferAllocation *allocation, __in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, bool readWrite);
+    EmitBufferAllocation* GetBuffer(EmitBufferAllocation *allocation, __in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer);
 
     bool FinalizeAllocation(EmitBufferAllocation *allocation);
     CustomHeap::Heap allocationHeap;

+ 6 - 1
lib/Backend/InterpreterThunkEmitter.cpp

@@ -253,7 +253,12 @@ void InterpreterThunkEmitter::NewThunkBlock()
     DWORD bufferSize = BlockSize;
     DWORD thunkCount = 0;
 
-    allocation = emitBufferManager.AllocateBuffer(bufferSize, &buffer, /*readWrite*/ true);
+    allocation = emitBufferManager.AllocateBuffer(bufferSize, &buffer);
+    if (!emitBufferManager.ProtectBufferWithExecuteReadWriteForInterpreter(allocation))
+    {
+        Js::Throw::OutOfMemory();
+    }
+
     currentBuffer = buffer;
 
 #ifdef _M_X64

+ 1 - 1
lib/Runtime/Language/AsmJSEncoder.cpp

@@ -208,7 +208,7 @@ namespace Js
             Assert( ::Math::FitsInDWord( codeSize ) );
 
             BYTE *buffer;
-            EmitBufferAllocation *allocation = GetCodeGenAllocator()->emitBufferManager.AllocateBuffer( codeSize, &buffer, false, 0, 0 );
+            EmitBufferAllocation *allocation = GetCodeGenAllocator()->emitBufferManager.AllocateBuffer( codeSize, &buffer, 0, 0 );
             functionBody->GetAsmJsFunctionInfo()->mTJBeginAddress = buffer;
 
             Assert( allocation != nullptr );

+ 11 - 0
lib/common/Memory/CommonMemoryPch.h

@@ -45,3 +45,14 @@ typedef _Return_type_success_(return >= 0) LONG NTSTATUS;
 #include "Memory\LargeHeapBucket.inl"
 #include "Memory\HeapBlock.inl"
 #include "Memory\HeapBlockMap.inl"
+
+// Memory Protections
+#ifdef _CONTROL_FLOW_GUARD
+#define PAGE_EXECUTE_RW_TARGETS_INVALID   (PAGE_EXECUTE_READWRITE | PAGE_TARGETS_INVALID)
+#define PAGE_EXECUTE_RW_TARGETS_NO_UPDATE (PAGE_EXECUTE_READWRITE | PAGE_TARGETS_NO_UPDATE)
+#define PAGE_EXECUTE_RO_TARGETS_NO_UPDATE (PAGE_EXECUTE           | PAGE_TARGETS_NO_UPDATE)
+#else
+#define PAGE_EXECUTE_RW_TARGETS_INVALID   (PAGE_EXECUTE_READWRITE)
+#define PAGE_EXECUTE_RW_TARGETS_NO_UPDATE (PAGE_EXECUTE_READWRITE)
+#define PAGE_EXECUTE_RO_TARGETS_NO_UPDATE (PAGE_EXECUTE)
+#endif

+ 105 - 35
lib/common/Memory/CustomHeap.cpp

@@ -100,6 +100,7 @@ bool Heap::Free(__in Allocation* object)
     {
         return true;
     }
+
     return FreeAllocation(object);
 }
 
@@ -141,6 +142,7 @@ bool Heap::Decommit(__in Allocation* object)
     // Skip asserting here- multiple objects could be on the same page
     // Review: should we really decommit here or decommit only when all objects
     // on the page have been decommitted?
+
     if (!object->page->isDecommitted)
     {
 #if PDATA_ENABLED
@@ -194,7 +196,13 @@ Allocation* Heap::Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool
 
     if (bucket == BucketId::LargeObjectList)
     {
-        return AllocLargeObject(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
+        allocation = AllocLargeObject(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
+#if defined(DBG)
+        MEMORY_BASIC_INFORMATION memBasicInfo;
+        size_t resultBytes = VirtualQuery(allocation->address, &memBasicInfo, sizeof(memBasicInfo));
+        Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
+#endif
+        return allocation;
     }
 
     VerboseHeapTrace(L"Bucket is %d\n", bucket);
@@ -221,46 +229,58 @@ Allocation* Heap::Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool
         return nullptr;
     }
 
+#if defined(DBG)
+    MEMORY_BASIC_INFORMATION memBasicInfo;
+    size_t resultBytes = VirtualQuery(page->address, &memBasicInfo, sizeof(memBasicInfo));
+    Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
+#endif
+
     allocation = AllocInPage(page, bytesToAllocate, pdataCount, xdataSize);
     return allocation;
 }
 
-BOOL Heap::ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, __out DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag)
+BOOL Heap::ProtectAllocationWithExecuteReadWrite(Allocation *allocation, char* addressInPage)
 {
-    Assert(allocation != nullptr);
-    Assert(allocation->isAllocationUsed);
+    DWORD protectFlags = 0;
 
-    return ProtectAllocationInternal(allocation, nullptr, dwVirtualProtectFlags, dwOldVirtualProtectFlags, desiredOldProtectFlag);
+    if (AutoSystemInfo::Data.IsCFGEnabled())
+    {
+        protectFlags = PAGE_EXECUTE_RW_TARGETS_NO_UPDATE;
+    }
+    else
+    {
+        protectFlags = PAGE_EXECUTE_READWRITE;
+    }
+    return this->ProtectAllocation(allocation, protectFlags, PAGE_EXECUTE, addressInPage);
 }
 
-BOOL Heap::ProtectAllocationPage(__in Allocation* allocation, __in char* addressInPage, DWORD dwVirtualProtectFlags, __out DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag)
+BOOL Heap::ProtectAllocationWithExecuteReadOnly(Allocation *allocation, char* addressInPage)
 {
-    Assert(addressInPage != nullptr);
-    Assert(allocation != nullptr);
-    Assert(addressInPage >= allocation->address);
-    Assert(allocation->isAllocationUsed);
-
-    return ProtectAllocationInternal(allocation, addressInPage, dwVirtualProtectFlags, dwOldVirtualProtectFlags, desiredOldProtectFlag);
+    DWORD protectFlags = 0;
+    if (AutoSystemInfo::Data.IsCFGEnabled())
+    {
+        protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
+    }
+    else
+    {
+        protectFlags = PAGE_EXECUTE;
+    }
+    return this->ProtectAllocation(allocation, protectFlags, PAGE_EXECUTE_READWRITE, addressInPage);
 }
 
-BOOL Heap::ProtectAllocationInternal(__in Allocation* allocation, __in_opt char* addressInPage, DWORD dwVirtualProtectFlags, __out DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag)
+BOOL Heap::ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag, __in_opt char* addressInPage)
 {
     // Allocate at the page level so that our protections don't
     // transcend allocation page boundaries. Here, allocation->address is page
     // aligned if the object is a large object allocation. If it isn't, in the else
     // branch of the following if statement, we set it to the allocation's page's
     // address. This ensures that the address being protected is always page aligned
-    char* address = allocation->address;
 
-#ifdef _CONTROL_FLOW_GUARD
-    if (AutoSystemInfo::Data.IsCFGEnabled() &&
-        (dwVirtualProtectFlags & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE)))
-    {
-        AssertMsg(!(dwVirtualProtectFlags & PAGE_EXECUTE_WRITECOPY), "PAGE_EXECUTE_WRITECOPY is not used today. Remove this precondition \
-            and add to the if condition above, if this flag is used.");
-        dwVirtualProtectFlags |= PAGE_TARGETS_NO_UPDATE;
-    }
-#endif
+    Assert(allocation != nullptr);
+    Assert(allocation->isAllocationUsed);
+    Assert(addressInPage == nullptr || (addressInPage >= allocation->address && addressInPage < (allocation->address + allocation->size)));
+
+    char* address = allocation->address;
 
     size_t pageCount;
     void * segment;
@@ -273,7 +293,6 @@ BOOL Heap::ProtectAllocationInternal(__in Allocation* allocation, __in_opt char*
         }
 #endif
         segment = allocation->largeObjectAllocation.segment;
-        allocation->largeObjectAllocation.isReadWrite = ((dwVirtualProtectFlags & PAGE_READWRITE) == PAGE_READWRITE);
 
         if (addressInPage != nullptr)
         {
@@ -291,7 +310,7 @@ BOOL Heap::ProtectAllocationInternal(__in Allocation* allocation, __in_opt char*
         }
 
         VerboseHeapTrace(L"Protecting 0x%p with 0x%x\n", address, dwVirtualProtectFlags);
-        return this->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, dwOldVirtualProtectFlags, desiredOldProtectFlag);
+        return this->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
     }
     else
     {
@@ -303,11 +322,10 @@ BOOL Heap::ProtectAllocationInternal(__in Allocation* allocation, __in_opt char*
 #endif
         segment = allocation->page->segment;
         address = allocation->page->address;
-        allocation->page->isReadWrite = ((dwVirtualProtectFlags & PAGE_READWRITE) == PAGE_READWRITE);
         pageCount = 1;
 
         VerboseHeapTrace(L"Protecting 0x%p with 0x%x\n", address, dwVirtualProtectFlags);
-        return this->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, dwOldVirtualProtectFlags, desiredOldProtectFlag);
+        return this->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
     }
 }
 
@@ -353,6 +371,16 @@ Allocation* Heap::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdata
         }
 
         FillDebugBreak((BYTE*) address, pages*AutoSystemInfo::PageSize);
+        DWORD protectFlags = 0;
+        if (AutoSystemInfo::Data.IsCFGEnabled())
+        {
+            protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
+        }
+        else
+        {
+            protectFlags = PAGE_EXECUTE;
+        }
+        this->ProtectPages(address, pages, segment, protectFlags /*dwVirtualProtectFlags*/, PAGE_READWRITE /*desiredOldProtectFlags*/);
 
 #if PDATA_ENABLED
         if(pdataCount > 0)
@@ -386,7 +414,6 @@ Allocation* Heap::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdata
     allocation->address = address;
     allocation->largeObjectAllocation.segment = segment;
     allocation->largeObjectAllocation.isDecommitted = false;
-    allocation->largeObjectAllocation.isReadWrite = true;
     allocation->size = pages * AutoSystemInfo::PageSize;
 
 #if PDATA_ENABLED
@@ -424,6 +451,31 @@ void Heap::FreeDecommittedLargeObjects()
     NEXT_DLISTBASE_ENTRY_EDITING;
 }
 
+//Called during Free (while shutting down)
+DWORD Heap::EnsurePageWriteable(Page* page)
+{
+    return EnsurePageReadWrite<PAGE_READWRITE>(page);
+}
+
+// this get called when freeing the whole page
+DWORD Heap::EnsureAllocationWriteable(Allocation* allocation)
+{
+    return EnsureAllocationReadWrite<PAGE_READWRITE>(allocation);
+}
+
+// this get called when only freeing a part in the page
+DWORD Heap::EnsureAllocationExecuteWriteable(Allocation* allocation)
+{
+    if (AutoSystemInfo::Data.IsCFGEnabled())
+    {
+        return EnsureAllocationReadWrite<PAGE_EXECUTE_RW_TARGETS_NO_UPDATE>(allocation);
+    }
+    else
+    {
+        return EnsureAllocationReadWrite<PAGE_EXECUTE_READWRITE>(allocation);
+    }   
+}
+
 template <bool freeAll>
 bool Heap::FreeLargeObject(Allocation* address)
 {
@@ -602,6 +654,20 @@ Page* Heap::AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegm
 
     FillDebugBreak((BYTE*) address, AutoSystemInfo::PageSize);
 
+    DWORD protectFlags = 0;
+
+    if (AutoSystemInfo::Data.IsCFGEnabled())
+    {
+        protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
+    }
+    else
+    {
+        protectFlags = PAGE_EXECUTE;
+    }
+
+    //Change the protection of the page to Read-Only Execute, before adding it to the bucket list.
+    ProtectPages(address, 1, pageSegment, protectFlags, PAGE_READWRITE);
+
     // Switch to allocating on a list of pages so we can do leak tracking later
     VerboseHeapTrace(L"Allocing new page in bucket %d\n", bucket);
     Page* page = this->buckets[bucket].PrependNode(this->auxiliaryAllocator, address, pageSegment, bucket);
@@ -830,15 +896,19 @@ bool Heap::FreeAllocation(Allocation* object)
 
         return false;
     }
-    else // after freeing part of the page, the page should be in PAGE_EXECUTE_READWRITE protection, and turning to PAGE_EXECUTE
+    else // after freeing part of the page, the page should be in PAGE_EXECUTE_READWRITE protection, and turning to PAGE_EXECUTE (always with TARGETS_NO_UPDATE state)
     {
-        DWORD dwExpectedFlags = 0;
-
-        this->ProtectPages(page->address, 1, segment, PAGE_EXECUTE, &dwExpectedFlags, PAGE_EXECUTE_READWRITE);
-
-        Assert(!object->isAllocationUsed || dwExpectedFlags == PAGE_EXECUTE_READWRITE);
-        page->isReadWrite = false;
+        DWORD protectFlags = 0;
 
+        if (AutoSystemInfo::Data.IsCFGEnabled())
+        {
+            protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
+        }
+        else
+        {
+            protectFlags = PAGE_EXECUTE;
+        }
+        this->ProtectPages(page->address, 1, segment, protectFlags, PAGE_EXECUTE_READWRITE);
         return true;
     }
 }

+ 21 - 86
lib/common/Memory/CustomHeap.h

@@ -16,6 +16,7 @@ namespace Memory
     Output::Flush(); \
 }
 
+
 namespace CustomHeap
 {
 
@@ -37,7 +38,6 @@ BucketId GetBucketForSize(size_t bytes);
 struct PageAllocatorAllocation
 {
     bool isDecommitted;
-    bool isReadWrite;
 };
 
 struct Page: public PageAllocatorAllocation
@@ -70,7 +70,6 @@ struct Page: public PageAllocatorAllocation
     {
         // Initialize PageAllocatorAllocation fields
         this->isDecommitted = false;
-        this->isReadWrite = true;
     }
 
     // Each bit in the bit vector corresponds to 128 bytes of memory
@@ -200,16 +199,16 @@ public:
         }
     }
 
-    BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag)
+    BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag)
     {
         Assert(segment);
         if (IsPreReservedSegment(segment))
         {
-            return this->GetPageAllocator<PreReservedVirtualAllocWrapper>(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, dwOldVirtualProtectFlags, desiredOldProtectFlag);
+            return this->GetPageAllocator<PreReservedVirtualAllocWrapper>(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
         }
         else
         {
-            return this->GetPageAllocator<VirtualAllocWrapper>(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, dwOldVirtualProtectFlags, desiredOldProtectFlag);
+            return this->GetPageAllocator<VirtualAllocWrapper>(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
         }
     }
 
@@ -302,22 +301,9 @@ public:
         return page->HasNoSpace() || (allocXdata && !((Segment*)(page->segment))->CanAllocSecondary());
     }
 
-    BOOL ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, __out DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag);
-    BOOL ProtectAllocationPage(__in Allocation* allocation, __in char* addressInPage, DWORD dwVirtualProtectFlags, __out DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag);
-
-    DWORD EnsureAllocationProtection(Allocation* allocation, bool readWrite)
-    {
-        if (readWrite)
-        {
-            // this only call from InterpreterThunkEmitter
-            return EnsureAllocationReadWrite<true, PAGE_READWRITE>(allocation);
-        }
-        else
-        {
-            return EnsureAllocationReadWrite<false, PAGE_READWRITE>(allocation);
-        }
-
-    }
+    BOOL ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag, __in_opt char* addressInPage = nullptr);
+    BOOL ProtectAllocationWithExecuteReadWrite(Allocation *allocation, char* addressInPage = nullptr);
+    BOOL ProtectAllocationWithExecuteReadOnly(Allocation *allocation, char* addressInPage = nullptr);
 
     ~Heap();
 
@@ -367,91 +353,40 @@ private:
         FreeLargeObject<true>(nullptr);
     }
 
-    DWORD EnsurePageWriteable(Page* page)
-    {
-        return EnsurePageReadWrite<true, PAGE_READWRITE>(page);
-    }
+    //Called during Free
+    DWORD EnsurePageWriteable(Page* page);
 
     // this get called when freeing the whole page
-    DWORD EnsureAllocationWriteable(Allocation* allocation)
-    {
-        return EnsureAllocationReadWrite<true, PAGE_READWRITE>(allocation);
-    }
+    DWORD EnsureAllocationWriteable(Allocation* allocation);
 
     // this get called when only freeing a part in the page
-    DWORD EnsureAllocationExecuteWriteable(Allocation* allocation)
-    {
-        return EnsureAllocationReadWrite<true, PAGE_EXECUTE_READWRITE>(allocation);
-    }
+    DWORD EnsureAllocationExecuteWriteable(Allocation* allocation);
 
-    template<bool readWrite, DWORD readWriteFlags>
+    template<DWORD readWriteFlags>
     DWORD EnsurePageReadWrite(Page* page)
     {
-        if (readWrite)
-        {
-            if (!page->isReadWrite && !page->isDecommitted)
-            {
-                DWORD dwOldProtectFlags = 0;
-                BOOL result = this->ProtectPages(page->address, 1, page->segment, readWriteFlags, &dwOldProtectFlags, PAGE_EXECUTE);
-                page->isReadWrite = true;
-                Assert(result && (dwOldProtectFlags & readWriteFlags) == 0);
-                return dwOldProtectFlags;
-            }
-        }
-        else
-        {
-            if (page->isReadWrite && !page->isDecommitted)
-            {
-                DWORD dwOldProtectFlags = 0;
-                BOOL result = this->ProtectPages(page->address, 1, page->segment, PAGE_EXECUTE, &dwOldProtectFlags, readWriteFlags);
-                page->isReadWrite = false;
-                Assert(result && (dwOldProtectFlags & PAGE_EXECUTE) == 0);
-                return dwOldProtectFlags;
-            }
-        }
+        Assert(!page->isDecommitted);
 
-        return 0;
+        BOOL result = this->ProtectPages(page->address, 1, page->segment, readWriteFlags, PAGE_EXECUTE);
+        Assert(result && (PAGE_EXECUTE & readWriteFlags) == 0);
+        return PAGE_EXECUTE;
     }
 
-    template<bool readWrite, DWORD readWriteFlags>
+    template<DWORD readWriteFlags>
     DWORD EnsureAllocationReadWrite(Allocation* allocation)
     {
         if (allocation->IsLargeAllocation())
         {
-            if (readWrite)
-            {
-                if (!allocation->largeObjectAllocation.isReadWrite)
-                {
-                    DWORD dwOldProtectFlags;
-                    BOOL result = this->ProtectAllocation(allocation, readWriteFlags, &dwOldProtectFlags, PAGE_EXECUTE);
-                    Assert(result && (dwOldProtectFlags & readWriteFlags) == 0);
-                    return dwOldProtectFlags;
-                }
-            }
-            else
-            {
-                if (allocation->largeObjectAllocation.isReadWrite)
-                {
-                    DWORD dwOldProtectFlags;
-                    this->ProtectAllocation(allocation, PAGE_EXECUTE, &dwOldProtectFlags, readWriteFlags);
-                    Assert((dwOldProtectFlags & PAGE_EXECUTE) == 0);
-                    return dwOldProtectFlags;
-                }
-            }
-
+            BOOL result = this->ProtectAllocation(allocation, readWriteFlags, PAGE_EXECUTE);
+            Assert(result && (PAGE_EXECUTE & readWriteFlags) == 0);
+            return PAGE_EXECUTE;
         }
         else
         {
-            return EnsurePageReadWrite<readWrite, readWriteFlags>(allocation->page);
+            return EnsurePageReadWrite<readWriteFlags>(allocation->page);
         }
-
-        // 0 is safe to return as its not a memory protection constant
-        // so it indicates that nothing was changed
-        return 0;
     }
 
-    BOOL ProtectAllocationInternal(__in Allocation* allocation, __in_opt char* addressInPage, DWORD dwVirtualProtectFlags, __out DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag);
-
     /**
      * Freeing Methods
      */

+ 14 - 6
lib/common/Memory/PageAllocator.cpp

@@ -2193,13 +2193,12 @@ void PageAllocatorBase<TVirtualAlloc>::ReleaseSegmentList(DListBase<T> * segment
 
 template<typename T>
 BOOL
-HeapPageAllocator<T>::ProtectPages(__in char* address, size_t pageCount, __in void* segmentParam, DWORD dwVirtualProtectFlags, DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag)
+HeapPageAllocator<T>::ProtectPages(__in char* address, size_t pageCount, __in void* segmentParam, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag)
 {
     SegmentBase<T> * segment = (SegmentBase<T>*)segmentParam;
 #if DBG
     Assert(address >= segment->GetAddress());
     Assert(((uint)(((char *)address) - segment->GetAddress()) <= (segment->GetPageCount() - pageCount) * AutoSystemInfo::PageSize));
-    Assert(dwOldVirtualProtectFlags != NULL);
 
     if (IsPageSegment(segment))
     {
@@ -2231,13 +2230,22 @@ HeapPageAllocator<T>::ProtectPages(__in char* address, size_t pageCount, __in vo
     size_t bytes = VirtualQuery(address, &memBasicInfo, sizeof(memBasicInfo));
     if (bytes == 0
         || memBasicInfo.RegionSize < pageCount * AutoSystemInfo::PageSize
-        || desiredOldProtectFlag != memBasicInfo.Protect
-        )
+        || desiredOldProtectFlag != memBasicInfo.Protect)
     {
         CustomHeap_BadPageState_fatal_error((ULONG_PTR)this);
         return FALSE;
     }
-    *dwOldVirtualProtectFlags = memBasicInfo.Protect;
+
+    /*Verify if we always pass the PAGE_TARGETS_NO_UPDATE flag, if the protect flag is EXECUTE*/
+#if defined(_CONTROL_FLOW_GUARD)
+    if (AutoSystemInfo::Data.IsCFGEnabled() &&
+        (dwVirtualProtectFlags & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE)) &&
+        ((dwVirtualProtectFlags & PAGE_TARGETS_NO_UPDATE) == 0))
+    {
+        CustomHeap_BadPageState_fatal_error((ULONG_PTR)this);
+        return FALSE;
+    }
+#endif
 
 #if defined(ENABLE_JIT_CLAMP)
     bool makeExecutable = (dwVirtualProtectFlags & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE)) ? true : false;
@@ -2247,7 +2255,7 @@ HeapPageAllocator<T>::ProtectPages(__in char* address, size_t pageCount, __in vo
 
     DWORD oldProtect; // this is only for first page
     BOOL retVal = ::VirtualProtect(address, pageCount * AutoSystemInfo::PageSize, dwVirtualProtectFlags, &oldProtect);
-    Assert(oldProtect == *dwOldVirtualProtectFlags);
+    Assert(oldProtect == desiredOldProtectFlag);
 
     return retVal;
 }

+ 2 - 2
lib/common/Memory/PageAllocator.h

@@ -368,7 +368,7 @@ public:
 
     static size_t GetAndResetMaxUsedBytes();
 
-    virtual BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag)
+    virtual BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag)
     {
         Assert(false);
         return false;
@@ -748,7 +748,7 @@ class HeapPageAllocator : public PageAllocatorBase<TVirtualAlloc>
 public:
     HeapPageAllocator(AllocationPolicyManager * policyManager, bool allocXdata, bool excludeGuardPages);
 
-    BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag);
+    BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag);
     bool AllocSecondary(void* segment, ULONG_PTR functionStart, DWORD functionSize, ushort pdataCount, ushort xdataSize, SecondaryAllocation* allocation);
     void ReleaseSecondary(const SecondaryAllocation& allocation, void* segment);
     void TrackDecommittedPages(void * address, uint pageCount, __in void* segment);

+ 24 - 3
lib/common/Memory/VirtualAllocWrapper.cpp

@@ -35,7 +35,16 @@ LPVOID VirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocat
     {
         //We do the allocation in two steps - CFG Bitmap in kernel will be created only on allocation with EXECUTE flag.
         //We again call VirtualProtect to set to the requested protectFlags.
-        address = VirtualAlloc(lpAddress, dwSize, allocationType, PAGE_EXECUTE_READWRITE | PAGE_TARGETS_INVALID);
+        DWORD allocProtectFlags = 0;
+        if (AutoSystemInfo::Data.IsCFGEnabled())
+        {
+            allocProtectFlags = PAGE_EXECUTE_RW_TARGETS_INVALID;
+        }
+        else
+        {
+            allocProtectFlags = PAGE_EXECUTE_READWRITE;
+        }
+        address = VirtualAlloc(lpAddress, dwSize, allocationType, allocProtectFlags);
         VirtualProtect(address, dwSize, protectFlags, &oldProtectFlags);
     }
     else
@@ -262,8 +271,20 @@ LPVOID PreReservedVirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DW
 #if defined(_CONTROL_FLOW_GUARD)
         if (AutoSystemInfo::Data.IsCFGEnabled())
         {
-            DWORD oldProtect;
-            committedAddress = (char *) VirtualAlloc(addressToCommit, dwSize, MEM_COMMIT, PAGE_EXECUTE_READWRITE | PAGE_TARGETS_INVALID);
+            DWORD oldProtect = 0;
+            DWORD allocProtectFlags = 0;
+
+            if (AutoSystemInfo::Data.IsCFGEnabled())
+            {
+                allocProtectFlags = PAGE_EXECUTE_RW_TARGETS_INVALID;
+            }
+            else
+            {
+                allocProtectFlags = PAGE_EXECUTE_READWRITE;
+            }
+
+            committedAddress = (char *)VirtualAlloc(addressToCommit, dwSize, MEM_COMMIT, allocProtectFlags);
+
             AssertMsg(committedAddress != nullptr, "If no space to allocate, then how did we fetch this address from the tracking bit vector?");
             VirtualProtect(committedAddress, dwSize, protectFlags, &oldProtect);
             AssertMsg(oldProtect == (PAGE_EXECUTE_READWRITE), "CFG Bitmap gets allocated and bits will be set to invalid only upon passing these flags.");

+ 5 - 5
test/AsmJs/nanbug.baseline

@@ -1,5 +1,5 @@
-Successfully compiled asm.js code
-NaN
-0
-NaN
-0
+Successfully compiled asm.js code
+NaN
+0
+NaN
+0