ソースを参照

[MERGE #666] sync up last security fix to release/1.1 branch

Merge pull request #666 from Yongqu:release11
Integrate a set of security fixes we did earlier into the release/1.1 branch. This change covers the fixes for the following CVEs: CVE-2016-0116, CVE-2016-0129, CVE-2016-0080, CVE-2016-0130
Yong Qu 10 年 前
コミット
b30c60d09a

+ 7 - 1
lib/Backend/CodeGenWorkItem.cpp

@@ -205,7 +205,13 @@ void CodeGenWorkItem::RecordNativeCodeSize(Func *func, size_t bytes, ushort pdat
 #else
     bool canAllocInPreReservedHeapPageSegment = func->CanAllocInPreReservedHeapPageSegment();
 #endif
-    EmitBufferAllocation *allocation = func->GetEmitBufferManager()->AllocateBuffer(bytes, &buffer, false, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, true);
+    EmitBufferAllocation *allocation = func->GetEmitBufferManager()->AllocateBuffer(bytes, &buffer, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, true);
+
+#if DBG
+    MEMORY_BASIC_INFORMATION memBasicInfo;
+    size_t resultBytes = VirtualQuery(allocation->allocation->address, &memBasicInfo, sizeof(memBasicInfo));
+    Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
+#endif
 
     Assert(allocation != nullptr);
     if (buffer == nullptr)

+ 24 - 16
lib/Backend/EmitBuffer.cpp

@@ -246,8 +246,8 @@ bool EmitBufferManager<SyncObject>::FinalizeAllocation(EmitBufferAllocation *all
     DWORD bytes = allocation->BytesFree();
     if(bytes > 0)
     {
-        BYTE* buffer;
-        this->GetBuffer(allocation, bytes, &buffer, false /*readWrite*/);
+        BYTE* buffer = nullptr;
+        this->GetBuffer(allocation, bytes, &buffer);
         if (!this->CommitBuffer(allocation, buffer, 0, /*sourceBuffer=*/ nullptr, /*alignPad=*/ bytes))
         {
             return false;
@@ -262,11 +262,10 @@ bool EmitBufferManager<SyncObject>::FinalizeAllocation(EmitBufferAllocation *all
 }
 
 template <typename SyncObject>
-EmitBufferAllocation* EmitBufferManager<SyncObject>::GetBuffer(EmitBufferAllocation *allocation, __in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, bool readWrite)
+EmitBufferAllocation* EmitBufferManager<SyncObject>::GetBuffer(EmitBufferAllocation *allocation, __in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer)
 {
     Assert(this->criticalSection.IsLocked());
 
-    this->allocationHeap.EnsureAllocationProtection(allocation->allocation, readWrite);
     Assert(allocation->BytesFree() >= bytes);
 
     // In case of ThunkEmitter the script context would be null and we don't want to track that as code size.
@@ -288,7 +287,7 @@ EmitBufferAllocation* EmitBufferManager<SyncObject>::GetBuffer(EmitBufferAllocat
 //      to modify this buffer one page at a time.
 //----------------------------------------------------------------------------
 template <typename SyncObject>
-EmitBufferAllocation* EmitBufferManager<SyncObject>::AllocateBuffer(__in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, bool readWrite /*= false*/, ushort pdataCount /*=0*/, ushort xdataSize  /*=0*/, bool canAllocInPreReservedHeapPageSegment /*=false*/,
+EmitBufferAllocation* EmitBufferManager<SyncObject>::AllocateBuffer(__in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, ushort pdataCount /*=0*/, ushort xdataSize  /*=0*/, bool canAllocInPreReservedHeapPageSegment /*=false*/,
     bool isAnyJittedCode /* = false*/)
 {
     AutoRealOrFakeCriticalSection<SyncObject> autoCs(&this->criticalSection);
@@ -297,7 +296,13 @@ EmitBufferAllocation* EmitBufferManager<SyncObject>::AllocateBuffer(__in size_t
 
     EmitBufferAllocation * allocation = this->NewAllocation(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode);
 
-    GetBuffer(allocation, bytes, ppBuffer, readWrite);
+    GetBuffer(allocation, bytes, ppBuffer);
+
+#if DBG
+    MEMORY_BASIC_INFORMATION memBasicInfo;
+    size_t resultBytes = VirtualQuery(allocation->allocation->address, &memBasicInfo, sizeof(memBasicInfo));
+    Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
+#endif
 
     return allocation;
 }
@@ -327,6 +332,14 @@ bool EmitBufferManager<SyncObject>::CheckCommitFaultInjection()
 
 #endif
 
+template <typename SyncObject>
+bool EmitBufferManager<SyncObject>::ProtectBufferWithExecuteReadWriteForInterpreter(EmitBufferAllocation* allocation)
+{
+    Assert(this->criticalSection.IsLocked());
+    Assert(allocation != nullptr);
+    return (this->allocationHeap.ProtectAllocationWithExecuteReadWrite(allocation->allocation) == TRUE);
+}
+
 // Returns true if we successfully commit the buffer
 // Returns false if we OOM
 template <typename SyncObject>
@@ -340,8 +353,6 @@ bool EmitBufferManager<SyncObject>::CommitReadWriteBufferForInterpreter(EmitBuff
     this->totalBytesCode += bufferSize;
 #endif
 
-    DWORD oldProtect;
-
     VerboseHeapTrace(L"Setting execute permissions on 0x%p, allocation: 0x%p\n", pBuffer, allocation->allocation->address);
 
 #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
@@ -351,14 +362,13 @@ bool EmitBufferManager<SyncObject>::CommitReadWriteBufferForInterpreter(EmitBuff
     }
 #endif
 
-    if (!this->allocationHeap.ProtectAllocation(allocation->allocation, PAGE_EXECUTE, &oldProtect, PAGE_READWRITE))
+    if (!this->allocationHeap.ProtectAllocationWithExecuteReadOnly(allocation->allocation))
     {
         return false;
     }
 
     FlushInstructionCache(AutoSystemInfo::Data.GetProcessHandle(), pBuffer, bufferSize);
 
-    Assert(oldProtect == PAGE_READWRITE);
     return true;
 }
 
@@ -378,8 +388,6 @@ EmitBufferManager<SyncObject>::CommitBuffer(EmitBufferAllocation* allocation, __
     Assert(destBuffer != nullptr);
     Assert(allocation != nullptr);
 
-    DWORD oldProtect;
-
     BYTE *currentDestBuffer = allocation->GetUnused();
     BYTE *bufferToFlush = currentDestBuffer;
     Assert(allocation->BytesFree() >= bytes + alignPad);
@@ -404,11 +412,11 @@ EmitBufferManager<SyncObject>::CommitBuffer(EmitBufferAllocation* allocation, __
             return false;
         }
 #endif
-        if (!this->allocationHeap.ProtectAllocationPage(allocation->allocation, (char*)readWriteBuffer, PAGE_EXECUTE_READWRITE, &oldProtect, PAGE_EXECUTE))
+
+        if (!this->allocationHeap.ProtectAllocationWithExecuteReadWrite(allocation->allocation, (char*)readWriteBuffer))
         {
             return false;
         }
-        Assert(oldProtect == PAGE_EXECUTE);
 
         if (alignPad != 0)
         {
@@ -440,11 +448,11 @@ EmitBufferManager<SyncObject>::CommitBuffer(EmitBufferAllocation* allocation, __
         }
 
         Assert(readWriteBuffer + readWriteBytes == currentDestBuffer);
-        if (!this->allocationHeap.ProtectAllocationPage(allocation->allocation, (char*)readWriteBuffer, PAGE_EXECUTE, &oldProtect, PAGE_EXECUTE_READWRITE))
+
+        if (!this->allocationHeap.ProtectAllocationWithExecuteReadOnly(allocation->allocation, (char*)readWriteBuffer))
         {
             return false;
         }
-        Assert(oldProtect == PAGE_EXECUTE_READWRITE);
     }
 
     FlushInstructionCache(AutoSystemInfo::Data.GetProcessHandle(), bufferToFlush, sizeToFlush);

+ 3 - 2
lib/Backend/EmitBuffer.h

@@ -37,8 +37,9 @@ public:
     void Decommit();
     void Clear();
 
-    EmitBufferAllocation* AllocateBuffer(__in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, bool readWrite = false, ushort pdataCount = 0, ushort xdataSize = 0, bool canAllocInPreReservedHeapPageSegment = false, bool isAnyJittedCode = false);
+    EmitBufferAllocation* AllocateBuffer(__in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, ushort pdataCount = 0, ushort xdataSize = 0, bool canAllocInPreReservedHeapPageSegment = false, bool isAnyJittedCode = false);
     bool CommitBuffer(EmitBufferAllocation* allocation, __out_bcount(bytes) BYTE* destBuffer, __in size_t bytes, __in_bcount(bytes) const BYTE* sourceBuffer, __in DWORD alignPad = 0);
+    bool ProtectBufferWithExecuteReadWriteForInterpreter(EmitBufferAllocation* allocation);
     bool CommitReadWriteBufferForInterpreter(EmitBufferAllocation* allocation, _In_reads_bytes_(bufferSize) BYTE* pBuffer, _In_ size_t bufferSize);
     void CompletePreviousAllocation(EmitBufferAllocation* allocation);
     bool FreeAllocation(void* address);
@@ -119,7 +120,7 @@ private:
     Js::ScriptContext * scriptContext;
 
     EmitBufferAllocation * NewAllocation(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode);
-    EmitBufferAllocation* GetBuffer(EmitBufferAllocation *allocation, __in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, bool readWrite);
+    EmitBufferAllocation* GetBuffer(EmitBufferAllocation *allocation, __in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer);
 
     bool FinalizeAllocation(EmitBufferAllocation *allocation);
     CustomHeap::Heap allocationHeap;

+ 6 - 1
lib/Backend/InterpreterThunkEmitter.cpp

@@ -253,7 +253,12 @@ void InterpreterThunkEmitter::NewThunkBlock()
     DWORD bufferSize = BlockSize;
     DWORD thunkCount = 0;
 
-    allocation = emitBufferManager.AllocateBuffer(bufferSize, &buffer, /*readWrite*/ true);
+    allocation = emitBufferManager.AllocateBuffer(bufferSize, &buffer);
+    if (!emitBufferManager.ProtectBufferWithExecuteReadWriteForInterpreter(allocation))
+    {
+        Js::Throw::OutOfMemory();
+    }
+
     currentBuffer = buffer;
 
 #ifdef _M_X64

+ 4 - 21
lib/Backend/Lower.cpp

@@ -8126,14 +8126,7 @@ Lowerer::LowerStElemI(IR::Instr * instr, Js::PropertyOperationFlags flags, bool
 
     if (srcType == TyFloat64)
     {
-        // We don't support the X64 floating-point calling convention. So put this parameter on the end
-        // and save directly to the stack slot.
-#if _M_X64
-        IR::Opnd *argOpnd = IR::SymOpnd::New(m_func->m_symTable->GetArgSlotSym(5), TyFloat64, m_func);
-        m_lowererMD.CreateAssign(argOpnd, src1, instr);
-#else
         m_lowererMD.LoadDoubleHelperArgument(instr, src1);
-#endif
     }
     m_lowererMD.LoadHelperArgument(instr,
         IR::IntConstOpnd::New(static_cast<IntConstType>(flags), IRType::TyInt32, m_func, true));
@@ -8344,7 +8337,7 @@ Lowerer::LowerLdArrViewElem(IR::Instr * instr)
     IR::Opnd * src2 = instr->GetSrc2();
 
     IR::Instr * done;
-    if (indexOpnd || (uint32)src1->AsIndirOpnd()->GetOffset() >= 0x1000000)
+    if (indexOpnd || m_func->GetJnFunction()->GetAsmJsFunctionInfo()->AccessNeedsBoundCheck((uint32)src1->AsIndirOpnd()->GetOffset()))
     {
         // CMP indexOpnd, src2(arrSize)
         // JA $helper
@@ -8362,7 +8355,7 @@ Lowerer::LowerLdArrViewElem(IR::Instr * instr)
     }
     else
     {
-        // any access below 0x1000000 is safe
+        // any access below 0x10000 is safe
         instr->UnlinkDst();
         instr->UnlinkSrc1();
         if (src2)
@@ -8540,7 +8533,7 @@ Lowerer::LowerStArrViewElem(IR::Instr * instr)
     Assert(!dst->IsFloat64() || src1->IsFloat64());
 
     IR::Instr * done;
-    if (indexOpnd || (uint32)dst->AsIndirOpnd()->GetOffset() >= 0x1000000)
+    if (indexOpnd || m_func->GetJnFunction()->GetAsmJsFunctionInfo()->AccessNeedsBoundCheck((uint32)dst->AsIndirOpnd()->GetOffset()))
     {
         // CMP indexOpnd, src2(arrSize)
         // JA $helper
@@ -8555,7 +8548,7 @@ Lowerer::LowerStArrViewElem(IR::Instr * instr)
     }
     else
     {
-        // any constant access below 0x1000000 is safe, as that is the min heap size
+        // any constant access below 0x10000 is safe, as that is the min heap size
         instr->UnlinkDst();
         instr->UnlinkSrc1();
         done = instr;
@@ -10327,17 +10320,7 @@ Lowerer::GenerateHelperToArrayPushFastPath(IR::Instr * instr, IR::LabelInstr * b
         Assert(arrayHelperOpnd->GetValueType().IsLikelyNativeFloatArray());
         helperMethod = IR::HelperArray_NativeFloatPush;
 
-    //Currently, X64 floating-point calling convention is not supported. Hence store the
-    // float value explicitly in RegXMM2 (RegXMM0 and RegXMM1 will be filled with ScriptContext and Var respectively)
-#if _M_X64
-        IR::RegOpnd* regXMM2 = IR::RegOpnd::New(nullptr, (RegNum)RegXMM2, TyMachDouble, this->m_func);
-        regXMM2->m_isCallArg = true;
-        IR::Instr * movInstr = IR::Instr::New(Js::OpCode::MOVSD, regXMM2, elementHelperOpnd, this->m_func);
-        instr->InsertBefore(movInstr);
-#else
         m_lowererMD.LoadDoubleHelperArgument(instr, elementHelperOpnd);
-#endif
-
     }
     else
     {

+ 62 - 46
lib/Backend/LowerMDShared.cpp

@@ -5972,11 +5972,57 @@ LowererMD::SaveDoubleToVar(IR::RegOpnd * dstOpnd, IR::RegOpnd *opndFloat, IR::In
 #else
 
     // s1 = MOVD opndFloat
+    IR::RegOpnd *s1 = IR::RegOpnd::New(TyMachReg, m_func);
+    IR::Instr *movd = IR::Instr::New(Js::OpCode::MOVD, s1, opndFloat, m_func);
+    instrInsert->InsertBefore(movd);
+
+    if (m_func->GetJnFunction()->GetIsAsmjsMode())
+    {
+        // s1 = MOVD src
+        // tmp = NOT s1
+        // tmp = AND tmp, 0x7FF0000000000000ull
+        // test tmp, tmp
+        // je helper
+        // jmp done
+        // helper:
+        // tmp2 = AND s1, 0x000FFFFFFFFFFFFFull
+        // test tmp2, tmp2
+        // je done
+        // s1 = JavascriptNumber::k_Nan
+        // done:
+
+        IR::RegOpnd *tmp = IR::RegOpnd::New(TyMachReg, m_func);
+        IR::Instr * newInstr = IR::Instr::New(Js::OpCode::NOT, tmp, s1, m_func);
+        instrInsert->InsertBefore(newInstr);
+        LowererMD::MakeDstEquSrc1(newInstr);
+
+        newInstr = IR::Instr::New(Js::OpCode::AND, tmp, tmp, IR::AddrOpnd::New((Js::Var)0x7FF0000000000000, IR::AddrOpndKindConstantVar, m_func, true), m_func);
+        instrInsert->InsertBefore(newInstr);
+        LowererMD::Legalize(newInstr);
+
+        IR::LabelInstr* helper = Lowerer::InsertLabel(true, instrInsert);
+
+        Lowerer::InsertTestBranch(tmp, tmp, Js::OpCode::BrEq_A, helper, helper);
+
+        IR::LabelInstr* done = Lowerer::InsertLabel(isHelper, instrInsert);
+
+        Lowerer::InsertBranch(Js::OpCode::Br, done, helper);
+
+        IR::RegOpnd *tmp2 = IR::RegOpnd::New(TyMachReg, m_func);
+
+        newInstr = IR::Instr::New(Js::OpCode::AND, tmp2, s1, IR::AddrOpnd::New((Js::Var)0x000FFFFFFFFFFFFFull, IR::AddrOpndKindConstantVar, m_func, true), m_func);
+        done->InsertBefore(newInstr);
+        LowererMD::Legalize(newInstr);
+
+        Lowerer::InsertTestBranch(tmp2, tmp2, Js::OpCode::BrEq_A, done, done);
+
+        IR::Opnd * opndNaN = IR::AddrOpnd::New((Js::Var)Js::JavascriptNumber::k_Nan, IR::AddrOpndKindConstantVar, m_func, true);
+        Lowerer::InsertMove(s1, opndNaN, done);
+    }
+
     // s1 = XOR s1, FloatTag_Value
     // dst = s1
-
-    IR::RegOpnd *s1 = IR::RegOpnd::New(TyMachReg, this->m_func);
-    IR::Instr *movd = IR::Instr::New(Js::OpCode::MOVD, s1, opndFloat, this->m_func);
+    
     IR::Instr *setTag = IR::Instr::New(Js::OpCode::XOR,
                                        s1,
                                        s1,
@@ -5987,7 +6033,6 @@ LowererMD::SaveDoubleToVar(IR::RegOpnd * dstOpnd, IR::RegOpnd *opndFloat, IR::In
                                        this->m_func);
     IR::Instr *movDst = IR::Instr::New(Js::OpCode::MOV, dstOpnd, s1, this->m_func);
 
-    instrInsert->InsertBefore(movd);
     instrInsert->InsertBefore(setTag);
     instrInsert->InsertBefore(movDst);
     LowererMD::Legalize(setTag);
@@ -7705,6 +7750,11 @@ LowererMD::InsertConvertFloat64ToInt32(const RoundMode roundMode, IR::Opnd *cons
 void
 LowererMD::EmitFloatToInt(IR::Opnd *dst, IR::Opnd *src, IR::Instr *instrInsert)
 {
+#ifdef _M_IX86
+    // We should only generate this if sse2 is available
+    Assert(AutoSystemInfo::Data.SSE2Available());
+#endif
+
     IR::LabelInstr *labelDone = IR::LabelInstr::New(Js::OpCode::Label, this->m_func);
     IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true);
     IR::Instr *instr;
@@ -7714,37 +7764,20 @@ LowererMD::EmitFloatToInt(IR::Opnd *dst, IR::Opnd *src, IR::Instr *instrInsert)
     // $Helper
     instrInsert->InsertBefore(labelHelper);
 
-#ifdef _M_X64
-    // On x64, we can simply pass the var, this way we don't have to worry having to
-    // pass a double in a param reg
-
-    // s1 = MOVD src
-    IR::RegOpnd *s1 = IR::RegOpnd::New(TyMachReg, this->m_func);
-    instr = IR::Instr::New(Js::OpCode::MOVD, s1, src, this->m_func);
-    instrInsert->InsertBefore(instr);
-
-    // s1 = XOR s1, FloatTag_Value
-    instr = IR::Instr::New(Js::OpCode::XOR, s1, s1,
-                           IR::AddrOpnd::New((Js::Var)Js::FloatTag_Value, IR::AddrOpndKindConstantVar, this->m_func, /* dontEncode = */ true),
-                           this->m_func);
-    instrInsert->InsertBefore(instr);
-    LowererMD::Legalize(instr);
-
-    // dst = ToInt32_Full(s1, scriptContext);
-    m_lowerer->LoadScriptContext(instrInsert);
-    LoadHelperArgument(instrInsert, s1);
+    IR::Opnd * arg = src;
+    if (src->IsFloat32())
+    {
+        arg = IR::RegOpnd::New(TyFloat64, m_func);
 
-    instr = IR::Instr::New(Js::OpCode::CALL, dst, this->m_func);
-    instrInsert->InsertBefore(instr);
-    this->ChangeToHelperCall(instr, IR::HelperConv_ToInt32_Full);
-#else
+        EmitFloat32ToFloat64(arg, src, instrInsert);
+    }
     // dst = ToInt32Core(src);
-    LoadDoubleHelperArgument(instrInsert, src);
+    LoadDoubleHelperArgument(instrInsert, arg);
 
     instr = IR::Instr::New(Js::OpCode::CALL, dst, this->m_func);
     instrInsert->InsertBefore(instr);
     this->ChangeToHelperCall(instr, IR::HelperConv_ToInt32Core);
-#endif
+
     // $Done
     instrInsert->InsertBefore(labelDone);
 }
@@ -9018,28 +9051,11 @@ IR::Opnd* LowererMD::IsOpndNegZero(IR::Opnd* opnd, IR::Instr* instr)
 {
     IR::Opnd * isNegZero = IR::RegOpnd::New(TyInt32, this->m_func);
 
-#if defined(_M_IX86)
     LoadDoubleHelperArgument(instr, opnd);
     IR::Instr * helperCallInstr = IR::Instr::New(Js::OpCode::CALL, isNegZero, this->m_func);
     instr->InsertBefore(helperCallInstr);
     this->ChangeToHelperCall(helperCallInstr, IR::HelperIsNegZero);
 
-#else
-    IR::RegOpnd* regXMM0 = IR::RegOpnd::New(nullptr, (RegNum)FIRST_FLOAT_ARG_REG, TyMachDouble, this->m_func);
-    regXMM0->m_isCallArg = true;
-    IR::Instr * movInstr = IR::Instr::New(Js::OpCode::MOVSD, regXMM0, opnd, this->m_func);
-    instr->InsertBefore(movInstr);
-
-    IR::RegOpnd* reg1 = IR::RegOpnd::New(TyMachReg, this->m_func);
-    IR::AddrOpnd* helperAddr = IR::AddrOpnd::New((Js::Var)IR::GetMethodOriginalAddress(IR::HelperIsNegZero), IR::AddrOpndKind::AddrOpndKindDynamicMisc, this->m_func);
-    IR::Instr* mov = IR::Instr::New(Js::OpCode::MOV, reg1, helperAddr, this->m_func);
-    instr->InsertBefore(mov);
-
-    IR::Instr *helperCallInstr = IR::Instr::New(Js::OpCode::CALL, isNegZero, reg1, this->m_func);
-    instr->InsertBefore(helperCallInstr);
-
-#endif
-
     return isNegZero;
 }
 

+ 0 - 17
lib/Backend/i386/LowererMDArch.cpp

@@ -916,15 +916,6 @@ LowererMDArch::LowerAsmJsLdElemHelper(IR::Instr * instr, bool isSimdLoad /*= fal
 
     Lowerer::InsertBranch(Js::OpCode::Br, loadLabel, helperLabel);
 
-    if (m_func->GetJnFunction()->GetAsmJsFunctionInfo()->IsHeapBufferConst())
-    {
-        src1->AsIndirOpnd()->ReplaceBaseOpnd(src1->AsIndirOpnd()->UnlinkIndexOpnd());
-        Js::Var* module = (Js::Var*)m_func->m_workItem->GetEntryPoint()->GetModuleAddress();
-        Js::ArrayBuffer* arrayBuffer = *(Js::ArrayBuffer**)(module + Js::AsmJsModuleMemory::MemoryTableBeginOffset);
-        Assert(arrayBuffer);
-        src1->AsIndirOpnd()->SetOffset((uintptr)arrayBuffer->GetBuffer(), true);
-    }
-
     if (isSimdLoad)
     {
         lowererMD->m_lowerer->GenerateRuntimeError(loadLabel, JSERR_ArgumentOutOfRange, IR::HelperOp_RuntimeRangeError);
@@ -992,14 +983,6 @@ LowererMDArch::LowerAsmJsStElemHelper(IR::Instr * instr, bool isSimdStore /*= fa
 
     Lowerer::InsertBranch(Js::OpCode::Br, doneLabel, storeLabel);
 
-    if (m_func->GetJnFunction()->GetAsmJsFunctionInfo()->IsHeapBufferConst())
-    {
-        dst->AsIndirOpnd()->ReplaceBaseOpnd(dst->AsIndirOpnd()->UnlinkIndexOpnd());
-        Js::Var* module = (Js::Var*)m_func->m_workItem->GetEntryPoint()->GetModuleAddress();
-        Js::ArrayBuffer* arrayBuffer = *(Js::ArrayBuffer**)(module + Js::AsmJsModuleMemory::MemoryTableBeginOffset);
-        Assert(arrayBuffer);
-        dst->AsIndirOpnd()->SetOffset((uintptr)arrayBuffer->GetBuffer(), true);
-    }
     return doneLabel;
 }
 

+ 11 - 1
lib/Runtime/Base/CrossSite.cpp

@@ -300,7 +300,17 @@ namespace Js
 
         if (funcInfo->HasBody())
         {
-            entryPoint = (JavascriptMethod)ScriptFunction::FromVar(function)->GetEntryPointInfo()->address;
+#ifdef ASMJS_PLAT
+            if (funcInfo->GetFunctionProxy()->IsFunctionBody() &&
+                funcInfo->GetFunctionBody()->GetIsAsmJsFunction())
+            {
+                entryPoint = Js::AsmJsExternalEntryPoint;
+            }
+            else
+#endif
+            {
+                entryPoint = (JavascriptMethod)ScriptFunction::FromVar(function)->GetEntryPointInfo()->address;
+            }
         }
         else
         {

+ 1 - 1
lib/Runtime/ByteCode/ByteCodeEmitter.cpp

@@ -622,7 +622,7 @@ void ByteCodeGenerator::InitBlockScopedContent(ParseNode *pnodeBlock, Js::Debugg
                 this->m_writer.ElementRootU(op, funcInfo->FindOrAddReferencedPropertyId(propertyId));
             }
         }
-        else if (sym->IsInSlot(funcInfo))
+        else if (sym->IsInSlot(funcInfo) || (scope->GetIsObject() && sym->NeedsSlotAlloc(funcInfo)))
         {
             if (scope->GetIsObject())
             {

+ 1 - 1
lib/Runtime/Language/AsmJSEncoder.cpp

@@ -208,7 +208,7 @@ namespace Js
             Assert( ::Math::FitsInDWord( codeSize ) );
 
             BYTE *buffer;
-            EmitBufferAllocation *allocation = GetCodeGenAllocator()->emitBufferManager.AllocateBuffer( codeSize, &buffer, false, 0, 0 );
+            EmitBufferAllocation *allocation = GetCodeGenAllocator()->emitBufferManager.AllocateBuffer( codeSize, &buffer, 0, 0 );
             functionBody->GetAsmJsFunctionInfo()->mTJBeginAddress = buffer;
 
             Assert( allocation != nullptr );

+ 3 - 3
lib/Runtime/Language/AsmJSModule.cpp

@@ -2221,10 +2221,10 @@ namespace Js
                 switch (asmSlot->varType)
                 {
                 case AsmJsVarType::Double:
-                    value = JavascriptNumber::New(asmDoubleVars[asmSlot->location], scriptContext);
+                    value = JavascriptNumber::NewWithCheck(asmDoubleVars[asmSlot->location], scriptContext);
                     break;
                 case AsmJsVarType::Float:
-                    value = JavascriptNumber::New(asmFloatVars[asmSlot->location], scriptContext);
+                    value = JavascriptNumber::NewWithCheck(asmFloatVars[asmSlot->location], scriptContext);
                     break;
                 case AsmJsVarType::Int:
                     value = JavascriptNumber::ToVar(asmIntVars[asmSlot->location], scriptContext);
@@ -2272,7 +2272,7 @@ namespace Js
                 value = asmFuncs[asmSlot->location];
                 break;
             case AsmJsSymbol::MathConstant:
-                value = JavascriptNumber::New(asmSlot->mathConstVal, scriptContext);
+                value = JavascriptNumber::NewWithCheck(asmSlot->mathConstVal, scriptContext);
                 break;
             case AsmJsSymbol::ArrayView:
             {

+ 5 - 0
lib/Runtime/Language/AsmJSTypes.h

@@ -1082,6 +1082,11 @@ namespace Js
             mArgType = val;
         }
 
+        inline bool AccessNeedsBoundCheck(uint offset) const
+        {
+            // Normally, heap has min size of 0x10000, but if you use ChangeHeap, min heap size is increased to 0x1000000
+            return offset >= 0x1000000 || (IsHeapBufferConst() && offset >= 0x10000);
+        }
 
     };
 

+ 4 - 4
lib/Runtime/Language/AsmJSUtils.cpp

@@ -327,11 +327,11 @@ namespace Js
             break;
         }
         case AsmJsRetType::Double:{
-            returnValue = JavascriptNumber::New(doubleRetVal, func->GetScriptContext());
+            returnValue = JavascriptNumber::NewWithCheck(doubleRetVal, func->GetScriptContext());
             break;
         }
         case AsmJsRetType::Float:{
-            returnValue = JavascriptNumber::New(floatRetVal, func->GetScriptContext());
+            returnValue = JavascriptNumber::NewWithCheck(floatRetVal, func->GetScriptContext());
             break;
         }
         case AsmJsRetType::Float32x4:
@@ -515,7 +515,7 @@ namespace Js
                 call ecx
                 movsd dval, xmm0
             }
-            returnValue = JavascriptNumber::New(dval, func->GetScriptContext());
+            returnValue = JavascriptNumber::NewWithCheck(dval, func->GetScriptContext());
             break;
         }
         case AsmJsRetType::Float:{
@@ -530,7 +530,7 @@ namespace Js
                 call ecx
                 movss fval, xmm0
             }
-            returnValue = JavascriptNumber::New((double)fval, func->GetScriptContext());
+            returnValue = JavascriptNumber::NewWithCheck((double)fval, func->GetScriptContext());
             break;
         }
         case AsmJsRetType::Int32x4:

+ 0 - 1
lib/Runtime/Language/AsmJSUtils.h

@@ -33,7 +33,6 @@ namespace Js {
     static const double SIMD_SLOTS_SPACE = (sizeof(SIMDValue) / sizeof(Var)); // 4 in x86 and 2 in x64
 
     Var AsmJsChangeHeapBuffer(RecyclableObject * function, CallInfo callInfo, ...);
-    Var AsmJsExternalEntryPoint(Js::RecyclableObject* entryObject, Js::CallInfo callInfo, ...);
 #if _M_X64
     int GetStackSizeForAsmJsUnboxing(ScriptFunction* func);
     void * UnboxAsmJsArguments(ScriptFunction* func, Var * origArgs, char * argDst, CallInfo callInfo);

+ 1 - 1
lib/Runtime/Language/InterpreterStackFrame.cpp

@@ -2091,7 +2091,7 @@ namespace Js
     inline void InterpreterStackFrame::OP_SetOutAsmDb( RegSlot outRegisterID, double val )
     {
         Assert( m_outParams + outRegisterID < m_outSp );
-        m_outParams[outRegisterID] = JavascriptNumber::New( val, scriptContext );
+        m_outParams[outRegisterID] = JavascriptNumber::NewWithCheck( val, scriptContext );
     }
 
     inline void InterpreterStackFrame::OP_SetOutAsmInt( RegSlot outRegisterID, int val )

+ 1 - 1
lib/Runtime/Language/i386/AsmJSJitTemplate.cpp

@@ -3145,7 +3145,7 @@ namespace Js
             size += SUB::EncodeInstruction<int>( buffer, InstrParamsRegImm<int8>( RegESP, 8 ) );
             size += MOVSD::EncodeInstruction<double>( buffer, InstrParamsAddrReg( RegESP, 0, regVariable ) );
 
-            size += MOV::EncodeInstruction<int>( buffer, InstrParamsRegImm<int32>( RegEAX, (int32)(Var(*)(double,ScriptContext*))JavascriptNumber::New) );
+            size += MOV::EncodeInstruction<int>( buffer, InstrParamsRegImm<int32>( RegEAX, (int32)(Var(*)(double,ScriptContext*))JavascriptNumber::NewWithCheck) );
             size += CALL::EncodeInstruction<int>( buffer, InstrParamsReg( RegEAX ) );
 
             size += MOV::EncodeInstruction<int>( buffer, InstrParamsAddrReg( RegESP, argIndex << 2, RegEAX ) );

+ 6 - 0
lib/Runtime/Library/TypedArray.h

@@ -330,6 +330,12 @@ namespace Js
                 // fixup the length with the change
                 newLength += start;
             }
+            if (newStart >= GetLength())
+            {
+                // If we want to start copying past the length of the array, all index are no-op
+                return true;
+            }
+
             if (UInt32Math::Add(newStart, newLength) > GetLength())
             {
                 newLength = GetLength() - newStart;

+ 1 - 0
lib/Runtime/Runtime.h

@@ -261,6 +261,7 @@ namespace Js
     class AsmJsMathFunction;
     class AsmJsMathConst;
 #ifdef ASMJS_PLAT
+    Var AsmJsExternalEntryPoint(Js::RecyclableObject* entryObject, Js::CallInfo callInfo, ...);
     class AsmJsCodeGenerator;
     class AsmJsEncoder;
 #endif

+ 1 - 1
lib/common/CommonDefines.h

@@ -14,7 +14,7 @@
 #define CHAKRA_CORE_MINOR_VERSION 1
 #define CHAKRA_CORE_VERSION_RELEASE 1
 #define CHAKRA_CORE_VERSION_PRERELEASE 0
-#define CHAKRA_CORE_VERSION_RELEASE_QFE 4
+#define CHAKRA_CORE_VERSION_RELEASE_QFE 5
 
 #define CHAKRA_VERSION_RELEASE 0
 #define CHAKRA_VERSION_PRERELEASE 1

+ 11 - 0
lib/common/Memory/CommonMemoryPch.h

@@ -45,3 +45,14 @@ typedef _Return_type_success_(return >= 0) LONG NTSTATUS;
 #include "Memory\LargeHeapBucket.inl"
 #include "Memory\HeapBlock.inl"
 #include "Memory\HeapBlockMap.inl"
+
+// Memory Protections
+#ifdef _CONTROL_FLOW_GUARD
+#define PAGE_EXECUTE_RW_TARGETS_INVALID   (PAGE_EXECUTE_READWRITE | PAGE_TARGETS_INVALID)
+#define PAGE_EXECUTE_RW_TARGETS_NO_UPDATE (PAGE_EXECUTE_READWRITE | PAGE_TARGETS_NO_UPDATE)
+#define PAGE_EXECUTE_RO_TARGETS_NO_UPDATE (PAGE_EXECUTE           | PAGE_TARGETS_NO_UPDATE)
+#else
+#define PAGE_EXECUTE_RW_TARGETS_INVALID   (PAGE_EXECUTE_READWRITE)
+#define PAGE_EXECUTE_RW_TARGETS_NO_UPDATE (PAGE_EXECUTE_READWRITE)
+#define PAGE_EXECUTE_RO_TARGETS_NO_UPDATE (PAGE_EXECUTE)
+#endif

+ 105 - 35
lib/common/Memory/CustomHeap.cpp

@@ -100,6 +100,7 @@ bool Heap::Free(__in Allocation* object)
     {
         return true;
     }
+
     return FreeAllocation(object);
 }
 
@@ -141,6 +142,7 @@ bool Heap::Decommit(__in Allocation* object)
     // Skip asserting here- multiple objects could be on the same page
     // Review: should we really decommit here or decommit only when all objects
     // on the page have been decommitted?
+
     if (!object->page->isDecommitted)
     {
 #if PDATA_ENABLED
@@ -194,7 +196,13 @@ Allocation* Heap::Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool
 
     if (bucket == BucketId::LargeObjectList)
     {
-        return AllocLargeObject(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
+        allocation = AllocLargeObject(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
+#if defined(DBG)
+        MEMORY_BASIC_INFORMATION memBasicInfo;
+        size_t resultBytes = VirtualQuery(allocation->address, &memBasicInfo, sizeof(memBasicInfo));
+        Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
+#endif
+        return allocation;
     }
 
     VerboseHeapTrace(L"Bucket is %d\n", bucket);
@@ -221,46 +229,58 @@ Allocation* Heap::Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool
         return nullptr;
     }
 
+#if defined(DBG)
+    MEMORY_BASIC_INFORMATION memBasicInfo;
+    size_t resultBytes = VirtualQuery(page->address, &memBasicInfo, sizeof(memBasicInfo));
+    Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
+#endif
+
     allocation = AllocInPage(page, bytesToAllocate, pdataCount, xdataSize);
     return allocation;
 }
 
-BOOL Heap::ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, __out DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag)
+BOOL Heap::ProtectAllocationWithExecuteReadWrite(Allocation *allocation, char* addressInPage)
 {
-    Assert(allocation != nullptr);
-    Assert(allocation->isAllocationUsed);
+    DWORD protectFlags = 0;
 
-    return ProtectAllocationInternal(allocation, nullptr, dwVirtualProtectFlags, dwOldVirtualProtectFlags, desiredOldProtectFlag);
+    if (AutoSystemInfo::Data.IsCFGEnabled())
+    {
+        protectFlags = PAGE_EXECUTE_RW_TARGETS_NO_UPDATE;
+    }
+    else
+    {
+        protectFlags = PAGE_EXECUTE_READWRITE;
+    }
+    return this->ProtectAllocation(allocation, protectFlags, PAGE_EXECUTE, addressInPage);
 }
 
-BOOL Heap::ProtectAllocationPage(__in Allocation* allocation, __in char* addressInPage, DWORD dwVirtualProtectFlags, __out DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag)
+BOOL Heap::ProtectAllocationWithExecuteReadOnly(Allocation *allocation, char* addressInPage)
 {
-    Assert(addressInPage != nullptr);
-    Assert(allocation != nullptr);
-    Assert(addressInPage >= allocation->address);
-    Assert(allocation->isAllocationUsed);
-
-    return ProtectAllocationInternal(allocation, addressInPage, dwVirtualProtectFlags, dwOldVirtualProtectFlags, desiredOldProtectFlag);
+    DWORD protectFlags = 0;
+    if (AutoSystemInfo::Data.IsCFGEnabled())
+    {
+        protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
+    }
+    else
+    {
+        protectFlags = PAGE_EXECUTE;
+    }
+    return this->ProtectAllocation(allocation, protectFlags, PAGE_EXECUTE_READWRITE, addressInPage);
 }
 
-BOOL Heap::ProtectAllocationInternal(__in Allocation* allocation, __in_opt char* addressInPage, DWORD dwVirtualProtectFlags, __out DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag)
+BOOL Heap::ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag, __in_opt char* addressInPage)
 {
     // Allocate at the page level so that our protections don't
     // transcend allocation page boundaries. Here, allocation->address is page
     // aligned if the object is a large object allocation. If it isn't, in the else
     // branch of the following if statement, we set it to the allocation's page's
     // address. This ensures that the address being protected is always page aligned
-    char* address = allocation->address;
 
-#ifdef _CONTROL_FLOW_GUARD
-    if (AutoSystemInfo::Data.IsCFGEnabled() &&
-        (dwVirtualProtectFlags & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE)))
-    {
-        AssertMsg(!(dwVirtualProtectFlags & PAGE_EXECUTE_WRITECOPY), "PAGE_EXECUTE_WRITECOPY is not used today. Remove this precondition \
-            and add to the if condition above, if this flag is used.");
-        dwVirtualProtectFlags |= PAGE_TARGETS_NO_UPDATE;
-    }
-#endif
+    Assert(allocation != nullptr);
+    Assert(allocation->isAllocationUsed);
+    Assert(addressInPage == nullptr || (addressInPage >= allocation->address && addressInPage < (allocation->address + allocation->size)));
+
+    char* address = allocation->address;
 
     size_t pageCount;
     void * segment;
@@ -273,7 +293,6 @@ BOOL Heap::ProtectAllocationInternal(__in Allocation* allocation, __in_opt char*
         }
 #endif
         segment = allocation->largeObjectAllocation.segment;
-        allocation->largeObjectAllocation.isReadWrite = ((dwVirtualProtectFlags & PAGE_READWRITE) == PAGE_READWRITE);
 
         if (addressInPage != nullptr)
         {
@@ -291,7 +310,7 @@ BOOL Heap::ProtectAllocationInternal(__in Allocation* allocation, __in_opt char*
         }
 
         VerboseHeapTrace(L"Protecting 0x%p with 0x%x\n", address, dwVirtualProtectFlags);
-        return this->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, dwOldVirtualProtectFlags, desiredOldProtectFlag);
+        return this->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
     }
     else
     {
@@ -303,11 +322,10 @@ BOOL Heap::ProtectAllocationInternal(__in Allocation* allocation, __in_opt char*
 #endif
         segment = allocation->page->segment;
         address = allocation->page->address;
-        allocation->page->isReadWrite = ((dwVirtualProtectFlags & PAGE_READWRITE) == PAGE_READWRITE);
         pageCount = 1;
 
         VerboseHeapTrace(L"Protecting 0x%p with 0x%x\n", address, dwVirtualProtectFlags);
-        return this->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, dwOldVirtualProtectFlags, desiredOldProtectFlag);
+        return this->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
     }
 }
 
@@ -353,6 +371,16 @@ Allocation* Heap::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdata
         }
 
         FillDebugBreak((BYTE*) address, pages*AutoSystemInfo::PageSize);
+        DWORD protectFlags = 0;
+        if (AutoSystemInfo::Data.IsCFGEnabled())
+        {
+            protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
+        }
+        else
+        {
+            protectFlags = PAGE_EXECUTE;
+        }
+        this->ProtectPages(address, pages, segment, protectFlags /*dwVirtualProtectFlags*/, PAGE_READWRITE /*desiredOldProtectFlags*/);
 
 #if PDATA_ENABLED
         if(pdataCount > 0)
@@ -386,7 +414,6 @@ Allocation* Heap::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdata
     allocation->address = address;
     allocation->largeObjectAllocation.segment = segment;
     allocation->largeObjectAllocation.isDecommitted = false;
-    allocation->largeObjectAllocation.isReadWrite = true;
     allocation->size = pages * AutoSystemInfo::PageSize;
 
 #if PDATA_ENABLED
@@ -424,6 +451,31 @@ void Heap::FreeDecommittedLargeObjects()
     NEXT_DLISTBASE_ENTRY_EDITING;
 }
 
+//Called during Free (while shutting down)
+DWORD Heap::EnsurePageWriteable(Page* page)
+{
+    return EnsurePageReadWrite<PAGE_READWRITE>(page);
+}
+
+// this get called when freeing the whole page
+DWORD Heap::EnsureAllocationWriteable(Allocation* allocation)
+{
+    return EnsureAllocationReadWrite<PAGE_READWRITE>(allocation);
+}
+
+// this get called when only freeing a part in the page
+DWORD Heap::EnsureAllocationExecuteWriteable(Allocation* allocation)
+{
+    if (AutoSystemInfo::Data.IsCFGEnabled())
+    {
+        return EnsureAllocationReadWrite<PAGE_EXECUTE_RW_TARGETS_NO_UPDATE>(allocation);
+    }
+    else
+    {
+        return EnsureAllocationReadWrite<PAGE_EXECUTE_READWRITE>(allocation);
+    }   
+}
+
 template <bool freeAll>
 bool Heap::FreeLargeObject(Allocation* address)
 {
@@ -602,6 +654,20 @@ Page* Heap::AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegm
 
     FillDebugBreak((BYTE*) address, AutoSystemInfo::PageSize);
 
+    DWORD protectFlags = 0;
+
+    if (AutoSystemInfo::Data.IsCFGEnabled())
+    {
+        protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
+    }
+    else
+    {
+        protectFlags = PAGE_EXECUTE;
+    }
+
+    //Change the protection of the page to Read-Only Execute, before adding it to the bucket list.
+    ProtectPages(address, 1, pageSegment, protectFlags, PAGE_READWRITE);
+
     // Switch to allocating on a list of pages so we can do leak tracking later
     VerboseHeapTrace(L"Allocing new page in bucket %d\n", bucket);
     Page* page = this->buckets[bucket].PrependNode(this->auxilliaryAllocator, address, pageSegment, bucket);
@@ -830,15 +896,19 @@ bool Heap::FreeAllocation(Allocation* object)
 
         return false;
     }
-    else // after freeing part of the page, the page should be in PAGE_EXECUTE_READWRITE protection, and turning to PAGE_EXECUTE
+    else // after freeing part of the page, the page should be in PAGE_EXECUTE_READWRITE protection, and turning to PAGE_EXECUTE (always with TARGETS_NO_UPDATE state)
     {
-        DWORD dwExpectedFlags = 0;
-
-        this->ProtectPages(page->address, 1, segment, PAGE_EXECUTE, &dwExpectedFlags, PAGE_EXECUTE_READWRITE);
-
-        Assert(!object->isAllocationUsed || dwExpectedFlags == PAGE_EXECUTE_READWRITE);
-        page->isReadWrite = false;
+        DWORD protectFlags = 0;
 
+        if (AutoSystemInfo::Data.IsCFGEnabled())
+        {
+            protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
+        }
+        else
+        {
+            protectFlags = PAGE_EXECUTE;
+        }
+        this->ProtectPages(page->address, 1, segment, protectFlags, PAGE_EXECUTE_READWRITE);
         return true;
     }
 }

+ 21 - 86
lib/common/Memory/CustomHeap.h

@@ -16,6 +16,7 @@ namespace Memory
     Output::Flush(); \
 }
 
+
 namespace CustomHeap
 {
 
@@ -37,7 +38,6 @@ BucketId GetBucketForSize(size_t bytes);
 struct PageAllocatorAllocation
 {
     bool isDecommitted;
-    bool isReadWrite;
 };
 
 struct Page: public PageAllocatorAllocation
@@ -70,7 +70,6 @@ struct Page: public PageAllocatorAllocation
     {
         // Initialize PageAllocatorAllocation fields
         this->isDecommitted = false;
-        this->isReadWrite = true;
     }
 
     // Each bit in the bit vector corresponds to 128 bytes of memory
@@ -200,16 +199,16 @@ public:
         }
     }
 
-    BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag)
+    BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag)
     {
         Assert(segment);
         if (IsPreReservedSegment(segment))
         {
-            return this->GetPageAllocator<PreReservedVirtualAllocWrapper>(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, dwOldVirtualProtectFlags, desiredOldProtectFlag);
+            return this->GetPageAllocator<PreReservedVirtualAllocWrapper>(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
         }
         else
         {
-            return this->GetPageAllocator<VirtualAllocWrapper>(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, dwOldVirtualProtectFlags, desiredOldProtectFlag);
+            return this->GetPageAllocator<VirtualAllocWrapper>(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
         }
     }
 
@@ -302,22 +301,9 @@ public:
         return page->HasNoSpace() || (allocXdata && !((Segment*)(page->segment))->CanAllocSecondary());
     }
 
-    BOOL ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, __out DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag);
-    BOOL ProtectAllocationPage(__in Allocation* allocation, __in char* addressInPage, DWORD dwVirtualProtectFlags, __out DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag);
-
-    DWORD EnsureAllocationProtection(Allocation* allocation, bool readWrite)
-    {
-        if (readWrite)
-        {
-            // this only call from InterpreterThunkEmitter
-            return EnsureAllocationReadWrite<true, PAGE_READWRITE>(allocation);
-        }
-        else
-        {
-            return EnsureAllocationReadWrite<false, PAGE_READWRITE>(allocation);
-        }
-
-    }
+    BOOL ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag, __in_opt char* addressInPage = nullptr);
+    BOOL ProtectAllocationWithExecuteReadWrite(Allocation *allocation, char* addressInPage = nullptr);
+    BOOL ProtectAllocationWithExecuteReadOnly(Allocation *allocation, char* addressInPage = nullptr);
 
     ~Heap();
 
@@ -367,91 +353,40 @@ private:
         FreeLargeObject<true>(nullptr);
     }
 
-    DWORD EnsurePageWriteable(Page* page)
-    {
-        return EnsurePageReadWrite<true, PAGE_READWRITE>(page);
-    }
+    //Called during Free
+    DWORD EnsurePageWriteable(Page* page);
 
     // this get called when freeing the whole page
-    DWORD EnsureAllocationWriteable(Allocation* allocation)
-    {
-        return EnsureAllocationReadWrite<true, PAGE_READWRITE>(allocation);
-    }
+    DWORD EnsureAllocationWriteable(Allocation* allocation);
 
     // this get called when only freeing a part in the page
-    DWORD EnsureAllocationExecuteWriteable(Allocation* allocation)
-    {
-        return EnsureAllocationReadWrite<true, PAGE_EXECUTE_READWRITE>(allocation);
-    }
+    DWORD EnsureAllocationExecuteWriteable(Allocation* allocation);
 
-    template<bool readWrite, DWORD readWriteFlags>
+    template<DWORD readWriteFlags>
     DWORD EnsurePageReadWrite(Page* page)
     {
-        if (readWrite)
-        {
-            if (!page->isReadWrite && !page->isDecommitted)
-            {
-                DWORD dwOldProtectFlags = 0;
-                BOOL result = this->ProtectPages(page->address, 1, page->segment, readWriteFlags, &dwOldProtectFlags, PAGE_EXECUTE);
-                page->isReadWrite = true;
-                Assert(result && (dwOldProtectFlags & readWriteFlags) == 0);
-                return dwOldProtectFlags;
-            }
-        }
-        else
-        {
-            if (page->isReadWrite && !page->isDecommitted)
-            {
-                DWORD dwOldProtectFlags = 0;
-                BOOL result = this->ProtectPages(page->address, 1, page->segment, PAGE_EXECUTE, &dwOldProtectFlags, readWriteFlags);
-                page->isReadWrite = false;
-                Assert(result && (dwOldProtectFlags & PAGE_EXECUTE) == 0);
-                return dwOldProtectFlags;
-            }
-        }
+        Assert(!page->isDecommitted);
 
-        return 0;
+        BOOL result = this->ProtectPages(page->address, 1, page->segment, readWriteFlags, PAGE_EXECUTE);
+        Assert(result && (PAGE_EXECUTE & readWriteFlags) == 0);
+        return PAGE_EXECUTE;
     }
 
-    template<bool readWrite, DWORD readWriteFlags>
+    template<DWORD readWriteFlags>
     DWORD EnsureAllocationReadWrite(Allocation* allocation)
     {
         if (allocation->IsLargeAllocation())
         {
-            if (readWrite)
-            {
-                if (!allocation->largeObjectAllocation.isReadWrite)
-                {
-                    DWORD dwOldProtectFlags;
-                    BOOL result = this->ProtectAllocation(allocation, readWriteFlags, &dwOldProtectFlags, PAGE_EXECUTE);
-                    Assert(result && (dwOldProtectFlags & readWriteFlags) == 0);
-                    return dwOldProtectFlags;
-                }
-            }
-            else
-            {
-                if (allocation->largeObjectAllocation.isReadWrite)
-                {
-                    DWORD dwOldProtectFlags;
-                    this->ProtectAllocation(allocation, PAGE_EXECUTE, &dwOldProtectFlags, readWriteFlags);
-                    Assert((dwOldProtectFlags & PAGE_EXECUTE) == 0);
-                    return dwOldProtectFlags;
-                }
-            }
-
+            BOOL result = this->ProtectAllocation(allocation, readWriteFlags, PAGE_EXECUTE);
+            Assert(result && (PAGE_EXECUTE & readWriteFlags) == 0);
+            return PAGE_EXECUTE;
         }
         else
         {
-            return EnsurePageReadWrite<readWrite, readWriteFlags>(allocation->page);
+            return EnsurePageReadWrite<readWriteFlags>(allocation->page);
         }
-
-        // 0 is safe to return as its not a memory protection constant
-        // so it indicates that nothing was changed
-        return 0;
     }
 
-    BOOL ProtectAllocationInternal(__in Allocation* allocation, __in_opt char* addressInPage, DWORD dwVirtualProtectFlags, __out DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag);
-
     /**
      * Freeing Methods
      */

+ 14 - 6
lib/common/Memory/PageAllocator.cpp

@@ -2193,13 +2193,12 @@ void PageAllocatorBase<TVirtualAlloc>::ReleaseSegmentList(DListBase<T> * segment
 
 template<typename T>
 BOOL
-HeapPageAllocator<T>::ProtectPages(__in char* address, size_t pageCount, __in void* segmentParam, DWORD dwVirtualProtectFlags, DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag)
+HeapPageAllocator<T>::ProtectPages(__in char* address, size_t pageCount, __in void* segmentParam, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag)
 {
     SegmentBase<T> * segment = (SegmentBase<T>*)segmentParam;
 #if DBG
     Assert(address >= segment->GetAddress());
     Assert(((uint)(((char *)address) - segment->GetAddress()) <= (segment->GetPageCount() - pageCount) * AutoSystemInfo::PageSize));
-    Assert(dwOldVirtualProtectFlags != NULL);
 
     if (IsPageSegment(segment))
     {
@@ -2231,17 +2230,26 @@ HeapPageAllocator<T>::ProtectPages(__in char* address, size_t pageCount, __in vo
     size_t bytes = VirtualQuery(address, &memBasicInfo, sizeof(memBasicInfo));
     if (bytes == 0
         || memBasicInfo.RegionSize < pageCount * AutoSystemInfo::PageSize
-        || desiredOldProtectFlag != memBasicInfo.Protect
-        )
+        || desiredOldProtectFlag != memBasicInfo.Protect)
     {
         CustomHeap_BadPageState_fatal_error((ULONG_PTR)this);
         return FALSE;
     }
-    *dwOldVirtualProtectFlags = memBasicInfo.Protect;
+
+    /*Verify if we always pass the PAGE_TARGETS_NO_UPDATE flag, if the protect flag is EXECUTE*/
+#if defined(_CONTROL_FLOW_GUARD)
+    if (AutoSystemInfo::Data.IsCFGEnabled() &&
+        (dwVirtualProtectFlags & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE)) &&
+        ((dwVirtualProtectFlags & PAGE_TARGETS_NO_UPDATE) == 0))
+    {
+        CustomHeap_BadPageState_fatal_error((ULONG_PTR)this);
+        return FALSE;
+    }
+#endif
 
     DWORD oldProtect; // this is only for first page
     BOOL retVal = ::VirtualProtect(address, pageCount * AutoSystemInfo::PageSize, dwVirtualProtectFlags, &oldProtect);
-    Assert(oldProtect == *dwOldVirtualProtectFlags);
+    Assert(oldProtect == desiredOldProtectFlag);
 
     return retVal;
 }

+ 2 - 2
lib/common/Memory/PageAllocator.h

@@ -368,7 +368,7 @@ public:
 
     static size_t GetAndResetMaxUsedBytes();
 
-    virtual BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag)
+    virtual BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag)
     {
         Assert(false);
         return false;
@@ -748,7 +748,7 @@ class HeapPageAllocator : public PageAllocatorBase<TVirtualAlloc>
 public:
     HeapPageAllocator(AllocationPolicyManager * policyManager, bool allocXdata, bool excludeGuardPages);
 
-    BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD* dwOldVirtualProtectFlags, DWORD desiredOldProtectFlag);
+    BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag);
     bool AllocSecondary(void* segment, ULONG_PTR functionStart, DWORD functionSize, ushort pdataCount, ushort xdataSize, SecondaryAllocation* allocation);
     void ReleaseSecondary(const SecondaryAllocation& allocation, void* segment);
     void TrackDecommitedPages(void * address, uint pageCount, __in void* segment);

+ 24 - 3
lib/common/Memory/VirtualAllocWrapper.cpp

@@ -19,7 +19,16 @@ LPVOID VirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocat
     {
         //We do the allocation in two steps - CFG Bitmap in kernel will be created only on allocation with EXECUTE flag.
         //We again call VirtualProtect to set to the requested protectFlags.
-        address = VirtualAlloc(lpAddress, dwSize, allocationType, PAGE_EXECUTE_READWRITE | PAGE_TARGETS_INVALID);
+        DWORD allocProtectFlags = 0;
+        if (AutoSystemInfo::Data.IsCFGEnabled())
+        {
+            allocProtectFlags = PAGE_EXECUTE_RW_TARGETS_INVALID;
+        }
+        else
+        {
+            allocProtectFlags = PAGE_EXECUTE_READWRITE;
+        }
+        address = VirtualAlloc(lpAddress, dwSize, allocationType, allocProtectFlags);
         VirtualProtect(address, dwSize, protectFlags, &oldProtectFlags);
     }
     else
@@ -239,8 +248,20 @@ LPVOID PreReservedVirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DW
 #if defined(_CONTROL_FLOW_GUARD)
         if (AutoSystemInfo::Data.IsCFGEnabled())
         {
-            DWORD oldProtect;
-            commitedAddress = (char *) VirtualAlloc(addressToCommit, dwSize, MEM_COMMIT, PAGE_EXECUTE_READWRITE | PAGE_TARGETS_INVALID);
+            DWORD oldProtect = 0;
+            DWORD allocProtectFlags = 0;
+
+            if (AutoSystemInfo::Data.IsCFGEnabled())
+            {
+                allocProtectFlags = PAGE_EXECUTE_RW_TARGETS_INVALID;
+            }
+            else
+            {
+                allocProtectFlags = PAGE_EXECUTE_READWRITE;
+            }
+
+            commitedAddress = (char *)VirtualAlloc(addressToCommit, dwSize, MEM_COMMIT, allocProtectFlags);
+
             AssertMsg(commitedAddress != nullptr, "If no space to allocate, then how did we fetch this address from the tracking bit vector?");
             VirtualProtect(commitedAddress, dwSize, protectFlags, &oldProtect);
             AssertMsg(oldProtect == (PAGE_EXECUTE_READWRITE), "CFG Bitmap gets allocated and bits will be set to invalid only upon passing these flags.");

+ 2 - 0
test/AsmJs/asmjscctx.baseline

@@ -0,0 +1,2 @@
+Successfully compiled asm.js code
+1

+ 7 - 0
test/AsmJs/asmjscctx.js

@@ -0,0 +1,7 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+var sc6 = WScript.LoadScriptFile('cctxmodule.js', 'samethread');
+print(sc6.asm()());

+ 10 - 0
test/AsmJs/cctxmodule.js

@@ -0,0 +1,10 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+function asm(){
+    "use asm";
+    function f(){ return 1; }
+    return f;
+}

+ 3 - 0
test/AsmJs/constloads.baseline

@@ -0,0 +1,3 @@
+Successfully compiled asm.js code
+Successfully compiled asm.js code
+Passed

+ 37 - 0
test/AsmJs/constloads.js

@@ -0,0 +1,37 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+var asmHeap = new ArrayBuffer(33554432);
+var m = (function(stdlib, foreign, heap) { 'use asm';
+  var Uint8ArrayView = new stdlib.Uint8Array(heap);
+  function f()
+  {
+    var i2 = 0;
+    (Uint8ArrayView[33554431]) = i2;
+    return 0;
+  }
+  return f; })(this, {}, asmHeap)
+
+m();
+m();
+
+var asmHeap = new ArrayBuffer(65536);
+var m = (function(stdlib, foreign, heap) { 'use asm';
+  var Uint8ArrayView = new stdlib.Uint8Array(heap);
+  function f(d0, i1)
+  {
+    d0 = +d0;
+    i1 = i1|0;
+    var i2 = 0;
+    i2 = 524288;
+    (Uint8ArrayView[i2 >> 0]) = i2;
+    return ;
+  }
+  return f; })(this, {}, asmHeap)
+
+m();
+m();
+
+WScript.Echo("Passed");

+ 5 - 0
test/AsmJs/nanbug.baseline

@@ -0,0 +1,5 @@
+Successfully compiled asm.js code
+NaN
+0
+NaN
+0

+ 33 - 0
test/AsmJs/nanbug.js

@@ -0,0 +1,33 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+function AsmModule(stdlib,foreign,buffer) {
+    "use asm";
+    var HEAP32 =new stdlib.Float32Array(buffer);
+    var fround = stdlib.Math.fround;
+    var c = foreign.fun2;
+    function f() {
+        var a = fround(0);
+        var b = 0.;
+        a = fround(HEAP32[0]);
+        b = +a;
+        c(b);
+        return (~~b)|0;
+    }
+    
+    return {
+        f : f
+    };
+}
+
+var global = {Math:Math,Int8Array:Int8Array,Int16Array:Int16Array,Int32Array:Int32Array,Uint8Array:Uint8Array,Uint16Array:Uint16Array,Uint32Array:Uint32Array,Float32Array:Float32Array,Float64Array:Float64Array,Infinity:Infinity, NaN:NaN}
+var env = {fun1:function(x1,x2,x3,x4,x5,x6,x7,x8){print(x1,x2,x3,x4,x5,x6,x7,x8);}, fun2:function(x){print(x);},x:155,i2:658,d1:68.25,d2:3.14156,f1:48.1523,f2:14896.2514}
+var buffer = new ArrayBuffer(1<<20);
+var view = new Int32Array(buffer);
+view[0] = 0xffffffff
+var asmModule = new AsmModule(global,env,buffer);
+
+print(asmModule.f(Number.MAX_VALUE));
+print(asmModule.f(Number.MAX_VALUE));

+ 28 - 0
test/AsmJs/rlexe.xml

@@ -493,6 +493,20 @@
       <compile-flags>-testtrace:asmjs -simdjs -maic:0</compile-flags>
     </default>
   </test>
+  <test>
+    <default>
+      <files>nanbug.js</files>
+      <baseline>nanbug.baseline</baseline>
+      <compile-flags>-testtrace:asmjs -simdjs -maic:0</compile-flags>
+    </default>
+  </test>
+  <test>
+    <default>
+      <files>nanbug.js</files>
+      <baseline>nanbug.baseline</baseline>
+      <compile-flags>-testtrace:asmjs -simdjs</compile-flags>
+    </default>
+  </test>
   <test>
     <default>
       <files>switchbug.js</files>
@@ -757,4 +771,18 @@
       <compile-flags>-forcedeferparse -testtrace:asmjs -simdjs</compile-flags>
     </default>
   </test>
+  <test>
+    <default>
+      <files>asmjscctx.js</files>
+      <baseline>asmjscctx.baseline</baseline>
+      <compile-flags>-testtrace:asmjs</compile-flags>
+    </default>
+  </test>
+  <test>
+    <default>
+      <files>constloads.js</files>
+      <baseline>constloads.baseline</baseline>
+      <compile-flags>-testtrace:asmjs -maic:1</compile-flags>
+    </default>
+  </test>
 </regress-exe>

+ 15 - 0
test/typedarray/memset_neg.js

@@ -0,0 +1,15 @@
+//-------------------------------------------------------------------------------------------------------
+// Copyright (C) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
+//-------------------------------------------------------------------------------------------------------
+
+function foo() {
+  var a = new Int8Array(500);
+  for(var i = 500; i < 1000; ++i) {
+    a[i] = 0;
+  }
+}
+
+foo();
+foo();
+WScript.Echo("PASSED");

+ 6 - 0
test/typedarray/rlexe.xml

@@ -304,6 +304,12 @@ Below test fails with difference in space. Investigate the cause and re-enable t
       <compile-flags>-mic:1 -off:simplejit -off:JITLoopBody -mmoc:0</compile-flags>
     </default>
   </test>
+  <test>
+    <default>
+      <files>memset_neg.js</files>
+      <compile-flags>-mic:1 -off:simplejit -bgjit- -mmoc:0</compile-flags>
+    </default>
+  </test>
   <test>
     <default>
       <files>memcopy.js</files>