Răsfoiți Sursa

Merge remaining code changes from internal RS1 branch

Curtis Man 9 ani în urmă
părinte
comite
898582f13b
61 fișiere modificate, cu 1131 adăugiri și 321 ștergeri
  1. 1 0
      Build/Common.Build.props
  2. 3 3
      lib/Backend/EmitBuffer.h
  3. 1 1
      lib/Backend/GlobHashTable.h
  4. 2 3
      lib/Backend/Lower.cpp
  5. 2 2
      lib/Backend/NativeCodeData.h
  6. 14 1
      lib/Backend/amd64/LowererMDArch.cpp
  7. 4 0
      lib/Backend/arm/Thunks.asm
  8. 2 2
      lib/Common/Core/AllocSizeMath.h
  9. 3 3
      lib/Common/DataStructures/BaseDictionary.h
  10. 4 4
      lib/Common/DataStructures/FixedBitVector.h
  11. 2 2
      lib/Common/DataStructures/HashTable.h
  12. 4 4
      lib/Common/DataStructures/InternalString.h
  13. 5 5
      lib/Common/DataStructures/List.h
  14. 1 1
      lib/Common/DataStructures/MruDictionary.h
  15. 1 1
      lib/Common/Memory/AllocationPolicyManager.h
  16. 9 9
      lib/Common/Memory/Allocator.h
  17. 27 27
      lib/Common/Memory/ArenaAllocator.h
  18. 4 2
      lib/Common/Memory/CustomHeap.cpp
  19. 28 31
      lib/Common/Memory/CustomHeap.h
  20. 26 23
      lib/Common/Memory/HeapAllocator.h
  21. 2 2
      lib/Common/Memory/HeapAllocatorOperators.cpp
  22. 9 2
      lib/Common/Memory/HeapBlockMap.inl
  23. 5 5
      lib/Common/Memory/HeapBucket.h
  24. 6 0
      lib/Common/Memory/HeapConstants.h
  25. 2 0
      lib/Common/Memory/LargeHeapBlock.cpp
  26. 8 8
      lib/Common/Memory/LargeHeapBlock.h
  27. 6 2
      lib/Common/Memory/LargeHeapBucket.cpp
  28. 8 8
      lib/Common/Memory/LargeHeapBucket.h
  29. 22 23
      lib/Common/Memory/PageAllocator.h
  30. 23 22
      lib/Common/Memory/Recycler.h
  31. 2 5
      lib/Common/Memory/Recycler.inl
  32. 2 5
      lib/Common/Memory/RecyclerFastAllocator.h
  33. 2 2
      lib/Common/Memory/RecyclerWriteBarrierManager.h
  34. 3 3
      lib/Common/Memory/SmallHeapBlockAllocator.h
  35. 4 3
      lib/Common/Memory/VirtualAllocWrapper.cpp
  36. 4 4
      lib/Common/Memory/VirtualAllocWrapper.h
  37. 2 2
      lib/Parser/Alloc.h
  38. 25 7
      lib/Parser/Parse.cpp
  39. 1 0
      lib/Parser/perrors.h
  40. 7 2
      lib/Runtime/Base/ScriptContext.cpp
  41. 119 44
      lib/Runtime/ByteCode/ByteCodeEmitter.cpp
  42. 10 1
      lib/Runtime/ByteCode/ByteCodeGenerator.cpp
  43. 1 0
      lib/Runtime/ByteCode/ByteCodeGenerator.h
  44. 79 8
      lib/Runtime/ByteCode/FuncInfo.cpp
  45. 26 4
      lib/Runtime/ByteCode/FuncInfo.h
  46. 7 0
      lib/Runtime/ByteCode/Scope.cpp
  47. 2 1
      lib/Runtime/ByteCode/ScopeInfo.cpp
  48. 1 1
      lib/Runtime/ByteCode/Symbol.cpp
  49. 1 1
      lib/Runtime/Language/DynamicProfileStorage.h
  50. 6 1
      lib/Runtime/Language/InlineCache.cpp
  51. 1 1
      lib/Runtime/Language/InlineCache.h
  52. 4 4
      lib/Runtime/Language/InterpreterStackFrame.cpp
  53. 2 2
      lib/Runtime/Language/JavascriptOperators.h
  54. 4 0
      lib/Runtime/Language/arm/arm_Thunks.asm
  55. 16 16
      lib/Runtime/Library/ArrayBuffer.h
  56. 46 4
      lib/Runtime/Library/JavascriptArray.cpp
  57. 1 1
      lib/Runtime/Library/JavascriptProxy.cpp
  58. 1 1
      test/AsmJs/rlexe.xml
  59. 1 1
      test/es6/HTMLComments.js
  60. 35 0
      test/es6/default-splitscope-undodeferparse.js
  61. 482 1
      test/es6/default-splitscope.js

+ 1 - 0
Build/Common.Build.props

@@ -17,6 +17,7 @@
       <RuntimeTypeInfo>false</RuntimeTypeInfo>
       <!-- /Zi -->
       <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <DebugInformationFormat Condition="'$(MultiProcessorCompilation)' == 'true'">OldStyle</DebugInformationFormat>
       <!-- /EHsc- -->
       <ExceptionHandling>SyncCThrow</ExceptionHandling>
       <!-- /Gz -->

+ 3 - 3
lib/Backend/EmitBuffer.h

@@ -37,7 +37,7 @@ public:
     void Decommit();
     void Clear();
 
-    EmitBufferAllocation* AllocateBuffer(__in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, ushort pdataCount = 0, ushort xdataSize = 0, bool canAllocInPreReservedHeapPageSegment = false, bool isAnyJittedCode = false);
+    EmitBufferAllocation* AllocateBuffer(__declspec(guard(overflow)) __in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer, ushort pdataCount = 0, ushort xdataSize = 0, bool canAllocInPreReservedHeapPageSegment = false, bool isAnyJittedCode = false);
     bool CommitBuffer(EmitBufferAllocation* allocation, __out_bcount(bytes) BYTE* destBuffer, __in size_t bytes, __in_bcount(bytes) const BYTE* sourceBuffer, __in DWORD alignPad = 0);
     bool ProtectBufferWithExecuteReadWriteForInterpreter(EmitBufferAllocation* allocation);
     bool CommitReadWriteBufferForInterpreter(EmitBufferAllocation* allocation, _In_reads_bytes_(bufferSize) BYTE* pBuffer, _In_ size_t bufferSize);
@@ -72,8 +72,8 @@ private:
     ArenaAllocator * allocator;
     Js::ScriptContext * scriptContext;
 
-    EmitBufferAllocation * NewAllocation(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode);
-    EmitBufferAllocation* GetBuffer(EmitBufferAllocation *allocation, __in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer);
+    EmitBufferAllocation * NewAllocation(__declspec(guard(overflow)) size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode);
+    EmitBufferAllocation* GetBuffer(EmitBufferAllocation *allocation, __declspec(guard(overflow)) __in size_t bytes, __deref_bcount(bytes) BYTE** ppBuffer);
 
     bool FinalizeAllocation(EmitBufferAllocation *allocation);
     CustomHeap::Heap allocationHeap;

+ 1 - 1
lib/Backend/GlobHashTable.h

@@ -55,7 +55,7 @@ public:
     SListBase<HashBucket> * table;
 
 public:
-    static ValueHashTable * New(JitArenaAllocator *allocator, uint tableSize)
+    static ValueHashTable * New(JitArenaAllocator *allocator, __declspec(guard(overflow)) uint tableSize)
     {
         return AllocatorNewPlus(JitArenaAllocator, allocator, (tableSize*sizeof(SListBase<HashBucket>)), ValueHashTable, allocator, tableSize);
     }

+ 2 - 3
lib/Backend/Lower.cpp

@@ -13,8 +13,6 @@
 
 #include "ExternalLowerer.h"
 
-#include "ExternalLowerer.h"
-
 ///----------------------------------------------------------------------------
 ///
 /// Lowerer::Lower
@@ -8350,6 +8348,7 @@ Lowerer::LowerLdArrViewElem(IR::Instr * instr)
     IR::Opnd * src2 = instr->GetSrc2();
 
     IR::Instr * done;
+
     if (indexOpnd || m_func->GetJnFunction()->GetAsmJsFunctionInfoWithLock()->AccessNeedsBoundCheck((uint32)src1->AsIndirOpnd()->GetOffset()))
     {
         // CMP indexOpnd, src2(arrSize)
@@ -20182,7 +20181,7 @@ Lowerer::GenerateLdSuper(IR::Instr* instrInsert)
     Assert(dstOpnd->IsRegOpnd());
     LowererMD::CreateAssign(dstOpnd, opndUndefAddress, instrInsert);
 
-    IR::Opnd * functionObjOpnd;
+    IR::Opnd * functionObjOpnd = nullptr;
     m_lowererMD.LoadFunctionObjectOpnd(instrInsert, functionObjOpnd);
     LowererMD::CreateAssign(instanceRegOpnd, functionObjOpnd, instrInsert);
 

+ 2 - 2
lib/Backend/NativeCodeData.h

@@ -36,8 +36,8 @@ public:
         Allocator();
         ~Allocator();
 
-        char * Alloc(size_t requestedBytes);
-        char * AllocZero(size_t requestedBytes);
+        char * Alloc(__declspec(guard(overflow)) size_t requestedBytes);
+        char * AllocZero(__declspec(guard(overflow)) size_t requestedBytes);
         NativeCodeData * Finalize();
         void Free(void * buffer, size_t byteSize);
 

+ 14 - 1
lib/Backend/amd64/LowererMDArch.cpp

@@ -1204,7 +1204,20 @@ LowererMDArch::LoadDynamicArgumentUsingLength(IR::Instr *instr)
 IR::Instr *
 LowererMDArch::LoadDoubleHelperArgument(IR::Instr * instrInsert, IR::Opnd * opndArg)
 {
-    Assert(opndArg->IsFloat64());
+    IR::Opnd * float64Opnd;
+    if (opndArg->GetType() == TyFloat32)
+    {
+        float64Opnd = IR::RegOpnd::New(TyFloat64, m_func);
+        IR::Instr * instr = IR::Instr::New(Js::OpCode::CVTSS2SD, float64Opnd, opndArg, this->m_func);
+        instrInsert->InsertBefore(instr);
+    }
+    else
+    {
+        float64Opnd = opndArg;
+    }
+
+    Assert(opndArg->IsFloat());
+
     return LoadHelperArgument(instrInsert, opndArg);
 }
 

+ 4 - 0
lib/Backend/arm/Thunks.asm

@@ -14,6 +14,10 @@
     ;Js::JavascriptMethod NativeCodeGenerator::CheckCodeGen(Js::JavascriptFunction * function)
     IMPORT  |?CheckCodeGen@NativeCodeGenerator@@SAP6APAXPAVRecyclableObject@Js@@UCallInfo@3@ZZPAVScriptFunction@3@@Z|
 
+#if defined(_CONTROL_FLOW_GUARD)
+    IMPORT __guard_check_icall_fptr
+#endif
+
     TEXTAREA
 
 ;;============================================================================================================

+ 2 - 2
lib/Common/Core/AllocSizeMath.h

@@ -10,7 +10,7 @@ public:
     // Works for both 32bit and 64bit size_t arithmetic. It's also pretty
     // optimal in the cases where either left or right or both are small, compile-
     // time constants.
-    static size_t Add(size_t left, size_t right)
+    static size_t Add(__declspec(guard(overflow)) size_t left, __declspec(guard(overflow)) size_t right)
     {
         size_t allocSize = left + right;
         if (allocSize < left)
@@ -28,7 +28,7 @@ public:
     }
 
     // Optimized for right being a constant power of 2...
-    static size_t Mul(size_t left, size_t right)
+    static size_t Mul(__declspec(guard(overflow)) size_t left, __declspec(guard(overflow)) size_t right)
     {
         size_t allocSize = left * right;
         if (left != (allocSize / right))

+ 3 - 3
lib/Common/DataStructures/BaseDictionary.h

@@ -1043,7 +1043,7 @@ namespace JsUtil
             entries = newEntries;
         }
 
-        __ecount(bucketCount) int *AllocateBuckets(const uint bucketCount)
+        __ecount(bucketCount) int *AllocateBuckets(__declspec(guard(overflow)) const uint bucketCount)
         {
             return
                 AllocateArray<AllocatorType, int, false>(
@@ -1052,7 +1052,7 @@ namespace JsUtil
                     bucketCount);
         }
 
-        __ecount(size) EntryType * AllocateEntries(int size, const bool zeroAllocate = true)
+        __ecount(size) EntryType * AllocateEntries(__declspec(guard(overflow)) int size, const bool zeroAllocate = true)
         {
             // Note that the choice of leaf/non-leaf node is decided for the EntryType on the basis of TValue. By default, if
             // TValue is a pointer, a non-leaf allocation is done. This behavior can be overridden by specializing
@@ -1080,7 +1080,7 @@ namespace JsUtil
             AllocatorFree(alloc, EntryAllocatorFuncType::GetFreeFunc(), entries, size * sizeof(EntryType));
         }
 
-        void Allocate(__deref_out_ecount(bucketCount) int** ppBuckets, __deref_out_ecount(size) EntryType** ppEntries, uint bucketCount, int size)
+        void Allocate(__deref_out_ecount(bucketCount) int** ppBuckets, __deref_out_ecount(size) EntryType** ppEntries, __declspec(guard(overflow)) uint bucketCount, __declspec(guard(overflow)) int size)
         {
             int *const buckets = AllocateBuckets(bucketCount);
             Assert(buckets); // no-throw allocators are currently not supported

+ 4 - 4
lib/Common/DataStructures/FixedBitVector.h

@@ -35,10 +35,10 @@ public:
     static  BVFixed *       New(TAllocator* alloc, BVFixed * initBv);
 
     template <typename TAllocator>
-    static  BVFixed *       New(BVIndex length, TAllocator* alloc, bool initialSet = false);
+    static  BVFixed *       New(__declspec(guard(overflow)) BVIndex length, TAllocator* alloc, bool initialSet = false);
 
     template <typename TAllocator>
-    static  BVFixed *       NewNoThrow(BVIndex length, TAllocator* alloc, bool initialSet = false);
+    static  BVFixed *       NewNoThrow(__declspec(guard(overflow)) BVIndex length, TAllocator* alloc, bool initialSet = false);
 
     template <typename TAllocator>
     void                    Delete(TAllocator * alloc);
@@ -152,14 +152,14 @@ BVFixed * BVFixed::New(TAllocator * alloc, BVFixed * initBv)
 }
 
 template <typename TAllocator>
-BVFixed * BVFixed::New(BVIndex length, TAllocator * alloc, bool initialSet)
+BVFixed * BVFixed::New(__declspec(guard(overflow)) BVIndex length, TAllocator * alloc, bool initialSet)
 {
     BVFixed *result = AllocatorNewPlus(TAllocator, alloc, sizeof(BVUnit) * BVFixed::WordCount(length), BVFixed, length, initialSet);
     return result;
 }
 
 template <typename TAllocator>
-BVFixed * BVFixed::NewNoThrow(BVIndex length, TAllocator * alloc, bool initialSet)
+BVFixed * BVFixed::NewNoThrow(__declspec(guard(overflow)) BVIndex length, TAllocator * alloc, bool initialSet)
 {
     BVFixed *result = AllocatorNewNoThrowPlus(TAllocator, alloc, sizeof(BVUnit) * BVFixed::WordCount(length), BVFixed, length, initialSet);
     return result;

+ 2 - 2
lib/Common/DataStructures/HashTable.h

@@ -33,7 +33,7 @@ public:
     SListBase<Bucket<T>> *  table;
 
 public:
-    static HashTable<T, TAllocator> * New(TAllocator *allocator, uint tableSize)
+    static HashTable<T, TAllocator> * New(TAllocator *allocator, __declspec(guard(overflow)) uint tableSize)
     {
         return AllocatorNewPlus(TAllocator, allocator, (tableSize*sizeof(SListBase<Bucket<T>>)), HashTable, allocator, tableSize);
     }
@@ -382,7 +382,7 @@ public:
 #endif
 
 protected:
-    HashTable(TAllocator * allocator, uint tableSize) : alloc(allocator), tableSize(tableSize)
+    HashTable(TAllocator * allocator, __declspec(guard(overflow)) uint tableSize) : alloc(allocator), tableSize(tableSize)
     {
         Init();
 #if PROFILE_DICTIONARY

+ 4 - 4
lib/Common/DataStructures/InternalString.h

@@ -14,10 +14,10 @@ namespace Js
 
     public:
         InternalString() : m_charLength(0), m_content(NULL), m_offset(0) { };
-        InternalString(const char16* content, charcount_t charLength, unsigned char offset = 0);
-        static InternalString* New(ArenaAllocator* alloc, const char16* content, charcount_t length);
-        static InternalString* New(Recycler* recycler, const char16* content, charcount_t length);
-        static InternalString* NewNoCopy(ArenaAllocator* alloc, const char16* content, charcount_t length);
+        InternalString(const char16* content, __declspec(guard(overflow)) charcount_t charLength, unsigned char offset = 0);
+        static InternalString* New(ArenaAllocator* alloc, const char16* content, __declspec(guard(overflow)) charcount_t length);
+        static InternalString* New(Recycler* recycler, const char16* content, __declspec(guard(overflow)) charcount_t length);
+        static InternalString* NewNoCopy(ArenaAllocator* alloc, const char16* content, __declspec(guard(overflow)) charcount_t length);
 
         inline charcount_t GetLength() const
         {

+ 5 - 5
lib/Common/DataStructures/List.h

@@ -64,7 +64,7 @@ namespace JsUtil
         }
 
         template<class TAllocator>
-        static ReadOnlyList * New(TAllocator* alloc, __in_ecount(count) T* buffer, int count)
+        static ReadOnlyList * New(TAllocator* alloc, __in_ecount(count) T* buffer, __declspec(guard(overflow)) int count)
         {
             return AllocatorNew(TAllocator, alloc, ReadOnlyList, buffer, count, alloc);
         }
@@ -213,9 +213,9 @@ namespace JsUtil
         int increment;
         TRemovePolicyType removePolicy;
 
-        template <bool isLeaf> T * AllocArray(int size);
-        template <> T * AllocArray<true>(int size) { return AllocatorNewArrayLeaf(TAllocator, alloc, T, size); }
-        template <> T * AllocArray<false>(int size) { return AllocatorNewArray(TAllocator, alloc, T, size); }
+        template <bool isLeaf> T * AllocArray(__declspec(guard(overflow)) int size);
+        template <> T * AllocArray<true>(__declspec(guard(overflow)) int size) { return AllocatorNewArrayLeaf(TAllocator, alloc, T, size); }
+        template <> T * AllocArray<false>(__declspec(guard(overflow)) int size) { return AllocatorNewArray(TAllocator, alloc, T, size); }
 
         PREVENT_COPY(List); // Disable copy constructor and operator=
 
@@ -235,7 +235,7 @@ namespace JsUtil
             EnsureArray(0);
         }
 
-        void EnsureArray(int32 requiredCapacity)
+        void EnsureArray(__declspec(guard(overflow)) int32 requiredCapacity)
         {
             if (buffer == nullptr)
             {

+ 1 - 1
lib/Common/DataStructures/MruDictionary.h

@@ -111,7 +111,7 @@ namespace JsUtil
             Assert(mruListCapacity > 0);
         }
 
-        static MruDictionary *New(TAllocator *const allocator, const int mruListCapacity)
+        static MruDictionary *New(TAllocator *const allocator, __declspec(guard(overflow)) const int mruListCapacity)
         {
             return AllocatorNew(TAllocator, allocator, MruDictionary, allocator, mruListCapacity);
         }

+ 1 - 1
lib/Common/Memory/AllocationPolicyManager.h

@@ -63,7 +63,7 @@ public:
         memoryLimit = newLimit;
     }
 
-    bool RequestAlloc(size_t byteCount)
+    bool RequestAlloc(__declspec(guard(overflow)) size_t byteCount)
     {
         if (supportConcurrency)
         {

+ 9 - 9
lib/Common/Memory/Allocator.h

@@ -259,7 +259,7 @@ void DeleteObject(typename AllocatorInfo<TAllocator, T>::AllocatorType * allocat
 #define ZERO_LENGTH_ARRAY (void *)sizeof(void *)
 template <typename TAllocator, typename T, bool nothrow>
 _When_(nothrow, _Ret_writes_to_maybenull_(count, 0)) _When_(!nothrow, _Ret_writes_to_(count, 0))
-__inline T * AllocateArray(TAllocator * allocator, char * (TAllocator::*AllocFunc)(size_t), size_t count)
+__inline T * AllocateArray(TAllocator * allocator, char * (TAllocator::*AllocFunc)(size_t), __declspec(guard(overflow)) size_t count)
 {
     if (count == 0 && TAllocator::FakeZeroLengthArray)
     {
@@ -338,7 +338,7 @@ void AssertValue(void * mem, T value, uint byteCount)
 _Ret_notnull_
 __inline void * __cdecl
 operator new(
-size_t byteSize,
+__declspec(guard(overflow)) size_t byteSize,
 _In_ void * previousAllocation) throw()
 {
     return previousAllocation;
@@ -361,7 +361,7 @@ void * previousAllocation               // Previously allocated memory
 //----------------------------------------
 template <typename TAllocator>
 _Ret_notnull_ void * __cdecl
-operator new(size_t byteSize, TAllocator * alloc, char * (TAllocator::*AllocFunc)(size_t))
+operator new(__declspec(guard(overflow)) size_t byteSize, TAllocator * alloc, char * (TAllocator::*AllocFunc)(size_t))
 {
     AssertCanHandleOutOfMemory();
     Assert(byteSize != 0);
@@ -372,7 +372,7 @@ operator new(size_t byteSize, TAllocator * alloc, char * (TAllocator::*AllocFunc
 
 template <typename TAllocator>
 _Ret_notnull_ __inline void * __cdecl
-operator new[](size_t byteSize, TAllocator * alloc, char * (TAllocator::*AllocFunc)(size_t))
+operator new[](__declspec(guard(overflow)) size_t byteSize, TAllocator * alloc, char * (TAllocator::*AllocFunc)(size_t))
 {
     AssertCanHandleOutOfMemory();
     Assert(byteSize != 0 || !TAllocator::FakeZeroLengthArray);
@@ -383,7 +383,7 @@ operator new[](size_t byteSize, TAllocator * alloc, char * (TAllocator::*AllocFu
 
 template <typename TAllocator>
 _Ret_notnull_ __inline void * __cdecl
-operator new(size_t byteSize, TAllocator * alloc, char * (TAllocator::*AllocFunc)(size_t), size_t plusSize)
+operator new(__declspec(guard(overflow)) size_t byteSize, TAllocator * alloc, char * (TAllocator::*AllocFunc)(size_t), __declspec(guard(overflow)) size_t plusSize)
 {
     AssertCanHandleOutOfMemory();
     Assert(byteSize != 0);
@@ -400,7 +400,7 @@ operator new(size_t byteSize, TAllocator * alloc, char * (TAllocator::*AllocFunc
 //----------------------------------------
 template <typename TAllocator>
 _Ret_maybenull_ __inline void * __cdecl
-operator new(size_t byteSize, TAllocator * alloc, bool nothrow, char * (TAllocator::*AllocFunc)(size_t))
+operator new(__declspec(guard(overflow)) size_t byteSize, TAllocator * alloc, bool nothrow, char * (TAllocator::*AllocFunc)(size_t))
 {
     Assert(nothrow);
     Assert(byteSize != 0);
@@ -411,7 +411,7 @@ operator new(size_t byteSize, TAllocator * alloc, bool nothrow, char * (TAllocat
 
 template <typename TAllocator>
 _Ret_maybenull_ __inline void * __cdecl
-operator new[](size_t byteSize, TAllocator * alloc, bool nothrow, char * (TAllocator::*AllocFunc)(size_t))
+operator new[](__declspec(guard(overflow)) size_t byteSize, TAllocator * alloc, bool nothrow, char * (TAllocator::*AllocFunc)(size_t))
 {
     Assert(nothrow);
     Assert(byteSize != 0 || !TAllocator::FakeZeroLengthArray);
@@ -422,7 +422,7 @@ operator new[](size_t byteSize, TAllocator * alloc, bool nothrow, char * (TAlloc
 
 template <typename TAllocator>
 _Ret_maybenull_ __inline void * __cdecl
-operator new(size_t byteSize, TAllocator * alloc, bool nothrow, char * (TAllocator::*AllocFunc)(size_t), size_t plusSize)
+operator new(__declspec(guard(overflow)) size_t byteSize, TAllocator * alloc, bool nothrow, char * (TAllocator::*AllocFunc)(size_t), __declspec(guard(overflow)) size_t plusSize)
 {
     Assert(nothrow);
     Assert(byteSize != 0);
@@ -436,7 +436,7 @@ operator new(size_t byteSize, TAllocator * alloc, bool nothrow, char * (TAllocat
 
 template <typename TAllocator>
 _Ret_maybenull_ __inline void * __cdecl
-operator new(size_t byteSize, TAllocator * alloc, bool nothrow, char * (TAllocator::*AllocFunc)(size_t), size_t plusSize, bool prefix)
+operator new(__declspec(guard(overflow)) size_t byteSize, TAllocator * alloc, bool nothrow, char * (TAllocator::*AllocFunc)(size_t), __declspec(guard(overflow)) size_t plusSize, bool prefix)
 {
     Assert(nothrow);
     Assert(prefix);

+ 27 - 27
lib/Common/Memory/ArenaAllocator.h

@@ -224,9 +224,9 @@ public:
 
     static size_t GetAlignedSize(size_t size) { return AllocSizeMath::Align(size, ArenaAllocatorBase::ObjectAlignment); }
 
-    char * AllocInternal(size_t requestedBytes);
+    char * AllocInternal(__declspec(guard(overflow)) size_t requestedBytes);
 
-    char* Realloc(void* buffer, size_t existingBytes, size_t requestedBytes);
+    char* Realloc(void* buffer, __declspec(guard(overflow)) size_t existingBytes, __declspec(guard(overflow)) size_t requestedBytes);
     void Free(void * buffer, size_t byteSize);
 #ifdef TRACK_ALLOC
     // Doesn't support tracking information, dummy implementation
@@ -235,8 +235,8 @@ public:
 #endif
 
 protected:
-    char * RealAlloc(size_t nbytes);
-    __forceinline char * RealAllocInlined(size_t nbytes);
+    char * RealAlloc(__declspec(guard(overflow)) size_t nbytes);
+    __forceinline char * RealAllocInlined(__declspec(guard(overflow)) size_t nbytes);
 private:
 #ifdef PROFILE_MEM
     void LogBegin();
@@ -250,11 +250,11 @@ private:
     static size_t Size(BigBlock * blockList);
     void FullReset();
     void SetCacheBlock(BigBlock * cacheBlock);
-    template <bool DoRecoverMemory> char * AllocFromHeap(size_t nbytes);
+    template <bool DoRecoverMemory> char * AllocFromHeap(__declspec(guard(overflow)) size_t nbytes);
     void ReleaseMemory();
     void ReleasePageMemory();
     void ReleaseHeapMemory();
-    char * SnailAlloc(size_t nbytes);
+    char * SnailAlloc(__declspec(guard(overflow)) size_t nbytes);
     BigBlock * AddBigBlock(size_t pages);
 
 #ifdef ARENA_ALLOCATOR_FREE_LIST_SIZE
@@ -289,7 +289,7 @@ public:
     static const unsigned char DbgFreeMemFill = DbgMemFill;
 #endif
     static void * New(ArenaAllocatorBase<InPlaceFreeListPolicy> * allocator);
-    static void * Allocate(void * policy, size_t size);
+    static void * Allocate(void * policy, __declspec(guard(overflow)) size_t size);
     static void * Free(void * policy, void * object, size_t size);
     static void * Reset(void * policy);
     static void PrepareFreeObject(__out_bcount(size) void * object, _In_ size_t size)
@@ -341,7 +341,7 @@ public:
     static const char DbgFreeMemFill = 0x0;
 #endif
     static void * New(ArenaAllocatorBase<StandAloneFreeListPolicy> * allocator);
-    static void * Allocate(void * policy, size_t size);
+    static void * Allocate(void * policy, __declspec(guard(overflow)) size_t size);
     static void * Free(void * policy, void * object, size_t size);
     static void * Reset(void * policy);
     static void PrepareFreeObject(_Out_writes_bytes_all_(size) void * object, _In_ size_t size)
@@ -377,12 +377,12 @@ public:
     }
 
     __forceinline
-    char * Alloc(size_t requestedBytes)
+    char * Alloc(__declspec(guard(overflow)) size_t requestedBytes)
     {
         return AllocInternal(requestedBytes);
     }
 
-    char * AllocZero(size_t nbytes)
+    char * AllocZero(__declspec(guard(overflow)) size_t nbytes)
     {
         char * buffer = Alloc(nbytes);
         memset(buffer, 0, nbytes);
@@ -393,13 +393,13 @@ public:
         return buffer;
     }
 
-    char * AllocLeaf(size_t requestedBytes)
+    char * AllocLeaf(__declspec(guard(overflow)) size_t requestedBytes)
     {
         // Leaf allocation is not meaningful here, but needed by Allocator-templatized classes that may call one of the Leaf versions of AllocatorNew
         return Alloc(requestedBytes);
     }
 
-    char * NoThrowAlloc(size_t requestedBytes)
+    char * NoThrowAlloc(__declspec(guard(overflow)) size_t requestedBytes)
     {
         void (*tempOutOfMemoryFunc)() = outOfMemoryFunc;
         outOfMemoryFunc = nullptr;
@@ -408,7 +408,7 @@ public:
         return buffer;
     }
 
-    char * NoThrowAllocZero(size_t requestedBytes)
+    char * NoThrowAllocZero(__declspec(guard(overflow)) size_t requestedBytes)
     {
         char * buffer = NoThrowAlloc(requestedBytes);
         if (buffer != nullptr)
@@ -418,7 +418,7 @@ public:
         return buffer;
     }
 
-    char * NoThrowNoRecoveryAlloc(size_t requestedBytes)
+    char * NoThrowNoRecoveryAlloc(__declspec(guard(overflow)) size_t requestedBytes)
     {
         void (*tempRecoverMemoryFunc)() = recoverMemoryFunc;
         recoverMemoryFunc = nullptr;
@@ -427,7 +427,7 @@ public:
         return buffer;
     }
 
-    char * NoThrowNoRecoveryAllocZero(size_t requestedBytes)
+    char * NoThrowNoRecoveryAllocZero(__declspec(guard(overflow)) size_t requestedBytes)
     {
         char * buffer = NoThrowNoRecoveryAlloc(requestedBytes);
         if (buffer != nullptr)
@@ -453,7 +453,7 @@ public:
     {
     }
 
-    char * Alloc(size_t requestedBytes)
+    char * Alloc(__declspec(guard(overflow)) size_t requestedBytes)
     {
         // Fast path
         if (sizeof(BVSparseNode) == requestedBytes)
@@ -492,22 +492,22 @@ public:
         return ArenaAllocator::Free(buffer, byteSize);
     }
 
-    char * AllocZero(size_t nbytes)
+    char * AllocZero(__declspec(guard(overflow)) size_t nbytes)
     {
         return ArenaAllocator::AllocZero(nbytes);
     }
 
-    char * AllocLeaf(size_t requestedBytes)
+    char * AllocLeaf(__declspec(guard(overflow)) size_t requestedBytes)
     {
         return ArenaAllocator::AllocLeaf(requestedBytes);
     }
 
-    char * NoThrowAlloc(size_t requestedBytes)
+    char * NoThrowAlloc(__declspec(guard(overflow)) size_t requestedBytes)
     {
         return ArenaAllocator::NoThrowAlloc(requestedBytes);
     }
 
-    char * NoThrowAllocZero(size_t requestedBytes)
+    char * NoThrowAllocZero(__declspec(guard(overflow)) size_t requestedBytes)
     {
         return ArenaAllocator::NoThrowAllocZero(requestedBytes);
     }
@@ -593,7 +593,7 @@ public:
     static const unsigned char DbgFreeMemFill = DbgMemFill;
 #endif
     static void * New(ArenaAllocatorBase<InlineCacheAllocatorTraits> * allocator);
-    static void * Allocate(void * policy, size_t size);
+    static void * Allocate(void * policy, __declspec(guard(overflow)) size_t size);
     static void * Free(void * policy, void * object, size_t size);
     static void * Reset(void * policy);
     static void Release(void * policy);
@@ -649,12 +649,12 @@ public:
 #endif
     {}
 
-    char * Alloc(size_t requestedBytes)
+    char * Alloc(__declspec(guard(overflow)) size_t requestedBytes)
     {
         return AllocInternal(requestedBytes);
     }
 
-    char * AllocZero(size_t nbytes)
+    char * AllocZero(__declspec(guard(overflow)) size_t nbytes)
     {
         char * buffer = Alloc(nbytes);
         memset(buffer, 0, nbytes);
@@ -705,12 +705,12 @@ public:
     InlineCacheAllocator(__in LPCWSTR name, PageAllocator * pageAllocator, void(*outOfMemoryFunc)()) :
         ArenaAllocatorBase<InlineCacheAllocatorTraits>(name, pageAllocator, outOfMemoryFunc) {}
 
-    char * Alloc(size_t requestedBytes)
+    char * Alloc(__declspec(guard(overflow)) size_t requestedBytes)
     {
         return AllocInternal(requestedBytes);
     }
 
-    char * AllocZero(size_t nbytes)
+    char * AllocZero(__declspec(guard(overflow)) size_t nbytes)
     {
         char * buffer = Alloc(nbytes);
         memset(buffer, 0, nbytes);
@@ -770,12 +770,12 @@ public:
     IsInstInlineCacheAllocator(__in LPCWSTR name, PageAllocator * pageAllocator, void(*outOfMemoryFunc)()) :
         ArenaAllocatorBase<IsInstInlineCacheAllocatorTraits>(name, pageAllocator, outOfMemoryFunc) {}
 
-    char * Alloc(size_t requestedBytes)
+    char * Alloc(__declspec(guard(overflow)) size_t requestedBytes)
     {
         return AllocInternal(requestedBytes);
     }
 
-    char * AllocZero(size_t nbytes)
+    char * AllocZero(__declspec(guard(overflow)) size_t nbytes)
     {
         char * buffer = Alloc(nbytes);
         memset(buffer, 0, nbytes);

+ 4 - 2
lib/Common/Memory/CustomHeap.cpp

@@ -471,7 +471,7 @@ DWORD Heap::EnsureAllocationExecuteWriteable(Allocation* allocation)
     else
     {
         return EnsureAllocationReadWrite<PAGE_EXECUTE_READWRITE>(allocation);
-    }   
+    }
 }
 
 void Heap::FreeLargeObjects()
@@ -485,7 +485,7 @@ void Heap::FreeLargeObjects()
 #endif
         this->codePageAllocators->Release(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
 
-        largeObjectIter.RemoveCurrent(this->auxiliaryAllocator);
+            largeObjectIter.RemoveCurrent(this->auxiliaryAllocator);
     }
     NEXT_DLISTBASE_ENTRY_EDITING;
 }
@@ -826,7 +826,9 @@ bool Heap::FreeAllocation(Allocation* object)
         {
             protectFlags = PAGE_EXECUTE;
         }
+
         this->codePageAllocators->ProtectPages(page->address, 1, segment, protectFlags, PAGE_EXECUTE_READWRITE);
+        
         return true;
     }
 }

+ 28 - 31
lib/Common/Memory/CustomHeap.h

@@ -16,7 +16,6 @@ namespace Memory
     Output::Flush(); \
 }
 
-
 namespace CustomHeap
 {
 
@@ -33,13 +32,13 @@ enum BucketId
     NumBuckets
 };
 
-BucketId GetBucketForSize(size_t bytes);
+BucketId GetBucketForSize(__declspec(guard(overflow)) size_t bytes);
 
 struct Page
 {
-    bool         inFullList;
-    bool         isDecommitted;
-    void*        segment;
+    bool inFullList;
+    bool isDecommitted;
+    void* segment;
     BVUnit       freeBitVector;
     char*        address;
     BucketId     currentBucket;
@@ -59,13 +58,13 @@ struct Page
         return freeBitVector.FirstStringOfOnes(targetBucket + 1) != BVInvalidIndex;
     }
 
-    Page(__in char* address, void* segment, BucketId bucket) :
-        address(address),
-        segment(segment),
-        currentBucket(bucket),
-        freeBitVector(0xFFFFFFFF),
-        isDecommitted(false),
-        inFullList(false)
+    Page(__in char* address, void* segment, BucketId bucket):
+      address(address),
+      segment(segment),
+      currentBucket(bucket),
+      freeBitVector(0xFFFFFFFF),
+    isDecommitted(false),
+    inFullList(false)
     {
     }
 
@@ -190,8 +189,7 @@ public:
         }
         return address;
     }
-
-    char * AllocPages(uint pages, void ** pageSegment, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, bool * isAllJITCodeInPreReservedRegion)
+    char * AllocPages(__declspec(guard(overflow)) uint pages, void ** pageSegment, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, bool * isAllJITCodeInPreReservedRegion)
     {
         Assert(this->cs.IsLocked());
         char * address = nullptr;
@@ -403,7 +401,7 @@ class Heap
 public:
     Heap(ArenaAllocator * alloc, CodePageAllocators * codePageAllocators);
 
-    Allocation* Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
+    Allocation* Alloc(__declspec(guard(overflow)) size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
     void Free(__in Allocation* allocation);
     void DecommitAll();
     void FreeAll();
@@ -431,12 +429,12 @@ private:
     /**
      * Inline methods
      */
-    inline unsigned int GetChunkSizeForBytes(size_t bytes)
+    inline unsigned int GetChunkSizeForBytes(__declspec(guard(overflow)) size_t bytes)
     {
         return (bytes > Page::Alignment ? static_cast<unsigned int>(bytes) / Page::Alignment : 1);
     }
 
-    inline size_t GetNumPagesForSize(size_t bytes)
+    inline size_t GetNumPagesForSize(__declspec(guard(overflow)) size_t bytes)
     {
         size_t allocSize = AllocSizeMath::Add(bytes, AutoSystemInfo::PageSize);
 
@@ -448,7 +446,7 @@ private:
         return ((allocSize - 1)/ AutoSystemInfo::PageSize);
     }
 
-    inline BVIndex GetFreeIndexForPage(Page* page, size_t bytes)
+    inline BVIndex GetFreeIndexForPage(Page* page, __declspec(guard(overflow)) size_t bytes)
     {
         unsigned int length = GetChunkSizeForBytes(bytes);
         BVIndex index = page->freeBitVector.FirstStringOfOnes(length);
@@ -459,8 +457,8 @@ private:
     /**
      * Large object methods
      */
-    Allocation* AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
-    
+    Allocation* AllocLargeObject(__declspec(guard(overflow)) size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
+
     void FreeLargeObject(Allocation* header);
 
     void FreeLargeObjects();
@@ -478,13 +476,13 @@ private:
     DWORD EnsurePageReadWrite(Page* page)
     {
         Assert(!page->isDecommitted);
-
         BOOL result = this->codePageAllocators->ProtectPages(page->address, 1, page->segment, readWriteFlags, PAGE_EXECUTE);
         Assert(result && (PAGE_EXECUTE & readWriteFlags) == 0);
         return PAGE_EXECUTE;
     }
 
     template<DWORD readWriteFlags>
+
     DWORD EnsureAllocationReadWrite(Allocation* allocation)
     {
         if (allocation->IsLargeAllocation())
@@ -518,18 +516,17 @@ private:
      * Page methods
      */
     Page*       AddPageToBucket(Page* page, BucketId bucket, bool wasFull = false);
-    bool        AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushort xdataSize, Allocation ** allocation);
+    bool        AllocInPage(Page* page, __declspec(guard(overflow)) size_t bytes, ushort pdataCount, ushort xdataSize, Allocation ** allocation);
     Page*       AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
     Page*       FindPageToSplit(BucketId targetBucket, bool findPreReservedHeapPages = false);
+
     bool        UpdateFullPages();
     Page *      GetExistingPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment);
 
     BVIndex     GetIndexInPage(__in Page* page, __in char* address);
-
-
-    bool IsInHeap(DListBase<Page> const buckets[NumBuckets], __in void *address);
-    bool IsInHeap(DListBase<Page> const& buckets, __in void *address);
-    bool IsInHeap(DListBase<Allocation> const& allocations, __in void *address);
+    bool        IsInHeap(DListBase<Page> const buckets[NumBuckets], __in void *address);
+    bool        IsInHeap(DListBase<Page> const& buckets, __in void *address);
+    bool        IsInHeap(DListBase<Allocation> const& allocations, __in void *address);
 
     /**
      * Stats
@@ -544,8 +541,8 @@ private:
     /**
      * Allocator stuff
      */
-    CodePageAllocators *                              codePageAllocators;
-    ArenaAllocator*                                   auxiliaryAllocator;
+    CodePageAllocators *   codePageAllocators;
+    ArenaAllocator*        auxiliaryAllocator;
 
     /*
      * Various tracking lists
@@ -557,7 +554,7 @@ private:
     DListBase<Page>        decommittedPages;
     DListBase<Allocation>  decommittedLargeObjects;
 
-    uint lastSecondaryAllocStateChangedCount;
+    uint                   lastSecondaryAllocStateChangedCount;
 #if DBG
     bool inDtor;
 #endif
@@ -565,7 +562,7 @@ private:
 
 // Helpers
 unsigned int log2(size_t number);
-BucketId GetBucketForSize(size_t bytes);
+BucketId GetBucketForSize(__declspec(guard(overflow)) size_t bytes);
 void FillDebugBreak(__out_bcount_full(byteCount) BYTE* buffer, __in size_t byteCount);
 };
 }

+ 26 - 23
lib/Common/Memory/HeapAllocator.h

@@ -91,32 +91,32 @@ struct HeapAllocator
 {
     static const bool FakeZeroLengthArray = false;
 
-    char * Alloc(size_t byteSize)
+    char * Alloc(__declspec(guard(overflow)) size_t byteSize)
     {
         return AllocT<false>(byteSize);
     }
     template <bool noThrow>
-    char * AllocT(size_t byteSize);
+    char * AllocT(__declspec(guard(overflow)) size_t byteSize);
 
     // This exists solely to make the AllocateXXX macros more polymorphic
-    char * AllocLeaf(size_t byteSize)
+    char * AllocLeaf(__declspec(guard(overflow)) size_t byteSize)
     {
         return Alloc(byteSize);
     }
 
-    char * NoThrowAlloc(size_t byteSize)
+    char * NoThrowAlloc(__declspec(guard(overflow)) size_t byteSize)
     {
         return AllocT<true>(byteSize);
     }
 
-    char * AllocZero(size_t byteSize)
+    char * AllocZero(__declspec(guard(overflow)) size_t byteSize)
     {
         char * buffer = Alloc(byteSize);
         memset(buffer, 0, byteSize);
         return buffer;
     }
 
-    char * NoThrowAllocZero(size_t byteSize)
+    char * NoThrowAllocZero(__declspec(guard(overflow)) size_t byteSize)
     {
         char * buffer = NoThrowAlloc(byteSize);
         if (buffer != nullptr)
@@ -130,6 +130,7 @@ struct HeapAllocator
     static HeapAllocator Instance;
     static HeapAllocator * GetNoMemProtectInstance();
 
+
 private:
     HANDLE m_privateHeap;
 
@@ -144,7 +145,9 @@ public:
     HeapAllocator * TrackAllocInfo(TrackAllocData const& data);
     void ClearTrackAllocInfo(TrackAllocData* data = NULL);
 
+
 #ifdef HEAP_TRACK_ALLOC
+
     static void InitializeThread()
     {
         memset(&nextAllocData, 0, sizeof(nextAllocData));
@@ -179,8 +182,8 @@ class NoThrowHeapAllocator
 {
 public:
     static const bool FakeZeroLengthArray = false;
-    char * Alloc(size_t byteSize);
-    char * AllocZero(size_t byteSize);
+    char * Alloc(__declspec(guard(overflow)) size_t byteSize);
+    char * AllocZero(__declspec(guard(overflow)) size_t byteSize);
     void Free(void * buffer, size_t byteSize);
     static NoThrowHeapAllocator Instance;
 
@@ -198,8 +201,8 @@ class NoThrowNoMemProtectHeapAllocator
 {
 public:
     static const bool FakeZeroLengthArray = false;
-    char * Alloc(size_t byteSize);
-    char * AllocZero(size_t byteSize);
+    char * Alloc(__declspec(guard(overflow)) size_t byteSize);
+    char * AllocZero(__declspec(guard(overflow)) size_t byteSize);
     void Free(void * buffer, size_t byteSize);
     static NoThrowNoMemProtectHeapAllocator Instance;
 
@@ -215,7 +218,7 @@ class NoCheckHeapAllocator
 {
 public:
     static const bool FakeZeroLengthArray = false;
-    char * Alloc(size_t byteSize)
+    char * Alloc(__declspec(guard(overflow)) size_t byteSize)
     {
         if (processHeap == NULL)
         {
@@ -230,7 +233,7 @@ public:
         }
         return buffer;
     }
-    char * AllocZero(size_t byteSize)
+    char * AllocZero(__declspec(guard(overflow)) size_t byteSize)
     {
         if (processHeap == NULL)
         {
@@ -295,21 +298,21 @@ private:
 //----------------------------------------
 template <>
 _Ret_maybenull_ __inline void * __cdecl
-operator new(size_t byteSize, NoThrowNoMemProtectHeapAllocator * alloc, char * (NoThrowNoMemProtectHeapAllocator::*AllocFunc)(size_t))
+operator new(__declspec(guard(overflow)) size_t byteSize, NoThrowNoMemProtectHeapAllocator * alloc, char * (NoThrowNoMemProtectHeapAllocator::*AllocFunc)(size_t))
 {
     return ::operator new(byteSize, alloc, true, AllocFunc);
 }
 
 template <>
 _Ret_maybenull_ __inline void * __cdecl
-operator new[](size_t byteSize, NoThrowNoMemProtectHeapAllocator * alloc, char * (NoThrowNoMemProtectHeapAllocator::*AllocFunc)(size_t))
+operator new[](__declspec(guard(overflow)) size_t byteSize, NoThrowNoMemProtectHeapAllocator * alloc, char * (NoThrowNoMemProtectHeapAllocator::*AllocFunc)(size_t))
 {
     return ::operator new[](byteSize, alloc, true, AllocFunc);
 }
 
 template <>
 _Ret_maybenull_ __inline void * __cdecl
-operator new(size_t byteSize, NoThrowNoMemProtectHeapAllocator * alloc, char * (NoThrowNoMemProtectHeapAllocator::*AllocFunc)(size_t), size_t plusSize)
+operator new(__declspec(guard(overflow)) size_t byteSize, NoThrowNoMemProtectHeapAllocator * alloc, char * (NoThrowNoMemProtectHeapAllocator::*AllocFunc)(size_t), __declspec(guard(overflow)) size_t plusSize)
 {
     return ::operator new(byteSize, alloc, true, AllocFunc, plusSize);
 }
@@ -333,8 +336,8 @@ typedef NoThrowHeapAllocator NoThrowNoMemProtectHeapAllocator;
 // Default operator new/delete overrides
 //----------------------------------------
 #if !defined(USED_IN_STATIC_LIB)
-_Ret_maybenull_ void * __cdecl operator new(size_t byteSize);
-_Ret_maybenull_ void * __cdecl operator new[](size_t byteSize);
+_Ret_maybenull_ void * __cdecl operator new(__declspec(guard(overflow)) size_t byteSize);
+_Ret_maybenull_ void * __cdecl operator new[](__declspec(guard(overflow)) size_t byteSize);
 void __cdecl operator delete(void * obj);
 void __cdecl operator delete[](void * obj);
 #endif
@@ -359,21 +362,21 @@ operator delete(void * obj, HeapAllocator * alloc, char * (HeapAllocator::*Alloc
 //----------------------------------------
 template <>
 _Ret_maybenull_ __inline void * __cdecl
-operator new(size_t byteSize, NoThrowHeapAllocator * alloc, char * (NoThrowHeapAllocator::*AllocFunc)(size_t))
+operator new(__declspec(guard(overflow)) size_t byteSize, NoThrowHeapAllocator * alloc, char * (NoThrowHeapAllocator::*AllocFunc)(size_t))
 {
     return ::operator new(byteSize, alloc, true, AllocFunc);
 }
 
 template <>
 _Ret_maybenull_ __inline void * __cdecl
-operator new[](size_t byteSize, NoThrowHeapAllocator * alloc, char * (NoThrowHeapAllocator::*AllocFunc)(size_t))
+operator new[](__declspec(guard(overflow)) size_t byteSize, NoThrowHeapAllocator * alloc, char * (NoThrowHeapAllocator::*AllocFunc)(size_t))
 {
     return ::operator new[](byteSize, alloc, true, AllocFunc);
 }
 
 template <>
 _Ret_maybenull_ __inline void * __cdecl
-operator new(size_t byteSize, NoThrowHeapAllocator * alloc, char * (NoThrowHeapAllocator::*AllocFunc)(size_t), size_t plusSize)
+operator new(__declspec(guard(overflow)) size_t byteSize, NoThrowHeapAllocator * alloc, char * (NoThrowHeapAllocator::*AllocFunc)(size_t), size_t plusSize)
 {
     return ::operator new(byteSize, alloc, true, AllocFunc, plusSize);
 }
@@ -393,7 +396,7 @@ operator delete(void * obj, NoThrowHeapAllocator * alloc, char * (NoThrowHeapAll
 
 template <>
 _Ret_notnull_ __inline void * __cdecl
-operator new(size_t byteSize, NoCheckHeapAllocator * alloc, char * (NoCheckHeapAllocator::*AllocFunc)(size_t))
+operator new(__declspec(guard(overflow)) size_t byteSize, NoCheckHeapAllocator * alloc, char * (NoCheckHeapAllocator::*AllocFunc)(size_t))
 {
     Assert(byteSize != 0);
     void * buffer = (alloc->*AllocFunc)(byteSize);
@@ -403,7 +406,7 @@ operator new(size_t byteSize, NoCheckHeapAllocator * alloc, char * (NoCheckHeapA
 
 template <>
 _Ret_notnull_ __inline void * __cdecl
-operator new(size_t byteSize, NoCheckHeapAllocator * alloc, char * (NoCheckHeapAllocator::*AllocFunc)(size_t), size_t plusSize)
+operator new(__declspec(guard(overflow)) size_t byteSize, NoCheckHeapAllocator * alloc, char * (NoCheckHeapAllocator::*AllocFunc)(size_t), __declspec(guard(overflow)) size_t plusSize)
 {
     Assert(byteSize != 0);
     Assert(plusSize != 0);
@@ -413,7 +416,7 @@ operator new(size_t byteSize, NoCheckHeapAllocator * alloc, char * (NoCheckHeapA
 
 
 _Ret_notnull_ __inline void * __cdecl
-operator new[](size_t byteSize, NoCheckHeapAllocator * alloc, char * (NoCheckHeapAllocator::*AllocFunc)(size_t))
+operator new[](__declspec(guard(overflow)) size_t byteSize, NoCheckHeapAllocator * alloc, char * (NoCheckHeapAllocator::*AllocFunc)(size_t))
 {
     void * buffer = (alloc->*AllocFunc)(byteSize);
     return buffer;

+ 2 - 2
lib/Common/Memory/HeapAllocatorOperators.cpp

@@ -9,13 +9,13 @@
 //----------------------------------------
 
 _Ret_maybenull_ void * __cdecl
-operator new(size_t byteSize)
+operator new(__declspec(guard(overflow)) size_t byteSize)
 {
     return HeapNewNoThrowArray(char, byteSize);
 }
 
 _Ret_maybenull_ void * __cdecl
-operator new[](size_t byteSize)
+operator new[](__declspec(guard(overflow)) size_t byteSize)
 {
     return HeapNewNoThrowArray(char, byteSize);
 }

+ 9 - 2
lib/Common/Memory/HeapBlockMap.inl

@@ -149,7 +149,7 @@ HeapBlockMap32::Mark(void * candidate, MarkContext * markContext)
     }
 }
 
-template <bool interlocked, bool updateChunk>
+template <bool interlocked, bool largeBlockType>
 __inline
 bool
 HeapBlockMap32::MarkInteriorInternal(MarkContext * markContext, L2MapChunk *& chunk, void * originalCandidate, void * realCandidate)
@@ -166,8 +166,15 @@ HeapBlockMap32::MarkInteriorInternal(MarkContext * markContext, L2MapChunk *& ch
         return true;
     }
 
-    if (updateChunk)
+    if (largeBlockType)
     {
+
+#if defined(_M_IX86_OR_ARM32)
+        // we only check the first MaxLargeObjectMarkOffset bytes for marking purposes.
+        if ( (size_t)originalCandidate - (size_t)realCandidate > HeapConstants::MaxLargeObjectMarkOffset )
+            return true;
+#endif    
+
 #if defined(_M_X64_OR_ARM64)
         if (HeapBlockMap64::GetNodeIndex(originalCandidate) != HeapBlockMap64::GetNodeIndex(realCandidate))
         {

+ 5 - 5
lib/Common/Memory/HeapBucket.h

@@ -127,12 +127,12 @@ public:
     __inline char * RealAlloc(Recycler * recycler, size_t sizeCat, size_t size);
 
 #ifdef RECYCLER_PAGE_HEAP
-    char * PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow);
+    char * PageHeapAlloc(Recycler * recycler, __declspec(guard(overflow)) size_t sizeCat, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow);
 #endif
 
     void ExplicitFree(void* object, size_t sizeCat);
 
-    char * SnailAlloc(Recycler * recycler, TBlockAllocatorType * allocator, size_t sizeCat, size_t size, ObjectInfoBits attributes, bool nothrow);
+    char * SnailAlloc(Recycler * recycler, TBlockAllocatorType * allocator, __declspec(guard(overflow)) size_t sizeCat, size_t size, ObjectInfoBits attributes, bool nothrow);
 
     void ResetMarks(ResetMarkFlags flags);
     void ScanNewImplicitRoots(Recycler * recycler);
@@ -161,7 +161,7 @@ protected:
     static bool const IsFinalizableWriteBarrierBucket = TBlockType::RequiredAttributes == FinalizableWithBarrierBit;
 #endif
 
-    void Initialize(HeapInfo * heapInfo, uint sizeCat);
+    void Initialize(HeapInfo * heapInfo, __declspec(guard(overflow)) uint sizeCat);
     void AppendAllocableHeapBlockList(TBlockType * list);
     void DeleteHeapBlockList(TBlockType * list);
     static void DeleteEmptyHeapBlockList(TBlockType * list);
@@ -176,8 +176,8 @@ protected:
     template <class Fn> void ForEachAllocator(Fn fn);
 
     // Allocations
-    char * TryAllocFromNewHeapBlock(Recycler * recycler, TBlockAllocatorType * allocator, size_t sizeCat, size_t size, ObjectInfoBits attributes);
-    char * TryAlloc(Recycler * recycler, TBlockAllocatorType * allocator, size_t sizeCat, ObjectInfoBits attributes);
+    char * TryAllocFromNewHeapBlock(Recycler * recycler, TBlockAllocatorType * allocator, __declspec(guard(overflow)) size_t sizeCat, size_t size, ObjectInfoBits attributes);
+    char * TryAlloc(Recycler * recycler, TBlockAllocatorType * allocator, __declspec(guard(overflow)) size_t sizeCat, ObjectInfoBits attributes);
     TBlockType * CreateHeapBlock(Recycler * recycler);
     TBlockType * GetUnusedHeapBlock();
 

+ 6 - 0
lib/Common/Memory/HeapConstants.h

@@ -17,6 +17,12 @@ public:
     static const uint MaxMediumObjectSize = 9216;
 #endif
 
+#if defined(_M_IX86_OR_ARM32)
+    // The mark bit in chunk->MarkBits is set only if a pointer points into the first 8k region of a large object.
+    // If the pointer points outside of that region, no mark bit will be set.
+    static const uint MaxLargeObjectMarkOffset = 8 * 1024; 
+#endif
+
     static const uint ObjectAllocationShift = 4;        // 16
     static const uint ObjectGranularity = 1 << ObjectAllocationShift;
     static const uint BucketCount = (MaxSmallObjectSize >> ObjectAllocationShift);

+ 2 - 0
lib/Common/Memory/LargeHeapBlock.cpp

@@ -8,6 +8,8 @@ CompileAssert(
     sizeof(LargeObjectHeader) == HeapConstants::ObjectGranularity ||
     sizeof(LargeObjectHeader) == HeapConstants::ObjectGranularity * 2);
 
+const StackBackTrace* LargeHeapBlock::s_StackTraceAllocFailed = (StackBackTrace*)1;
+
 void *
 LargeObjectHeader::GetAddress() { return ((char *)this) + sizeof(LargeObjectHeader); }
 

+ 8 - 8
lib/Common/Memory/LargeHeapBlock.h

@@ -108,7 +108,7 @@ public:
     LargeHeapBlock * GetNextBlock() { return next; }
     void SetNextBlock(LargeHeapBlock * next) { this->next = next; }
     size_t GetFreeSize() const { return addressEnd - allocAddressEnd; }
-    static LargeHeapBlock * New(__in char * address, size_t pageCount, Segment * segment, uint objectCount, LargeHeapBucket* bucket);
+    static LargeHeapBlock * New(__in char * address, __declspec(guard(overflow)) size_t pageCount, Segment * segment, __declspec(guard(overflow)) uint objectCount, LargeHeapBucket* bucket);
     static void Delete(LargeHeapBlock * heapBlock);
     bool IsInPendingDisposeList() { return isInPendingDisposeList; }
     void SetIsInPendingDisposeList(bool isInPendingDisposeList) { this->isInPendingDisposeList = isInPendingDisposeList; }
@@ -146,10 +146,10 @@ public:
     char* GetBeginAddress() const { return address; }
     char* GetEndAddress() const { return addressEnd; }
 
-    char * Alloc(size_t size, ObjectInfoBits attributes);
-    char * TryAllocFromFreeList(size_t size, ObjectInfoBits attributes);
+    char * Alloc(__declspec(guard(overflow)) size_t size, ObjectInfoBits attributes);
+    char * TryAllocFromFreeList(__declspec(guard(overflow)) size_t size, ObjectInfoBits attributes);
 
-    static size_t GetPagesNeeded(size_t size, bool multiplyRequest);
+    static size_t GetPagesNeeded(__declspec(guard(overflow)) size_t size, bool multiplyRequest);
     static uint GetMaxLargeObjectCount(size_t pageCount, size_t firstAllocationSize);
 
     void EnumerateObjects(ObjectInfoBits infoBits, void (*CallBackFunction)(void * address, size_t size));
@@ -177,7 +177,7 @@ private:
     friend class Recycler;
 #endif
 
-    LargeHeapBlock(__in char * address, size_t pageCount, Segment * segment, uint objectCount, LargeHeapBucket* bucket);
+    LargeHeapBlock(__in char * address, __declspec(guard(overflow)) size_t pageCount, Segment * segment, __declspec(guard(overflow)) uint objectCount, LargeHeapBucket* bucket);
     static LargeObjectHeader * GetHeaderFromAddress(void * address);
     LargeObjectHeader * GetHeader(void * address);
     LargeObjectHeader ** HeaderList();
@@ -197,8 +197,8 @@ private:
     uint GetMarkCount();
     bool GetObjectHeader(void* objectAddress, LargeObjectHeader** ppHeader);
     BOOL IsNewHeapBlock() const { return lastCollectAllocCount == 0; }
-    static size_t GetAllocPlusSize(uint objectCount);
-    char * AllocFreeListEntry(size_t size, ObjectInfoBits attributes, LargeHeapBlockFreeListEntry* entry);
+    static size_t GetAllocPlusSize(__declspec(guard(overflow)) uint objectCount);
+    char * AllocFreeListEntry(__declspec(guard(overflow)) size_t size, ObjectInfoBits attributes, LargeHeapBlockFreeListEntry* entry);
 
 #if ENABLE_CONCURRENT_GC
     bool RescanOnePage(Recycler * recycler, DWORD const writeWatchFlags);
@@ -268,7 +268,7 @@ public:
     __inline bool InPageHeapMode() const { return pageHeapMode != PageHeapMode::PageHeapModeOff; }
     void CapturePageHeapAllocStack();
     void CapturePageHeapFreeStack();
-    const StackBackTrace* s_StackTraceAllocFailed = (StackBackTrace*)1;
+    const static StackBackTrace* s_StackTraceAllocFailed;
 #endif
 
 #if DBG

+ 6 - 2
lib/Common/Memory/LargeHeapBucket.cpp

@@ -158,6 +158,8 @@ LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size,
         AnalysisAssert(false);
     }
 
+
+
     LargeHeapBlock * heapBlock = LargeHeapBlock::New(address, pageCount, segment, 1, nullptr);
     if (!heapBlock)
     {
@@ -170,6 +172,10 @@ LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size,
     heapBlock->heapInfo = this->heapInfo;
     heapBlock->actualPageCount = actualPageCount;
     heapBlock->guardPageAddress = guardPageAddress;
+
+    // Fill the pattern before setting pageHeapMode, so the background stack scan can verify the pattern.
+    size_t usedSpace = sizeof(LargeObjectHeader) + size;
+    memset(address + usedSpace, 0xF0, pageCount * AutoSystemInfo::PageSize - usedSpace);
     heapBlock->pageHeapMode = heapInfo->pageHeapMode;
 
     if (!recycler->heapBlockMap.SetHeapBlock(address, pageCount, heapBlock, HeapBlock::HeapBlockType::LargeBlockType, 0))
@@ -186,8 +192,6 @@ LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size,
     char * memBlock = heapBlock->Alloc(size, attributes);
     Assert(memBlock != nullptr);
 
-    // fill pattern
-    memset(heapBlock->allocAddressEnd, 0xF0, heapBlock->addressEnd - heapBlock->allocAddressEnd);
 
 #pragma prefast(suppress:6250, "This method decommits memory")
     if (::VirtualFree(guardPageAddress, AutoSystemInfo::PageSize * guardPageCount, MEM_DECOMMIT) == FALSE)

+ 8 - 8
lib/Common/Memory/LargeHeapBucket.h

@@ -36,14 +36,14 @@ public:
 
     ~LargeHeapBucket();
 
-    void Initialize(HeapInfo * heapInfo, uint sizeCat, bool supportFreeList = false);
+    void Initialize(HeapInfo * heapInfo, __declspec(guard(overflow)) uint sizeCat, bool supportFreeList = false);
 
-    LargeHeapBlock* AddLargeHeapBlock(size_t size, bool nothrow);
+    LargeHeapBlock* AddLargeHeapBlock(__declspec(guard(overflow)) size_t size, bool nothrow);
 
     template <ObjectInfoBits attributes, bool nothrow>
     char* Alloc(Recycler * recycler, size_t sizeCat);
 #ifdef RECYCLER_PAGE_HEAP
-    char *PageHeapAlloc(Recycler * recycler, size_t sizeCat, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow);
+    char *PageHeapAlloc(Recycler * recycler, __declspec(guard(overflow)) size_t sizeCat, size_t size, ObjectInfoBits attributes, PageHeapMode mode, bool nothrow);
 #endif
     void ExplicitFree(void * object, size_t sizeCat);
 
@@ -93,11 +93,11 @@ public:
 #endif
 
 private:
-    char * SnailAlloc(Recycler * recycler, size_t sizeCat, size_t size, ObjectInfoBits attributes, bool nothrow);
-    char * TryAlloc(Recycler * recycler, size_t sizeCat, ObjectInfoBits attributes);
-    char * TryAllocFromNewHeapBlock(Recycler * recycler, size_t sizeCat, size_t size, ObjectInfoBits attributes, bool nothrow);
-    char * TryAllocFromFreeList(Recycler * recycler, size_t sizeCat, ObjectInfoBits attributes);
-    char * TryAllocFromExplicitFreeList(Recycler * recycler, size_t sizeCat, ObjectInfoBits attributes);
+    char * SnailAlloc(Recycler * recycler, __declspec(guard(overflow)) size_t sizeCat, size_t size, ObjectInfoBits attributes, bool nothrow);
+    char * TryAlloc(Recycler * recycler, __declspec(guard(overflow)) size_t sizeCat, ObjectInfoBits attributes);
+    char * TryAllocFromNewHeapBlock(Recycler * recycler, __declspec(guard(overflow)) size_t sizeCat, size_t size, ObjectInfoBits attributes, bool nothrow);
+    char * TryAllocFromFreeList(Recycler * recycler, __declspec(guard(overflow)) size_t sizeCat, ObjectInfoBits attributes);
+    char * TryAllocFromExplicitFreeList(Recycler * recycler, __declspec(guard(overflow)) size_t sizeCat, ObjectInfoBits attributes);
 
     template <class Fn> void ForEachLargeHeapBlock(Fn fn);
     template <class Fn> void ForEachEditingLargeHeapBlock(Fn fn);

+ 22 - 23
lib/Common/Memory/PageAllocator.h

@@ -84,7 +84,7 @@ struct SecondaryAllocation
 class SecondaryAllocator
 {
 public:
-    virtual bool Alloc(ULONG_PTR functionStart, DWORD functionSize, ushort pdataCount, ushort xdataSize, SecondaryAllocation* xdata) = 0;
+    virtual bool Alloc(ULONG_PTR functionStart, DWORD functionSize, __declspec(guard(overflow)) ushort pdataCount, __declspec(guard(overflow)) ushort xdataSize, SecondaryAllocation* xdata) = 0;
     virtual void Release(const SecondaryAllocation& allocation) = 0;
     virtual void Delete() = 0;
     virtual bool CanAllocate() = 0;
@@ -101,7 +101,7 @@ template<typename TVirtualAlloc>
 class SegmentBase
 {
 public:
-    SegmentBase(PageAllocatorBase<TVirtualAlloc> * allocator, size_t pageCount);
+    SegmentBase(PageAllocatorBase<TVirtualAlloc> * allocator, __declspec(guard(overflow)) size_t pageCount);
     virtual ~SegmentBase();
 
     size_t GetPageCount() const { return segmentPageCount; }
@@ -213,10 +213,10 @@ public:
     static bool IsAllocationPageAligned(__in char* address, size_t pageCount);
 
     template <typename T, bool notPageAligned>
-    char * AllocDecommitPages(uint pageCount, T freePages, T decommitPages);
+    char * AllocDecommitPages(__declspec(guard(overflow)) uint pageCount, T freePages, T decommitPages);
 
     template <bool notPageAligned>
-    char * AllocPages(uint pageCount);
+    char * AllocPages(__declspec(guard(overflow)) uint pageCount);
 
     void ReleasePages(__in void * address, uint pageCount);
     template <bool onlyUpdateState>
@@ -240,7 +240,7 @@ public:
     void ClearRangeInDecommitPagesBitVector(uint index, uint pageCount);
 
     template <bool notPageAligned>
-    char * DoAllocDecommitPages(uint pageCount);
+    char * DoAllocDecommitPages(__declspec(guard(overflow)) uint pageCount);
     uint GetMaxPageCount();
 
     size_t DecommitFreePages(size_t pageToDecommit);
@@ -411,12 +411,11 @@ public:
 
     //VirtualAllocator APIs
     TVirtualAlloc * GetVirtualAllocator() { return virtualAllocator; }
-
     bool IsPreReservedPageAllocator() { return virtualAllocator != nullptr; }
 
 
-    PageAllocation * AllocPagesForBytes(size_t requestedBytes);
-    PageAllocation * AllocAllocation(size_t pageCount);
+    PageAllocation * AllocPagesForBytes(__declspec(guard(overflow)) size_t requestedBytes);
+    PageAllocation * AllocAllocation(__declspec(guard(overflow)) size_t pageCount);
 
     void ReleaseAllocation(PageAllocation * allocation);
     void ReleaseAllocationNoSuspend(PageAllocation * allocation);
@@ -425,8 +424,8 @@ public:
 
     void Release(void * address, size_t pageCount, void * segment);
 
-    char * AllocPages(uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
-    char * AllocPagesPageAligned(uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
+    char * AllocPages(__declspec(guard(overflow)) uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
+    char * AllocPagesPageAligned(__declspec(guard(overflow)) uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
 
     void PartialDecommitPages(__in void * address, size_t pageCountTotal, __in void* decommitAddress, size_t pageCountToDecommit,  __in void * pageSegment);
     void ReleasePages(__in void * address, uint pageCount, __in void * pageSegment);
@@ -481,23 +480,23 @@ public:
     char16 const * debugName;
 #endif
 protected:
-    SegmentBase<TVirtualAlloc> * AllocSegment(size_t pageCount);
+    SegmentBase<TVirtualAlloc> * AllocSegment(__declspec(guard(overflow)) size_t pageCount);
     void ReleaseSegment(SegmentBase<TVirtualAlloc> * segment);
 
     template <bool doPageAlign>
     char * AllocInternal(size_t * pageCount, SegmentBase<TVirtualAlloc> ** segment);
 
     template <bool notPageAligned>
-    char * SnailAllocPages(uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
-    void OnAllocFromNewSegment(uint pageCount, __in void* pages, SegmentBase<TVirtualAlloc>* segment);
+    char * SnailAllocPages(__declspec(guard(overflow)) uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
+    void OnAllocFromNewSegment(__declspec(guard(overflow)) uint pageCount, __in void* pages, SegmentBase<TVirtualAlloc>* segment);
 
     template <bool notPageAligned>
-    char * TryAllocFreePages(uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
-    char * TryAllocFromZeroPagesList(uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment, SLIST_HEADER& zeroPagesList, bool isPendingZeroList);
-    char * TryAllocFromZeroPages(uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
+    char * TryAllocFreePages(__declspec(guard(overflow)) uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
+    char * TryAllocFromZeroPagesList(__declspec(guard(overflow)) uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment, SLIST_HEADER& zeroPagesList, bool isPendingZeroList);
+    char * TryAllocFromZeroPages(__declspec(guard(overflow)) uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
 
     template <bool notPageAligned>
-    char * TryAllocDecommittedPages(uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
+    char * TryAllocDecommittedPages(__declspec(guard(overflow)) uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
 
     DListBase<PageSegmentBase<TVirtualAlloc>> * GetSegmentList(PageSegmentBase<TVirtualAlloc> * segment);
     void TransferSegment(PageSegmentBase<TVirtualAlloc> * segment, DListBase<PageSegmentBase<TVirtualAlloc>> * fromSegmentList);
@@ -521,7 +520,7 @@ protected:
 #endif
     virtual PageSegmentBase<TVirtualAlloc> * AddPageSegment(DListBase<PageSegmentBase<TVirtualAlloc>>& segmentList);
     static PageSegmentBase<TVirtualAlloc> * AllocPageSegment(DListBase<PageSegmentBase<TVirtualAlloc>>& segmentList, 
-        PageAllocatorBase<TVirtualAlloc> * pageAllocator, bool committed, bool allocated);
+    PageAllocatorBase<TVirtualAlloc> * pageAllocator, bool committed, bool allocated);
 
     // Zero Pages
     void AddPageToZeroQueue(__in void * address, uint pageCount, __in PageSegmentBase<TVirtualAlloc> * pageSegment);
@@ -613,7 +612,7 @@ private:
     void QueuePages(void * address, uint pageCount, PageSegmentBase<TVirtualAlloc> * pageSegment);
 
     template <bool notPageAligned>
-    char* AllocPagesInternal(uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
+    char* AllocPagesInternal(__declspec(guard(overflow)) uint pageCount, PageSegmentBase<TVirtualAlloc> ** pageSegment);
 
 #ifdef PROFILE_MEM
     PageMemoryData * memoryData;
@@ -722,16 +721,16 @@ public:
     HeapPageAllocator(AllocationPolicyManager * policyManager, bool allocXdata, bool excludeGuardPages, TVirtualAlloc * virtualAllocator);
 
     BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag);
-    bool AllocSecondary(void* segment, ULONG_PTR functionStart, DWORD functionSize, ushort pdataCount, ushort xdataSize, SecondaryAllocation* allocation);
+    bool AllocSecondary(void* segment, ULONG_PTR functionStart, DWORD functionSize, __declspec(guard(overflow)) ushort pdataCount, __declspec(guard(overflow)) ushort xdataSize, SecondaryAllocation* allocation);
     bool ReleaseSecondary(const SecondaryAllocation& allocation, void* segment);
     void TrackDecommittedPages(void * address, uint pageCount, __in void* segment);
     void DecommitPages(__in char* address, size_t pageCount = 1);
 
     // Release pages that has already been decommitted
-    void ReleaseDecommitted(void * address, size_t pageCount, __in void * segment);
-    bool IsAddressFromAllocator(__in void* address);    
+    void    ReleaseDecommitted(void * address, size_t pageCount, __in void * segment);
+    bool    IsAddressFromAllocator(__in void* address);
+    bool    AllocXdata() { return allocXdata; }
 
-    bool AllocXdata() { return allocXdata; }
 private:
     bool         allocXdata;
     void         ReleaseDecommittedSegment(__in SegmentBase<TVirtualAlloc>* segment);

+ 23 - 22
lib/Common/Memory/Recycler.h

@@ -790,6 +790,7 @@ private:
     bool HasPendingTrackObjects() const { return markContext.HasPendingTrackObjects() || parallelMarkContext1.HasPendingTrackObjects() || parallelMarkContext2.HasPendingTrackObjects() || parallelMarkContext3.HasPendingTrackObjects(); }
 
     RecyclerCollectionWrapper * collectionWrapper;
+
     HANDLE mainThreadHandle;
     void * stackBase;
     class SavedRegisterState
@@ -910,7 +911,6 @@ private:
     HANDLE concurrentWorkDoneEvent; // concurrent threads use this event to tell main thread that the work allocated is done
     HANDLE concurrentThread;
 
-
     template <uint parallelId>
     void ParallelWorkFunc();
 
@@ -1056,7 +1056,7 @@ public:
 
     void LogMemProtectHeapSize(bool fromGC);
 
-    char* Realloc(void* buffer, size_t existingBytes, size_t requestedBytes, bool truncate = true);
+    char* Realloc(void* buffer, __declspec(guard(overflow)) size_t existingBytes, __declspec(guard(overflow)) size_t requestedBytes, bool truncate = true);
     void SetTelemetryBlock(RecyclerWatsonTelemetryBlock * telemetryBlock) { this->telemetryBlock = telemetryBlock; }
 
     void Prime();
@@ -1258,22 +1258,22 @@ public:
 #define DEFINE_RECYCLER_ALLOC_TRACE(AllocFunc, AllocWithAttributeFunc, attributes)
 #endif
 #define DEFINE_RECYCLER_ALLOC_BASE(AllocFunc, AllocWithAttributesFunc, attributes) \
-    __inline char * AllocFunc(size_t size) \
+    __inline char * AllocFunc(__declspec(guard(overflow)) size_t size) \
     { \
         return AllocWithAttributesFunc<attributes, /* nothrow = */ false>(size); \
     } \
-    __forceinline char * AllocFunc##Inlined(size_t size) \
+    __forceinline char * AllocFunc##Inlined(__declspec(guard(overflow)) size_t size) \
     { \
         return AllocWithAttributesFunc##Inlined<attributes, /* nothrow = */ false>(size);  \
     } \
     DEFINE_RECYCLER_ALLOC_TRACE(AllocFunc, AllocWithAttributesFunc, attributes);
 
 #define DEFINE_RECYCLER_NOTHROW_ALLOC_BASE(AllocFunc, AllocWithAttributesFunc, attributes) \
-    __inline char * NoThrow##AllocFunc(size_t size) \
+    __inline char * NoThrow##AllocFunc(__declspec(guard(overflow)) size_t size) \
     { \
         return AllocWithAttributesFunc<attributes, /* nothrow = */ true>(size); \
     } \
-    __inline char * NoThrow##AllocFunc##Inlined(size_t size) \
+    __inline char * NoThrow##AllocFunc##Inlined(__declspec(guard(overflow)) size_t size) \
     { \
         return AllocWithAttributesFunc##Inlined<attributes, /* nothrow = */ true>(size);  \
     } \
@@ -1309,7 +1309,7 @@ public:
     DEFINE_RECYCLER_NOTHROW_ALLOC_ZERO(AllocImplicitRoot, ImplicitRootBit);
 
     template <ObjectInfoBits enumClass>
-    char * AllocEnumClass(size_t size)
+    char * AllocEnumClass(__declspec(guard(overflow)) size_t size)
     {
         Assert((enumClass & EnumClassMask) != 0);
         Assert((enumClass & ~EnumClassMask) == 0);
@@ -1317,7 +1317,7 @@ public:
     }
 
     template <ObjectInfoBits infoBits>
-    char * AllocWithInfoBits(size_t size)
+    char * AllocWithInfoBits(__declspec(guard(overflow)) size_t size)
     {
         return AllocWithAttributes<infoBits, /* nothrow = */ false>(size);
     }
@@ -1376,7 +1376,7 @@ public:
     template <typename TBlockAttributes>
     void SetExplicitFreeBitOnSmallBlock(HeapBlock* heapBlock, size_t sizeCat, void* buffer, ObjectInfoBits attributes);
 
-    char* HeapAllocR(HeapInfo* eHeap, size_t size)
+    char* HeapAllocR(HeapInfo* eHeap, __declspec(guard(overflow)) size_t size)
     {
         return RealAlloc<LeafBit, /* nothrow = */ false>(eHeap, size);
     }
@@ -1389,10 +1389,10 @@ public:
     void RootRelease(void* obj, uint *count = nullptr);
 
     template <ObjectInfoBits attributes, bool nothrow>
-    __inline char* RealAlloc(HeapInfo* heap, size_t size);
+    __inline char* RealAlloc(HeapInfo* heap, __declspec(guard(overflow)) size_t size);
 
     template <ObjectInfoBits attributes, bool isSmallAlloc, bool nothrow>
-    __inline char* RealAllocFromBucket(HeapInfo* heap, size_t size);
+    __inline char* RealAllocFromBucket(HeapInfo* heap, __declspec(guard(overflow)) size_t size);
 
     void EnterIdleDecommit();
     void LeaveIdleDecommit();
@@ -1508,23 +1508,23 @@ private:
 
     // Allocation
     template <ObjectInfoBits attributes, bool nothrow>
-    __inline char * AllocWithAttributesInlined(size_t size);
+    __inline char * AllocWithAttributesInlined(__declspec(guard(overflow)) size_t size);
     template <ObjectInfoBits attributes, bool nothrow>
-    char * AllocWithAttributes(size_t size)
+    char * AllocWithAttributes(__declspec(guard(overflow)) size_t size)
     {
         return AllocWithAttributesInlined<attributes, nothrow>(size);
     }
 
     template <ObjectInfoBits attributes, bool nothrow>
-    __inline char* AllocZeroWithAttributesInlined(size_t size);
+    __inline char* AllocZeroWithAttributesInlined(__declspec(guard(overflow)) size_t size);
 
     template <ObjectInfoBits attributes, bool nothrow>
-    char* AllocZeroWithAttributes(size_t size)
+    char* AllocZeroWithAttributes(__declspec(guard(overflow)) size_t size)
     {
         return AllocZeroWithAttributesInlined<attributes, nothrow>(size);
     }
 
-    char* AllocWeakReferenceEntry(size_t size)
+    char* AllocWeakReferenceEntry(__declspec(guard(overflow)) size_t size)
     {
         return AllocWithAttributes<WeakReferenceEntryBits, /* nothrow = */ false>(size);
     }
@@ -1539,10 +1539,10 @@ private:
         return (ticks > tickCountNextDispose && this->hasDisposableObject);
     }
 
-    char* TryLargeAlloc(HeapInfo* heap, size_t size, ObjectInfoBits attributes, bool nothrow);
+    char* TryLargeAlloc(HeapInfo* heap, __declspec(guard(overflow)) size_t size, ObjectInfoBits attributes, bool nothrow);
 
     template <bool nothrow>
-    char* LargeAlloc(HeapInfo* heap, size_t size, ObjectInfoBits attributes);
+    char* LargeAlloc(HeapInfo* heap, __declspec(guard(overflow)) size_t size, ObjectInfoBits attributes);
     void OutOfMemory();
 
     // Collection
@@ -1652,6 +1652,7 @@ private:
 #if ENABLE_CONCURRENT_GC
     void BackgroundFinishPartialCollect(RecyclerSweep * recyclerSweep);
 #endif
+
 #endif
 
     size_t RescanMark(DWORD waitTime);
@@ -2219,7 +2220,7 @@ Recycler::RemoveSmallAllocator(SmallHeapBlockAllocatorType * allocator, size_t s
 
 template <ObjectInfoBits attributes, typename SmallHeapBlockAllocatorType>
 char *
-Recycler::SmallAllocatorAlloc(SmallHeapBlockAllocatorType * allocator, size_t sizeCat, size_t size)
+Recycler::SmallAllocatorAlloc(SmallHeapBlockAllocatorType * allocator, __declspec(guard(overflow)) size_t sizeCat, size_t size)
 {
     return autoHeap.SmallAllocatorAlloc<attributes>(this, allocator, sizeCat, size);
 }
@@ -2398,7 +2399,7 @@ struct ForceLeafAllocator<RecyclerNonLeafAllocator>
 }
 
 _Ret_notnull_ inline void * __cdecl
-operator new(size_t byteSize, Recycler * alloc, HeapInfo * heapInfo)
+operator new(__declspec(guard(overflow)) size_t byteSize, Recycler * alloc, HeapInfo * heapInfo)
 {
     return alloc->HeapAllocR(heapInfo, byteSize);
 }
@@ -2410,7 +2411,7 @@ operator delete(void * obj, Recycler * alloc, HeapInfo * heapInfo)
 }
 
 _Ret_notnull_ inline void * __cdecl
-operator new(size_t byteSize, Recycler * recycler, ObjectInfoBits enumClassBits)
+operator new(__declspec(guard(overflow)) size_t byteSize, Recycler * recycler, ObjectInfoBits enumClassBits)
 {
     AssertCanHandleOutOfMemory();
     Assert(byteSize != 0);
@@ -2423,7 +2424,7 @@ operator new(size_t byteSize, Recycler * recycler, ObjectInfoBits enumClassBits)
 
 template<ObjectInfoBits infoBits>
 _Ret_notnull_ inline void * __cdecl
-operator new(size_t byteSize, Recycler * recycler, const InfoBitsWrapper<infoBits>&)
+operator new(__declspec(guard(overflow)) size_t byteSize, Recycler * recycler, const InfoBitsWrapper<infoBits>&)
 {
     AssertCanHandleOutOfMemory();
     Assert(byteSize != 0);

+ 2 - 5
lib/Common/Memory/Recycler.inl

@@ -280,7 +280,7 @@ bool Recycler::IsPageHeapEnabled(size_t size)
 template <ObjectInfoBits attributes>
 void Recycler::VerifyPageHeapFillAfterAlloc(char* memBlock, size_t size)
 {
-    if (IsPageHeapEnabled())
+    if (IsPageHeapEnabled() && memBlock != nullptr)
     {
         HeapBlock* heapBlock = this->FindHeapBlock(memBlock);
 
@@ -402,10 +402,7 @@ Recycler::RealAlloc(HeapInfo* heap, size_t size)
 
     char* addr = LargeAlloc<nothrow>(heap, size, attributes);
 #if DBG
-    if (IsPageHeapEnabled())
-    {
-        this->VerifyPageHeapFillAfterAlloc<attributes>(addr, size);
-    }
+    this->VerifyPageHeapFillAfterAlloc<attributes>(addr, size);
 #endif
     return addr;
 }

+ 2 - 5
lib/Common/Memory/RecyclerFastAllocator.h

@@ -37,7 +37,7 @@ public:
     }
 
     Recycler * GetRecycler() { return recycler; }
-    char * Alloc(size_t size)
+    char * Alloc(__declspec(guard(overflow)) size_t size)
     {
         Assert(recycler != nullptr);
         Assert(!recycler->IsHeapEnumInProgress() || recycler->AllowAllocationDuringHeapEnum());
@@ -73,10 +73,7 @@ public:
         recycler->FillCheckPad(memBlock, sizeof(T), sizeCat);
 #endif
 #if DBG
-        if (recycler->IsPageHeapEnabled())
-        {
-            recycler->VerifyPageHeapFillAfterAlloc<attributes>(memBlock, size);
-        }
+        recycler->VerifyPageHeapFillAfterAlloc<attributes>(memBlock, size);
 #endif
         return memBlock;
     };

+ 2 - 2
lib/Common/Memory/RecyclerWriteBarrierManager.h

@@ -55,7 +55,7 @@ public:
     bool OnThreadInit();
 
     // Called when a page allocator segment is allocated
-    bool OnSegmentAlloc(_In_ char* segmentAddress, size_t numPages);
+    bool OnSegmentAlloc(_In_ char* segmentAddress, __declspec(guard(overflow)) size_t numPages);
 
     // Called when a page allocator segment is freed
     bool OnSegmentFree(_In_ char* segmentAddress, size_t numPages);
@@ -140,7 +140,7 @@ public:
     // For GC
 #ifdef _M_X64_OR_ARM64
     static bool OnThreadInit();
-    static bool OnSegmentAlloc(_In_ char* segment, size_t pageCount);
+    static bool OnSegmentAlloc(_In_ char* segment, __declspec(guard(overflow)) size_t pageCount);
     static bool OnSegmentFree(_In_ char* segment, size_t pageCount);
 #endif
 

+ 3 - 3
lib/Common/Memory/SmallHeapBlockAllocator.h

@@ -15,15 +15,15 @@ public:
     void Initialize();
 
     template <ObjectInfoBits attributes>
-    __inline char * InlinedAlloc(Recycler * recycler, size_t sizeCat);
+    __inline char * InlinedAlloc(Recycler * recycler, __declspec(guard(overflow)) size_t sizeCat);
 
     // Pass through template parameter to InlinedAllocImpl
     template <bool canFaultInject>
-    __inline char * SlowAlloc(Recycler * recycler, size_t sizeCat, ObjectInfoBits attributes);
+    __inline char * SlowAlloc(Recycler * recycler, __declspec(guard(overflow)) size_t sizeCat, ObjectInfoBits attributes);
 
     // There are paths where we simply can't OOM here, so we shouldn't fault inject as it creates a bit of a mess
     template <bool canFaultInject>
-    __inline char* InlinedAllocImpl(Recycler * recycler, size_t sizeCat, ObjectInfoBits attributes);
+    __inline char* InlinedAllocImpl(Recycler * recycler, __declspec(guard(overflow)) size_t sizeCat, ObjectInfoBits attributes);
 
     TBlockType * GetHeapBlock() const { return heapBlock; }
     SmallHeapBlockAllocator * GetNext() const { return next; }

+ 4 - 3
lib/Common/Memory/VirtualAllocWrapper.cpp

@@ -178,6 +178,7 @@ LPVOID PreReservedVirtualAllocWrapper::EnsurePreReservedRegionInternal()
     }
 
 #if defined(_CONTROL_FLOW_GUARD)
+    bool supportPreReservedRegion = true;
 #if !_M_X64_OR_ARM64
 #if _M_IX86
     // We want to restrict the number of prereserved segment for 32-bit process so that we don't use up the address space
@@ -188,15 +189,15 @@ LPVOID PreReservedVirtualAllocWrapper::EnsurePreReservedRegionInternal()
 
     if (PreReservedVirtualAllocWrapper::numPreReservedSegment > PreReservedVirtualAllocWrapper::MaxPreReserveSegment)
     {
-        return nullptr;
+        supportPreReservedRegion = false;
     }
 #else
     // TODO: fast check for prereserved segment is not implementated in ARM yet, so it is only enabled for x86
-    return nullptr;
+    supportPreReservedRegion = false;
 #endif // _M_IX86
 #endif
 
-    if (AutoSystemInfo::Data.IsCFGEnabled())
+    if (AutoSystemInfo::Data.IsCFGEnabled() && supportPreReservedRegion)
     {
         startAddress = VirtualAlloc(NULL, bytes, MEM_RESERVE, PAGE_READWRITE);
         PreReservedHeapTrace(_u("Reserving PreReservedSegment For the first time(CFG Enabled). Address: 0x%p\n"), preReservedStartAddress);

+ 4 - 4
lib/Common/Memory/VirtualAllocWrapper.h

@@ -16,7 +16,7 @@ namespace Memory
 class VirtualAllocWrapper
 {
 public:
-    LPVOID  Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation = false);
+    LPVOID  Alloc(LPVOID lpAddress, __declspec(guard(overflow)) size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation = false);
     BOOL    Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType);
 };
 
@@ -35,20 +35,20 @@ public:
 #else // _M_X64_OR_ARM64
     static const uint PreReservedAllocationSegmentCount = 4096; //(4096 * 64K) == 256MB, if 64k is the AllocationGranularity
 #endif
+
 #if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
     static const unsigned MaxPreReserveSegment = 6;
 #endif
 public:
     PreReservedVirtualAllocWrapper();
     ~PreReservedVirtualAllocWrapper();
-    LPVOID      Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation = false);
+    LPVOID      Alloc(LPVOID lpAddress, __declspec(guard(overflow)) size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation = false);
     BOOL        Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType);
 
     bool        IsInRange(void * address);
     LPVOID      EnsurePreReservedRegion();
 
     LPVOID      GetPreReservedEndAddress();
-
 #if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
     static int  NumPreReservedSegment() { return numPreReservedSegment; }
 #endif
@@ -63,10 +63,10 @@ private:
     LPVOID      EnsurePreReservedRegionInternal();
     bool        IsPreReservedRegionPresent();
     LPVOID      GetPreReservedStartAddress();
+
     BVStatic<PreReservedAllocationSegmentCount>     freeSegments;
     LPVOID                                          preReservedStartAddress;
     CriticalSection                                 cs;
-
 #if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
     static uint  numPreReservedSegment;
 #endif

+ 2 - 2
lib/Parser/Alloc.h

@@ -10,10 +10,10 @@ NoReleaseAllocator - allocator that never releases until it is destroyed
 class NoReleaseAllocator
 {
 public:
-    NoReleaseAllocator(long cbFirst = 256, long cbMax = 0x4000 /*16K*/);
+    NoReleaseAllocator(__declspec(guard(overflow)) long cbFirst = 256, __declspec(guard(overflow)) long cbMax = 0x4000 /*16K*/);
     ~NoReleaseAllocator(void) { FreeAll(); }
 
-    void *Alloc(long cb);
+    void *Alloc(__declspec(guard(overflow)) long cb);
     void FreeAll();
     void Clear() { FreeAll(); }
 

+ 25 - 7
lib/Parser/Parse.cpp

@@ -815,7 +815,8 @@ Symbol* Parser::AddDeclForPid(ParseNodePtr pnode, IdentPtr pid, SymbolType symbo
         && pnode->nop == knopVarDecl
         && blockInfo->pnodeBlock->sxBlock.blockType == PnodeBlockType::Function
         && blockInfo->pBlockInfoOuter != nullptr
-        && blockInfo->pBlockInfoOuter->pnodeBlock->sxBlock.blockType == PnodeBlockType::Parameter)
+        && blockInfo->pBlockInfoOuter->pnodeBlock->sxBlock.blockType == PnodeBlockType::Parameter
+        && blockInfo->pBlockInfoOuter->pnodeBlock->sxBlock.scope->GetCanMergeWithBodyScope())
     {
         blockInfo = blockInfo->pBlockInfoOuter;
     }
@@ -1949,7 +1950,7 @@ void Parser::CheckArgumentsUse(IdentPtr pid, ParseNodePtr pnodeFnc)
 {
     if (pid == wellKnownPropertyPids.arguments)
     {
-        if (pnodeFnc != nullptr)
+        if (pnodeFnc != nullptr && pnodeFnc != m_currentNodeProg)
         {
             pnodeFnc->sxFnc.SetUsesArguments(TRUE);
         }
@@ -2852,7 +2853,6 @@ ParseNodePtr Parser::ParseTerm(BOOL fAllowCall,
             pnode->ichMin = ichMin;
             pnode->ichLim = ichLim;
             pnode->sxPid.SetSymRef(ref);
-            CheckArgumentsUse(pid, m_currentNodeFunc);
         }
         else
         {
@@ -2862,6 +2862,7 @@ ParseNodePtr Parser::ParseTerm(BOOL fAllowCall,
             term.ichMin = static_cast<charcount_t>(iecpMin);
             term.ichLim = static_cast<charcount_t>(iecpLim);
         }
+        CheckArgumentsUse(pid, GetCurrentFunctionNode());
         break;
     }
 
@@ -4562,11 +4563,13 @@ ParseNodePtr Parser::ParseFncDecl(ushort flags, LPCOLESTR pNameHint, const bool
             m_currentNodeNonLambdaDeferredFunc = pnodeFncSaveNonLambda;
         }
         m_currentNodeDeferredFunc = pnodeFncSave;
-        if (m_currentNodeFunc && pnodeFnc->sxFnc.HasWithStmt())
-        {
-            GetCurrentFunctionNode()->sxFnc.SetHasWithStmt(true);
-        }
     }
+
+    if (m_currentNodeFunc && pnodeFnc->sxFnc.HasWithStmt())
+    {
+        GetCurrentFunctionNode()->sxFnc.SetHasWithStmt(true);
+    }
+
     if (m_currentNodeFunc && (pnodeFnc->sxFnc.CallsEval() || pnodeFnc->sxFnc.ChildCallsEval()))
     {
         GetCurrentFunctionNode()->sxFnc.SetChildCallsEval(true);
@@ -5030,6 +5033,12 @@ bool Parser::ParseFncDeclHelper(ParseNodePtr pnodeFnc, ParseNodePtr pnodeFncPare
             }
         }
 
+        if (!fLambda && paramScope != nullptr && !paramScope->GetCanMergeWithBodyScope()
+            && (pnodeFnc->sxFnc.UsesArguments() || pnodeFnc->grfpn & fpnArguments_overriddenByDecl))
+        {
+            Error(ERRNonSimpleParamListArgumentsUse);
+        }
+
         // If the param scope is merged with the body scope we want to use the param scope symbols in the body scope.
         // So add a pid ref for the body using the param scope symbol. Note that in this case the same symbol will occur twice
         // in the same pid ref stack.
@@ -5094,6 +5103,15 @@ bool Parser::ParseFncDeclHelper(ParseNodePtr pnodeFnc, ParseNodePtr pnodeFncPare
                     Assert(paramNode && paramNode->sxVar.sym->GetScope()->GetScopeType() == ScopeType_FunctionBody);
                     paramNode->sxVar.sym->SetHasInit(true);
                 });
+
+                if (!fLambda)
+                {
+                    // In split scope case ideally the arguments object should be in the param scope.
+                    // Right now referring to arguments in the param scope is a SyntaxError, so we have to
+                    // add a duplicate symbol in the body scope and copy over the value in BeginBodySope.
+                    ParseNodePtr argumentsNode = this->CreateVarDeclNode(wellKnownPropertyPids.arguments, STVariable, true, nullptr, false);
+                    Assert(argumentsNode && argumentsNode->sxVar.sym->GetScope()->GetScopeType() == ScopeType_FunctionBody);
+                }
             }
 
             // Keep nested function declarations and expressions in the same list at function scope.

+ 1 - 0
lib/Parser/perrors.h

@@ -100,3 +100,4 @@ LSC_ERROR_MSG(1087, ERRInvalidModuleImportOrExport, "Module import or export sta
 LSC_ERROR_MSG(1088, ERRInvalidExportName, "Unable to resolve module export name")
 
 LSC_ERROR_MSG(1089, ERRLetIDInLexicalDecl, "'let' is not an allowed identifier in lexical declarations")
+LSC_ERROR_MSG(1090, ERRNonSimpleParamListArgumentsUse, "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured")

+ 7 - 2
lib/Runtime/Base/ScriptContext.cpp

@@ -1726,7 +1726,7 @@ if (!sourceList)
                 Assert((loadScriptFlag & LoadScriptFlag_disableAsmJs) != LoadScriptFlag_disableAsmJs);
 
                 pse->Clear();
-                
+
                 loadScriptFlag = (LoadScriptFlag)(loadScriptFlag | LoadScriptFlag_disableAsmJs);
                 return LoadScript(script, cb, pSrcInfo, pse, ppSourceInfo, rootDisplayName, loadScriptFlag);
             }
@@ -4380,6 +4380,11 @@ void ScriptContext::RegisterPrototypeChainEnsuredToHaveOnlyWritableDataPropertie
 
     void ScriptContext::SaveStartupProfileAndRelease(bool isSaveOnClose)
     {
+        // No need to save profiler info in JSRT scenario at this time.
+        if (GetThreadContext()->IsJSRT())
+        {
+            return;
+        }
         if (!startupComplete && this->cache->sourceContextInfoMap)
         {
 #if ENABLE_PROFILE_INFO
@@ -4394,7 +4399,7 @@ void ScriptContext::RegisterPrototypeChainEnsuredToHaveOnlyWritableDataPropertie
                 }
             });
 #endif
-    }
+        }
         startupComplete = true;
     }
 

+ 119 - 44
lib/Runtime/ByteCode/ByteCodeEmitter.cpp

@@ -1402,21 +1402,26 @@ void ByteCodeGenerator::DefineUserVars(FuncInfo *funcInfo)
                     if ((!sym->GetHasInit() && !sym->IsInSlot(funcInfo)) ||
                         (funcInfo->bodyScope->GetIsObject() && !funcInfo->GetHasCachedScope()))
                     {
-                        Js::RegSlot reg = sym->GetLocation();
-                        if (reg == Js::Constants::NoRegister)
+                        // If the  current symbol is the duplicate arguments symbol created in the body for split
+                        // scope then load undef only if the arguments symbol is used in the body.
+                        if (!funcInfo->IsInnerArgumentsSymbol(sym) || funcInfo->GetHasArguments())
                         {
-                            Assert(sym->IsInSlot(funcInfo));
-                            reg = funcInfo->AcquireTmpRegister();
-                        }
-                        this->m_writer.Reg1(Js::OpCode::LdUndef, reg);
-                        this->EmitLocalPropInit(reg, sym, funcInfo);
+                            Js::RegSlot reg = sym->GetLocation();
+                            if (reg == Js::Constants::NoRegister)
+                            {
+                                Assert(sym->IsInSlot(funcInfo));
+                                reg = funcInfo->AcquireTmpRegister();
+                            }
+                            this->m_writer.Reg1(Js::OpCode::LdUndef, reg);
+                            this->EmitLocalPropInit(reg, sym, funcInfo);
 
-                        if (ShouldTrackDebuggerMetadata() && !sym->GetHasInit() && !sym->IsInSlot(funcInfo))
-                        {
-                            byteCodeFunction->InsertSymbolToRegSlotList(sym->GetName(), reg, funcInfo->varRegsCount);
-                        }
+                            if (ShouldTrackDebuggerMetadata() && !sym->GetHasInit() && !sym->IsInSlot(funcInfo))
+                            {
+                                byteCodeFunction->InsertSymbolToRegSlotList(sym->GetName(), reg, funcInfo->varRegsCount);
+                            }
 
-                        funcInfo->ReleaseTmpRegister(reg);
+                            funcInfo->ReleaseTmpRegister(reg);
+                        }
                     }
                 }
                 else if (ShouldTrackDebuggerMetadata())
@@ -1814,9 +1819,18 @@ void ByteCodeGenerator::InitScopeSlotArray(FuncInfo * funcInfo)
         {
             if (sym->NeedsSlotAlloc(funcInfo))
             {
-                // All properties should get correct propertyId here.
-                Assert(sym->HasScopeSlot()); // We can't allocate scope slot now. Any symbol needing scope slot must have allocated it before this point.
-                setPropertyIdForScopeSlotArray(sym->GetScopeSlot(), sym->EnsurePosition(funcInfo));
+                if (funcInfo->IsInnerArgumentsSymbol(sym) && !funcInfo->GetHasArguments())
+                {
+                    // In split scope case we have a duplicate symbol for arguments in the body (innerArgumentsSymbol).
+                    // But if arguments is not referenced in the body we don't have to allocate scope slot for it.
+                    // If we allocate one, then the debugger will assume that the arguments symbol is there and skip creating the fake one.
+                }
+                else
+                {
+                    // All properties should get correct propertyId here.
+                    Assert(sym->HasScopeSlot()); // We can't allocate scope slot now. Any symbol needing scope slot must have allocated it before this point.
+                    setPropertyIdForScopeSlotArray(sym->GetScopeSlot(), sym->EnsurePosition(funcInfo));
+                }
             }
         };
 
@@ -3264,7 +3278,10 @@ void ByteCodeGenerator::EmitOneFunction(ParseNode *pnode)
             DefineFunctions(funcInfo);
         }
 
+        InitSpecialScopeSlots(funcInfo);
+
         DefineUserVars(funcInfo);
+
         if (pnode->sxFnc.HasNonSimpleParameterList())
         {
             this->InitBlockScopedNonTemps(funcInfo->root->sxFnc.pnodeBodyScope, funcInfo);
@@ -3292,9 +3309,13 @@ void ByteCodeGenerator::EmitOneFunction(ParseNode *pnode)
             // We have to do this after the rest param is marked as false for need declaration.
             paramScope->ForEachSymbol([&](Symbol* param) {
                 Symbol* varSym = funcInfo->GetBodyScope()->FindLocalSymbol(param->GetName());
-                Assert(varSym || param->GetIsArguments() || pnode->sxFnc.pnodeName->sxVar.sym == param);
+                Assert(varSym || pnode->sxFnc.pnodeName->sxVar.sym == param);
                 Assert(param->GetIsArguments() || param->IsInSlot(funcInfo));
-                if (varSym && varSym->GetSymbolType() == STVariable && (varSym->IsInSlot(funcInfo) || varSym->GetLocation() != Js::Constants::NoRegister))
+                if (param->GetIsArguments() && !funcInfo->GetHasArguments())
+                {
+                    // Do not copy the arguments to the body if it is not used
+                }
+                else if (varSym && varSym->GetSymbolType() == STVariable && (varSym->IsInSlot(funcInfo) || varSym->GetLocation() != Js::Constants::NoRegister))
                 {
                     // Simulating EmitPropLoad here. We can't directly call the method as we have to use the param scope specifically.
                     // Walking the scope chain is not possible at this time.
@@ -3315,6 +3336,31 @@ void ByteCodeGenerator::EmitOneFunction(ParseNode *pnode)
                     funcInfo->ReleaseTmpRegister(tempReg);
                 }
             });
+
+            // In split scope as the body has a separate closure we have to copy the value of this and other special slots
+            // from param scope to the body scope
+            auto copySpecialSymbolsToBody = [this, funcInfo, paramScope] (Js::PropertyId src, Js::PropertyId dest)
+            {
+                if (dest != Js::Constants::NoProperty)
+                {
+                    Js::RegSlot tempReg = funcInfo->AcquireTmpRegister();
+                    Js::PropertyId slot = src;
+                    Js::ProfileId profileId = funcInfo->FindOrAddSlotProfileId(paramScope, slot);
+                    Js::OpCode op = paramScope->GetIsObject() ? Js::OpCode::LdParamObjSlot : Js::OpCode::LdParamSlot;
+                    slot = slot + (paramScope->GetIsObject() ? 0 : Js::ScopeSlots::FirstSlotIndex);
+
+                    this->m_writer.SlotI1(op, tempReg, slot, profileId);
+
+                    op = funcInfo->bodyScope->GetIsObject() ? Js::OpCode::StLocalObjSlot : Js::OpCode::StLocalSlot;
+                    slot = dest + (funcInfo->bodyScope->GetIsObject() ? 0 : Js::ScopeSlots::FirstSlotIndex);
+                    this->m_writer.SlotI1(op, tempReg, slot);
+                    funcInfo->ReleaseTmpRegister(tempReg);
+                }
+            };
+            copySpecialSymbolsToBody(funcInfo->innerThisScopeSlot, funcInfo->thisScopeSlot);
+            copySpecialSymbolsToBody(funcInfo->innerSuperScopeSlot, funcInfo->superScopeSlot);
+            copySpecialSymbolsToBody(funcInfo->innerSuperCtorScopeSlot, funcInfo->superCtorScopeSlot);
+            copySpecialSymbolsToBody(funcInfo->innerNewTargetScopeSlot, funcInfo->newTargetScopeSlot);
         }
 
         if (pnode->sxFnc.pnodeBodyScope != nullptr)
@@ -3593,6 +3639,8 @@ void ByteCodeGenerator::EmitScopeList(ParseNode *pnode, ParseNode *breakOnBodySc
 
                 if (paramScope && !paramScope->GetCanMergeWithBodyScope())
                 {
+                    // Before emitting the body scoped functions let us switch the special scope slot to use the body ones
+                    pnode->sxFnc.funcInfo->UseInnerSpecialScopeSlots();
                     this->EmitScopeList(pnode->sxFnc.pnodeBodyScope->sxBlock.pnodeScopes);
                 }
                 else
@@ -3672,7 +3720,7 @@ void ByteCodeGenerator::EnsureSpecialScopeSlots(FuncInfo* funcInfo, Scope* scope
     {
         if (funcInfo->isThisLexicallyCaptured)
         {
-            funcInfo->EnsureThisScopeSlot(scope);
+            funcInfo->EnsureThisScopeSlot();
         }
 
         if (((!funcInfo->IsLambda() && funcInfo->GetCallsEval())
@@ -3680,18 +3728,18 @@ void ByteCodeGenerator::EnsureSpecialScopeSlots(FuncInfo* funcInfo, Scope* scope
         {
             if (funcInfo->superRegister != Js::Constants::NoRegister)
             {
-                funcInfo->EnsureSuperScopeSlot(scope);
+                funcInfo->EnsureSuperScopeSlot();
             }
 
             if (funcInfo->superCtorRegister != Js::Constants::NoRegister)
             {
-                funcInfo->EnsureSuperCtorScopeSlot(scope);
+                funcInfo->EnsureSuperCtorScopeSlot();
             }
         }
 
         if (funcInfo->isNewTargetLexicallyCaptured)
         {
-            funcInfo->EnsureNewTargetScopeSlot(scope);
+            funcInfo->EnsureNewTargetScopeSlot();
         }
     }
     else
@@ -3704,22 +3752,50 @@ void ByteCodeGenerator::EnsureSpecialScopeSlots(FuncInfo* funcInfo, Scope* scope
 
         if (funcInfo->isThisLexicallyCaptured)
         {
-            funcInfo->EnsureThisScopeSlot(scope);
+            funcInfo->EnsureThisScopeSlot();
         }
 
         if (funcInfo->isSuperLexicallyCaptured)
         {
-            funcInfo->EnsureSuperScopeSlot(scope);
+            funcInfo->EnsureSuperScopeSlot();
         }
 
         if (funcInfo->isSuperCtorLexicallyCaptured)
         {
-            funcInfo->EnsureSuperCtorScopeSlot(scope);
+            funcInfo->EnsureSuperCtorScopeSlot();
         }
 
         if (funcInfo->isNewTargetLexicallyCaptured)
         {
-            funcInfo->EnsureNewTargetScopeSlot(scope);
+            funcInfo->EnsureNewTargetScopeSlot();
+        }
+    }
+}
+
+void ByteCodeGenerator::InitSpecialScopeSlots(FuncInfo* funcInfo)
+{
+    if (funcInfo->bodyScope->GetIsObject())
+    {
+        // In split scope make sure to do init fld for the duplicate special scope slots
+        if (funcInfo->innerThisScopeSlot != Js::Constants::NoProperty)
+        {
+            uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->bodyScope->GetLocation(), Js::PropertyIds::_lexicalThisSlotSymbol, false, true);
+            m_writer.ElementP(Js::OpCode::InitLocalFld, funcInfo->thisPointerRegister, cacheId);
+        }
+        if (funcInfo->innerSuperScopeSlot != Js::Constants::NoProperty)
+        {
+            uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->bodyScope->GetLocation(), Js::PropertyIds::_superReferenceSymbol, false, true);
+            m_writer.ElementP(Js::OpCode::InitLocalFld, funcInfo->superRegister, cacheId);
+        }
+        if (funcInfo->innerSuperCtorScopeSlot != Js::Constants::NoProperty)
+        {
+            uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->bodyScope->GetLocation(), Js::PropertyIds::_superCtorReferenceSymbol, false, true);
+            m_writer.ElementP(Js::OpCode::InitLocalFld, funcInfo->superCtorRegister, cacheId);
+        }
+        if (funcInfo->innerNewTargetScopeSlot != Js::Constants::NoProperty)
+        {
+            uint cacheId = funcInfo->FindOrAddInlineCacheId(funcInfo->bodyScope->GetLocation(), Js::PropertyIds::_lexicalNewTargetSymbol, false, true);
+            m_writer.ElementP(Js::OpCode::InitLocalFld, funcInfo->newTargetRegister, cacheId);
         }
     }
 }
@@ -3900,14 +3976,7 @@ void ByteCodeGenerator::StartEmitFunction(ParseNode *pnodeFnc)
                 MapFormalsFromPattern(pnodeFnc, [&](ParseNode *pnode) { pnode->sxVar.sym->EnsureScopeSlot(funcInfo); });
             }
 
-            if (paramScope->GetCanMergeWithBodyScope())
-            {
-                this->EnsureSpecialScopeSlots(funcInfo, bodyScope);
-            }
-            else
-            {
-                this->EnsureSpecialScopeSlots(funcInfo, paramScope);
-            }
+            this->EnsureSpecialScopeSlots(funcInfo, bodyScope);
 
             auto ensureFncDeclScopeSlots = [&](ParseNode *pnodeScope)
             {
@@ -3945,7 +4014,8 @@ void ByteCodeGenerator::StartEmitFunction(ParseNode *pnodeFnc)
                     {
                         sym = funcInfo->bodyScope->FindLocalSymbol(sym->GetName());
                     }
-                    if (sym->GetSymbolType() == STVariable && !sym->GetIsArguments())
+                    if (sym->GetSymbolType() == STVariable && !sym->GetIsArguments()
+                        && (!funcInfo->IsInnerArgumentsSymbol(sym) || funcInfo->GetHasArguments()))
                     {
                         sym->EnsureScopeSlot(funcInfo);
                     }
@@ -3963,14 +4033,7 @@ void ByteCodeGenerator::StartEmitFunction(ParseNode *pnodeFnc)
             ParseNode *pnode;
             Symbol *sym;
 
-            if (paramScope->GetCanMergeWithBodyScope())
-            {
-                this->EnsureSpecialScopeSlots(funcInfo, bodyScope);
-            }
-            else
-            {
-                this->EnsureSpecialScopeSlots(funcInfo, paramScope);
-            }
+            this->EnsureSpecialScopeSlots(funcInfo, bodyScope);
 
             pnodeFnc->sxFnc.MapContainerScopes([&](ParseNode *pnodeScope) { this->EnsureFncScopeSlots(pnodeScope, funcInfo); });
 
@@ -3983,7 +4046,8 @@ void ByteCodeGenerator::StartEmitFunction(ParseNode *pnodeFnc)
                     {
                         sym = funcInfo->bodyScope->FindLocalSymbol(sym->GetName());
                     }
-                    if (sym->GetSymbolType() == STVariable && sym->NeedsSlotAlloc(funcInfo) && !sym->GetIsArguments())
+                    if (sym->GetSymbolType() == STVariable && sym->NeedsSlotAlloc(funcInfo) && !sym->GetIsArguments()
+                        && (!funcInfo->IsInnerArgumentsSymbol(sym) || funcInfo->GetHasArguments()))
                     {
                         sym->EnsureScopeSlot(funcInfo);
                     }
@@ -4005,6 +4069,17 @@ void ByteCodeGenerator::StartEmitFunction(ParseNode *pnodeFnc)
             MapFormals(pnodeFnc, ensureScopeSlot);
             MapFormalsFromPattern(pnodeFnc, ensureScopeSlot);
 
+            if (!paramScope->GetCanMergeWithBodyScope())
+            {
+                sym = funcInfo->GetArgumentsSymbol();
+                if (sym && funcInfo->GetHasArguments())
+                {
+                    // There is no eval so the arguments may be captured in a lambda. In split scope case
+                    // we have to make sure the param arguments is also put in a slot.
+                    sym->EnsureScopeSlot(funcInfo);
+                }
+            }
+
             if (pnodeFnc->sxFnc.pnodeBody)
             {
                 this->EnsureLetConstScopeSlots(pnodeFnc->sxFnc.pnodeScopes, funcInfo);
@@ -4295,7 +4370,7 @@ void ByteCodeGenerator::StartEmitBlock(ParseNode *pnodeBlock)
         FuncInfo *funcInfo = scope->GetFunc();
         if (scope->IsGlobalEvalBlockScope() && funcInfo->isThisLexicallyCaptured)
         {
-            funcInfo->EnsureThisScopeSlot(funcInfo->GetBodyScope());
+            funcInfo->EnsureThisScopeSlot();
         }
         this->EnsureFncScopeSlots(pnodeBlock->sxBlock.pnodeScopes, funcInfo);
         this->EnsureLetConstScopeSlots(pnodeBlock, funcInfo);

+ 10 - 1
lib/Runtime/ByteCode/ByteCodeGenerator.cpp

@@ -2305,8 +2305,17 @@ void AddVarsToScope(ParseNode *vars, ByteCodeGenerator *byteCodeGenerator)
             vars->sxVar.sym = sym;
             if (sym->GetIsArguments())
             {
-                byteCodeGenerator->TopFuncInfo()->SetArgumentsSymbol(sym);
+                FuncInfo* funcInfo = byteCodeGenerator->TopFuncInfo();
+                funcInfo->SetArgumentsSymbol(sym);
+
+                if (funcInfo->paramScope && !funcInfo->paramScope->GetCanMergeWithBodyScope())
+                {
+                    Symbol* innerArgSym = funcInfo->bodyScope->FindLocalSymbol(sym->GetName());
+                    funcInfo->SetInnerArgumentsSymbol(innerArgSym);
+                    byteCodeGenerator->AssignRegister(innerArgSym);
+                }
             }
+
         }
         else
         {

+ 1 - 0
lib/Runtime/ByteCode/ByteCodeGenerator.h

@@ -179,6 +179,7 @@ public:
     void FinalizeRegisters(FuncInfo * funcInfo, Js::FunctionBody * byteCodeFunction);
     void SetClosureRegisters(FuncInfo * funcInfo, Js::FunctionBody * byteCodeFunction);
     void EnsureSpecialScopeSlots(FuncInfo* funcInfo, Scope* scope);
+    void InitSpecialScopeSlots(FuncInfo* funcInfo);
     void SetHasTry(bool has);
     void SetHasFinally(bool has);
     void SetNumberOfInArgs(Js::ArgSlot argCount);

+ 79 - 8
lib/Runtime/ByteCode/FuncInfo.cpp

@@ -73,9 +73,13 @@ FuncInfo::FuncInfo(
     localPropIdOffset(-1),
     argsPlaceHolderSlotCount(0),
     thisScopeSlot(Js::Constants::NoProperty),
+    innerThisScopeSlot(Js::Constants::NoProperty),
     superScopeSlot(Js::Constants::NoProperty),
+    innerSuperScopeSlot(Js::Constants::NoProperty),
     superCtorScopeSlot(Js::Constants::NoProperty),
+    innerSuperCtorScopeSlot(Js::Constants::NoProperty),
     newTargetScopeSlot(Js::Constants::NoProperty),
+    innerNewTargetScopeSlot(Js::Constants::NoProperty),
     isThisLexicallyCaptured(false),
     isSuperLexicallyCaptured(false),
     isSuperCtorLexicallyCaptured(false),
@@ -87,6 +91,7 @@ FuncInfo::FuncInfo(
     isInstInlineCacheCount(0),
     referencedPropertyIdCount(0),
     argumentsSymbol(nullptr),
+    innerArgumentsSymbol(nullptr),
     nonUserNonTempRegistersToInitialize(alloc),
     constantToRegister(alloc, 17),
     stringToRegister(alloc, 17),
@@ -142,36 +147,102 @@ BOOL FuncInfo::IsBaseClassConstructor() const
     return root->sxFnc.IsBaseClassConstructor();
 }
 
-void FuncInfo::EnsureThisScopeSlot(Scope* scope)
+void FuncInfo::EnsureThisScopeSlot()
 {
-    if (this->thisScopeSlot == Js::Constants::NoRegister)
+    if (this->thisScopeSlot == Js::Constants::NoProperty)
     {
+        // In case of split scope param and body has separate closures. So we have to use different scope slots for them.
+        bool isSplitScope = this->paramScope && !this->paramScope->GetCanMergeWithBodyScope();
+        Scope* scope = isSplitScope ? this->paramScope : this->bodyScope;
         Scope* currentScope = scope->IsGlobalEvalBlockScope() ? this->GetGlobalEvalBlockScope() : scope;
+
         this->thisScopeSlot = currentScope->AddScopeSlot();
+        if (isSplitScope)
+        {
+            this->innerThisScopeSlot = this->bodyScope->AddScopeSlot();
+        }
     }
 }
 
-void FuncInfo::EnsureSuperScopeSlot(Scope* scope)
+void FuncInfo::EnsureSuperScopeSlot()
 {
-    if (this->superScopeSlot == Js::Constants::NoRegister)
+    if (this->superScopeSlot == Js::Constants::NoProperty)
     {
+        // In case of split scope param and body has separate closures. So we have to use different scope slots for them.
+        bool isSplitScope = this->paramScope && !this->paramScope->GetCanMergeWithBodyScope();
+        Scope* scope = isSplitScope ? this->paramScope : this->bodyScope;
+
         this->superScopeSlot = scope->AddScopeSlot();
+        if (isSplitScope)
+        {
+            this->innerSuperScopeSlot = this->bodyScope->AddScopeSlot();
+        }
     }
 }
 
-void FuncInfo::EnsureSuperCtorScopeSlot(Scope* scope)
+void FuncInfo::EnsureSuperCtorScopeSlot()
 {
-    if (this->superCtorScopeSlot == Js::Constants::NoRegister)
+    if (this->superCtorScopeSlot == Js::Constants::NoProperty)
     {
+        // In case of split scope param and body has separate closures. So we have to use different scope slots for them.
+        bool isSplitScope = this->paramScope && !this->paramScope->GetCanMergeWithBodyScope();
+        Scope* scope = isSplitScope ? this->paramScope : this->bodyScope;
+
         this->superCtorScopeSlot = scope->AddScopeSlot();
+        if (isSplitScope)
+        {
+            this->innerSuperCtorScopeSlot = this->bodyScope->AddScopeSlot();
+        }
     }
 }
 
-void FuncInfo::EnsureNewTargetScopeSlot(Scope* scope)
+void FuncInfo::EnsureNewTargetScopeSlot()
 {
-    if (this->newTargetScopeSlot == Js::Constants::NoRegister)
+    if (this->newTargetScopeSlot == Js::Constants::NoProperty)
     {
+        // In case of split scope param and body has separate closures. So we have to use different scope slots for them.
+        bool isSplitScope = this->paramScope && !this->paramScope->GetCanMergeWithBodyScope();
+        Scope* scope = isSplitScope ? this->paramScope : this->bodyScope;
+
         this->newTargetScopeSlot = scope->AddScopeSlot();
+        if (isSplitScope)
+        {
+            this->innerNewTargetScopeSlot = this->bodyScope->AddScopeSlot();
+        }
+    }
+}
+
+void FuncInfo::UseInnerSpecialScopeSlots()
+{
+    Assert(this->paramScope != nullptr && !this->paramScope->GetCanMergeWithBodyScope());
+    Js::PropertyId temp = Js::Constants::NoProperty;
+    if (this->thisScopeSlot != Js::Constants::NoProperty)
+    {
+        Assert(this->innerThisScopeSlot != Js::Constants::NoProperty);
+        temp = this->thisScopeSlot;
+        this->thisScopeSlot = this->innerThisScopeSlot;
+        this->innerThisScopeSlot = temp;
+    }
+    if (this->superScopeSlot != Js::Constants::NoProperty)
+    {
+        Assert(this->innerSuperScopeSlot != Js::Constants::NoProperty);
+        temp = this->superScopeSlot;
+        this->superScopeSlot = this->innerSuperScopeSlot;
+        this->innerSuperScopeSlot = temp;
+    }
+    if (this->superCtorScopeSlot != Js::Constants::NoProperty)
+    {
+        Assert(this->innerSuperCtorScopeSlot != Js::Constants::NoProperty);
+        temp = this->superCtorScopeSlot;
+        this->superCtorScopeSlot = this->innerSuperCtorScopeSlot;
+        this->innerSuperCtorScopeSlot = temp;
+    }
+    if (this->newTargetScopeSlot != Js::Constants::NoProperty)
+    {
+        Assert(this->innerNewTargetScopeSlot != Js::Constants::NoProperty);
+        temp = this->newTargetScopeSlot;
+        this->newTargetScopeSlot = this->innerNewTargetScopeSlot;
+        this->innerNewTargetScopeSlot = temp;
     }
 }
 

+ 26 - 4
lib/Runtime/ByteCode/FuncInfo.h

@@ -172,14 +172,19 @@ public:
     typedef JsUtil::BaseDictionary<SlotKey, Js::ProfileId, ArenaAllocator, PowerOf2SizePolicy, SlotKeyComparer> SlotProfileIdMap;
     SlotProfileIdMap slotProfileIdMap;
     Js::PropertyId thisScopeSlot;
+    Js::PropertyId innerThisScopeSlot; // Used in case of split scope
     Js::PropertyId superScopeSlot;
+    Js::PropertyId innerSuperScopeSlot; // Used in case of split scope
     Js::PropertyId superCtorScopeSlot;
+    Js::PropertyId innerSuperCtorScopeSlot; // Used in case of split scope
     Js::PropertyId newTargetScopeSlot;
+    Js::PropertyId innerNewTargetScopeSlot; // Used in case of split scope
     bool isThisLexicallyCaptured;
     bool isSuperLexicallyCaptured;
     bool isSuperCtorLexicallyCaptured;
     bool isNewTargetLexicallyCaptured;
     Symbol *argumentsSymbol;
+    Symbol *innerArgumentsSymbol;
     JsUtil::List<Js::RegSlot, ArenaAllocator> nonUserNonTempRegistersToInitialize;
 
     // constRegsCount is set to 2 because R0 is the return register, and R1 is the root object.
@@ -289,6 +294,22 @@ public:
         argumentsSymbol = sym;
     }
 
+    Symbol *GetInnerArgumentsSymbol() const
+    {
+        return innerArgumentsSymbol;
+    }
+
+    void SetInnerArgumentsSymbol(Symbol *sym)
+    {
+        Assert(innerArgumentsSymbol == nullptr || innerArgumentsSymbol == sym);
+        innerArgumentsSymbol = sym;
+    }
+
+    bool IsInnerArgumentsSymbol(Symbol* sym)
+    {
+        return innerArgumentsSymbol != nullptr && innerArgumentsSymbol == sym;
+    }
+
     bool GetCallsEval() const {
         return callsEval;
     }
@@ -743,10 +764,11 @@ public:
         return profileId;
     }
 
-    void EnsureThisScopeSlot(Scope* scope);
-    void EnsureSuperScopeSlot(Scope* scope);
-    void EnsureSuperCtorScopeSlot(Scope* scope);
-    void EnsureNewTargetScopeSlot(Scope* scope);
+    void EnsureThisScopeSlot();
+    void EnsureSuperScopeSlot();
+    void EnsureSuperCtorScopeSlot();
+    void EnsureNewTargetScopeSlot();
+    void UseInnerSpecialScopeSlots();
 
     void SetIsThisLexicallyCaptured()
     {

+ 7 - 0
lib/Runtime/ByteCode/Scope.cpp

@@ -95,6 +95,13 @@ void Scope::SetIsObject()
             return false;
         });
     }
+
+    if (this->GetScopeType() == ScopeType_FunctionBody && funcInfo && funcInfo->paramScope
+        && !funcInfo->paramScope->GetIsObject() && !funcInfo->paramScope->GetCanMergeWithBodyScope())
+    {
+        // If this is split scope then mark the param scope also as an object
+        funcInfo->paramScope->SetIsObject();
+    }
 }
 
 void Scope::MergeParamAndBodyScopes(ParseNode *pnodeScope, ByteCodeGenerator *byteCodeGenerator)

+ 2 - 1
lib/Runtime/ByteCode/ScopeInfo.cpp

@@ -14,7 +14,8 @@ namespace Js
         // We don't need to create slot for or save "arguments"
         if (!sym->GetIsArguments()
             // Function expression may not have nonLocalReference, exclude them.
-            && (!sym->GetFuncExpr() || sym->GetHasNonLocalReference()))
+            && (!sym->GetFuncExpr() || sym->GetHasNonLocalReference())
+            && (!mapSymbolData->func->IsInnerArgumentsSymbol(sym) || mapSymbolData->func->GetHasArguments()))
         {
             // Any symbol may have non-local ref from deferred child. Allocate slot for it.
             Assert(sym->GetHasNonLocalReference());

+ 1 - 1
lib/Runtime/ByteCode/Symbol.cpp

@@ -83,7 +83,7 @@ bool Symbol::IsInSlot(FuncInfo *funcInfo, bool ensureSlotAlloc)
         return true;
     }
     // If body and param scopes are not merged then an inner scope slot is used
-    if (!this->GetIsArguments() && this->scope->GetScopeType() == ScopeType_Parameter && !this->scope->GetCanMergeWithBodyScope())
+    if (this->scope->GetScopeType() == ScopeType_Parameter && !this->scope->GetCanMergeWithBodyScope())
     {
         return true;
     }

+ 1 - 1
lib/Runtime/Language/DynamicProfileStorage.h

@@ -18,7 +18,7 @@ public:
     static Js::SourceDynamicProfileManager * Load(__in_z char16 const * filename, Fn loadFn);
     static void SaveRecord(__in_z char16 const * filename, __in_ecount(sizeof(DWORD) + *record) char const * record);
 
-    static char * AllocRecord(DWORD bufferSize);
+    static char * AllocRecord(__declspec(guard(overflow)) DWORD bufferSize);
     static void DeleteRecord(__in_ecount(sizeof(DWORD) + *record) char const * record);
     static char const * GetRecordBuffer(__in_ecount(sizeof(DWORD) + *record) char const * record);
     static char * GetRecordBuffer(__in_ecount(sizeof(DWORD) + *record) char * record);

+ 6 - 1
lib/Runtime/Language/InlineCache.cpp

@@ -909,8 +909,12 @@ namespace Js
     }
 #endif
 
-    bool EquivalentTypeSet::Contains(const Js::Type * type, uint16* pIndex) const
+    bool EquivalentTypeSet::Contains(const Js::Type * type, uint16* pIndex)
     {
+        if (!this->GetSortedAndDuplicatesRemoved())
+        {
+            this->SortAndRemoveDuplicates();
+        }
         for (uint16 ti = 0; ti < this->count; ti++)
         {
             if (this->types[ti] == type)
@@ -1013,6 +1017,7 @@ namespace Js
                 Type* tmp = this->types[j];
                 this->types[j] = this->types[j - 1];
                 this->types[j - 1] = tmp;
+                j--;
             }
         }
 

+ 1 - 1
lib/Runtime/Language/InlineCache.h

@@ -552,7 +552,7 @@ namespace Js
         {
             return this->sortedAndDuplicatesRemoved;
         }
-        bool Contains(const Js::Type * type, uint16 * pIndex = nullptr) const;
+        bool Contains(const Js::Type * type, uint16 * pIndex = nullptr);
 
         static bool AreIdentical(EquivalentTypeSet * left, EquivalentTypeSet * right);
         static bool IsSubsetOf(EquivalentTypeSet * left, EquivalentTypeSet * right);

+ 4 - 4
lib/Runtime/Language/InterpreterStackFrame.cpp

@@ -26,9 +26,9 @@
 /// - X: Nothing
 ///
 /// Examples:
-/// - "A2toA1" reads two registers, each storing a Var, and writes a single
+/// - "A2toA1" reads two registers, each storing an Var, and writes a single
 ///   register with a new Var.
-/// - "A1I1toA2" reads two registers, first a Var and second an Int32, then
+/// - "A1I1toA2" reads two registers, first an Var and second an Int32, then
 ///   writes two Var registers.
 ///
 /// Although these could use lookup tables to standard OpLayout types, this
@@ -1180,6 +1180,7 @@ namespace Js
                 {
                     uint32 scopeSlots = this->executeFunction->scopeSlotArraySize;
                     Assert(scopeSlots != 0);
+                    ScopeSlots((Var*)nextAllocBytes).SetCount(scopeSlots);
                     newInstance->localClosure = nextAllocBytes;
                     nextAllocBytes += (scopeSlots + ScopeSlots::FirstSlotIndex) * sizeof(Var);
                 }
@@ -2145,7 +2146,7 @@ namespace Js
 
     inline void InterpreterStackFrame::OP_SetOutAsmDb( RegSlot outRegisterID, double val )
     {
-        Assert( m_outParams + outRegisterID < m_outSp );
+        Assert(m_outParams + outRegisterID < m_outSp);
         m_outParams[outRegisterID] = JavascriptNumber::NewWithCheck( val, scriptContext );
     }
 
@@ -7565,7 +7566,6 @@ const byte * InterpreterStackFrame::OP_ProfiledLoopBodyStart(const byte * ip)
         }
         SetRegRawSimd(playout->I4_0, result);
     }
-
     // handler for SIMD.Uint32x4.FromFloat32x4
     template <class T>
     void InterpreterStackFrame::OP_SimdUint32x4FromFloat32x4(const unaligned T* playout)

+ 2 - 2
lib/Runtime/Language/JavascriptOperators.h

@@ -557,13 +557,13 @@ namespace Js
         static Var OP_AsyncSpawn(Js::Var aGenerator, Js::Var aThis, ScriptContext* scriptContext);
 
         template <typename T>
-        static void * JitRecyclerAlloc(size_t size, Recycler* recycler)
+        static void * JitRecyclerAlloc(__declspec(guard(overflow)) size_t size, Recycler* recycler)
         {
             TRACK_ALLOC_INFO(recycler, T, Recycler, size - sizeof(T), (size_t)-1);
             return recycler->AllocZero(size);
         }
 
-        static void * AllocMemForVarArray(size_t size, Recycler* recycler);
+        static void * AllocMemForVarArray(__declspec(guard(overflow)) size_t size, Recycler* recycler);
         static void * AllocUninitializedNumber(RecyclerJavascriptNumberAllocator * allocator);
 
         static void ScriptAbort();

+ 4 - 0
lib/Runtime/Language/arm/arm_Thunks.asm

@@ -31,6 +31,10 @@
     ;JavascriptMethod ScriptContext::ProfileModeDeferredDeserialize(ScriptFunction *function)
     IMPORT  |?ProfileModeDeferredDeserialize@ScriptContext@Js@@SAP6APAXPAVRecyclableObject@2@UCallInfo@2@ZZPAVScriptFunction@2@@Z|
 
+#if defined(_CONTROL_FLOW_GUARD)
+    IMPORT __guard_check_icall_fptr
+#endif
+
     TEXTAREA
 
 #ifdef _ENABLE_DYNAMIC_THUNKS

+ 16 - 16
lib/Runtime/Library/ArrayBuffer.h

@@ -58,9 +58,9 @@ namespace Js
         };
 
         template <typename Allocator>
-        ArrayBuffer(uint32 length, DynamicType * type, Allocator allocator);
+        ArrayBuffer(__declspec(guard(overflow)) uint32 length, DynamicType * type, Allocator allocator);
 
-        ArrayBuffer(byte* buffer, uint32 length, DynamicType * type);
+        ArrayBuffer(byte* buffer, __declspec(guard(overflow)) uint32 length, DynamicType * type);
 
         class EntryInfo
         {
@@ -132,8 +132,8 @@ namespace Js
         virtual bool IsValidVirtualBufferLength(uint length) { return false; }
     protected:
         typedef void __cdecl FreeFn(void* ptr);
-        virtual ArrayBufferDetachedStateBase* CreateDetachedState(BYTE* buffer, uint32 bufferLength) = 0;
-        virtual ArrayBuffer * TransferInternal(uint32 newBufferLength) = 0;
+        virtual ArrayBufferDetachedStateBase* CreateDetachedState(BYTE* buffer, __declspec(guard(overflow)) uint32 bufferLength) = 0;
+        virtual ArrayBuffer * TransferInternal(__declspec(guard(overflow)) uint32 newBufferLength) = 0;
 
         inline BOOL IsBuiltinProperty(PropertyId);
         static uint32 GetIndexFromVar(Js::Var arg, uint32 length, ScriptContext* scriptContext);
@@ -206,11 +206,11 @@ namespace Js
         DEFINE_MARSHAL_OBJECT_TO_SCRIPT_CONTEXT(JavascriptArrayBuffer);
 
     public:
-        static JavascriptArrayBuffer* Create(uint32 length, DynamicType * type);
-        static JavascriptArrayBuffer* Create(byte* buffer, uint32 length, DynamicType * type);
+        static JavascriptArrayBuffer* Create(__declspec(guard(overflow)) uint32 length, DynamicType * type);
+        static JavascriptArrayBuffer* Create(byte* buffer, __declspec(guard(overflow)) uint32 length, DynamicType * type);
         virtual void Dispose(bool isShutdown) override;
         virtual void Finalize(bool isShutdown) override;
-        static void*__cdecl  AllocWrapper(size_t length)
+        static void*__cdecl  AllocWrapper(__declspec(guard(overflow)) size_t length)
         {
 #if _WIN64
             LPVOID address = VirtualAlloc(nullptr, MAX_ASMJS_ARRAYBUFFER_LENGTH, MEM_RESERVE, PAGE_NOACCESS);
@@ -244,8 +244,8 @@ namespace Js
 
     protected:
         JavascriptArrayBuffer(DynamicType * type);
-        virtual ArrayBufferDetachedStateBase* CreateDetachedState(BYTE* buffer, uint32 bufferLength) override;
-        virtual ArrayBuffer * TransferInternal(uint32 newBufferLength) override;
+        virtual ArrayBufferDetachedStateBase* CreateDetachedState(BYTE* buffer, __declspec(guard(overflow)) uint32 bufferLength) override;
+        virtual ArrayBuffer * TransferInternal(__declspec(guard(overflow)) uint32 newBufferLength) override;
     private:
         JavascriptArrayBuffer(uint32 length, DynamicType * type);
         JavascriptArrayBuffer(byte* buffer, uint32 length, DynamicType * type);
@@ -258,17 +258,17 @@ namespace Js
         DEFINE_VTABLE_CTOR(ProjectionArrayBuffer, ArrayBuffer);
         DEFINE_MARSHAL_OBJECT_TO_SCRIPT_CONTEXT(ProjectionArrayBuffer);
         typedef void __stdcall FreeFn(LPVOID ptr);
-        virtual ArrayBufferDetachedStateBase* CreateDetachedState(BYTE* buffer, uint32 bufferLength) override
+        virtual ArrayBufferDetachedStateBase* CreateDetachedState(BYTE* buffer, __declspec(guard(overflow)) uint32 bufferLength) override
         {
             return HeapNew(ArrayBufferDetachedState<FreeFn>, buffer, bufferLength, CoTaskMemFree, ArrayBufferAllocationType::CoTask);
         }
-        virtual ArrayBuffer * TransferInternal(uint32 newBufferLength) override;
+        virtual ArrayBuffer * TransferInternal(__declspec(guard(overflow)) uint32 newBufferLength) override;
 
     public:
         // Create constructor. script engine creates a buffer allocated via CoTaskMemAlloc.
-        static ProjectionArrayBuffer* Create(uint32 length, DynamicType * type);
+        static ProjectionArrayBuffer* Create(__declspec(guard(overflow)) uint32 length, DynamicType * type);
         // take over ownership. a CoTaskMemAlloc'ed buffer passed in via projection.
-        static ProjectionArrayBuffer* Create(byte* buffer, uint32 length, DynamicType * type);
+        static ProjectionArrayBuffer* Create(byte* buffer, __declspec(guard(overflow)) uint32 length, DynamicType * type);
         virtual void Dispose(bool isShutdown) override;
         virtual void Finalize(bool isShutdown) override {};
     private:
@@ -283,10 +283,10 @@ namespace Js
         DEFINE_VTABLE_CTOR(ExternalArrayBuffer, ArrayBuffer);
         DEFINE_MARSHAL_OBJECT_TO_SCRIPT_CONTEXT(ExternalArrayBuffer);
     public:
-        ExternalArrayBuffer(byte *buffer, uint32 length, DynamicType *type);
+        ExternalArrayBuffer(byte *buffer, __declspec(guard(overflow)) uint32 length, DynamicType *type);
     protected:
-        virtual ArrayBufferDetachedStateBase* CreateDetachedState(BYTE* buffer, uint32 bufferLength) override { Assert(UNREACHED); Throw::InternalError(); };
-        virtual ArrayBuffer * TransferInternal(uint32 newBufferLength) override { Assert(UNREACHED); Throw::InternalError(); };
+        virtual ArrayBufferDetachedStateBase* CreateDetachedState(BYTE* buffer, __declspec(guard(overflow)) uint32 bufferLength) override { Assert(UNREACHED); Throw::InternalError(); };
+        virtual ArrayBuffer * TransferInternal(__declspec(guard(overflow)) uint32 newBufferLength) override { Assert(UNREACHED); Throw::InternalError(); };
     };
 }
 

+ 46 - 4
lib/Runtime/Library/JavascriptArray.cpp

@@ -3308,9 +3308,52 @@ namespace Js
         pDestObj = ArraySpeciesCreate(args[0], 0, scriptContext);
         if (pDestObj)
         {
-            isInt = JavascriptNativeIntArray::Is(pDestObj);
-            isFloat = !isInt && JavascriptNativeFloatArray::Is(pDestObj); // if we know it is an int short the condition to avoid a function call
-            isArray = isInt || isFloat || JavascriptArray::Is(pDestObj);
+            // Check the thing that species create made. If it's a native array that can't handle the source
+            // data, convert it. If it's a more conservative kind of array than the source data, indicate that
+            // so that the data will be converted on copy.
+            if (isInt)
+            {
+                if (JavascriptNativeIntArray::Is(pDestObj))
+                {
+                    isArray = true;
+                }
+                else
+                {
+                    isInt = false;
+                    isFloat = JavascriptNativeFloatArray::Is(pDestObj);
+                    isArray = JavascriptArray::Is(pDestObj);
+                }
+            }
+            else if (isFloat)
+            {
+                if (JavascriptNativeIntArray::Is(pDestObj))
+                {
+                    JavascriptNativeIntArray::ToNativeFloatArray(JavascriptNativeIntArray::FromVar(pDestObj));
+                    isArray = true;
+                }
+                else
+                {
+                    isFloat = JavascriptNativeFloatArray::Is(pDestObj);
+                    isArray = JavascriptArray::Is(pDestObj);
+                }
+            }
+            else
+            {
+                if (JavascriptNativeIntArray::Is(pDestObj))
+                {
+                    JavascriptNativeIntArray::ToVarArray(JavascriptNativeIntArray::FromVar(pDestObj));
+                    isArray = true;
+                }
+                else if (JavascriptNativeFloatArray::Is(pDestObj))
+                {
+                    JavascriptNativeFloatArray::ToVarArray(JavascriptNativeFloatArray::FromVar(pDestObj));
+                    isArray = true;
+                }
+                else
+                {
+                    isArray = JavascriptArray::Is(pDestObj);
+                }
+            }
         }
 
         if (pDestObj == nullptr || isArray)
@@ -8149,7 +8192,6 @@ Case0:
                     }
                 }
             }
-
         }
 
         return scriptContext->GetLibrary()->GetTrue();

+ 1 - 1
lib/Runtime/Library/JavascriptProxy.cpp

@@ -1339,7 +1339,7 @@ namespace Js
         Var getPrototypeOfResult;
         if (nullptr == getPrototypeOfMethod || GetScriptContext()->IsHeapEnumInProgress())
         {
-            return target->GetPrototype();
+            return RecyclableObject::FromVar(JavascriptObject::GetPrototypeOf(target, scriptContext));
         }
         CallInfo callInfo(CallFlags_Value, 2);
         Var varArgs[2];

+ 1 - 1
test/AsmJs/rlexe.xml

@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
 <regress-exe>
   <test>
     <default>

+ 1 - 1
test/es6/HTMLComments.js

@@ -25,7 +25,7 @@ WScript.Echo("Code before CRLF--> is reachable");
 WScript.Echo("Code before CR--> is reachable");
--> WScript.Echo("Code after CR--> is unreachable");
 
 // LF
-WScript.Echo("Code before LF--> is reachable");
+WScript.Echo("Code before LF--> is reachable");
 --> WScript.Echo("Code after LF--> is unreachable");
 
 // LS

+ 35 - 0
test/es6/default-splitscope-undodeferparse.js

@@ -62,3 +62,38 @@ function foo3(a = 10, b = function () { return a; }) {
     }
 }
 foo3();
+
+function f9(a, b = function () { a; } ) {
+    if (eval("a") !== 1) {
+        print("FAILED")
+    } else {
+        print("PASSED");
+    }
+    if (arguments[0] !== 1) {
+        print("FAILED")
+    } else {
+        print("PASSED");
+    }
+    if (eval("arguments[0]") !== 1) {
+        print("FAILED")
+    } else {
+        print("PASSED");
+    }
+    arguments = 100;
+    if (eval("a") !== 1) {
+        print("FAILED")
+    } else {
+        print("PASSED");
+    }
+    if (eval("arguments") !== 100) {
+        print("FAILED")
+    } else {
+        print("PASSED");
+    }
+    if (arguments !== 100) {
+        print("FAILED")
+    } else {
+        print("PASSED");
+    }
+}
+f9(1);

+ 482 - 1
test/es6/default-splitscope.js

@@ -319,6 +319,39 @@ var tests = [
             return f10.call(this);
         }
         assert.areEqual(thisObj, f9.call(thisObj)()(), "This object is returned properly from the inner lambda defnied inside a double nested split scoped function");
+        
+        function f11(a = this.x * 10, b = () => { a; return this; }) {
+            assert.areEqual(10, a, "this should be accessible in the parameter scope");
+            assert.areEqual(thisObj, this, "Body scope should get the right value for this object");
+            assert.isTrue(eval("thisObj == this"), "Eval should be able to access the this object properly");
+            return b;
+        }
+        assert.areEqual(thisObj, f11.call(thisObj)(), "Lambda defined in the param scope returns the right this object"); 
+
+        function f12(a = this.x * 10, b = () => { a; return this; }) {
+            var c = 100;
+            assert.areEqual(10, a, "this should be accessible in the parameter scope");
+            assert.areEqual(thisObj, this, "Body scope should get the right value for this object");
+            assert.isTrue(eval("thisObj == this"), "Eval should be able to access the this object properly");
+            assert.areEqual(thisObj, (() => this)(), "Lambda should capture the this object from body properly");
+            assert.areEqual(100, c, "Body variable should be unaffected by the slot allocation of this object");
+            return b;
+        }
+        assert.areEqual(thisObj, f12.call(thisObj)(), "Lambda defined in the param scope returns the right this object");
+
+        function f13(a = 10, b = () => { a; return this; }) {
+            var c = 100;
+            assert.areEqual(thisObj, this, "Body scope should get the right value for this object");
+            var d = () => this;
+            this.x = 5;
+            assert.isTrue(eval("this.x == 5"), "Eval should be able to access the this object properly after the field is updated");
+            assert.isTrue(eval("d().x == 5"), "Lambda should capture the this symbol from the body properly");
+            assert.isTrue(eval("a == 10"), "Eval should be able to access the first parameter properly");
+            assert.isTrue(eval("b().x == 5"), "Lambda from the param scope should capture the this symbol properly");
+            assert.isTrue(eval("d().x == 5"), "Lambda should capture the this symbol from the body properly");
+            return b;
+        }
+        assert.areEqual(5, f13.call(thisObj)().x, "Lambda defined in the param scope returns the same this object as the one in body"); 
     } 
   },
   { 
@@ -688,6 +721,350 @@ var tests = [
         assert.areEqual(1, f6()(), "Symbol capture at the param scope is unaffected by other references in the body and param");
     }
   },
+  {
+    name : "Split scope and arguments symbol",
+    body : function () {
+        assert.throws(function () { eval("function f(a = arguments, b = () => a) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        assert.throws(function () { eval("function f1() { function f2(a = arguments, b = () => a) { } }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope inside another function", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        assert.throws(function () { eval("function f(a = arguments, b = () => a, c = eval('')) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with eval", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        assert.throws(function () { eval("function f(a = arguments = [1, 2], b = () => a) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        assert.throws(function () { eval("function f(a = 10, b = () => a, c = arguments) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        assert.throws(function () { eval("function f(a = 10, b = () => a, c = a = arguments) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        assert.throws(function () { eval("function f(a, b = () => { a; arguments}) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list when captured in lambda method", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        assert.throws(function () { eval("function f(a = 10, b = (c = arguments) => a) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list when captured in a lambda in split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        assert.throws(function () { eval("function f(a, b = () => a, c = () => { return arguments; }) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list in split scope when captured by a lambda method", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        assert.throws(function () { eval("function f(a = 10, b = () => a, c = () => () => arguments) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list in split scope when captured by nested lambda", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        assert.throws(function () { eval("function f3(a, arguments = function () { return a; } ) { }"); }, SyntaxError, "Use of arguments as a parameter name is not allowed in non-simple parameter list in split scope when captured by nested lambda", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        assert.throws(function () { eval("function f3({a, arguments = function () { return a; }}) { }"); }, SyntaxError, "Use of arguments as a parameter name is not allowed in destructuring parameter list in split scope when captured by nested lambda", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        assert.throws(function () { eval("function f3({a = arguments}, b = function () { return a; } ) { }"); }, SyntaxError, "Use of arguments is not allowed in destructuring parameter list in split scope when captured by nested lambda", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
+        
+        function f1(a, b = () => a) {
+            eval("");
+            b = () => { return arguments; };
+            assert.areEqual(1, arguments[0], "Arguments object receives the first parameter properly");
+            assert.areEqual(1, b()[0], "First argument receives the right value passed in");
+            assert.areEqual(undefined, b()[1], "Second argument receives the right value passed in");
+            assert.areEqual(2, arguments.length, "Arguments should have only two elements in it");
+        }
+        f1(1, undefined);
+        
+        function f2(a, b = () => { return a; }) {
+            a = 10;
+            assert.areEqual(1, arguments[0], "First argument is properly received");
+            assert.areEqual(2, arguments[2], "Third argument is properly received");
+            assert.areEqual(3, arguments.length, "Only three arguments are passed in");
+            (() => { arguments = [3, 4]; a; })();
+            assert.areEqual(3, arguments[0], "Arguments symbol is updated with the new value when the lambda is executed");
+            assert.areEqual(4, arguments[1], "New array is properly assigned to arguments symbol");
+            assert.areEqual(2, arguments.length, "New array has only elements");
+            
+            return b;
+        }
+        assert.areEqual(1, f2(1, undefined, 2)(), "Param scope method properly captures the first parameter");
+        
+        function f3(a, b = () => { return a; }) {
+            eval("");
+            a = 10;
+            assert.areEqual(1, arguments[0], "First argument is properly received");
+            assert.areEqual(2, arguments[2], "Third argument is properly received");
+            assert.areEqual(3, arguments.length, "Only three arguments are passed in");
+            (() => { arguments = [3, 4]; a; })();
+            assert.areEqual(3, arguments[0], "Arguments symbol is updated with the new value when the lambda is executed");
+            assert.areEqual(4, arguments[1], "New array is properly assigned to arguments symbol");
+            assert.areEqual(2, arguments.length, "New array has only elements");
+            
+            return b;
+        }
+        assert.areEqual(1, f3(1, undefined, 2)(), "Param scope method properly captures the first parameter, with eval in the body");
+        
+        function f4(a, b = function () { a; } ) {
+            var c = 10;
+            assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
+            eval("");
+        }
+        f4(1);
+        
+        function f5(a, b = function () { a; } ) {
+            var c = 10;
+            assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
+            arguments = 100;
+            assert.areEqual(100, arguments, "Arguments is updated after the assignment");
+            eval("");
+        }
+        f5(1);
+        
+        function f6(a, b = function () { a; } ) {
+            assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
+            arguments = 100;
+            assert.areEqual(100, arguments, "Arguments is updated after the assignment");
+        }
+        f6(1);
+        
+        function f7(a, b = function () { a; } ) {
+            assert.areEqual(5, arguments(), "Function definition is hoisted");
+            function arguments() { return 5; }
+        }
+        f7(1);
+        
+        function f8(a, b = function () { a; } ) {
+            assert.areEqual(5, arguments(), "Function definition is hoisted");
+            function arguments() { return 5; }
+            eval("");
+        }
+        f8(1);
+        
+        function f9(a, b = function () { a; } ) {
+            assert.areEqual(1, eval("a"), "Eval should be able to access the first argument properly");
+            assert.areEqual(1, eval("arguments[0]"), "Eval should be able to access the first argument properly from arguments object");
+            assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
+            arguments = 100;
+            assert.areEqual(100, arguments, "Arguments is updated after the assignment");
+            assert.areEqual(100, eval("arguments"), "Updated value of arguments is visible in eval");
+            assert.areEqual(1, eval("a"), "First argument remains unchanged after the arguments are updated");
+        }
+        f9(1);
+        
+        function f10(a, b = function () { a; } ) {
+            assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
+            var arguments = 100;
+            assert.areEqual(100, arguments, "Arguments is updated after the assignment");
+        }
+        f10(1);
+        
+        function f11(a, b = function () { a; } ) {
+            assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
+            var arguments = 100;
+            assert.areEqual(100, arguments, "Arguments is updated after the assignment");
+            eval("");
+        }
+        f11(1);
+        
+        function f12(a, b = function () { a; } ) {
+            assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
+            b = () => arguments;
+            assert.areEqual(1, b()[0], "Lambda captures the right arguments symbol");
+            var arguments = 100;
+            assert.areEqual(100, arguments, "Arguments is updated after the assignment");
+            assert.areEqual(100, b(), "Lambda now gives the updated value");
+            eval("");
+        }
+        f12(1);
+        
+        function f13(a, b = () => { return a; }) {
+            a = 10;
+            assert.areEqual(1, arguments[0], "First argument is properly received");
+            assert.areEqual(2, arguments[2], "Third argument is properly received");
+            assert.areEqual(3, arguments.length, "Only three arguments are passed in");
+            ((c = arguments = [3, 4]) => { a; })();
+            assert.areEqual(3, arguments[0], "Arguments symbol is updated with the new value when the lambda is executed");
+            assert.areEqual(4, arguments[1], "New array is properly assigned to arguments symbol");
+            assert.areEqual(2, arguments.length, "New array has only elements");
+            
+            return b;
+        }
+        assert.areEqual(1, f13(1, undefined, 2)(), "Param scope method properly captures the first parameter");
+        
+        function f14(a, b = () => { return a; }) {
+            eval("");
+            a = 10;
+            assert.areEqual(1, arguments[0], "First argument is properly received");
+            assert.areEqual(2, arguments[2], "Third argument is properly received");
+            assert.areEqual(3, arguments.length, "Only three arguments are passed in");
+            ((c = arguments = [3, 4]) => { a; })();
+            assert.areEqual(3, arguments[0], "Arguments symbol is updated with the new value when the lambda is executed");
+            assert.areEqual(4, arguments[1], "New array is properly assigned to arguments symbol");
+            assert.areEqual(2, arguments.length, "New array has only elements");
+            
+            return b;
+        }
+        assert.areEqual(1, f14(1, undefined, 2)(), "Param scope method properly captures the first parameter, with eval in the body");
+        
+        function f15(a, b = function () { a; }, ...c) {
+            assert.areEqual(1, arguments[0], "Checking first argument");
+            assert.areEqual(undefined, arguments[1], "Checking second argument");
+            assert.areEqual(2, arguments[2], "Checking third argument");
+            assert.areEqual(3, arguments[3], "Checking fourth argument");
+            assert.areEqual([2, 3], c, "Rest argument should get the trailing parameters properly");
+            var arguments = 100;
+            assert.areEqual(100, arguments, "Arguments is updated after the assignment");
+            assert.areEqual([2, 3], c, "Rest should remain unaffected when arguments is updated");
+            eval("");
+        }
+        f15(1, undefined, 2, 3);
+        
+        var f16 = function f17(a, b = function () { a; }, ...c) {
+            if (a === 1) {
+                assert.areEqual(1, arguments[0], "Checking first argument");
+                assert.areEqual(undefined, arguments[1], "Checking second argument");
+                assert.areEqual(2, arguments[2], "Checking third argument");
+                assert.areEqual(3, arguments[3], "Checking fourth argument");
+                assert.areEqual([2, 3], c, "Rest argument should get the trailing parameters properly");
+                return f17(undefined, undefined, ...c);
+            } else {
+                assert.areEqual(undefined, arguments[0], "Checking first argument on the recursive call");
+                assert.areEqual(undefined, arguments[1], "Checking second argument on the recursive call");
+                assert.areEqual(2, arguments[2], "Checking third argument on the recursive call");
+                assert.areEqual(3, arguments[3], "Checking fourth argument on the recursive call");
+                assert.areEqual([2, 3], c, "Rest argument should get the trailing parameters properly");
+                var arguments = 100;
+                assert.areEqual(100, arguments, "Arguments is updated after the assignment");
+                assert.areEqual([2, 3], c, "Rest should remain unaffected when arguments is updated");
+                return eval("c");
+            }
+        }
+        assert.areEqual([2, 3], f16(1, undefined, 2, 3), "Rest should remain unaffected when arguments is updated");
+    }  
+  },
+  {
+    name: "Split scope and super call",
+    body: function () {
+        class c1 {
+            constructor() {
+                return { x : 1 };
+            }
+        };
+
+        class c2 extends c1 {
+            constructor(a = 1, b = () => { assert.areEqual(1, super().x, "Super is accessible in the param scope"); return a; }) {
+                var c = 10;
+                a = 20;
+                (() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
+                assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
+                return {};
+            }
+        }
+        new c2();
+
+        class c3 extends c1 {
+            constructor(a = 1, b = () => { return a; }) {
+                (() => assert.areEqual(1, super().x, "Lambda should be able to access the super method properly in the body"))();
+                a = 10;
+                assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
+            }
+        }
+        new c3();
+
+        class c4 extends c1 {
+            constructor(a = 1, b = () => { return a; }) {
+                var c = 10;
+                (() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
+                assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
+                assert.areEqual(1, eval("super().x"), "Eval should be able to access the super property properly");
+            }
+        }
+        new c4();
+
+        class c5 extends c1 {
+            constructor(a = super().x, b = () => { return a; }) {
+                assert.areEqual(1, a, "First formal calls the super from the param scope");
+                var c = 10;
+                (() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
+                assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
+            }
+        }
+        new c5();
+    }
+  },
+  {
+    name: "Split scope and super property",
+    body: function () {
+        class c1 {
+            foo () {
+                return 1;
+            }
+        };
+
+        class c2 extends c1 {
+            foo(a = 1, b = () => { assert.areEqual(1, super.foo(), "Super property access works fine from a lambda defined in the param scope"); return a; }) {
+                a = 20;
+                var c = 10;
+                (() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
+                assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
+            }
+        }
+        (new c2()).foo();
+
+        class c3 extends c1 {
+            foo(a = 1, b = () => { return a; }) {
+                var c = 10;
+                a = 20;
+                (() => assert.areEqual(1, super.foo(), "Super property access works fine from a lambda defined in the body scope"))();
+                assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
+            }
+        }
+        (new c3()).foo();
+
+        class c4 extends c1 {
+            foo(a = 1, b = () => { return a; }) {
+                var c = 10;
+                a = 20;
+                (() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
+                assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
+                assert.areEqual(1, eval("super.foo()"), "Eval should be able to access the super property properly from the body scope");
+            }
+        }
+        (new c4()).foo();
+
+        class c5 extends c1 {
+            foo(a = super.foo(), b = () => { return a; }) {
+                assert.areEqual(1, a, "First formal uses the super property from the param scope");
+                var c = 10;
+                (() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
+                a = 20;
+                assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
+            }
+        }
+        (new c5()).foo();
+    }
+  },
+  {
+    name: "Split scope and new.target",
+    body: function () {
+        class c1 {
+            constructor(newTarget) {
+                assert.isTrue(newTarget == new.target, "Base class should receive the right value for new.target"); 
+            }
+        };
+
+        class c2 extends c1 {
+            constructor(a = 1, b = () => { assert.isTrue(new.target == c2, "new.target should have the derived class value in the param scope"); return a; }) {
+                super(c2);
+                var c = 10;
+                a = 20;
+                (() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
+                assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
+            }
+        }
+        new c2();
+
+        class c3 extends c1 {
+            constructor(a = 1, b = () => { return a; }) {
+                super(c3);
+                var c = 10;
+                (() => assert.isTrue(new.target == c3, "new.target should be the derived class in the body scope when captured by lambda"))();
+                assert.isTrue(new.target == c3, "new.target should be the derived class in the body scope");
+            }
+        }
+        new c3();
+
+        class c4 extends c1 {
+            constructor(a = 1, b = () => { return a; }) {
+                super(c4);
+                assert.isTrue(eval("new.target == c4"), "new.target should be the derived class inside eval");
+                assert.isTrue(new.target == c4, "new.target should be the derived class in the body scope");
+            }
+        }
+        new c4();
+
+        class c5 extends c1 {
+            constructor(a = new.target, b = () => { return a; }) {
+                super(c5);
+                assert.isTrue(a == c5, "new.target accessed from the param scope should work fine");
+            }
+        }
+        new c5();
+    }
+  },
   { 
     name: "Split parameter scope and eval", 
     body: function () { 
@@ -745,7 +1122,111 @@ var tests = [
         };
         f4.call(1, 2);
     }  
-  }, 
+  },
+  {
+    name: "Split scope and with",
+    body: function () {
+          function f1(a, b, c = function () { a; }) {
+            with ({}) {
+                var d = function () {
+                    return 10;
+                };
+                assert.areEqual(10, d(), "With inside a split scope function should work fine");
+            }
+          }
+          f1();
+          
+          function f2(a, b, c = function () { a; }) {
+            var d = function () {
+                return 10;
+            };
+            with ({}) {
+                assert.areEqual(10, d(), "With inside a split scope function should be able to access the function definition from the body");
+            }
+          }
+          f2();
+          
+          function f3(a, b = function () { return 10; }, c = function () { a; }) {
+            with ({}) {
+                assert.areEqual(10, b(), "With inside a split scope function should be able to access the function definition from the param scope");
+            }
+          }
+          f3();
+
+          function f4(a, b = function () { return 10; }, c = function () { a; }) {
+            var d = {
+                e : function () { return 10; }
+            };
+            e = function () { return 100; };
+            with (d) {
+                assert.areEqual(10, e(), "With should use the function definition inside the object not the one from body");
+            }
+          }
+          f4();
+
+          function f5(a, b = { d : function () { return 10; } }, c = function () { a; }) {
+            var d = { };
+            with (b) {
+                assert.areEqual(10, d(), "With should use the function definition inside the object from the param scope not the one from body");
+            }
+          }
+          f5();
+          
+          var v6 = 100
+          function f6(a, b, c = function () { a; }, e = function () { with({}) { assert.areEqual(100, v6, "With inside param scope should be able to access var from outside"); } }, f = e()) {
+            var v6 = { };
+          }
+          f6();
+
+          function f7(a, b, c = function () { a; }) {
+            with ({}) {
+                assert.areEqual(100, v6, "With inside body scope should be able to access var from outside");
+            }
+          }
+          f7();
+          
+          function f8() {
+            function f9() {
+                return 1;
+            }
+            var v1 = 10;
+            function f10(a = 10, b = function f11() {
+                a;
+                assert.areEqual(10, v1, "Function in the param scope should be able to access the outside variable");
+                with ({}) {
+                    assert.areEqual(1, f9(), "With construct inside a param scoped function should be able to execute functions from outside");
+                }
+            }) {
+                b();
+            };
+            f10();
+          }
+          f8();
+          f8();
+          
+          function f12() {
+            function f13() {
+                return 1;
+            }
+            var v2 = 100;
+            function f14(a = 10, b = function () {
+                assert.areEqual(10, a, "Function in the param scope should be able to access the formal from parent");
+                return function () {
+                    assert.areEqual(10, a, "Function nested in the param scope should be able to access the formal from the split scoped function");
+                    assert.areEqual(100, v2, "Function in the param scope should be able to access the outside variable");
+                    with ({}) {
+                        assert.areEqual(1, f13(), "With construct inside a param scoped function should be able to execute functions from outside");
+                    }
+                };
+            }) {
+                b()();
+            };
+            f14();
+          }
+          f12();
+          f12();
+    }  
+  },
   { 
     name: "Basic eval in parameter scope", 
     body: function () {