Răsfoiți Sursa

xplat: fix some clang undefined sanitizer issues

Utf8Codex.cpp, Alloc.cpp, TypePath.h: fix data alignment.

HeapBlock: refactor to remove "this == nullptr" scenario.

PreReservedPageAllocator: refactor to use a single instance for
VirtualAllocWrapper and remove "this == nullptr" scenario.

Others: fix bad casts.
Jianchun Xu 9 ani în urmă
părinte
comite
f87c1fd32b

+ 5 - 5
lib/Common/Codex/Utf8Codex.cpp

@@ -37,7 +37,7 @@ namespace utf8
 
     inline bool ShouldFastPath(LPCUTF8 pb, LPCOLESTR pch)
     {
-        return (reinterpret_cast<size_t>(pb) & mAlignmentMask) == 0 || (reinterpret_cast<size_t>(pch) & mAlignmentMask) == 0;
+        return (reinterpret_cast<size_t>(pb) & mAlignmentMask) == 0 && (reinterpret_cast<size_t>(pch) & mAlignmentMask) == 0;
     }
 
     inline size_t EncodedBytes(char16 prefix)
@@ -336,14 +336,14 @@ LFourByte:
         uint32 codepoint = 0x10000 + ((highTen << 10) | lowTen);
 
         // This is the maximum valid unicode codepoint
-        // This should be ensured anyway since you can't encode a value higher 
+        // This should be ensured anyway since you can't encode a value higher
         // than this as a surrogate pair, so we assert this here
         CodexAssert(codepoint <= 0x10FFFF);
 
         // Now we need to encode the code point into utf-8
         // Codepoints in the range that gets encoded into a surrogate pair
         // gets encoded into 4 bytes under utf8
-        // Since the codepoint can be represented by 21 bits, the encoding 
+        // Since the codepoint can be represented by 21 bits, the encoding
         // does the following: first 3 bits in the first byte, the next 6 in the
         // second, the next six in the third, and the last six in the 4th byte
         *ptr++ = static_cast<utf8char_t>(codepoint >> 18) | 0xF0;
@@ -506,8 +506,8 @@ LSlowPath:
             while (cch-- > 0)
             {
                 // We increment the source pointer here since at least one utf16 code unit is read here
-                // If the code unit turns out to be the high surrogate in a surrogate pair, then 
-                // EncodeTrueUtf8 will consume the low surrogate code unit too by decrementing cch 
+                // If the code unit turns out to be the high surrogate in a surrogate pair, then
+                // EncodeTrueUtf8 will consume the low surrogate code unit too by decrementing cch
                 // and incrementing source
                 dest = EncodeTrueUtf8(*source++, &source, &cch, dest);
                 if (ShouldFastPath(dest, source)) goto LFastPath;

+ 5 - 5
lib/Common/Memory/HeapBlock.cpp

@@ -8,7 +8,7 @@ template <typename TBlockAttributes>
 SmallNormalHeapBlockT<TBlockAttributes> *
 HeapBlock::AsNormalBlock()
 {
-    Assert(this == nullptr || IsAnyNormalBlock());
+    Assert(IsAnyNormalBlock());
     return static_cast<SmallNormalHeapBlockT<TBlockAttributes> *>(this);
 }
 
@@ -16,7 +16,7 @@ template <typename TBlockAttributes>
 SmallLeafHeapBlockT<TBlockAttributes> *
 HeapBlock::AsLeafBlock()
 {
-    Assert(this == nullptr || IsLeafBlock());
+    Assert(IsLeafBlock());
     return static_cast<SmallLeafHeapBlockT<TBlockAttributes> *>(this);
 }
 
@@ -24,7 +24,7 @@ template <typename TBlockAttributes>
 SmallFinalizableHeapBlockT<TBlockAttributes> *
 HeapBlock::AsFinalizableBlock()
 {
-    Assert(this == nullptr || IsAnyFinalizableBlock());
+    Assert(IsAnyFinalizableBlock());
     return static_cast<SmallFinalizableHeapBlockT<TBlockAttributes> *>(this);
 }
 
@@ -33,7 +33,7 @@ template <typename TBlockAttributes>
 SmallNormalWithBarrierHeapBlockT<TBlockAttributes> *
 HeapBlock::AsNormalWriteBarrierBlock()
 {
-    Assert(this == nullptr || IsNormalWriteBarrierBlock());
+    Assert(IsNormalWriteBarrierBlock());
     return static_cast<SmallNormalWithBarrierHeapBlockT<TBlockAttributes> *>(this);
 }
 
@@ -41,7 +41,7 @@ template <typename TBlockAttributes>
 SmallFinalizableWithBarrierHeapBlockT<TBlockAttributes> *
 HeapBlock::AsFinalizableWriteBarrierBlock()
 {
-    Assert(this == nullptr || IsFinalizableWriteBarrierBlock());
+    Assert(IsFinalizableWriteBarrierBlock());
     return static_cast<SmallFinalizableWithBarrierHeapBlockT<TBlockAttributes> *>(this);
 }
 #endif

+ 3 - 3
lib/Common/Memory/HeapBlock.h

@@ -446,10 +446,10 @@ public:
 
     void ProtectUnusablePages() {}
     void RestoreUnusablePages() {}
-    
-    uint GetUnusablePageCount() 
+
+    uint GetUnusablePageCount()
     {
-        return 0; 
+        return 0;
     }
 
 #ifdef RECYCLER_WRITE_BARRIER

+ 41 - 7
lib/Common/Memory/PageAllocator.cpp

@@ -52,7 +52,7 @@ SegmentBase<T>::~SegmentBase()
 template<typename T>
 bool SegmentBase<T>::IsInPreReservedHeapPageAllocator() const
 {
-    return this->allocator->GetVirtualAllocator() != nullptr;
+    return this->allocator->IsPreReservedPageAllocator();
 }
 
 template<typename T>
@@ -638,6 +638,8 @@ PageAllocatorBase<T>::PageAllocatorBase(AllocationPolicyManager * policyManager,
     this->memoryData = MemoryProfiler::GetPageMemoryData(type);
 #endif
 
+    InitVirtualAllocator(nullptr);
+
     PageTracking::PageAllocatorCreated((PageAllocator*)this);
 }
 
@@ -1111,11 +1113,43 @@ PageAllocatorBase<T>::AllocSegment(size_t pageCount)
     return segment;
 }
 
+template <>
+void PageAllocatorBase<VirtualAllocWrapper>::InitVirtualAllocator(VirtualAllocWrapper * virtualAllocator)
+{
+    Assert(this->virtualAllocator == nullptr || this->virtualAllocator == &VirtualAllocWrapper::Instance);
+    Assert(virtualAllocator == nullptr || virtualAllocator == &VirtualAllocWrapper::Instance);
+
+    this->virtualAllocator = &VirtualAllocWrapper::Instance;  // Init to single instance
+}
+
+template <>
+void PageAllocatorBase<PreReservedVirtualAllocWrapper>::InitVirtualAllocator(PreReservedVirtualAllocWrapper * virtualAllocator)
+{
+    Assert(this->virtualAllocator == nullptr);
+
+    this->virtualAllocator = virtualAllocator;  // Init to given virtualAllocator, may be nullptr
+}
+
+template <>
+bool PageAllocatorBase<VirtualAllocWrapper>::IsPreReservedPageAllocator()
+{
+    Assert(virtualAllocator == &VirtualAllocWrapper::Instance);
+    return false;
+}
+
+template <>
+bool PageAllocatorBase<PreReservedVirtualAllocWrapper>::IsPreReservedPageAllocator()
+{
+    Assert(virtualAllocator != nullptr &&  // intentionally stronger check, must have init to non-null
+        (void*)virtualAllocator != (void*)&VirtualAllocWrapper::Instance);
+    return true;
+}
+
 template <>
 char *
 PageAllocatorBase<VirtualAllocWrapper>::Alloc(size_t * pageCount, SegmentBase<VirtualAllocWrapper> ** segment)
 {
-    Assert(virtualAllocator == nullptr);
+    Assert(!IsPreReservedPageAllocator());
     return AllocInternal<false>(pageCount, segment);
 }
 
@@ -1123,7 +1157,7 @@ template <>
 char *
 PageAllocatorBase<PreReservedVirtualAllocWrapper>::Alloc(size_t * pageCount, SegmentBase<PreReservedVirtualAllocWrapper> ** segment)
 {
-    Assert(virtualAllocator);
+    Assert(IsPreReservedPageAllocator());
     return AllocInternal<false>(pageCount, segment);
 }
 
@@ -1201,7 +1235,7 @@ template<>
 char *
 PageAllocatorBase<VirtualAllocWrapper>::AllocPages(uint pageCount, PageSegmentBase<VirtualAllocWrapper> ** pageSegment)
 {
-    Assert(virtualAllocator == nullptr);
+    Assert(!IsPreReservedPageAllocator());
     return AllocPagesInternal<true /* noPageAligned */>(pageCount, pageSegment);
 }
 
@@ -1209,7 +1243,7 @@ template<>
 char *
 PageAllocatorBase<PreReservedVirtualAllocWrapper>::AllocPages(uint pageCount, PageSegmentBase<PreReservedVirtualAllocWrapper> ** pageSegment)
 {
-    Assert(virtualAllocator);
+    Assert(IsPreReservedPageAllocator());
     return AllocPagesInternal<true /* noPageAligned */>(pageCount, pageSegment);
 }
 
@@ -2304,7 +2338,7 @@ HeapPageAllocator<T>::HeapPageAllocator(AllocationPolicyManager * policyManager,
         processHandle),
     allocXdata(allocXdata)
 {
-    this->virtualAllocator = virtualAllocator;
+    this->InitVirtualAllocator(virtualAllocator);
 }
 
 template<typename T>
@@ -2342,7 +2376,7 @@ HeapPageAllocator<T>::DecommitPages(__in char* address, size_t pageCount = 1)
 {
     Assert(pageCount <= MAXUINT32);
 #pragma prefast(suppress:__WARNING_WIN32UNRELEASEDVADS, "The remainder of the clean-up is done later.");
-    this->virtualAllocator->Free(address, pageCount * AutoSystemInfo::PageSize, MEM_DECOMMIT);
+    this->GetVirtualAllocator()->Free(address, pageCount * AutoSystemInfo::PageSize, MEM_DECOMMIT);
     this->LogFreePages(pageCount);
     this->LogDecommitPages(pageCount);
 }

+ 6 - 2
lib/Common/Memory/PageAllocator.h

@@ -442,8 +442,8 @@ public:
     uint GetMaxAllocPageCount();
 
     //VirtualAllocator APIs
-    TVirtualAlloc * GetVirtualAllocator() { return virtualAllocator; }
-    bool IsPreReservedPageAllocator() { return virtualAllocator != nullptr; }
+    TVirtualAlloc * GetVirtualAllocator() { Assert(virtualAllocator != nullptr); return virtualAllocator; }
+    bool IsPreReservedPageAllocator();
 
 
     PageAllocation * AllocPagesForBytes(DECLSPEC_GUARD_OVERFLOW size_t requestedBytes);
@@ -517,6 +517,8 @@ public:
     char16 const * debugName;
 #endif
 protected:
+    void InitVirtualAllocator(TVirtualAlloc * virtualAllocator);
+
     SegmentBase<TVirtualAlloc> * AllocSegment(DECLSPEC_GUARD_OVERFLOW size_t pageCount);
     void ReleaseSegment(SegmentBase<TVirtualAlloc> * segment);
 
@@ -596,7 +598,9 @@ protected:
     bool disableAllocationOutOfMemory;
     bool excludeGuardPages;
     AllocationPolicyManager * policyManager;
+private:
     TVirtualAlloc * virtualAllocator;
+protected:
 
 #ifndef JD_PRIVATE
     Js::ConfigFlagsTable& pageAllocatorFlagTable;

+ 10 - 2
lib/Common/Memory/SmallFinalizableHeapBlock.h

@@ -23,7 +23,11 @@ public:
     static SmallFinalizableHeapBlockT * New(HeapBucketT<SmallFinalizableHeapBlockT> * bucket);
     static void Delete(SmallFinalizableHeapBlockT * block);
 
-    SmallFinalizableHeapBlockT * GetNextBlock() const { return SmallHeapBlockT<TBlockAttributes>::GetNextBlock()->template AsFinalizableBlock<TBlockAttributes>(); }
+    SmallFinalizableHeapBlockT * GetNextBlock() const
+    {
+        HeapBlock* block = SmallHeapBlockT<TBlockAttributes>::GetNextBlock();
+        return block ? block->template AsFinalizableBlock<TBlockAttributes>() : nullptr;
+    }
     void SetNextBlock(SmallFinalizableHeapBlockT * next) { Base::SetNextBlock(next); }
 
     void ProcessMarkedObject(void* candidate, MarkContext * markContext);
@@ -140,7 +144,11 @@ public:
     static SmallFinalizableWithBarrierHeapBlockT * New(HeapBucketT<SmallFinalizableWithBarrierHeapBlockT> * bucket);
     static void Delete(SmallFinalizableWithBarrierHeapBlockT * block);
 
-    SmallFinalizableWithBarrierHeapBlockT * GetNextBlock() const { return ((SmallHeapBlock*) this)->GetNextBlock()->AsFinalizableWriteBarrierBlock<TBlockAttributes>(); }
+    SmallFinalizableWithBarrierHeapBlockT * GetNextBlock() const
+    {
+        HeapBlock* block = SmallHeapBlockT<TBlockAttributes>::GetNextBlock();
+        return block ? block->template AsFinalizableWriteBarrierBlock<TBlockAttributes>() : nullptr;
+    }
 
     virtual bool FindHeapObject(void* objectAddress, Recycler * recycler, FindHeapObjectFlags flags, RecyclerHeapObjectInfo& heapObject) override sealed
     {

+ 5 - 1
lib/Common/Memory/SmallLeafHeapBlock.h

@@ -15,7 +15,11 @@ public:
     static const ObjectInfoBits RequiredAttributes = LeafBit;
     typedef TBlockAttributes HeapBlockAttributes;
 
-    SmallLeafHeapBlockT * GetNextBlock() const { return Base::GetNextBlock()->template AsLeafBlock<TBlockAttributes>(); }
+    SmallLeafHeapBlockT * GetNextBlock() const
+    {
+        HeapBlock* block = Base::GetNextBlock();
+        return block ? block->template AsLeafBlock<TBlockAttributes>() : nullptr;
+    }
     void SetNextBlock(SmallLeafHeapBlockT * next) { Base::SetNextBlock(next); }
 
     void ScanNewImplicitRoots(Recycler * recycler);

+ 10 - 2
lib/Common/Memory/SmallNormalHeapBlock.h

@@ -21,7 +21,11 @@ public:
     static SmallNormalHeapBlockT * New(HeapBucketT<SmallNormalHeapBlockT> * bucket);
     static void Delete(SmallNormalHeapBlockT * block);
 
-    SmallNormalHeapBlockT * GetNextBlock() const { return Base::GetNextBlock()->template AsNormalBlock<TBlockAttributes>(); }
+    SmallNormalHeapBlockT * GetNextBlock() const
+    {
+        HeapBlock* block = Base::GetNextBlock();
+        return block ? block->template AsNormalBlock<TBlockAttributes>() : nullptr;
+    }
     void SetNextBlock(SmallNormalHeapBlockT * next) { Base::SetNextBlock(next); }
 
     void ScanInitialImplicitRoots(Recycler * recycler);
@@ -66,7 +70,11 @@ public:
     static SmallNormalWithBarrierHeapBlockT * New(HeapBucketT<SmallNormalWithBarrierHeapBlockT> * bucket);
     static void Delete(SmallNormalWithBarrierHeapBlockT * heapBlock);
 
-    SmallNormalWithBarrierHeapBlockT * GetNextBlock() const { return ((SmallHeapBlock*) this)->GetNextBlock()->AsNormalWriteBarrierBlock<TBlockAttributes>(); }
+    SmallNormalWithBarrierHeapBlockT * GetNextBlock() const
+    {
+        HeapBlock* block = SmallHeapBlockT<TBlockAttributes>::GetNextBlock();
+        return block ? block->template AsNormalWriteBarrierBlock<TBlockAttributes>() : nullptr;
+    }
 
     virtual bool FindHeapObject(void* objectAddress, Recycler * recycler, FindHeapObjectFlags flags, RecyclerHeapObjectInfo& heapObject) override sealed
     {

+ 8 - 13
lib/Common/Memory/VirtualAllocWrapper.cpp

@@ -8,15 +8,16 @@
 * class VirtualAllocWrapper
 */
 
+VirtualAllocWrapper VirtualAllocWrapper::Instance;  // single instance
+
 LPVOID VirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation, HANDLE process)
 {
-    Assert(this == nullptr);
     LPVOID address = nullptr;
 
 #if defined(ENABLE_JIT_CLAMP)
     bool makeExecutable;
 
-    if ((isCustomHeapAllocation) || 
+    if ((isCustomHeapAllocation) ||
         (protectFlags & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE)))
     {
         makeExecutable = true;
@@ -82,7 +83,6 @@ LPVOID VirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocat
 
 BOOL VirtualAllocWrapper::Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType, HANDLE process)
 {
-    Assert(this == nullptr);
     AnalysisAssert(dwFreeType == MEM_RELEASE || dwFreeType == MEM_DECOMMIT);
     size_t bytes = (dwFreeType == MEM_RELEASE)? 0 : dwSize;
 #pragma warning(suppress: 28160) // Calling VirtualFreeEx without the MEM_RELEASE flag frees memory but not address descriptors (VADs)
@@ -111,7 +111,6 @@ PreReservedVirtualAllocWrapper::PreReservedVirtualAllocWrapper(HANDLE process) :
 
 PreReservedVirtualAllocWrapper::~PreReservedVirtualAllocWrapper()
 {
-    Assert(this);
     if (IsPreReservedRegionPresent())
     {
         BOOL success = VirtualFreeEx(processHandle, preReservedStartAddress, 0, MEM_RELEASE);
@@ -132,14 +131,13 @@ PreReservedVirtualAllocWrapper::~PreReservedVirtualAllocWrapper()
 bool
 PreReservedVirtualAllocWrapper::IsPreReservedRegionPresent()
 {
-    Assert(this);
     return preReservedStartAddress != nullptr;
 }
 
 bool
 PreReservedVirtualAllocWrapper::IsInRange(void * address)
 {
-    if (this == nullptr || !this->IsPreReservedRegionPresent())
+    if (!this->IsPreReservedRegionPresent())
     {
         return false;
     }
@@ -180,7 +178,6 @@ PreReservedVirtualAllocWrapper::IsInRange(void * regionStart, void * address)
 LPVOID
 PreReservedVirtualAllocWrapper::GetPreReservedStartAddress()
 {
-    Assert(this);
     return preReservedStartAddress;
 }
 
@@ -237,7 +234,7 @@ LPVOID PreReservedVirtualAllocWrapper::EnsurePreReservedRegionInternal()
 #if !_M_X64_OR_ARM64
 #if _M_IX86
     // We want to restrict the number of prereserved segment for 32-bit process so that we don't use up the address space
-   
+
    // Note: numPreReservedSegment is for the whole process, and access and update to it is not protected by a global lock.
    // So we may allocate more than the maximum some of the time if multiple threads check it simultaneously and allocate past the limit.
    // It doesn't affect functionality, and it should be OK if we exceed.
@@ -266,7 +263,7 @@ LPVOID PreReservedVirtualAllocWrapper::EnsurePreReservedRegionInternal()
 #endif
     }
 #endif
-    
+
 
     return startAddress;
 }
@@ -281,7 +278,6 @@ LPVOID PreReservedVirtualAllocWrapper::EnsurePreReservedRegionInternal()
 LPVOID PreReservedVirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation, HANDLE process)
 {
     Assert(process == this->processHandle);
-    Assert(this);
     AssertMsg(isCustomHeapAllocation, "PreReservation used for allocations other than CustomHeap?");
     AssertMsg(AutoSystemInfo::Data.IsCFGEnabled() || PHASE_FORCE1(Js::PreReservedHeapAllocPhase), "PreReservation without CFG ?");
     Assert(dwSize != 0);
@@ -433,7 +429,6 @@ BOOL
 PreReservedVirtualAllocWrapper::Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType, HANDLE process)
 {
     Assert(process == this->processHandle);
-    Assert(this);
     {
         AutoCriticalSection autocs(&this->cs);
 
@@ -551,7 +546,7 @@ AutoEnableDynamicCodeGen::AutoEnableDynamicCodeGen(bool enable) : enabled(false)
     //      really does not allow thread opt-out, then the call below will fail
     //      benignly.
     //
-    
+
     if ((processPolicy.ProhibitDynamicCode == 0) || (processPolicy.AllowThreadOptOut == 0))
     {
         return;
@@ -562,7 +557,7 @@ AutoEnableDynamicCodeGen::AutoEnableDynamicCodeGen(bool enable) : enabled(false)
         return;
     }
 
-    // 
+    //
     // If dynamic code is already allowed for this thread, then don't attempt to allow it again.
     //
 

+ 4 - 0
lib/Common/Memory/VirtualAllocWrapper.h

@@ -18,6 +18,10 @@ class VirtualAllocWrapper
 public:
     LPVOID  Alloc(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation = false, HANDLE process = GetCurrentProcess());
     BOOL    Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType, HANDLE process = GetCurrentProcess());
+
+    static VirtualAllocWrapper Instance;  // single instance
+private:
+    VirtualAllocWrapper() {}
 };
 
 /*

+ 2 - 2
lib/Parser/Alloc.cpp

@@ -10,7 +10,7 @@
 #define DEBUG_TRASHMEM
 #endif //DEBUG
 
-#if _WIN64
+#if TARGET_64
 struct __ALIGN_FOO__ {
     int w1;
     double dbl;
@@ -19,7 +19,7 @@ struct __ALIGN_FOO__ {
 #else
 // Force check for 4 byte alignment to support Win98/ME
 #define ALIGN_FULL 4
-#endif // _WIN64
+#endif  // TARGET_64
 
 #define AlignFull(VALUE) (~(~((VALUE) + (ALIGN_FULL-1)) | (ALIGN_FULL-1)))
 

+ 1 - 1
lib/Runtime/Base/FunctionBody.cpp

@@ -69,7 +69,7 @@ namespace Js
 
     // FunctionProxy methods
     FunctionProxy::FunctionProxy(JavascriptMethod entryPoint, Attributes attributes, LocalFunctionId functionId, ScriptContext* scriptContext, Utf8SourceInfo* utf8SourceInfo, uint functionNumber):
-        FunctionInfo(entryPoint, attributes, functionId, (FunctionBody*) this),
+        FunctionInfo(entryPoint, attributes, functionId, this),
         m_isTopLevel(false),
         m_isPublicLibraryCode(false),
         m_scriptContext(scriptContext),

+ 1 - 1
lib/Runtime/Base/FunctionInfo.cpp

@@ -6,7 +6,7 @@
 
 namespace Js
 {
-    FunctionInfo::FunctionInfo(JavascriptMethod entryPoint, Attributes attributes, LocalFunctionId functionId, FunctionBody* functionBodyImpl)
+    FunctionInfo::FunctionInfo(JavascriptMethod entryPoint, Attributes attributes, LocalFunctionId functionId, FunctionProxy* functionBodyImpl)
         : originalEntryPoint(entryPoint), attributes(attributes), functionBodyImpl(functionBodyImpl), functionId(functionId)
     {
 #if !DYNAMIC_INTERPRETER_THUNK

+ 2 - 2
lib/Runtime/Base/FunctionInfo.h

@@ -22,7 +22,7 @@ namespace Js
             ErrorOnNew                     = 0x00001,
             SkipDefaultNewObject           = 0x00002,
             DoNotProfile                   = 0x00004,
-            HasNoSideEffect                = 0x00008, // calling function doesnt cause an implicit flags to be set,
+            HasNoSideEffect                = 0x00008, // calling function doesn't cause an implicit flags to be set,
                                                       // the callee will detect and set implicit flags on its individual operations
             NeedCrossSiteSecurityCheck     = 0x00010,
             DeferredDeserialize            = 0x00020, // The function represents something that needs to be deserialized on use
@@ -39,7 +39,7 @@ namespace Js
             Module                         = 0x20000, // The function is the function body wrapper for a module
             EnclosedByGlobalFunc           = 0x40000,
         };
-        FunctionInfo(JavascriptMethod entryPoint, Attributes attributes = None, LocalFunctionId functionId = Js::Constants::NoFunctionId, FunctionBody* functionBodyImpl = NULL);
+        FunctionInfo(JavascriptMethod entryPoint, Attributes attributes = None, LocalFunctionId functionId = Js::Constants::NoFunctionId, FunctionProxy* functionBodyImpl = NULL);
 
         static DWORD GetFunctionBodyImplOffset() { return offsetof(FunctionInfo, functionBodyImpl); }
         static DWORD GetAttributesOffset() { return offsetof(FunctionInfo, attributes); }

+ 1 - 1
lib/Runtime/Library/JavascriptExternalFunction.h

@@ -8,7 +8,7 @@ typedef HRESULT(__cdecl *InitializeMethod)(Js::Var instance);
 
 namespace Js
 {
-    typedef Var (__stdcall *StdCallJavascriptMethod)(RecyclableObject *callee, bool isConstructCall, Var *args, USHORT cargs, void *callbackState);
+    typedef Var (__stdcall *StdCallJavascriptMethod)(Var callee, bool isConstructCall, Var *args, USHORT cargs, void *callbackState);
     typedef int JavascriptTypeId;
 
     class JavascriptExternalFunction : public RuntimeFunction

+ 3 - 3
lib/Runtime/Types/PathTypeHandler.cpp

@@ -1465,7 +1465,7 @@ namespace Js
                     oldTypeToPromotedTypeMap = RecyclerNew(instance->GetRecycler(), TypeTransitionMap, instance->GetRecycler(), 2);
                     newPrototype->SetInternalProperty(Js::InternalPropertyIds::TypeOfPrototypeObjectDictionary, (Var)oldTypeToPromotedTypeMap, PropertyOperationFlags::PropertyOperation_Force, nullptr);
                 }
-                
+
                 // oldType is kind of weakReference here
                 oldTypeToPromotedTypeMap->Item(reinterpret_cast<uintptr_t>(oldType), cachedDynamicType);
 
@@ -1482,7 +1482,7 @@ namespace Js
                         Output::Print(_u("TypeSharing: Updating prototype object's DictionarySlot cache in __proto__.\n"));
 #if DBG
                     }
-#endif 
+#endif
                     Output::Flush();
                 }
 
@@ -1725,7 +1725,7 @@ namespace Js
 
         bool populateInlineCache = true;
 
-        PathTypeHandler* newTypeHandler = (PathTypeHandler*)instance->GetTypeHandler();
+        PathTypeHandlerBase* newTypeHandler = (PathTypeHandlerBase*)instance->GetTypeHandler();
 
         if (slotIndex >= newTypeHandler->typePath->GetMaxInitializedLength())
         {

+ 4 - 3
lib/Runtime/Types/TypePath.cpp

@@ -16,7 +16,7 @@ namespace Js {
     {
         Assert(size <= MaxPathTypeHandlerLength);
         size = max(size, InitialTypePathSize);
-        
+
 
         if (PHASE_OFF1(Js::TypePathDynamicSizePhase))
         {
@@ -51,8 +51,9 @@ namespace Js {
            return Constants::NoSlot;
         }
         PropertyIndex propIndex = Constants::NoSlot;
-        if (this->GetData()->map.TryGetValue(propId, &propIndex, assignments)) {
-           if (propIndex<typePathLength) {
+        if (this->GetData()->map.TryGetValue(propId, &propIndex,
+                static_cast<const PropertyRecord **>(assignments))) {
+            if (propIndex<typePathLength) {
                 return propIndex;
             }
         }

+ 11 - 6
lib/Runtime/Types/TypePath.h

@@ -9,21 +9,25 @@ namespace Js
     class TinyDictionary
     {
         static const int PowerOf2_BUCKETS = 8;
+        static const int BUCKETS_DWORDS = PowerOf2_BUCKETS / sizeof(DWORD);
         static const byte NIL = 0xff;
 
-        byte buckets[PowerOf2_BUCKETS];
+        DWORD bucketsData[BUCKETS_DWORDS];  // use DWORDs to enforce alignment
         byte next[0];
 
 public:
         TinyDictionary()
         {
-            DWORD* init = (DWORD*)buckets;
+            CompileAssert(BUCKETS_DWORDS * sizeof(DWORD) == PowerOf2_BUCKETS);
+            CompileAssert(BUCKETS_DWORDS == 2);
+            DWORD* init = bucketsData;
             init[0] = init[1] = 0xffffffff;
         }
 
         void Add(PropertyId key, byte value)
         {
-            uint32 bucketIndex = key&(PowerOf2_BUCKETS-1);
+            byte* buckets = reinterpret_cast<byte*>(bucketsData);
+            uint32 bucketIndex = key & (PowerOf2_BUCKETS - 1);
 
             byte i = buckets[bucketIndex];
             buckets[bucketIndex] = value;
@@ -34,7 +38,8 @@ public:
         template <class Data>
         inline bool TryGetValue(PropertyId key, PropertyIndex* index, const Data& data)
         {
-            uint32 bucketIndex = key&(PowerOf2_BUCKETS-1);
+            byte* buckets = reinterpret_cast<byte*>(bucketsData);
+            uint32 bucketIndex = key & (PowerOf2_BUCKETS - 1);
 
             for (byte i = buckets[bucketIndex] ; i != NIL ; i = next[i])
             {
@@ -108,9 +113,9 @@ public:
         const PropertyRecord * assignments[0];
 
 
-        TypePath() : 
+        TypePath() :
 #ifdef SUPPORT_FIXED_FIELDS_ON_PATH_TYPES
-            singletonInstance(nullptr), 
+            singletonInstance(nullptr),
 #endif
             data(nullptr)
         {