Răsfoiți Sursa

[linux] Build Chakra.Common.Memory

This change gets Chakra.Common.Memory compiling on Linux. It includes
the following:
1. Disable concurrent and partial GC on non-Windows build
2. Disable stack trace collection in page heap mode by wrapping under
the existing STACK_BACK_TRACE macro
3. Added a declaration for a wrapper method, GetCurrentThreadStackBounds,
which abstracts the retrieval of the current thread's stack bounds on at
least x86-based platforms. Replaced cases where we were using kernel APIs
to get the stack bounds with this API (not yet implemented)
4. Disable static valid pointer map on Linux
5. Replace string usage in valid pointer map generation code with
CH_WSTRs/wchar16
6. Added a few template specialization forward declarations

There are xplat-todos sprinkled around this change — I'll collate them
and start opening issues where it makes sense.
Hitesh Kanwathirtha 10 ani în urmă
părinte
comite
2998ff1cd8

+ 14 - 2
lib/Common/CommonDefines.h

@@ -100,8 +100,19 @@
 #define SUPPORT_FIXED_FIELDS_ON_PATH_TYPES          // *** TODO: Won't build if disabled currently
 
 // GC features
+
+// Concurrent and Partial GC are disabled on non-Windows builds
+// xplat-todo: re-enable this in the future
+// These are disabled because these GC features depend on hardware
+// write-watch support that the Windows Memory Manager provides.
+#ifdef _WIN32
 #define ENABLE_CONCURRENT_GC 1
-#define ENABLE_PARTIAL_GC 1  
+#define ENABLE_PARTIAL_GC 1
+#else
+#define ENABLE_CONCURRENT_GC 0
+#define ENABLE_PARTIAL_GC 0
+#endif
+
 #define BUCKETIZE_MEDIUM_ALLOCATIONS 1              // *** TODO: Won't build if disabled currently
 #define SMALLBLOCK_MEDIUM_ALLOC 1                   // *** TODO: Won't build if disabled currently
 #define LARGEHEAPBLOCK_ENCODING 1                   // Large heap block metadata encoding
@@ -462,8 +473,9 @@
 #define ENABLE_TRACE
 #endif
 
-#if DBG || defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT) || defined(TRACK_DISPATCH) || defined(ENABLE_TRACE) || defined(RECYCLER_PAGE_HEAP)
+// xplat-todo: Capture stack backtrace on non-win32 platforms
 #ifdef _WIN32
+#if DBG || defined(CHECK_MEMORY_LEAK) || defined(LEAK_REPORT) || defined(TRACK_DISPATCH) || defined(ENABLE_TRACE) || defined(RECYCLER_PAGE_HEAP)
 #define STACK_BACK_TRACE
 #endif
 #endif

+ 15 - 3
lib/Common/CommonPal.h

@@ -29,7 +29,7 @@ typedef wchar_t wchar16;
 
 #else // !_WIN32
 
-#include "inc/pal.h"
+#include "pal.h"
 #include "inc/rt/palrt.h"
 #include "inc/rt/no_sal2.h"
 
@@ -191,8 +191,6 @@ PALIMPORT VOID PALAPI InitializeSListHead(IN OUT PSLIST_HEADER ListHead);
 PALIMPORT PSLIST_ENTRY PALAPI InterlockedPushEntrySList(IN OUT PSLIST_HEADER ListHead, IN OUT PSLIST_ENTRY  ListEntry);
 PALIMPORT PSLIST_ENTRY PALAPI InterlockedPopEntrySList(IN OUT PSLIST_HEADER ListHead);
 
-#define WRITE_WATCH_FLAG_RESET 1
-
 // xplat-todo: implement these for JIT and Concurrent/Partial GC
 uintptr_t _beginthreadex(
    void *security,
@@ -207,6 +205,12 @@ BOOL WINAPI GetModuleHandleEx(
   _Out_    HMODULE *phModule
 );
 
+// xplat-todo: implement this function to get the stack bounds of the current
+// thread
+// For Linux, we could use pthread_getattr_np to get the stack limit (end)
+// and then use the stack size to calculate the stack base
+int GetCurrentThreadStackBounds(char** stackBase, char** stackEnd);
+
 // xplat-todo: cryptographically secure PRNG?
 errno_t rand_s(unsigned int* randomValue);
 
@@ -239,3 +243,11 @@ errno_t rand_s(unsigned int* randomValue);
 #else
 #define _NOEXCEPT noexcept
 #endif
+
+// xplat-todo: can we get rid of this for clang?
+// Including xmmintrin.h right now creates a ton of
+// compile errors, so temporarily defining this for clang
+// to avoid including that header
+#ifndef _MSC_VER
+#define _MM_HINT_T0 3
+#endif

+ 4 - 1
lib/Common/Exceptions/Throw.cpp

@@ -60,7 +60,7 @@ extern "C"{
 }
 
 namespace Js {
-#ifdef GENERATE_DUMP
+#if defined(GENERATE_DUMP) && defined(STACK_BACK_TRACE)
     StackBackTrace * Throw::stackBackTrace = nullptr;
 #endif
     void Throw::FatalInternalError()
@@ -245,8 +245,11 @@ namespace Js {
     void Throw::LogAssert()
     {
         IsInAssert = true;
+
+#ifdef STACK_BACK_TRACE
         // This should be the last thing to happen in the process. Therefore, leaks are not an issue.
         stackBackTrace = StackBackTrace::Capture(&NoCheckHeapAllocator::Instance, Throw::StackToSkip, Throw::StackTraceDepth);
+#endif
     }
 
 #ifdef ENABLE_DEBUG_CONFIG_OPTIONS

+ 5 - 0
lib/Common/Exceptions/Throw.h

@@ -4,7 +4,9 @@
 //-------------------------------------------------------------------------------------------------------
 #pragma once
 
+#ifdef STACK_BACK_TRACE
 class StackBackTrace;
+#endif
 
 namespace Js {
 
@@ -29,9 +31,12 @@ namespace Js {
         static void GenerateDumpForAssert(LPCWSTR filePath);
     private:
         static CriticalSection csGenerateDump;
+#ifdef STACK_BACK_TRACE
         __declspec(thread) static  StackBackTrace * stackBackTrace;
+        
         static const int StackToSkip = 2;
         static const int StackTraceDepth = 40;
+#endif
 #endif
     };
 

+ 1 - 1
lib/Common/Memory/ArenaAllocator.h

@@ -158,7 +158,7 @@ public:
     static const bool FakeZeroLengthArray = true;
     static const size_t MaxSmallObjectSize = 1024;
 
-    ArenaAllocatorBase(__in LPCWSTR name, PageAllocator * pageAllocator, void (*outOfMemoryFunc)(), void (*recoverMemoryFunc)() = JsUtil::ExternalApi::RecoverUnusedMemory);
+    ArenaAllocatorBase(__in wchar16 const* name, PageAllocator * pageAllocator, void (*outOfMemoryFunc)(), void (*recoverMemoryFunc)() = JsUtil::ExternalApi::RecoverUnusedMemory);
     ~ArenaAllocatorBase();
 
     void Reset()

+ 8 - 6
lib/Common/Memory/AutoAllocatorObjectPtr.h

@@ -15,7 +15,7 @@ private:
     AllocatorType* m_allocator;
 
 public:
-    AutoAllocatorObjectPtr(T* ptr, AllocatorType* allocator) : BasePtr(ptr), m_allocator(allocator)
+    AutoAllocatorObjectPtr(T* ptr, AllocatorType* allocator) : BasePtr<T>(ptr), m_allocator(allocator)
     {
         Assert(allocator);
     }
@@ -28,10 +28,10 @@ public:
 private:
     void Clear()
     {
-        if (ptr != nullptr)
+        if (this->ptr != nullptr)
         {
-            DeleteObject<TAllocator>(m_allocator, ptr);
-            ptr = nullptr;
+            DeleteObject<TAllocator>(m_allocator, this->ptr);
+            this->ptr = nullptr;
         }
     }
 };
@@ -77,11 +77,13 @@ private:
 //      TAllocator      The allocator type used to allocate/free the objects.
 //      ArrayAllocator  The allocator type used to allocate/free the array.
 //
-template <typename T, typename TAllocator, typename ArrayAllocator = ForceNonLeafAllocator<TAllocator>::AllocatorType>
+template <typename T, typename TAllocator, typename ArrayAllocator = typename ForceNonLeafAllocator<TAllocator>::AllocatorType>
 class AutoAllocatorObjectArrayPtr : public AutoAllocatorArrayPtr<T*, ArrayAllocator>
 {
+    typedef AutoAllocatorArrayPtr<T*, ArrayAllocator> Base;
+    
 public:
-    AutoAllocatorObjectArrayPtr(T** ptr, size_t elementCount, AllocatorType* allocator) :
+    AutoAllocatorObjectArrayPtr(T** ptr, size_t elementCount, typename Base::AllocatorType* allocator) :
         AutoAllocatorArrayPtr(ptr, elementCount, allocator)
     {
     }

+ 14 - 25
lib/Common/Memory/CMakeLists.txt

@@ -1,45 +1,37 @@
 add_library (Chakra.Common.Memory
+    # xplat-todo: Include platform\XDataAllocator.cpp
+    # Needed on windows, need a replacement for linux to do
+    # amd64 stack walking
     Allocator.cpp
     ArenaAllocator.cpp
+
+    # xplat-todo: This is needed for allocating jitted code memory
+    # CustomHeap.cpp
+
     CommonMemoryPch.cpp
     EtwMemoryTracking.cpp
     ForcedMemoryConstraints.cpp
     HeapAllocator.cpp
     HeapAllocatorOperators.cpp
-
-    # xplat-todo: Fix me
-    #HeapBlock.cpp
-
+    HeapBlock.cpp
     HeapBlockMap.cpp
     HeapBucket.cpp
-
-    # xplat-todo: Fix me
-    #HeapInfo.cpp
-
+    HeapInfo.cpp
     IdleDecommitPageAllocator.cpp
-
-    # xplat-todo: Fix me
-    #LargeHeapBlock.cpp
-
+    LargeHeapBlock.cpp
     LargeHeapBucket.cpp
     LeakReport.cpp
     MarkContext.cpp
     MemoryLogger.cpp
     MemoryTracking.cpp
     PageAllocator.cpp
-
-    # xplat-todo: Fix me
-    #Recycler.cpp
-
+    Recycler.cpp
     RecyclerHeuristic.cpp
     RecyclerObjectDumper.cpp
     RecyclerObjectGraphDumper.cpp
     RecyclerPageAllocator.cpp
-    RecyclerSweep.cpp
-
-    # xplat-todo: Fix me
-    #RecyclerWriteBarrierManager.cpp
-
+    RecyclerSweep.cpp    
+    RecyclerWriteBarrierManager.cpp
     SmallFinalizableHeapBlock.cpp
     SmallFinalizableHeapBucket.cpp
     SmallHeapBlockAllocator.cpp
@@ -47,10 +39,7 @@ add_library (Chakra.Common.Memory
     SmallLeafHeapBucket.cpp
     SmallNormalHeapBlock.cpp
     SmallNormalHeapBucket.cpp
-
-    # xplat-todo: Are all the APIs used here ported?
-    # StressTest.cpp
-
+    StressTest.cpp
     VirtualAllocWrapper.cpp
     )
 

+ 2 - 0
lib/Common/Memory/EtwMemoryTracking.cpp

@@ -4,6 +4,8 @@
 //-------------------------------------------------------------------------------------------------------
 #include "CommonMemoryPch.h"
 
+// xplat-todo: Need to figure out equivalent method for allocation tracing
+// on platforms other than Windows
 #ifdef ETW_MEMORY_TRACKING
 #include "microsoft-scripting-jscript9.internalevents.h"
 

+ 12 - 2
lib/Common/Memory/HeapBlock.cpp

@@ -54,6 +54,7 @@ HeapBlock::SetNeedOOMRescan(Recycler * recycler)
     recycler->SetNeedOOMRescan();
 }
 
+#ifdef STACK_BACK_TRACE
 #ifdef RECYCLER_PAGE_HEAP
 void
 HeapBlock::CapturePageHeapAllocStack()
@@ -97,6 +98,7 @@ HeapBlock::CapturePageHeapFreeStack()
     }
 }
 #endif
+#endif
 
 //========================================================================================================
 // SmallHeapBlock
@@ -178,6 +180,7 @@ SmallHeapBlockT<TBlockAttributes>::~SmallHeapBlockT()
 #endif
 
 #ifdef RECYCLER_PAGE_HEAP
+#ifdef STACK_BACK_TRACE
     if (this->pageHeapAllocStack != nullptr)
     {
         this->pageHeapAllocStack->Delete(&NoCheckHeapAllocator::Instance);
@@ -192,6 +195,7 @@ SmallHeapBlockT<TBlockAttributes>::~SmallHeapBlockT()
         this->pageHeapFreeStack = nullptr;
     }
 #endif
+#endif
 }
 
 template <class TBlockAttributes>
@@ -319,6 +323,7 @@ SmallHeapBlockT<TBlockAttributes>::Init(ushort objectSize, ushort objectCount)
     Assert(!this->isIntegratedBlock);
 
 #ifdef RECYCLER_PAGE_HEAP
+#ifdef STACK_BACK_TRACE
     if (this->pageHeapAllocStack != nullptr)
     {
         this->pageHeapAllocStack->Delete(&NoCheckHeapAllocator::Instance);
@@ -331,7 +336,8 @@ SmallHeapBlockT<TBlockAttributes>::Init(ushort objectSize, ushort objectCount)
     {
         this->pageHeapFreeStack->Delete(&NoCheckHeapAllocator::Instance);
         this->pageHeapFreeStack = nullptr;
-    }
+    }    
+#endif
 #endif
 }
 
@@ -637,6 +643,7 @@ SmallHeapBlockT<TBlockAttributes>::Reset()
 #endif
 
 #ifdef RECYCLER_PAGE_HEAP
+#ifdef STACK_BACK_TRACE
     if (this->pageHeapFreeStack != nullptr)
     {
         this->pageHeapFreeStack->Delete(&NoCheckHeapAllocator::Instance);
@@ -648,7 +655,7 @@ SmallHeapBlockT<TBlockAttributes>::Reset()
         this->pageHeapAllocStack->Delete(&NoCheckHeapAllocator::Instance);
         this->pageHeapAllocStack = nullptr;
     }
-
+#endif
 #endif
 
     // There is no page associated with this heap block,
@@ -2225,9 +2232,12 @@ SmallHeapBlockT<TBlockAttributes>::IsWithBarrier() const
 }
 #endif
 
+namespace Memory
+{
 // Instantiate the template
 template class SmallHeapBlockT<SmallAllocationBlockAttributes>;
 template class SmallHeapBlockT<MediumAllocationBlockAttributes>;
+};
 
 #define TBlockTypeAttributes SmallAllocationBlockAttributes
 #include "SmallBlockDeclarations.inl"

+ 16 - 1
lib/Common/Memory/HeapBlock.h

@@ -285,14 +285,19 @@ protected:
     PageHeapMode pageHeapMode;
     DWORD guardPageOldProtectFlags;
     char* guardPageAddress;
+
+#ifdef STACK_BACK_TRACE
     StackBackTrace* pageHeapAllocStack;
     StackBackTrace* pageHeapFreeStack;
-
+#endif
+    
 public:
     __inline bool InPageHeapMode() const { return pageHeapMode != PageHeapMode::PageHeapModeOff; }
+#ifdef STACK_BACK_TRACE
     void CapturePageHeapAllocStack();
     void CapturePageHeapFreeStack();
 #endif
+#endif
 
 public:
     template <typename Fn>
@@ -303,7 +308,9 @@ public:
         heapBlockType(heapBlockType),
         needOOMRescan(false)
 #ifdef RECYCLER_PAGE_HEAP
+#ifdef STACK_BACK_TRACE
         , pageHeapAllocStack(nullptr), pageHeapFreeStack(nullptr)
+#endif
 #endif
     {
         Assert(GetHeapBlockType() <= HeapBlock::HeapBlockType::BlockTypeCount);
@@ -720,6 +727,14 @@ private:
 #endif
 };
 
+// Forward declare specializations
+template<>
+SmallHeapBlockT<MediumAllocationBlockAttributes>::SmallHeapBlockT(HeapBucket * bucket, ushort objectSize, ushort objectCount, HeapBlockType heapBlockType);
+
+template <>
+uint
+SmallHeapBlockT<MediumAllocationBlockAttributes>::GetObjectBitDeltaForBucketIndex(uint bucketIndex);
+
 // Declare the class templates
 typedef SmallHeapBlockT<SmallAllocationBlockAttributes>  SmallHeapBlock;
 typedef SmallHeapBlockT<MediumAllocationBlockAttributes> MediumHeapBlock;

+ 67 - 63
lib/Common/Memory/HeapInfo.cpp

@@ -3,16 +3,17 @@
 // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
 //-------------------------------------------------------------------------------------------------------
 #include "CommonMemoryPch.h"
-#include "Memory\PageHeapBlockTypeFilter.h"
+#include "Memory/PageHeapBlockTypeFilter.h"
 #if defined(_M_IX86_OR_ARM32)
-#include "ValidPointersMap\vpm.32b.h"
+#include "ValidPointersMap/vpm.32b.h"
 #elif defined(_M_X64_OR_ARM64)
-#include "ValidPointersMap\vpm.64b.h"
+#include "ValidPointersMap/vpm.64b.h"
 #else
 #error "Platform is not handled"
 #endif
 
-template __forceinline char* HeapInfo::RealAlloc<NoBit, false>(Recycler * recycler, size_t sizeCat);
+template <>
+__forceinline char* HeapInfo::RealAlloc<NoBit, false>(Recycler * recycler, size_t sizeCat);
 
 HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>  HeapInfo::smallAllocValidPointersMap;
 HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes> HeapInfo::mediumAllocValidPointersMap;
@@ -82,7 +83,7 @@ void HeapInfo::ValidPointersMap<TBlockAttributes>::GenerateValidPointersMap(Vali
         ushort * validPointers = buffer;
         buffer += TBlockAttributes::MaxSmallObjectCount;
 
-        SmallHeapBlockT<TBlockAttributes>::SmallHeapBlockBitVector * invalidBitVector = &invalidTable[i];
+        typename SmallHeapBlockT<TBlockAttributes>::SmallHeapBlockBitVector * invalidBitVector = &invalidTable[i];
         invalidBitVector->SetAll();
 
         uint bucketSize;
@@ -153,74 +154,74 @@ HRESULT HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::GenerateVali
     }
     GenerateValidPointersMap(*valid, *invalid, *blockMap);
 
-    IfErrorGotoCleanup(fwprintf(file, L"const ushort HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::validPointersBuffer[HeapConstants::BucketCount][HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::rowSize] = \n{\n"));
+    IfErrorGotoCleanup(fwprintf(file, CH_WSTR("const ushort HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::validPointersBuffer[HeapConstants::BucketCount][HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::rowSize] = \n{\n")));
     // Generate the full buffer.
     for (unsigned i = 0; i < HeapConstants::BucketCount; ++i)
     {
-        IfErrorGotoCleanup(fwprintf(file, L"    {\n        "));
+        IfErrorGotoCleanup(fwprintf(file, CH_WSTR("    {\n        ")));
         for (unsigned j = 0; j < rowSize; ++j)
         {
             IfErrorGotoCleanup(fwprintf(
                 file,
-                (j < rowSize - 1) ? L"0x%04hX, " : L"0x%04hX",
+                (j < rowSize - 1) ? CH_WSTR("0x%04hX, ") : CH_WSTR("0x%04hX"),
                 (*valid)[i][j]));
         }
-        IfErrorGotoCleanup(fwprintf(file, (i < HeapConstants::BucketCount - 1 ? L"\n    },\n" : L"\n    }\n")));
+        IfErrorGotoCleanup(fwprintf(file, (i < HeapConstants::BucketCount - 1 ? CH_WSTR("\n    },\n") : CH_WSTR("\n    }\n"))));
     }
-    IfErrorGotoCleanup(fwprintf(file, L"};\n"));
+    IfErrorGotoCleanup(fwprintf(file, CH_WSTR("};\n")));
 
     // Generate the invalid bitvectors.
     IfErrorGotoCleanup(fwprintf(
         file,
-        L"const BVUnit HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::invalidBitsData[HeapConstants::BucketCount][SmallHeapBlockT<SmallAllocationBlockAttributes>::SmallHeapBlockBitVector::wordCount] = {\n"));
+        CH_WSTR("const BVUnit HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::invalidBitsData[HeapConstants::BucketCount][SmallHeapBlockT<SmallAllocationBlockAttributes>::SmallHeapBlockBitVector::wordCount] = {\n")));
     for (unsigned i = 0; i < HeapConstants::BucketCount; ++i)
     {
-        IfErrorGotoCleanup(fwprintf(file, L"    {\n        "));
+        IfErrorGotoCleanup(fwprintf(file, CH_WSTR("    {\n        ")));
 
         for (unsigned j = 0; j < (*invalid)[i].wordCount; ++j)
         {
-            const wchar_t *format = (j < (*invalid)[i].wordCount - 1) ?
+            const wchar16 *format = (j < (*invalid)[i].wordCount - 1) ?
 #if defined(_M_IX86_OR_ARM32)
-                L"0x%08X, " : L"0x%08X"
+                CH_WSTR("0x%08X, ") : CH_WSTR("0x%08X")
 #elif defined(_M_X64_OR_ARM64)
-                L"0x%016I64X, " : L"0x%016I64X"
+                CH_WSTR("0x%016I64X, ") : CH_WSTR("0x%016I64X")
 #else
 #error "Platform is not handled"
 #endif
                 ;
             IfErrorGotoCleanup(fwprintf(file, format, (*invalid)[i].GetRawData()[j]));
         }
-        IfErrorGotoCleanup(fwprintf(file, (i < HeapConstants::BucketCount - 1 ? L"\n    },\n" : L"\n    }\n")));
+        IfErrorGotoCleanup(fwprintf(file, (i < HeapConstants::BucketCount - 1 ? CH_WSTR("\n    },\n") : CH_WSTR("\n    }\n"))));
     }
 
     IfErrorGotoCleanup(fwprintf(
         file,
-        L"};\n"
-        L"// The following is used to construct the InvalidBitsTable statically without forcing BVStatic to be an aggregate\n"
-        L"const HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::InvalidBitsTable * const HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::invalidBitsBuffers =\n"
-        L"    reinterpret_cast<const HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::InvalidBitsTable *>(&HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::invalidBitsData);\n"));
+        CH_WSTR("};\n")
+        CH_WSTR("// The following is used to construct the InvalidBitsTable statically without forcing BVStatic to be an aggregate\n")
+        CH_WSTR("const HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::InvalidBitsTable * const HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::invalidBitsBuffers =\n")
+        CH_WSTR("    reinterpret_cast<const HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::InvalidBitsTable *>(&HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::invalidBitsData);\n")));
 
     // Generate the block map table
     IfErrorGotoCleanup(fwprintf(
         file,
-        L"const SmallHeapBlockT<SmallAllocationBlockAttributes>::BlockInfo  HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::blockInfoBuffer[SmallAllocationBlockAttributes::BucketCount][SmallAllocationBlockAttributes::PageCount] = {\n"));
+        CH_WSTR("const SmallHeapBlockT<SmallAllocationBlockAttributes>::BlockInfo  HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::blockInfoBuffer[SmallAllocationBlockAttributes::BucketCount][SmallAllocationBlockAttributes::PageCount] = {\n")));
     for (unsigned i = 0; i < HeapConstants::BucketCount; ++i)
     {
-        IfErrorGotoCleanup(fwprintf(file, L"    // Bucket: %u, Size: %d\n", i, (int) (HeapConstants::ObjectGranularity + (i * SmallAllocationBlockAttributes::BucketGranularity))));
-        IfErrorGotoCleanup(fwprintf(file, L"    {\n"));
+        IfErrorGotoCleanup(fwprintf(file, CH_WSTR("    // Bucket: %u, Size: %d\n"), i, (int) (HeapConstants::ObjectGranularity + (i * SmallAllocationBlockAttributes::BucketGranularity))));
+        IfErrorGotoCleanup(fwprintf(file, CH_WSTR("    {\n")));
 
         for (unsigned j = 0; j < SmallAllocationBlockAttributes::PageCount; ++j)
         {
-            IfErrorGotoCleanup(fwprintf(file, L"        { "));
+            IfErrorGotoCleanup(fwprintf(file, CH_WSTR("        { ")));
 
-            const wchar_t *format = L"0x%04hX, 0x%04hX";
+            const wchar16 *format = CH_WSTR("0x%04hX, 0x%04hX");
             IfErrorGotoCleanup(fwprintf(file, format, (*blockMap)[i][j].lastObjectIndexOnPage, (*blockMap)[i][j].pageObjectCount));
-            IfErrorGotoCleanup(fwprintf(file, (j < SmallAllocationBlockAttributes::PageCount - 1 ? L" },\n" : L" }\n")));
+            IfErrorGotoCleanup(fwprintf(file, (j < SmallAllocationBlockAttributes::PageCount - 1 ? CH_WSTR(" },\n") : CH_WSTR(" }\n"))));
         }
-        IfErrorGotoCleanup(fwprintf(file, (i < HeapConstants::BucketCount - 1 ? L"\n    },\n" : L"\n        }\n")));
+        IfErrorGotoCleanup(fwprintf(file, (i < HeapConstants::BucketCount - 1 ? CH_WSTR("\n    },\n") : CH_WSTR("\n        }\n"))));
     }
 
-    IfErrorGotoCleanup(fwprintf(file, L"};\n"));
+    IfErrorGotoCleanup(fwprintf(file, CH_WSTR("};\n")));
 
 cleanup:
 #undef IfErrorGotoCleanup
@@ -250,74 +251,74 @@ HRESULT HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::GenerateVal
     }
     GenerateValidPointersMap(*valid, *invalid, *blockMap);
 
-    IfErrorGotoCleanup(fwprintf(file, L"const ushort HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::validPointersBuffer[MediumAllocationBlockAttributes::BucketCount][HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::rowSize] = \n{\n"));
+    IfErrorGotoCleanup(fwprintf(file, CH_WSTR("const ushort HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::validPointersBuffer[MediumAllocationBlockAttributes::BucketCount][HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::rowSize] = \n{\n")));
     // Generate the full buffer.
     for (unsigned i = 0; i < HeapConstants::MediumBucketCount; ++i)
     {
-        IfErrorGotoCleanup(fwprintf(file, L"    {\n        "));
+        IfErrorGotoCleanup(fwprintf(file, CH_WSTR("    {\n        ")));
         for (unsigned j = 0; j < rowSize; ++j)
         {
             IfErrorGotoCleanup(fwprintf(
                 file,
-                (j < rowSize - 1) ? L"0x%04hX, " : L"0x%04hX",
+                (j < rowSize - 1) ? CH_WSTR("0x%04hX, ") : CH_WSTR("0x%04hX"),
                 (*valid)[i][j]));
         }
-        IfErrorGotoCleanup(fwprintf(file, (i < HeapConstants::MediumBucketCount - 1 ? L"\n    },\n" : L"\n    }\n")));
+        IfErrorGotoCleanup(fwprintf(file, (i < HeapConstants::MediumBucketCount - 1 ? CH_WSTR("\n    },\n") : CH_WSTR("\n    }\n"))));
     }
-    IfErrorGotoCleanup(fwprintf(file, L"};\n"));
+    IfErrorGotoCleanup(fwprintf(file, CH_WSTR("};\n")));
 
     // Generate the invalid bitvectors.
     IfErrorGotoCleanup(fwprintf(
         file,
-        L"const BVUnit HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::invalidBitsData[MediumAllocationBlockAttributes::BucketCount][SmallHeapBlockT<MediumAllocationBlockAttributes>::SmallHeapBlockBitVector::wordCount] = {\n"));
+        CH_WSTR("const BVUnit HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::invalidBitsData[MediumAllocationBlockAttributes::BucketCount][SmallHeapBlockT<MediumAllocationBlockAttributes>::SmallHeapBlockBitVector::wordCount] = {\n")));
     for (unsigned i = 0; i < HeapConstants::MediumBucketCount; ++i)
     {
-        IfErrorGotoCleanup(fwprintf(file, L"    {\n        "));
+        IfErrorGotoCleanup(fwprintf(file, CH_WSTR("    {\n        ")));
 
         for (unsigned j = 0; j < (*invalid)[i].wordCount; ++j)
         {
-            const wchar_t *format = (j < (*invalid)[i].wordCount - 1) ?
+            const wchar16 *format = (j < (*invalid)[i].wordCount - 1) ?
 #if defined(_M_IX86_OR_ARM32)
-                L"0x%08X, " : L"0x%08X"
+                CH_WSTR("0x%08X, ") : CH_WSTR("0x%08X")
 #elif defined(_M_X64_OR_ARM64)
-                L"0x%016I64X, " : L"0x%016I64X"
+                CH_WSTR("0x%016I64X, ") : CH_WSTR("0x%016I64X")
 #else
 #error "Platform is not handled"
 #endif
                 ;
             IfErrorGotoCleanup(fwprintf(file, format, (*invalid)[i].GetRawData()[j]));
         }
-        IfErrorGotoCleanup(fwprintf(file, (i < HeapConstants::MediumBucketCount - 1 ? L"\n    },\n" : L"\n    }\n")));
+        IfErrorGotoCleanup(fwprintf(file, (i < HeapConstants::MediumBucketCount - 1 ? CH_WSTR("\n    },\n") : CH_WSTR("\n    }\n"))));
     }
     IfErrorGotoCleanup(fwprintf(
         file,
-        L"};\n"
-        L"// The following is used to construct the InvalidBitsTable statically without forcing BVStatic to be an aggregate\n"
-        L"const HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::InvalidBitsTable * const HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::invalidBitsBuffers =\n"
-        L"    reinterpret_cast<const HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::InvalidBitsTable *>(&HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::invalidBitsData);\n"));
+        CH_WSTR("};\n")
+        CH_WSTR("// The following is used to construct the InvalidBitsTable statically without forcing BVStatic to be an aggregate\n")
+        CH_WSTR("const HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::InvalidBitsTable * const HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::invalidBitsBuffers =\n")
+        CH_WSTR("    reinterpret_cast<const HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::InvalidBitsTable *>(&HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::invalidBitsData);\n")));
 
     // Generate the block map table
     IfErrorGotoCleanup(fwprintf(
         file,
-        L"const SmallHeapBlockT<MediumAllocationBlockAttributes>::BlockInfo  HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::blockInfoBuffer[MediumAllocationBlockAttributes::BucketCount][MediumAllocationBlockAttributes::PageCount] = {\n"));
+        CH_WSTR("const SmallHeapBlockT<MediumAllocationBlockAttributes>::BlockInfo  HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::blockInfoBuffer[MediumAllocationBlockAttributes::BucketCount][MediumAllocationBlockAttributes::PageCount] = {\n")));
 
     for (unsigned i = 0; i < HeapConstants::MediumBucketCount; ++i)
     {
-        IfErrorGotoCleanup(fwprintf(file, L"    // Bucket: %u, Size: %d\n", i, (int)(HeapConstants::MaxSmallObjectSize + ((i + 1) * MediumAllocationBlockAttributes::BucketGranularity))));
-        IfErrorGotoCleanup(fwprintf(file, L"    {\n"));
+        IfErrorGotoCleanup(fwprintf(file, CH_WSTR("    // Bucket: %u, Size: %d\n"), i, (int)(HeapConstants::MaxSmallObjectSize + ((i + 1) * MediumAllocationBlockAttributes::BucketGranularity))));
+        IfErrorGotoCleanup(fwprintf(file, CH_WSTR("    {\n")));
 
         for (unsigned j = 0; j < MediumAllocationBlockAttributes::PageCount; ++j)
         {
-            IfErrorGotoCleanup(fwprintf(file, L"        { "));
+            IfErrorGotoCleanup(fwprintf(file, CH_WSTR("        { ")));
 
-            const wchar_t *format = L"0x%04hX, 0x%04hX";
+            const wchar16 *format = CH_WSTR("0x%04hX, 0x%04hX");
             IfErrorGotoCleanup(fwprintf(file, format, (*blockMap)[i][j].lastObjectIndexOnPage, (*blockMap)[i][j].pageObjectCount));
-            IfErrorGotoCleanup(fwprintf(file, (j < MediumAllocationBlockAttributes::PageCount - 1 ? L" },\n" : L" }\n")));
+            IfErrorGotoCleanup(fwprintf(file, (j < MediumAllocationBlockAttributes::PageCount - 1 ? CH_WSTR(" },\n") : CH_WSTR(" }\n"))));
         }
-        IfErrorGotoCleanup(fwprintf(file, (i < HeapConstants::MediumBucketCount - 1 ? L"\n    },\n" : L"\n        }\n")));
+        IfErrorGotoCleanup(fwprintf(file, (i < HeapConstants::MediumBucketCount - 1 ? CH_WSTR("\n    },\n") : CH_WSTR("\n        }\n"))));
     }
 
-    IfErrorGotoCleanup(fwprintf(file, L"};\n"));
+    IfErrorGotoCleanup(fwprintf(file, CH_WSTR("};\n")));
 
 cleanup:
 #undef IfErrorGotoCleanup
@@ -333,23 +334,23 @@ HRESULT HeapInfo::ValidPointersMap<TBlockAttributes>::GenerateValidPointersMapHe
     HRESULT hr = E_FAIL;
     FILE * file = nullptr;
 
-    if (_wfopen_s(&file, vpmFullPath, L"w") == 0 && file != nullptr)
+    if (_wfopen_s(&file, vpmFullPath, CH_WSTR("w")) == 0 && file != nullptr)
     {
-        const wchar_t * header =
-            L"//-------------------------------------------------------------------------------------------------------\n"
-            L"// Copyright (C) Microsoft. All rights reserved.\n"
-            L"// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.\n"
-            L"//-------------------------------------------------------------------------------------------------------\n"
-            L"// Generated via jshost -GenerateValidPointersMapHeader\n"
+        const wchar16 * header =
+            CH_WSTR("//-------------------------------------------------------------------------------------------------------\n")
+            CH_WSTR("// Copyright (C) Microsoft. All rights reserved.\n")
+            CH_WSTR("// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.\n")
+            CH_WSTR("//-------------------------------------------------------------------------------------------------------\n")
+            CH_WSTR("// Generated via jshost -GenerateValidPointersMapHeader\n")
 #if defined(_M_IX86_OR_ARM32)
-            L"// Target platforms: 32bit - x86 & arm\n"
+            CH_WSTR("// Target platforms: 32bit - x86 & arm\n")
 #elif defined(_M_X64_OR_ARM64)
-            L"// Target platform: 64bit - amd64 & arm64\n"
+            CH_WSTR("// Target platform: 64bit - amd64 & arm64\n")
 #else
 #error "Platform is not handled"
 #endif
-            L"#if USE_STATIC_VPM\n"
-            L"\n";
+            CH_WSTR("#if USE_STATIC_VPM\n")
+            CH_WSTR("\n");
         if (fwprintf(file, header) >= 0)
         {
             hr = ValidPointersMap<SmallAllocationBlockAttributes>::GenerateValidPointersMapForBlockType(file);
@@ -358,7 +359,7 @@ HRESULT HeapInfo::ValidPointersMap<TBlockAttributes>::GenerateValidPointersMapHe
                 hr = ValidPointersMap<MediumAllocationBlockAttributes>::GenerateValidPointersMapForBlockType(file);
             }
 
-            fwprintf(file, L"#endif // USE_STATIC_VPM\n");
+            fwprintf(file, CH_WSTR("#endif // USE_STATIC_VPM\n"));
         }
 
         fclose(file);
@@ -1782,8 +1783,11 @@ BOOL MediumAllocationBlockAttributes::IsAlignedObjectSize(size_t sizeCat)
     return HeapInfo::IsAlignedMediumObjectSize(sizeCat);
 }
 
+namespace Memory
+{
 template class HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>;
 template class ValidPointers<SmallAllocationBlockAttributes>;
 
 template class HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>;
 template class ValidPointers<MediumAllocationBlockAttributes>;
+};

+ 14 - 0
lib/Common/Memory/HeapInfo.h

@@ -277,7 +277,14 @@ private:
     template <typename TBlockAttributes>
     class ValidPointersMap
     {
+        // xplat-todo: fix up vpm.64b.h generation to generate correctly 
+        // templatized code
+#ifdef _WIN32
 #define USE_STATIC_VPM 1 // Disable to force generation at runtime
+#else
+#define USE_STATIC_VPM 0
+#endif
+        
     private:
         static const uint rowSize = TBlockAttributes::MaxSmallObjectCount * 2;
         typedef ushort ValidPointersMapRow[rowSize];
@@ -547,6 +554,13 @@ HeapInfo::SmallAllocatorAlloc(Recycler * recycler, SmallHeapBlockAllocatorType *
     return bucket.SnailAlloc(recycler, allocator, sizeCat, attributes, /* nothrow = */ false);
 }
 
+// Forward declaration of explicit specialization before instantiation
+template <>
+HRESULT HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::GenerateValidPointersMapForBlockType(FILE* file);
+template <>
+HRESULT HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::GenerateValidPointersMapForBlockType(FILE* file);
+
+// Template instantiation
 extern template class HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>;
 extern template class HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>;
 

+ 2 - 2
lib/Common/Memory/LargeHeapBlock.cpp

@@ -194,7 +194,7 @@ LargeHeapBlock::~LargeHeapBlock()
         "ReleasePages needs to be called before delete");
     RECYCLER_PERF_COUNTER_DEC(LargeHeapBlockCount);
 
-#ifdef RECYCLER_PAGE_HEAP
+#if defined(RECYCLER_PAGE_HEAP) && defined(STACK_BACK_TRACE)
     if (this->pageHeapAllocStack != nullptr)
     {
         this->pageHeapAllocStack->Delete(&NoCheckHeapAllocator::Instance);
@@ -461,7 +461,7 @@ LargeHeapBlock::AllocFreeListEntry(size_t size, ObjectInfoBits attributes, Large
     header->objectIndex = headerIndex;
     header->objectSize = originalSize;
     header->SetAttributes(this->heapInfo->recycler->Cookie, (attributes & StoredObjectInfoBitMask));
-    header->markOnOOMRescan = nullptr;
+    header->markOnOOMRescan = false;
     header->SetNext(this->heapInfo->recycler->Cookie, nullptr);
 
     HeaderList()[headerIndex] = header;

+ 2 - 0
lib/Common/Memory/LargeHeapBucket.cpp

@@ -234,7 +234,9 @@ LargeHeapBucket::PageHeapAlloc(Recycler * recycler, size_t size, ObjectInfoBits
     Assert(memBlock != nullptr);
     if (recycler->ShouldCapturePageHeapAllocStack())
     {
+#ifdef STACK_BACK_TRACE
         heapBlock->CapturePageHeapAllocStack();
+#endif
     }
 
     return memBlock;

+ 1 - 0
lib/Common/Memory/MarkContext.inl

@@ -2,6 +2,7 @@
 // Copyright (C) Microsoft. All rights reserved.
 // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
 //-------------------------------------------------------------------------------------------------------
+
 __inline
 bool MarkContext::AddMarkedObject(void * objectAddress, size_t objectSize)
 {

+ 19 - 7
lib/Common/Memory/Recycler.cpp

@@ -19,9 +19,9 @@
 #include "arm64.h"
 #endif
 
-#include "core\BinaryFeatureControl.h"
-#include "Common\ThreadService.h"
-#include "Memory\AutoAllocatorObjectPtr.h"
+#include "Core/BinaryFeatureControl.h"
+#include "Common/ThreadService.h"
+#include "Memory/AutoAllocatorObjectPtr.h"
 
 DEFINE_RECYCLER_TRACKER_PERF_COUNTER(RecyclerWeakReferenceBase);
 
@@ -570,6 +570,7 @@ Recycler::SetIsThreadBound()
     Assert(mainThreadHandle == nullptr);
     ::DuplicateHandle(::GetCurrentProcess(), ::GetCurrentThread(), ::GetCurrentProcess(),  &mainThreadHandle,
         0, FALSE, DUPLICATE_SAME_ACCESS);
+
     stackBase = GetStackBase();
 }
 
@@ -1093,8 +1094,10 @@ bool Recycler::ExplicitFreeInternal(void* buffer, size_t size, size_t sizeCat)
 #ifdef RECYCLER_PAGE_HEAP
     if (this->IsPageHeapEnabled() && this->ShouldCapturePageHeapFreeStack())
     {
+#ifdef STACK_BACK_TRACE
         heapBlock->CapturePageHeapFreeStack();
-
+#endif
+        
         // Don't do actual explicit free in page heap mode
         return false;
     }
@@ -1321,11 +1324,20 @@ void Recycler::TrackNativeAllocatedMemoryBlock(Recycler * recycler, void * memBl
  * FindRoots
  *------------------------------------------------------------------------------------------------*/
 
-
+// xplat-todo: Unify these two variants of GetStackBase
+#ifdef _WIN32
 static void* GetStackBase()
 {
     return ((NT_TIB *)NtCurrentTeb())->StackBase;
 }
+#else
+static void* GetStackBase()
+{
+    char *stackBase, *stackTop;
+    ::GetCurrentThreadStackBounds(&stackBase, &stackTop);
+    return (void*) stackBase;
+}
+#endif
 
 #if _M_IX86
 // REVIEW: For x86, do we care about scanning esp/ebp?
@@ -1467,7 +1479,7 @@ Recycler::ScanStack()
     SAVE_THREAD_CONTEXT();
     void * stackTop = this->savedThreadContext.GetStackTop();
 
-    void * stackStart = GetStackBase();
+    void * stackStart = GetStackBase(); 
     Assert(stackStart > stackTop);
     size_t stackScanned = (size_t)((char *)stackStart - (char *)stackTop);
 
@@ -7648,7 +7660,7 @@ Recycler::VerifyMark(void * candidate)
 #endif
 
 ArenaAllocator *
-Recycler::CreateGuestArena(wchar_t const * name, void (*outOfMemoryFunc)())
+Recycler::CreateGuestArena(wchar16 const * name, void (*outOfMemoryFunc)())
 {
     // Note, guest arenas use the large block allocator.
     return guestArenaList.PrependNode(&HeapAllocator::Instance, name, &recyclerLargeBlockPageAllocator, outOfMemoryFunc);

+ 4 - 2
lib/Common/Memory/Recycler.h

@@ -728,7 +728,7 @@ private:
 
     struct GuestArenaAllocator : public ArenaAllocator
     {
-        GuestArenaAllocator(__in LPCWSTR name, PageAllocator * pageAllocator, void (*outOfMemoryFunc)())
+        GuestArenaAllocator(__in wchar16 const*  name, PageAllocator * pageAllocator, void (*outOfMemoryFunc)())
             : ArenaAllocator(name, pageAllocator, outOfMemoryFunc), pendingDelete(false)
         {
         }
@@ -1189,7 +1189,7 @@ public:
     HeapInfo* CreateHeap();
     void DestroyHeap(HeapInfo* heapInfo);
 
-    ArenaAllocator * CreateGuestArena(wchar_t const * name, void (*outOfMemoryFunc)());
+    ArenaAllocator * CreateGuestArena(wchar16 const * name, void (*outOfMemoryFunc)());
     void DeleteGuestArena(ArenaAllocator * arenaAllocator);
 
     ArenaData ** RegisterExternalGuestArena(ArenaData* guestArena)
@@ -2080,7 +2080,9 @@ public:
         {
             Assert(recycler->IsPageHeapEnabled());
 
+#ifdef STACK_BACK_TRACE
             this->m_heapBlock->CapturePageHeapFreeStack();
+#endif
         }
 #endif
 

+ 1 - 1
lib/Common/Memory/Recycler.inl

@@ -445,7 +445,7 @@ Recycler::NotifyFree(T * heapBlock)
         this->isForceSweeping = true;
         heapBlock->isForceSweeping = true;
 #endif
-        heapBlock->SweepObjects<pageheap, SweepMode_InThread>(this);
+        heapBlock->template SweepObjects<pageheap, SweepMode_InThread>(this);
 #if DBG || defined(RECYCLER_STATS)
         heapBlock->isForceSweeping = false;
         this->isForceSweeping = false;

+ 8 - 7
lib/Common/Memory/RecyclerWriteBarrierManager.cpp

@@ -46,13 +46,19 @@ X64WriteBarrierCardTableManager::OnThreadInit()
     // We page in the card table sections for the current threads stack reservation
     // So any writes to stack allocated vars can also have the write barrier set
 
+    // xplat-todo: Replace this on Windows too with GetCurrentThreadStackBounds
+#ifdef _WIN32
     NT_TIB* teb = (NT_TIB*) ::NtCurrentTeb();
 
     char* stackBase = (char*) teb->StackBase;
     char* stackEnd  = (char*) teb->StackLimit;
-
+#else
+    char* stackBase = nullptr;
+    char* stackEnd = nullptr;
+    ::GetCurrentThreadStackBounds(&stackBase, &stackEnd);
+#endif
+    
     size_t numPages = (stackBase - stackEnd) / AutoSystemInfo::PageSize;
-
     // stackEnd is the lower boundary
     return OnSegmentAlloc(stackEnd, numPages);
 }
@@ -225,11 +231,6 @@ X64WriteBarrierCardTableManager::Initialize()
 
         LPVOID cardTableSpace = ::VirtualAlloc(NULL, _cardTableNumEntries, MEM_RESERVE, PAGE_READWRITE);
 
-        if (cardTableSpace == nullptr)
-        {
-            return false;
-        }
-
         _cardTable = (BYTE*) cardTableSpace;
     }
 

+ 2 - 0
lib/Common/Memory/SmallHeapBlockAllocator.h

@@ -166,7 +166,9 @@ SmallHeapBlockAllocator<TBlockType>::PageHeapAlloc(Recycler * recycler, size_t s
 
         if (recycler->ShouldCapturePageHeapAllocStack())
         {
+#ifdef STACK_BACK_TRACE
             smallBlock->CapturePageHeapAllocStack();
+#endif
         }
     }