Просмотр исходного кода

[1.8>master] [MERGE #4373 @agarwal-sandeep] Code Quality: Replace usage of _M_IX86_OR_ARM32 and _M_X64_OR_ARM64 with TARGET_32 and TARGET_64

Merge pull request #4373 from agarwal-sandeep:arm64codequality
Sandeep Agarwal 8 лет назад
Родитель
Commit
9e6f4e74dc
66 измененных файлов: 153 добавления и 156 удалений
  1. 3 3
      CMakeLists.txt
  2. 1 1
      lib/Backend/CodeGenAllocators.cpp
  3. 1 1
      lib/Backend/CodeGenAllocators.h
  4. 1 1
      lib/Backend/EmitBuffer.cpp
  5. 1 1
      lib/Backend/Encoder.cpp
  6. 1 1
      lib/Backend/Func.cpp
  7. 2 2
      lib/Backend/Func.h
  8. 4 7
      lib/Backend/JITOutput.cpp
  9. 1 1
      lib/Backend/JITOutput.h
  10. 1 1
      lib/Backend/JITThunkEmitter.cpp
  11. 1 1
      lib/Backend/JITThunkEmitter.h
  12. 1 1
      lib/Backend/Lower.cpp
  13. 1 1
      lib/Backend/Lower.h
  14. 2 2
      lib/Backend/NativeCodeGenerator.cpp
  15. 2 2
      lib/Backend/NativeCodeGenerator.h
  16. 5 5
      lib/Backend/Opnd.cpp
  17. 1 1
      lib/Backend/Opnd.h
  18. 3 3
      lib/Backend/ServerThreadContext.cpp
  19. 2 2
      lib/Backend/ServerThreadContext.h
  20. 1 1
      lib/Common/Common/Jobs.cpp
  21. 1 1
      lib/Common/Common/NumberUtilities.cpp
  22. 1 1
      lib/Common/Common/NumberUtilities.inl
  23. 3 5
      lib/Common/CommonDefines.h
  24. 1 1
      lib/Common/DataStructures/SparseBitVector.h
  25. 3 3
      lib/Common/DataStructures/UnitBitVector.h
  26. 1 1
      lib/Common/Exceptions/ReportError.cpp
  27. 1 1
      lib/Common/Exceptions/ReportError.h
  28. 1 1
      lib/Common/Memory/ArenaAllocator.cpp
  29. 2 2
      lib/Common/Memory/ArenaAllocator.h
  30. 1 0
      lib/Common/Memory/Chakra.Common.Memory.vcxproj
  31. 1 0
      lib/Common/Memory/Chakra.Common.Memory.vcxproj.filters
  32. 1 1
      lib/Common/Memory/ForcedMemoryConstraints.cpp
  33. 8 8
      lib/Common/Memory/HeapBlockMap.cpp
  34. 4 4
      lib/Common/Memory/HeapBlockMap.h
  35. 4 4
      lib/Common/Memory/HeapBlockMap.inl
  36. 3 3
      lib/Common/Memory/HeapConstants.h
  37. 8 8
      lib/Common/Memory/HeapInfo.cpp
  38. 2 2
      lib/Common/Memory/LargeHeapBlock.h
  39. 6 6
      lib/Common/Memory/PageAllocator.cpp
  40. 5 5
      lib/Common/Memory/PageAllocator.h
  41. 2 2
      lib/Common/Memory/RecyclerWriteBarrierManager.cpp
  42. 6 6
      lib/Common/Memory/RecyclerWriteBarrierManager.h
  43. 4 4
      lib/Common/Memory/SectionAllocWrapper.cpp
  44. 5 5
      lib/Common/Memory/SectionAllocWrapper.h
  45. 4 4
      lib/Common/Memory/VirtualAllocWrapper.cpp
  46. 5 5
      lib/Common/Memory/VirtualAllocWrapper.h
  47. 4 4
      lib/JITIDL/JITTypes.h
  48. 1 1
      lib/JITServer/JITServer.cpp
  49. 3 3
      lib/Runtime/Base/Constants.h
  50. 2 2
      lib/Runtime/Base/FunctionBody.cpp
  51. 1 1
      lib/Runtime/Base/FunctionBody.h
  52. 1 1
      lib/Runtime/Base/ScriptContext.cpp
  53. 2 2
      lib/Runtime/Base/ThreadContext.cpp
  54. 2 2
      lib/Runtime/Base/ThreadContext.h
  55. 1 1
      lib/Runtime/ByteCode/ByteCodeWriter.cpp
  56. 3 3
      lib/Runtime/Language/InlineCache.h
  57. 3 3
      lib/Runtime/Library/CompoundString.cpp
  58. 1 1
      lib/Runtime/Library/ConcatString.h
  59. 2 2
      lib/Runtime/Library/JavascriptArray.cpp
  60. 1 1
      lib/Runtime/Library/JavascriptArray.h
  61. 1 1
      lib/Runtime/Library/JavascriptArray.inl
  62. 1 1
      lib/Runtime/Library/JavascriptLibrary.cpp
  63. 1 1
      lib/Runtime/Library/JavascriptString.cpp
  64. 1 1
      lib/Runtime/Library/MathLibrary.cpp
  65. 2 2
      lib/Runtime/Types/ArrayObject.h
  66. 2 2
      lib/Runtime/Types/TypePath.h

+ 3 - 3
CMakeLists.txt

@@ -160,7 +160,7 @@ endif()
 
 set(CLR_CMAKE_PLATFORM_XPLAT 1)
 if(CC_TARGETS_AMD64)
-    add_definitions(-D_M_X64_OR_ARM64)
+    add_definitions(-DTARGET_64)
     add_compile_options(-msse4.2)
 
     if(NOT CMAKE_BUILD_TYPE STREQUAL Release)
@@ -168,7 +168,7 @@ if(CC_TARGETS_AMD64)
     endif()
 elseif(CC_TARGETS_X86)
     add_definitions(-D__i686__)
-    add_definitions(-D_M_IX86_OR_ARM32)
+    add_definitions(-DTARGET_32)
     add_compile_options(-arch i386)
     add_compile_options(-msse3)
 
@@ -177,7 +177,7 @@ elseif(CC_TARGETS_X86)
     )
 elseif(CC_TARGETS_ARM)
     add_definitions(-D__arm__)
-    add_definitions(-D_M_IX86_OR_ARM32)
+    add_definitions(-DTARGET_32)
     add_definitions(-D_M_ARM32_OR_ARM64)
     if(CC_TARGET_OS_OSX)
         add_compile_options(-arch arm)

+ 1 - 1
lib/Backend/CodeGenAllocators.cpp

@@ -9,7 +9,7 @@ CodeGenAllocators<TAlloc, TPreReservedAlloc>::CodeGenAllocators(AllocationPolicy
 : pageAllocator(policyManager, Js::Configuration::Global.flags, PageAllocatorType_BGJIT, 0)
 , allocator(_u("NativeCode"), &pageAllocator, Js::Throw::OutOfMemory)
 , emitBufferManager(&allocator, codePageAllocators, scriptContext, threadContext, _u("JIT code buffer"), processHandle)
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
 , canCreatePreReservedSegment(false)
 #endif
 {

+ 1 - 1
lib/Backend/CodeGenAllocators.h

@@ -13,7 +13,7 @@ private:
     NoRecoverMemoryArenaAllocator  allocator;
 public:
     EmitBufferManager<TAlloc, TPreReservedAlloc, CriticalSection> emitBufferManager;
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
     bool canCreatePreReservedSegment;
 #endif
 

+ 1 - 1
lib/Backend/EmitBuffer.cpp

@@ -217,7 +217,7 @@ EmitBufferManager<TAlloc, TPreReservedAlloc, SyncObject>::FreeAllocation(void* a
                 this->scriptContext->GetThreadContext()->SubCodeSize(allocation->bytesCommitted);
             }
 
-#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64_OR_ARM64)
+#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
             if (allocation->allocation->thunkAddress)
             {
                 if (JITManager::GetJITManager()->IsJITServer())

+ 1 - 1
lib/Backend/Encoder.cpp

@@ -396,7 +396,7 @@ Encoder::Encode()
     }
 #endif
 
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
 #ifdef _M_X64
     PrologEncoder &unwindInfo = m_func->m_prologEncoder;
     unwindInfo.FinalizeUnwindInfo((BYTE*)m_func->GetJITOutput()->GetCodeAddress(), (DWORD)codeSize);

+ 1 - 1
lib/Backend/Func.cpp

@@ -1157,7 +1157,7 @@ bool Func::CanAllocInPreReservedHeapPageSegment ()
         && GetInProcCodeGenAllocators()->canCreatePreReservedSegment
 #endif
         );
-#elif _M_X64_OR_ARM64
+#elif TARGET_64
         && true);
 #else
         && false); //Not yet implemented for architectures other than x86 and amd64.

+ 2 - 2
lib/Backend/Func.h

@@ -344,9 +344,9 @@ public:
 static const uint32 c_debugFillPattern4 = 0xcececece;
 static const unsigned __int64 c_debugFillPattern8 = 0xcececececececece;
 
-#if defined(_M_IX86) || defined (_M_ARM)
+#if defined(TARGET_32)
     static const uint32 c_debugFillPattern = c_debugFillPattern4;
-#elif defined(_M_X64) || defined(_M_ARM64)
+#elif defined(TARGET_64)
     static const unsigned __int64 c_debugFillPattern = c_debugFillPattern8;
 #else
 #error unsupported platform

+ 4 - 7
lib/Backend/JITOutput.cpp

@@ -218,7 +218,7 @@ JITOutput::RecordInlineeFrameOffsetsInfo(unsigned int offsetsArrayOffset, unsign
     m_outputData->inlineeFrameOffsetArrayCount = offsetsArrayCount;
 }
 
-#if _M_X64_OR_ARM64
+#if TARGET_64
 void
 JITOutput::RecordUnwindInfo(BYTE *unwindInfo, size_t size, BYTE * xdataAddr, BYTE* localXdataAddr)
 {
@@ -255,13 +255,12 @@ JITOutput::FinalizeNativeCode()
     if (JITManager::GetJITManager()->IsJITServer())
     {
         m_func->GetOOPCodeGenAllocators()->emitBufferManager.CompletePreviousAllocation(m_oopAlloc);
-#if defined(_CONTROL_FLOW_GUARD)
-#if _M_IX86 || _M_X64_OR_ARM64
+
+#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
         if (!m_func->IsLoopBody() && CONFIG_FLAG(UseJITTrampoline))
         {
             allocation->thunkAddress = m_func->GetOOPThreadContext()->GetJITThunkEmitter()->CreateThunk(m_outputData->codeAddress);
         }
-#endif
 #endif
     }
     else
@@ -275,13 +274,11 @@ JITOutput::FinalizeNativeCode()
         m_func->GetInProcJITEntryPointInfo()->SetNumberChunks(numberChunks);
 #endif
 
-#if defined(_CONTROL_FLOW_GUARD)
-#if _M_IX86 || _M_X64_OR_ARM64
+#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
         if (!m_func->IsLoopBody() && CONFIG_FLAG(UseJITTrampoline))
         {
             allocation->thunkAddress = m_func->GetInProcThreadContext()->GetJITThunkEmitter()->CreateThunk(m_outputData->codeAddress);
         }
-#endif
 #endif
     }
     m_outputData->thunkAddress = allocation->thunkAddress;

+ 1 - 1
lib/Backend/JITOutput.h

@@ -42,7 +42,7 @@ public:
     void RecordNativeCode(const BYTE* sourceBuffer, BYTE* localCodeAddress);
     void RecordInlineeFrameOffsetsInfo(unsigned int offsetsArrayOffset, unsigned int offsetsArrayCount);
 
-#if _M_X64_OR_ARM64
+#if TARGET_64
     void RecordUnwindInfo(BYTE *unwindInfo, size_t size, BYTE * xdataAddr, BYTE* localXdataAddr);
 #elif _M_ARM
     size_t RecordUnwindInfo(size_t offset, const BYTE *unwindInfo, size_t size, BYTE * xdataAddr);

+ 1 - 1
lib/Backend/JITThunkEmitter.cpp

@@ -5,7 +5,7 @@
 
 #include "Backend.h"
 
-#if defined(ENABLE_NATIVE_CODEGEN) && defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64_OR_ARM64)
+#if defined(ENABLE_NATIVE_CODEGEN) && defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
 
 template class JITThunkEmitter<VirtualAllocWrapper>;
 

+ 1 - 1
lib/Backend/JITThunkEmitter.h

@@ -4,7 +4,7 @@
 //-------------------------------------------------------------------------------------------------------
 #pragma once
 
-#if defined(ENABLE_NATIVE_CODEGEN) && defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64_OR_ARM64)
+#if defined(ENABLE_NATIVE_CODEGEN) && defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
 template <typename TAlloc>
 class JITThunkEmitter
 {

+ 1 - 1
lib/Backend/Lower.cpp

@@ -81,7 +81,7 @@ Lowerer::Lower()
 
                 IRType opnd1Type;
 
-#if defined(_M_IX86) || defined (_M_ARM)
+#if defined(TARGET_32)
                 opnd1Type = TyInt32;
                 opnd2 = IR::IntConstOpnd::New(Func::c_debugFillPattern4, opnd1Type, m_func);
 #else

+ 1 - 1
lib/Backend/Lower.h

@@ -21,7 +21,7 @@ enum RoundMode : BYTE {
 
 #if defined(_M_IX86) || defined(_M_AMD64)
 #include "LowerMDShared.h"
-#elif defined(_M_ARM) || defined(_M_ARM64)
+#elif defined(_M_ARM32_OR_ARM64)
 #include "LowerMD.h"
 #endif
 

+ 2 - 2
lib/Backend/NativeCodeGenerator.cpp

@@ -331,7 +331,7 @@ void DoFunctionRelocations(BYTE *function, DWORD functionOffset, DWORD functionS
                     }
                     break;
 
-#elif defined(_M_X64_OR_ARM64)
+#elif defined(TARGET_64)
                 case IMAGE_REL_BASED_DIR64:
                     {
                         ULONGLONG *patchAddr64 = (ULONGLONG *) (function + blockOffset + offset - functionOffset);
@@ -1067,7 +1067,7 @@ NativeCodeGenerator::CodeGen(PageAllocator * pageAllocator, CodeGenWorkItem* wor
         workItem->GetEntryPoint()->GetJitTransferData()->SetIsReady();
     }
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     XDataAllocation * xdataInfo = HeapNewZ(XDataAllocation);
     xdataInfo->address = (byte*)jitWriteData.xdataAddr;
     XDataAllocator::Register(xdataInfo, jitWriteData.codeAddress, jitWriteData.codeSize);

+ 2 - 2
lib/Backend/NativeCodeGenerator.h

@@ -138,7 +138,7 @@ private:
         {
             this->foregroundAllocators = CreateAllocators(pageAllocator);
 
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
             if (this->scriptContext->webWorkerId != Js::Constants::NonWebWorkerContextId)
             {
                 this->foregroundAllocators->canCreatePreReservedSegment = true;
@@ -164,7 +164,7 @@ private:
         if (!this->backgroundAllocators)
         {
             this->backgroundAllocators = CreateAllocators(pageAllocator);
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
             this->backgroundAllocators->canCreatePreReservedSegment = true;
 #endif
         }

+ 5 - 5
lib/Backend/Opnd.cpp

@@ -458,7 +458,7 @@ Opnd::GetImmediateValue(Func* func)
     }
 }
 
-#if TARGET_32 && !defined(_M_IX86)
+#if defined(_M_ARM)
 int32
 Opnd::GetImmediateValueAsInt32(Func * func)
 {
@@ -3026,7 +3026,7 @@ Opnd::DumpAddress(void *address, bool printToConsole, bool skipMaskedAddress)
     }
     else
     {
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
         Output::Print(_u("0x%012I64X"), address);
 #else
         Output::Print(_u("0x%08X"), address);
@@ -3574,7 +3574,7 @@ Opnd::GetAddrDescription(__out_ecount(count) char16 *const description, const si
         {
         case IR::AddrOpndKindConstantAddress:
         {
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
             char16 const * format = _u("0x%012I64X");
 #else
             char16 const * format = _u("0x%08X");
@@ -3585,7 +3585,7 @@ Opnd::GetAddrDescription(__out_ecount(count) char16 *const description, const si
         case IR::AddrOpndKindDynamicVar:
             if (Js::TaggedInt::Is(address))
             {
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
                 char16 const * format = _u("0x%012I64X (value: %d)");
 #else
                 char16 const * format = _u("0x%08X  (value: %d)");
@@ -3645,7 +3645,7 @@ Opnd::GetAddrDescription(__out_ecount(count) char16 *const description, const si
             break;
         case IR::AddrOpndKindConstantVar:
         {
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
             char16 const * format = _u("0x%012I64X%s");
 #else
             char16 const * format = _u("0x%08X%s");

+ 1 - 1
lib/Backend/Opnd.h

@@ -254,7 +254,7 @@ public:
     void                SetIsDead(const bool isDead = true)   { this->m_isDead = isDead; }
     bool                GetIsDead()   { return this->m_isDead; }
     int64               GetImmediateValue(Func * func);
-#if TARGET_32 && !defined(_M_IX86)
+#if defined(_M_ARM)
     // Helper for 32bits systems without int64 const operand support
     int32               GetImmediateValueAsInt32(Func * func);
 #endif

+ 3 - 3
lib/Backend/ServerThreadContext.cpp

@@ -18,7 +18,7 @@ ServerThreadContext::ServerThreadContext(ThreadContextDataIDL * data, HANDLE pro
     m_sectionAllocator(processHandle),
     m_thunkPageAllocators(nullptr, /* allocXData */ false, &m_sectionAllocator, nullptr, processHandle),
     m_codePageAllocators(nullptr, ALLOC_XDATA, &m_sectionAllocator, &m_preReservedSectionAllocator, processHandle),
-#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64_OR_ARM64)
+#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
     m_jitThunkEmitter(this, &m_sectionAllocator, processHandle),
 #endif
     m_codeGenAlloc(nullptr, nullptr, this, &m_codePageAllocators, processHandle),
@@ -30,7 +30,7 @@ ServerThreadContext::ServerThreadContext(ThreadContextDataIDL * data, HANDLE pro
 {
     m_pid = GetProcessId(processHandle);
 
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
     m_codeGenAlloc.canCreatePreReservedSegment = data->allowPrereserveAlloc != FALSE;
 #endif
     m_numericPropertyBV = HeapNew(BVSparse<HeapAllocator>, &HeapAllocator::Instance);
@@ -139,7 +139,7 @@ ServerThreadContext::GetCodeGenAllocators()
     return &m_codeGenAlloc;
 }
 
-#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64_OR_ARM64)
+#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
 OOPJITThunkEmitter *
 ServerThreadContext::GetJITThunkEmitter()
 {

+ 2 - 2
lib/Backend/ServerThreadContext.h

@@ -38,7 +38,7 @@ public:
     virtual ptrdiff_t GetCRTBaseAddressDifference() const override;
 
     OOPCodeGenAllocators * GetCodeGenAllocators();
-#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64_OR_ARM64)
+#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
     OOPJITThunkEmitter * GetJITThunkEmitter();
 #endif
     CustomHeap::OOPCodePageAllocators * GetThunkPageAllocators();
@@ -67,7 +67,7 @@ private:
     CustomHeap::OOPCodePageAllocators m_thunkPageAllocators;
     CustomHeap::OOPCodePageAllocators  m_codePageAllocators;
     OOPCodeGenAllocators m_codeGenAlloc;
-#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64_OR_ARM64)
+#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
     OOPJITThunkEmitter m_jitThunkEmitter;
 #endif
     // only allocate with this from foreground calls (never from CodeGen calls)

+ 1 - 1
lib/Common/Common/Jobs.cpp

@@ -1226,7 +1226,7 @@ namespace JsUtil
     {
         Assert(lpParam);
 
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
 #ifdef RECYCLER_WRITE_BARRIER
         Memory::RecyclerWriteBarrierManager::OnThreadInit();
 #endif

+ 1 - 1
lib/Common/Common/NumberUtilities.cpp

@@ -147,7 +147,7 @@ namespace Js
 
     bool NumberUtilities::IsFinite(double value)
     {
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
         return 0 != (~(ToSpecial(value)) & 0x7FF0000000000000ull);
 #else
         return 0 != (~Js::NumberUtilities::LuHiDbl(value) & 0x7FF00000);

+ 1 - 1
lib/Common/Common/NumberUtilities.inl

@@ -136,7 +136,7 @@ namespace Js
 
     NUMBER_UTIL_INLINE bool NumberUtilities::IsNan(double value)
     {
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
         // NaN is a range of values; all bits on the exponent are 1's and some nonzero significant.
         // no distinction on signed NaN's
         uint64 nCompare = ToSpecial(value);

+ 3 - 5
lib/Common/CommonDefines.h

@@ -63,12 +63,10 @@
 #endif
 
 #if defined(_M_IX86) || defined(_M_ARM)
-#define _M_IX86_OR_ARM32 1
 #define TARGET_32 1
 #endif
 
 #if defined(_M_X64) || defined(_M_ARM64)
-#define _M_X64_OR_ARM64 1
 #define TARGET_64 1
 #endif
 
@@ -272,7 +270,7 @@
 #define ENABLE_BACKGROUND_JOB_PROCESSOR 1
 #define ENABLE_COPYONACCESS_ARRAY 1
 #ifndef DYNAMIC_INTERPRETER_THUNK
-#if defined(_M_IX86_OR_ARM32) || defined(_M_X64_OR_ARM64)
+#if defined(TARGET_32) || defined(TARGET_64)
 #define DYNAMIC_INTERPRETER_THUNK 1
 #else
 #define DYNAMIC_INTERPRETER_THUNK 0
@@ -682,7 +680,7 @@
 // Platform dependent flags
 //----------------------------------------------------------------------------------------------------
 #ifndef INT32VAR
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
 #define INT32VAR 1
 #else
 #define INT32VAR 0
@@ -690,7 +688,7 @@
 #endif
 
 #ifndef FLOATVAR
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
 #define FLOATVAR 1
 #else
 #define FLOATVAR 0

+ 1 - 1
lib/Common/DataStructures/SparseBitVector.h

@@ -205,7 +205,7 @@ BVSparseNode<TAllocator>::BVSparseNode(BVIndex beginIndex, BVSparseNode<TAllocat
 {
     // Performance assert, BVSparseNode is heavily used in the backend, do perf
     // measurement before changing this.
-#if defined(_M_ARM64) || defined(_M_X64)
+#if defined(TARGET_64)
     CompileAssert(sizeof(BVSparseNode) == 24);
 #else
     CompileAssert(sizeof(BVSparseNode) == 16);

+ 3 - 3
lib/Common/DataStructures/UnitBitVector.h

@@ -30,7 +30,7 @@ GetFirstBitSet(DWORD *Index, UnitWord32 Mask)
 inline BOOLEAN
 GetFirstBitSet(DWORD *Index, UnitWord64 Mask)
 {
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     return _BitScanForward64(Index, Mask);
 #else
     //_BitScanForward64 intrinsic is not available in x86 & ARM
@@ -56,7 +56,7 @@ GetLastBitSet(DWORD *Index, UnitWord32 Mask)
 inline BOOLEAN
 GetLastBitSet(DWORD *Index, UnitWord64 Mask)
 {
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     return _BitScanReverse64(Index, Mask);
 #else
     //_BitScanReverse64 intrinsic is not available in x86 & ARM
@@ -499,7 +499,7 @@ public:
 typedef BVUnitT<UnitWord32> BVUnit32;
 typedef BVUnitT<UnitWord64> BVUnit64;
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     typedef BVUnit64 BVUnit;
 #else
     typedef BVUnit32 BVUnit;

+ 1 - 1
lib/Common/Exceptions/ReportError.cpp

@@ -73,7 +73,7 @@ _NOINLINE void FailedToBox_OOM_fatal_error(
     ReportFatalException(context, E_UNEXPECTED, Fatal_FailedToBox_OUTOFMEMORY, scenario);
 }
 
-#if defined(RECYCLER_WRITE_BARRIER) && defined(_M_X64_OR_ARM64)
+#if defined(RECYCLER_WRITE_BARRIER) && defined(TARGET_64)
 _NOINLINE void X64WriteBarrier_OOM_fatal_error()
 {
     int scenario = 3;

+ 1 - 1
lib/Common/Exceptions/ReportError.h

@@ -51,7 +51,7 @@ void Amd64StackWalkerOutOfContexts_fatal_error(
 void FailedToBox_OOM_fatal_error(
     __in ULONG_PTR context);
 
-#if defined(RECYCLER_WRITE_BARRIER) && defined(_M_X64_OR_ARM64)
+#if defined(RECYCLER_WRITE_BARRIER) && defined(TARGET_64)
 void X64WriteBarrier_OOM_fatal_error();
 #endif
 

+ 1 - 1
lib/Common/Memory/ArenaAllocator.cpp

@@ -460,7 +460,7 @@ ReleaseHeapMemory()
 
 template _ALWAYSINLINE char *ArenaAllocatorBase<InPlaceFreeListPolicy, 0, 0, 0>::AllocInternal(size_t requestedBytes);
 
-#if !(defined(__clang__) && defined(_M_IX86_OR_ARM32))
+#if !(defined(__clang__) && defined(TARGET_32))
 // otherwise duplicate instantination of AllocInternal Error
 template _ALWAYSINLINE char *ArenaAllocatorBase<InPlaceFreeListPolicy, 3, 0, 0>::AllocInternal(size_t requestedBytes);
 #endif

+ 2 - 2
lib/Common/Memory/ArenaAllocator.h

@@ -136,7 +136,7 @@ protected:
 // Implements most of memory management operations over ArenaData.
 // The TFreeListPolicy handles free-listing for "small objects". There
 // is no support for free-listing for "large objects".
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
 // Some data structures such as jmp_buf expect to be 16 byte aligned on AMD64.
 template <class TFreeListPolicy, size_t ObjectAlignmentBitShiftArg = 4, bool RequireObjectAlignment = false, size_t MaxObjectSize = 0>
 #else
@@ -600,7 +600,7 @@ public:
     CompileAssert(sizeof(CacheLayout) == sizeof(FreeObject));
     CompileAssert(offsetof(CacheLayout, strongRef) == offsetof(FreeObject, next));
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     CompileAssert(sizeof(CacheLayout) == 32);
     static const size_t ObjectAlignmentBitShift = 5;
 #else

+ 1 - 0
lib/Common/Memory/Chakra.Common.Memory.vcxproj

@@ -157,6 +157,7 @@
       <ExcludedFromBuild>true</ExcludedFromBuild>
     </None>
     <None Include="HeapBlock.inl" />
+    <None Include="HeapBlockMap.inl" />
     <None Include="HeapBucket.inl" />
     <None Include="LargeHeapBucket.inl" />
     <None Include="MarkContext.inl" />

+ 1 - 0
lib/Common/Memory/Chakra.Common.Memory.vcxproj.filters

@@ -119,6 +119,7 @@
   </ItemGroup>
   <ItemGroup>
     <None Include="HeapBlock.inl" />
+    <None Include="HeapBlockMap.inl" />
     <None Include="HeapBucket.inl" />
     <None Include="LargeHeapBucket.inl" />
     <None Include="MarkContext.inl" />

+ 1 - 1
lib/Common/Memory/ForcedMemoryConstraints.cpp

@@ -20,7 +20,7 @@ ForcedMemoryConstraint::Apply()
 void ForcedMemoryConstraint::FragmentAddressSpace(size_t usableSize)
 {
     // AMD64 address space is too big
-#if !defined(_M_X64_OR_ARM64)
+#if !defined(TARGET_64)
     uint const allocationGranularity = 64 * 1024;     // 64 KB
     Assert(allocationGranularity == AutoSystemInfo::Data.dwAllocationGranularity);
     uint64 const addressEnd = ((uint64)4) * 1024 * 1024 * 1024;

+ 8 - 8
lib/Common/Memory/HeapBlockMap.cpp

@@ -12,7 +12,7 @@ const uint Memory::HeapBlockMap32::L1Count;
 const uint Memory::HeapBlockMap32::L2Count;
 #endif
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
 HeapBlockMap32::HeapBlockMap32(__in char * startAddress) :
     startAddress(startAddress),
 #else
@@ -22,7 +22,7 @@ HeapBlockMap32::HeapBlockMap32() :
 {
     memset(map, 0, sizeof(map));
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     Assert(((size_t)startAddress) % TotalSize == 0);
 #endif
 }
@@ -581,7 +581,7 @@ HeapBlockMap32::ForEachSegment(Recycler * recycler, Fn func)
             PageAllocator* segmentPageAllocator = (PageAllocator*)currentSegment->GetAllocator();
 
             Assert(segmentPageAllocator == block->GetPageAllocator(recycler));
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
             // On 64 bit, the segment may span multiple HeapBlockMap32 structures.
             // Limit the processing to the portion of the segment in this HeapBlockMap32.
             // We'll process other portions when we visit the other HeapBlockMap32 structures.
@@ -624,7 +624,7 @@ HeapBlockMap32::ResetDirtyPages(Recycler * recycler)
 #endif
 
 #ifdef RECYCLER_WRITE_BARRIER
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
         if (segment->IsWriteBarrierEnabled())
 #endif
         {
@@ -917,7 +917,7 @@ HeapBlockMap32::Rescan(Recycler * recycler, bool resetWriteWatch)
                     Assert(dirtyPage >= segmentStart);
                     Assert(dirtyPage < segmentStart + segmentLength);
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
                     Assert(HeapBlockMap64::GetNodeStartAddress(dirtyPage) == this->startAddress);
 #endif
 
@@ -941,7 +941,7 @@ HeapBlockMap32::Rescan(Recycler * recycler, bool resetWriteWatch)
                 char * pageAddress = segmentStart + (i * AutoSystemInfo::PageSize);
                 Assert((size_t)(pageAddress - segmentStart) < segmentLength);
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
                 Assert(HeapBlockMap64::GetNodeStartAddress(pageAddress) == this->startAddress);
 #endif
 
@@ -1003,7 +1003,7 @@ HeapBlockMap32::OOMRescan(Recycler * recycler)
                 char * pageAddress = segmentStart + (i * AutoSystemInfo::PageSize);
                 Assert((size_t)(pageAddress - segmentStart) < segmentLength);
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
                 Assert(HeapBlockMap64::GetNodeStartAddress(pageAddress) == this->startAddress);
 #endif
 
@@ -1172,7 +1172,7 @@ HeapBlockMap32::Cleanup(bool concurrentFindImplicitRoot)
     }
 }
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
 
 HeapBlockMap64::HeapBlockMap64():
     list(nullptr)

+ 4 - 4
lib/Common/Memory/HeapBlockMap.h

@@ -21,7 +21,7 @@ public:
     static const uint PageMarkBitCount = PageSize / HeapConstants::ObjectGranularity;
     static const uint L2ChunkMarkBitCount = L2Count * PageMarkBitCount;
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     static const size_t TotalSize = 0x100000000;        // 4GB
 #endif
 
@@ -33,7 +33,7 @@ public:
     // so set it to the MaxPageCount for PageSegments.
     static const uint MaxGetWriteWatchPages = PageSegment::MaxPageCount;
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     HeapBlockMap32(__in char * startAddress);
 #else
     HeapBlockMap32();
@@ -206,7 +206,7 @@ private:
     L2MapChunk * map[L1Count];
     bool anyHeapBlockRescannedDuringOOM;
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     // On 64 bit, this structure only maps one particular 32 bit space.
     // Store the startAddress of that 32 bit space so we know which it is.
     // This value should always be 4GB aligned.
@@ -228,7 +228,7 @@ private:
 };
 
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
 
 class HeapBlockMap64
 {

+ 4 - 4
lib/Common/Memory/HeapBlockMap.inl

@@ -262,13 +262,13 @@ HeapBlockMap32::MarkInteriorInternal(MarkContext * markContext, L2MapChunk *& ch
     if (largeBlockType)
     {
 
-#if defined(_M_IX86_OR_ARM32)
+#if defined(TARGET_32)
         // we only check the first MaxLargeObjectMarkOffset byte for marking purpuse. 
         if ( (size_t)originalCandidate - (size_t)realCandidate > HeapConstants::MaxLargeObjectMarkOffset )
             return true;
 #endif    
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
         if (HeapBlockMap64::GetNodeIndex(originalCandidate) != HeapBlockMap64::GetNodeIndex(realCandidate))
         {
             // We crossed a node boundary (very rare) so we should just re-start from the real candidate.
@@ -439,7 +439,7 @@ HeapBlockMap32::MarkInterior(void * candidate, MarkContext * markContext)
     }
 }
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
 
 //
 // 64-bit Mark
@@ -502,4 +502,4 @@ HeapBlockMap64::MarkInterior(void * candidate, MarkContext * markContext)
     // No Node found; must be an invalid reference. Do nothing.
 }
 
-#endif // defined(_M_X64_OR_ARM64)
+#endif // defined(TARGET_64)

+ 3 - 3
lib/Common/Memory/HeapConstants.h

@@ -7,7 +7,7 @@
 class HeapConstants
 {
 public:
-#if defined(_M_IX86_OR_ARM32)
+#if defined(TARGET_32)
     static const uint MaxSmallObjectSize = 512;
 #else
     static const uint MaxSmallObjectSize = 768;
@@ -19,7 +19,7 @@ public:
     static const uint MaxMediumObjectSize = 9216;
 #endif
 
-#if defined(_M_IX86_OR_ARM32)
+#if defined(TARGET_32)
     // Only if a pointer points to first 8k region of a large object, it will set the mark bit in the chunk->MarkBits
     // If the pointer points outside of that region, no mark bit will be set
     static const uint MaxLargeObjectMarkOffset = 8 * 1024; 
@@ -48,7 +48,7 @@ class SmallAllocationBlockAttributes
 public:
     static const size_t MinObjectSize = HeapConstants::ObjectGranularity;
 
-#if defined(_M_IX86_OR_ARM32)
+#if defined(TARGET_32)
     static const size_t PageCount = 2;
 #else
     static const size_t PageCount = 4;

+ 8 - 8
lib/Common/Memory/HeapInfo.cpp

@@ -4,9 +4,9 @@
 //-------------------------------------------------------------------------------------------------------
 #include "CommonMemoryPch.h"
 #include "Memory/PageHeapBlockTypeFilter.h"
-#if defined(_M_IX86_OR_ARM32)
+#if defined(TARGET_32)
 #include "ValidPointersMap/vpm.32b.h"
-#elif defined(_M_X64_OR_ARM64)
+#elif defined(TARGET_64)
 #include "ValidPointersMap/vpm.64b.h"
 #else
 #error "Platform is not handled"
@@ -246,9 +246,9 @@ HRESULT HeapInfo::ValidPointersMap<SmallAllocationBlockAttributes>::GenerateVali
         for (unsigned j = 0; j < (*invalid)[i].wordCount; ++j)
         {
             const char16 *format = (j < (*invalid)[i].wordCount - 1) ?
-#if defined(_M_IX86_OR_ARM32)
+#if defined(TARGET_32)
                 _u("0x%08X, ") : _u("0x%08X")
-#elif defined(_M_X64_OR_ARM64)
+#elif defined(TARGET_64)
                 _u("0x%016I64X, ") : _u("0x%016I64X")
 #else
 #error "Platform is not handled"
@@ -345,9 +345,9 @@ HRESULT HeapInfo::ValidPointersMap<MediumAllocationBlockAttributes>::GenerateVal
         for (unsigned j = 0; j < (*invalid)[i].wordCount; ++j)
         {
             const char16 *format = (j < (*invalid)[i].wordCount - 1) ?
-#if defined(_M_IX86_OR_ARM32)
+#if defined(TARGET_32)
                 _u("0x%08X, ") : _u("0x%08X")
-#elif defined(_M_X64_OR_ARM64)
+#elif defined(TARGET_64)
                 _u("0x%016I64X, ") : _u("0x%016I64X")
 #else
 #error "Platform is not handled"
@@ -409,9 +409,9 @@ HRESULT HeapInfo::ValidPointersMap<TBlockAttributes>::GenerateValidPointersMapHe
             _u("// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.\n")
             _u("//-------------------------------------------------------------------------------------------------------\n")
             _u("// Generated via jshost -GenerateValidPointersMapHeader\n")
-#if defined(_M_IX86_OR_ARM32)
+#if defined(TARGET_32)
             _u("// Target platforms: 32bit - x86 & arm\n")
-#elif defined(_M_X64_OR_ARM64)
+#elif defined(TARGET_64)
             _u("// Target platform: 64bit - amd64 & arm64\n")
 #else
 #error "Platform is not handled"

+ 2 - 2
lib/Common/Memory/LargeHeapBlock.h

@@ -4,7 +4,7 @@
 //-------------------------------------------------------------------------------------------------------
 #pragma once
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
 #define UINT_PAD_64BIT(x) uint x
 #else
 #define UINT_PAD_64BIT(x)
@@ -56,7 +56,7 @@ public:
     void SetAttributes(uint cookie, unsigned char attributes);
     unsigned char GetAttributes(uint cookie);
 };
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
 static_assert(sizeof(LargeObjectHeader) == 0x20, "Incorrect LargeObjectHeader size");
 #else
 static_assert(sizeof(LargeObjectHeader) == 0x10, "Incorrect LargeObjectHeader size");

+ 6 - 6
lib/Common/Memory/PageAllocator.cpp

@@ -36,7 +36,7 @@ SegmentBase<T>::SegmentBase(PageAllocatorBase<T> * allocator, size_t pageCount,
     leadingGuardPageCount(0),
     secondaryAllocPageCount(allocator->secondaryAllocPageCount),
     secondaryAllocator(nullptr)
-#if defined(_M_X64_OR_ARM64) && defined(RECYCLER_WRITE_BARRIER)
+#if defined(TARGET_64) && defined(RECYCLER_WRITE_BARRIER)
     , isWriteBarrierAllowed(false)
     , isWriteBarrierEnabled(enableWriteBarrier)
 #endif
@@ -62,7 +62,7 @@ SegmentBase<T>::~SegmentBase()
         char* originalAddress = this->address - (leadingGuardPageCount * AutoSystemInfo::PageSize);
         GetAllocator()->GetVirtualAllocator()->Free(originalAddress, GetPageCount() * AutoSystemInfo::PageSize, MEM_RELEASE);
         GetAllocator()->ReportFree(this->segmentPageCount * AutoSystemInfo::PageSize); //Note: We reported the guard pages free when we decommitted them during segment initialization
-#if defined(_M_X64_OR_ARM64) && defined(RECYCLER_WRITE_BARRIER_BYTE)
+#if defined(TARGET_64) && defined(RECYCLER_WRITE_BARRIER_BYTE)
 #if ENABLE_DEBUG_CONFIG_OPTIONS
         if (CONFIG_FLAG(StrictWriteBarrierCheck) && this->isWriteBarrierEnabled)
         {
@@ -84,7 +84,7 @@ SegmentBase<T>::Initialize(DWORD allocFlags, bool excludeGuardPages)
     if (!excludeGuardPages)
     {
         addGuardPages = (this->segmentPageCount * AutoSystemInfo::PageSize) > VirtualAllocThreshold;
-#if _M_IX86_OR_ARM32
+#if TARGET_32
         unsigned int randomNumber2 = static_cast<unsigned int>(Math::Rand());
         addGuardPages = addGuardPages && (randomNumber2 % 4 == 1);
 #endif
@@ -160,7 +160,7 @@ SegmentBase<T>::Initialize(DWORD allocFlags, bool excludeGuardPages)
     }
 
 #ifdef RECYCLER_WRITE_BARRIER
-#if defined(_M_X64_OR_ARM64) && defined(RECYCLER_WRITE_BARRIER_BYTE)
+#if defined(TARGET_64) && defined(RECYCLER_WRITE_BARRIER_BYTE)
     bool registerBarrierResult = true;
 #if ENABLE_DEBUG_CONFIG_OPTIONS
     if (CONFIG_FLAG(StrictWriteBarrierCheck))
@@ -2187,7 +2187,7 @@ void
 PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::AddUsedBytes(size_t bytes)
 {
     usedBytes += bytes;
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     size_t lastTotalUsedBytes = ::InterlockedExchangeAdd64((volatile LONG64 *)&totalUsedBytes, bytes);
 #else
     DWORD lastTotalUsedBytes = ::InterlockedExchangeAdd(&totalUsedBytes, bytes);
@@ -2221,7 +2221,7 @@ PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment>::SubUsedBytes(size_t by
 
     usedBytes -= bytes;
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     size_t lastTotalUsedBytes = ::InterlockedExchangeAdd64((volatile LONG64 *)&totalUsedBytes, -(LONG64)bytes);
 #else
     DWORD lastTotalUsedBytes = ::InterlockedExchangeSubtract(&totalUsedBytes, bytes);

+ 5 - 5
lib/Common/Memory/PageAllocator.h

@@ -65,7 +65,7 @@ typedef void* FunctionTableHandle;
 
 #endif
 
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
 #define XDATA_RESERVE_PAGE_COUNT   (2)       // Number of pages per page segment (32 pages) reserved for xdata.
 #else
 #define XDATA_RESERVE_PAGE_COUNT   (0)       // ARM uses the heap, so it's not required.
@@ -83,7 +83,7 @@ struct SecondaryAllocation
     }
 };
 
-#if defined(_M_X64) || defined(_M_ARM64)
+#if defined(TARGET_64)
 struct XDataInfo
 {
     RUNTIME_FUNCTION pdata;
@@ -182,7 +182,7 @@ public:
 
     SecondaryAllocator* GetSecondaryAllocator() { return secondaryAllocator; }
 
-#if defined(_M_X64_OR_ARM64) && defined(RECYCLER_WRITE_BARRIER)
+#if defined(TARGET_64) && defined(RECYCLER_WRITE_BARRIER)
     bool IsWriteBarrierAllowed()
     {
         return isWriteBarrierAllowed;
@@ -194,9 +194,9 @@ public:
 #endif
 
 protected:
-#if _M_IX86_OR_ARM32
+#if TARGET_32
     static const uint VirtualAllocThreshold =  524288; // 512kb As per spec
-#else // _M_X64_OR_ARM64
+#else // TARGET_64
     static const uint VirtualAllocThreshold = 1048576; // 1MB As per spec : when we cross this threshold of bytes, we should add guard pages
 #endif
     static const uint maxGuardPages = 15;

+ 2 - 2
lib/Common/Memory/RecyclerWriteBarrierManager.cpp

@@ -27,7 +27,7 @@ namespace Memory
 }
 #endif
 #ifdef RECYCLER_WRITE_BARRIER_BYTE
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
 X64WriteBarrierCardTableManager RecyclerWriteBarrierManager::x64CardTableManager;
 X64WriteBarrierCardTableManager::CommittedSectionBitVector X64WriteBarrierCardTableManager::committedSections(&HeapAllocator::Instance);
 
@@ -46,7 +46,7 @@ DWORD RecyclerWriteBarrierManager::cardTable[1 * 1024 * 1024];
 #endif
 
 #ifdef RECYCLER_WRITE_BARRIER_BYTE
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
 
 bool
 X64WriteBarrierCardTableManager::OnThreadInit()

+ 6 - 6
lib/Common/Memory/RecyclerWriteBarrierManager.h

@@ -55,7 +55,7 @@ namespace Memory
 
 #endif
 
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
 #ifdef RECYCLER_WRITE_BARRIER_BYTE
 
 #define X64_WB_DIAG 1
@@ -169,7 +169,7 @@ public:
 #if ENABLE_DEBUG_CONFIG_OPTIONS
     static bool IsCardTableCommited(_In_ uintptr_t index)
     {
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
         return x64CardTableManager.IsCardTableCommited(index) != FALSE;
 #else
         return true;
@@ -177,7 +177,7 @@ public:
     }
     static bool IsCardTableCommitedAddress(_In_ void* address)
     {
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
         return x64CardTableManager.IsCardTableCommited(address) != FALSE;
 #else
         return true;
@@ -188,7 +188,7 @@ public:
     // For JIT
     static uintptr_t GetCardTableIndex(void * address);
 #ifdef RECYCLER_WRITE_BARRIER_BYTE
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
     static BYTE * GetAddressOfCardTable() { return x64CardTableManager.GetAddressOfCardTable(); }
 #else
     static BYTE * GetAddressOfCardTable() { return cardTable; }
@@ -198,7 +198,7 @@ public:
 #endif
 
     // For GC
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
     static bool OnThreadInit();
     static bool OnSegmentAlloc(_In_ char* segment, DECLSPEC_GUARD_OVERFLOW size_t pageCount);
     static bool OnSegmentFree(_In_ char* segment, size_t pageCount);
@@ -219,7 +219,7 @@ public:
 private:
 
 #ifdef RECYCLER_WRITE_BARRIER_BYTE
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
     // On AMD64, we use a different scheme
     // As of Windows 8.1, the process user-mode address space is 128TB
     // We still use a write barrier page size of 4KB

+ 4 - 4
lib/Common/Memory/SectionAllocWrapper.cpp

@@ -113,7 +113,7 @@ PVOID MapView(HANDLE process, HANDLE sectionHandle, size_t size, size_t offset,
     return address;
 }
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
 SectionMap32::SectionMap32(__in char * startAddress) :
     startAddress(startAddress),
 #else
@@ -123,7 +123,7 @@ SectionMap32::SectionMap32() :
 {
     memset(map, 0, sizeof(map));
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     Assert(((size_t)startAddress) % TotalSize == 0);
 #endif
 }
@@ -668,7 +668,7 @@ BOOL SectionAllocWrapper::Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType
 /*
 * class PreReservedVirtualAllocWrapper
 */
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
 // TODO: this should be on runtime process
 uint PreReservedSectionAllocWrapper::numPreReservedSegment = 0;
 #endif
@@ -691,7 +691,7 @@ PreReservedSectionAllocWrapper::~PreReservedSectionAllocWrapper()
         CloseSectionHandle(this->section);
         PreReservedHeapTrace(_u("MEM_RELEASE the PreReservedSegment. Start Address: 0x%p, Size: 0x%x * 0x%x bytes"), this->preReservedStartAddress, PreReservedAllocationSegmentCount,
             AutoSystemInfo::Data.GetAllocationGranularityPageSize());
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
         Assert(numPreReservedSegment > 0);
         InterlockedDecrement(&PreReservedSectionAllocWrapper::numPreReservedSegment);
 #endif

+ 5 - 5
lib/Common/Memory/SectionAllocWrapper.h

@@ -147,13 +147,13 @@ private:
 class PreReservedSectionAllocWrapper
 {
 public:
-#if _M_IX86_OR_ARM32
+#if TARGET_32
     static const uint PreReservedAllocationSegmentCount = 256; // (256 * 64K) == 16 MB, if 64k is the AllocationGranularity
-#else // _M_X64_OR_ARM64
+#else // TARGET_64
     static const uint PreReservedAllocationSegmentCount = 4096; //(4096 * 64K) == 256MB, if 64k is the AllocationGranularity
 #endif
 
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
     static const unsigned MaxPreReserveSegment = 6;
 #endif
 
@@ -171,7 +171,7 @@ public:
 
     LPVOID      GetPreReservedEndAddress();
     static LPVOID GetPreReservedEndAddress(void * regionStart);
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
     static int  NumPreReservedSegment() { return numPreReservedSegment; }
 #endif
 
@@ -189,7 +189,7 @@ private:
     BVStatic<PreReservedAllocationSegmentCount>     freeSegments;
     LPVOID                                          preReservedStartAddress;
     CriticalSection                                 cs;
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
     static uint  numPreReservedSegment;
 #endif
 

+ 4 - 4
lib/Common/Memory/VirtualAllocWrapper.cpp

@@ -87,7 +87,7 @@ BOOL VirtualAllocWrapper::Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType
 /*
 * class PreReservedVirtualAllocWrapper
 */
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
 uint PreReservedVirtualAllocWrapper::numPreReservedSegment = 0;
 #endif
 
@@ -110,7 +110,7 @@ PreReservedVirtualAllocWrapper::~PreReservedVirtualAllocWrapper()
             // OOP JIT TODO: check if we need to cleanup the context related to this content process
         }
 
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
         Assert(numPreReservedSegment > 0);
         InterlockedDecrement(&PreReservedVirtualAllocWrapper::numPreReservedSegment);
 #endif
@@ -220,7 +220,7 @@ LPVOID PreReservedVirtualAllocWrapper::EnsurePreReservedRegionInternal()
 
 #if defined(_CONTROL_FLOW_GUARD)
     bool supportPreReservedRegion = true;
-#if !_M_X64_OR_ARM64
+#if !TARGET_64
 #if _M_IX86
     // We want to restrict the number of prereserved segment for 32-bit process so that we don't use up the address space
 
@@ -244,7 +244,7 @@ LPVOID PreReservedVirtualAllocWrapper::EnsurePreReservedRegionInternal()
         PreReservedHeapTrace(_u("Reserving PreReservedSegment For the first time(CFG Enabled). Address: 0x%p\n"), preReservedStartAddress);
         preReservedStartAddress = startAddress;
 
-#if !_M_X64_OR_ARM64
+#if !TARGET_64
         if (startAddress)
         {
             InterlockedIncrement(&PreReservedVirtualAllocWrapper::numPreReservedSegment);

+ 5 - 5
lib/Common/Memory/VirtualAllocWrapper.h

@@ -42,13 +42,13 @@ private:
 class PreReservedVirtualAllocWrapper
 {
 public:
-#if _M_IX86_OR_ARM32
+#if TARGET_32
     static const uint PreReservedAllocationSegmentCount = 256; // (256 * 64K) == 16 MB, if 64k is the AllocationGranularity
-#else // _M_X64_OR_ARM64
+#else // TARGET_64
     static const uint PreReservedAllocationSegmentCount = 4096; //(4096 * 64K) == 256MB, if 64k is the AllocationGranularity
 #endif
 
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
     static const unsigned MaxPreReserveSegment = 6;
 #endif
 public:
@@ -65,7 +65,7 @@ public:
 
     LPVOID      GetPreReservedEndAddress();
     static LPVOID GetPreReservedEndAddress(void * regionStart);
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
     static int  NumPreReservedSegment() { return numPreReservedSegment; }
 #endif
 
@@ -83,7 +83,7 @@ private:
     BVStatic<PreReservedAllocationSegmentCount>     freeSegments;
     LPVOID                                          preReservedStartAddress;
     CriticalSection                                 cs;
-#if !_M_X64_OR_ARM64 && _CONTROL_FLOW_GUARD
+#if !TARGET_64 && _CONTROL_FLOW_GUARD
     static uint  numPreReservedSegment;
 #endif
 };

+ 4 - 4
lib/JITIDL/JITTypes.h

@@ -20,7 +20,7 @@ cpp_quote("#define USE_RPC_HANDLE_MARSHALLING 1")
 #define USE_RPC_HANDLE_MARSHALLING 1
 #endif
 
-#if defined(_M_IX86) || defined(_M_ARM)
+#if defined(TARGET_32)
 #ifdef __midl
 #define CHAKRA_WB_PTR int
 #else
@@ -28,7 +28,7 @@ cpp_quote("#define USE_RPC_HANDLE_MARSHALLING 1")
 #endif
 #define CHAKRA_PTR int
 #define BV_SHIFT 5
-#elif defined(_M_X64) || defined(_M_ARM64)
+#elif defined(TARGET_64)
 #ifdef __midl
 #define CHAKRA_WB_PTR __int64
 #else
@@ -48,13 +48,13 @@ cpp_quote("#define USE_RPC_HANDLE_MARSHALLING 1")
 #define IDL_PAD2(num) IDL_Field(short) struct_pad_##num;
 #define IDL_PAD4(num) IDL_Field(int) struct_pad_##num;
 
-#if defined(_M_X64) || defined(_M_ARM64)
+#if defined(TARGET_64)
 #define X64_PAD4(num) IDL_Field(int) struct_pad_##num;
 #else
 #define X64_PAD4(num)
 #endif
 
-#if defined(_M_IX86) || defined(_M_ARM)
+#if defined(TARGET_32)
 #define X86_PAD4(num) IDL_Field(int) struct_pad_##num;
 #else
 #define X86_PAD4(num)

+ 1 - 1
lib/JITServer/JITServer.cpp

@@ -267,7 +267,7 @@ ServerInitializeThreadContext(
         {
             *prereservedRegionAddr = (intptr_t)contextInfo->GetPreReservedSectionAllocator()->EnsurePreReservedRegion();
         }
-#if _M_IX86 || _M_X64_OR_ARM64
+#if !defined(_M_ARM)
         *jitThunkAddr = (intptr_t)contextInfo->GetJITThunkEmitter()->EnsureInitialized();
 #endif
 #endif

+ 3 - 3
lib/Runtime/Base/Constants.h

@@ -4,7 +4,7 @@
 //-------------------------------------------------------------------------------------------------------
 #pragma once
 
-#ifdef  _M_X64_OR_ARM64
+#ifdef  TARGET_64
 # define WIN64_STACK_FACTOR 3
 #else
 # define WIN64_STACK_FACTOR 1
@@ -113,10 +113,10 @@ namespace Js
         static const unsigned MinStackDefault = 1 * 0x0400 * WIN64_STACK_FACTOR;
         static const unsigned ExtraStack = 2 * 0x0400 * WIN64_STACK_FACTOR;
 
-#if _M_IX86_OR_ARM32
+#if TARGET_32
         static const unsigned MaxThreadJITCodeHeapSize = 28 * 1024 * 1024;
         static const unsigned MaxProcessJITCodeHeapSize = 55 * 1024 * 1024;
-#elif _M_X64_OR_ARM64
+#elif TARGET_64
         // larger address space means we can keep this higher on 64 bit architectures
         static const unsigned MaxThreadJITCodeHeapSize = 800 * 1024 * 1024;
         static const unsigned MaxProcessJITCodeHeapSize = 1024 * 1024 * 1024;

+ 2 - 2
lib/Runtime/Base/FunctionBody.cpp

@@ -3435,7 +3435,7 @@ namespace Js
 #if ENABLE_NATIVE_CODEGEN
         JavascriptMethod originalEntryPoint = this->GetOriginalEntryPoint_Unchecked();
         return
-#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64_OR_ARM64)
+#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
             (
 #if ENABLE_OOP_NATIVE_CODEGEN
             JITManager::GetJITManager()->IsOOPJITEnabled()
@@ -8834,7 +8834,7 @@ namespace Js
         {
             // Unregister xdataInfo before OnCleanup() which may release xdataInfo->address
 #if ENABLE_NATIVE_CODEGEN
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
             if (this->xdataInfo != nullptr)
             {
                 XDataAllocator::Unregister(this->xdataInfo);

+ 1 - 1
lib/Runtime/Base/FunctionBody.h

@@ -3703,7 +3703,7 @@ namespace Js
         Field(bool) strictMode;
         Field(uint16) length;
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
         Field(uint32) unused;
 #endif
         Field(void*) scopes[];

+ 1 - 1
lib/Runtime/Base/ScriptContext.cpp

@@ -4837,7 +4837,7 @@ void ScriptContext::RegisterPrototypeChainEnsuredToHaveOnlyWritableDataPropertie
         }
 
         bool allowPrereserveAlloc = true;
-#if !_M_X64_OR_ARM64
+#if !TARGET_64
         if (this->webWorkerId != Js::Constants::NonWebWorkerContextId)
         {
             allowPrereserveAlloc = false;

+ 2 - 2
lib/Runtime/Base/ThreadContext.cpp

@@ -172,7 +172,7 @@ ThreadContext::ThreadContext(AllocationPolicyManager * allocationPolicyManager,
     thunkPageAllocators(allocationPolicyManager, /* allocXData */ false, /* virtualAllocator */ nullptr, GetCurrentProcess()),
 #endif
     codePageAllocators(allocationPolicyManager, ALLOC_XDATA, GetPreReservedVirtualAllocator(), GetCurrentProcess()),
-#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64_OR_ARM64)
+#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
     jitThunkEmitter(this, &VirtualAllocWrapper::Instance , GetCurrentProcess()),
 #endif
 #endif
@@ -793,7 +793,7 @@ Recycler* ThreadContext::EnsureRecycler()
         try
         {
 #ifdef RECYCLER_WRITE_BARRIER
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
             if (!RecyclerWriteBarrierManager::OnThreadInit())
             {
                 Js::Throw::OutOfMemory();

+ 2 - 2
lib/Runtime/Base/ThreadContext.h

@@ -717,7 +717,7 @@ private:
     CustomHeap::InProcCodePageAllocators thunkPageAllocators;
 #endif
     CustomHeap::InProcCodePageAllocators codePageAllocators;
-#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64_OR_ARM64)
+#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
     InProcJITThunkEmitter jitThunkEmitter;
 #endif
 #endif
@@ -880,7 +880,7 @@ public:
 #endif
     CustomHeap::InProcCodePageAllocators * GetCodePageAllocators() { return &codePageAllocators; }
 
-#if defined(_CONTROL_FLOW_GUARD) && (_M_IX86 || _M_X64_OR_ARM64)
+#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
     InProcJITThunkEmitter * GetJITThunkEmitter() { return &jitThunkEmitter; }
 #endif
 #endif // ENABLE_NATIVE_CODEGEN

+ 1 - 1
lib/Runtime/ByteCode/ByteCodeWriter.cpp

@@ -2515,7 +2515,7 @@ StoreCommon:
 
     ByteCodeLabel ByteCodeWriter::DefineLabel()
     {
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
         if (m_labelOffsets->Count() == INT_MAX)
         {
             // Reach our limit

+ 3 - 3
lib/Runtime/Language/InlineCache.h

@@ -11,7 +11,7 @@
 #define TypeHasAuxSlotTag(_t) \
     (!!(reinterpret_cast<size_t>(_t) & InlineCacheAuxSlotTypeTag))
 
-#if defined(_M_IX86_OR_ARM32)
+#if defined(TARGET_32)
 #define PolymorphicInlineCacheShift 5 // On 32 bit architectures, the least 5 significant bits of a DynamicTypePointer is 0
 #else
 #define PolymorphicInlineCacheShift 6 // On 64 bit architectures, the least 6 significant bits of a DynamicTypePointer is 0
@@ -353,7 +353,7 @@ namespace Js
 #endif
     };
 
-#if defined(_M_IX86_OR_ARM32)
+#if defined(TARGET_32)
     CompileAssert(sizeof(InlineCache) == 0x10);
 #else
     CompileAssert(sizeof(InlineCache) == 0x20);
@@ -683,7 +683,7 @@ namespace Js
         }
     };
 
-#if defined(_M_IX86_OR_ARM32)
+#if defined(TARGET_32)
     CompileAssert(sizeof(IsInstInlineCache) == 0x10);
 #else
     CompileAssert(sizeof(IsInstInlineCache) == 0x20);

+ 3 - 3
lib/Runtime/Library/CompoundString.cpp

@@ -731,7 +731,7 @@ namespace Js
         Assert(packedSubstringInfoRef);
         Assert(packedSubstringInfo2Ref);
 
-    #if defined(_M_X64_OR_ARM64)
+    #if defined(TARGET_64)
         // On 64-bit architectures, two nonnegative 32-bit ints fit completely in a tagged pointer
         *packedSubstringInfoRef =
             reinterpret_cast<void *>(
@@ -790,7 +790,7 @@ namespace Js
         const uintptr_t packedSubstringInfo = reinterpret_cast<uintptr_t>(pointer);
         Assert(packedSubstringInfo & 1);
 
-    #if defined(_M_X64_OR_ARM64)
+    #if defined(TARGET_64)
         // On 64-bit architectures, two nonnegative 32-bit ints fit completely in a tagged pointer
         Assert(!pointer2);
         *startIndexRef = static_cast<CharCount>(packedSubstringInfo >> 32);
@@ -1114,7 +1114,7 @@ namespace Js
                 Assert(pointerIndex != 0);
                 void *pointer2 = blockPointers[--pointerIndex];
                 JavascriptString *s;
-    #if defined(_M_X64_OR_ARM64)
+    #if defined(TARGET_64)
                 Assert(!IsPackedInfo(pointer2));
     #else
                 if(IsPackedInfo(pointer2))

+ 1 - 1
lib/Runtime/Library/ConcatString.h

@@ -228,7 +228,7 @@ namespace Js
     };
 
     // Make sure the padding doesn't add to the size of ConcatStringWrapping
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     CompileAssert(sizeof(ConcatStringWrapping<_u('"'), _u('"')>) == 64);
 #else
     CompileAssert(sizeof(ConcatStringWrapping<_u('"'), _u('"')>) == 32);

+ 2 - 2
lib/Runtime/Library/JavascriptArray.cpp

@@ -27,7 +27,7 @@ namespace Js
         { 5, 0, 0 },    // allocate space for 5 elements for array of length 4,5
         { 8, 0, 0 },    // allocate space for 8 elements for array of length 6,7,8
     };
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
     const Var JavascriptArray::MissingItem = (Var)0x8000000280000002;
     uint JavascriptNativeIntArray::allocationBuckets[][AllocationBucketsInfoSize] =
     {
@@ -6883,7 +6883,7 @@ Case0:
             // -    FloatArray for AMD64
             // We convert the entire array back and forth once here O(n), rather than doing the costly conversion down the call stack which is O(nlogn)
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
             if(compFn && JavascriptNativeFloatArray::Is(arr))
             {
                 arr = JavascriptNativeFloatArray::ConvertToVarArray((JavascriptNativeFloatArray*)arr);

+ 1 - 1
lib/Runtime/Library/JavascriptArray.h

@@ -135,7 +135,7 @@ namespace Js
         static const uint8 MissingElementsCountIndex = 1;
         // 2nd column in allocationBuckets that stores allocation size for given bucket
         static const uint8 AllocationSizeIndex = 2;
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
         static const uint8 AllocationBucketsCount = 3;
 #else
         static const uint8 AllocationBucketsCount = 2;

+ 1 - 1
lib/Runtime/Library/JavascriptArray.inl

@@ -1795,7 +1795,7 @@ SECOND_PASS:
             sizeof(T) + sizeof(SparseArraySegment<typename T::TElement>) + InlinePropertySlots * sizeof(Var);
         size_t totalSize = UInt32Math::MulAdd<sizeof(typename T::TElement), objectSize>(inlineElementSlots);
 
-    #if defined(_M_X64_OR_ARM64)
+    #if defined(TARGET_64)
         // On x64, the total size won't be anywhere near AllocSizeMath::MaxMemory on x64, so no need to check
         totalSize = HeapInfo::GetAlignedSizeNoCheck(totalSize);
     #else

+ 1 - 1
lib/Runtime/Library/JavascriptLibrary.cpp

@@ -3642,7 +3642,7 @@ namespace Js
         defaultPropertyDescriptor.SetEnumerable(false);
         defaultPropertyDescriptor.SetConfigurable(false);
 
-#if !defined(_M_X64_OR_ARM64)
+#if !defined(TARGET_64)
 
         VirtualTableRecorder<Js::JavascriptNumber>::RecordVirtualTableAddress(vtableAddresses, VTableValue::VtableJavascriptNumber);
 #else

+ 1 - 1
lib/Runtime/Library/JavascriptString.cpp

@@ -256,7 +256,7 @@ namespace Js
     {
         size_t cchActual = wcslen(content);
 
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
         if (!IsValidCharCount(cchActual))
         {
             // Limit javascript string to 31-bit length

+ 1 - 1
lib/Runtime/Library/MathLibrary.cpp

@@ -85,7 +85,7 @@ namespace Js
 
             if (TaggedInt::Is(arg))
             {
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
                 __int64 result = ::_abs64(TaggedInt::ToInt32(arg));
 #else
                 __int32 result = ::abs(TaggedInt::ToInt32(arg));

+ 2 - 2
lib/Runtime/Types/ArrayObject.h

@@ -6,7 +6,7 @@
 
 namespace Js
 {
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
 // This base class has a 4-byte length field. Change struct pack to 4 on 64bit to avoid 4 padding bytes here.
 #pragma pack(push, 4)
 #endif
@@ -51,7 +51,7 @@ namespace Js
         virtual JavascriptEnumerator * GetIndexEnumerator(EnumeratorFlags flags, ScriptContext* requestContext) = 0;
     };
 
-#ifdef _M_X64_OR_ARM64
+#ifdef TARGET_64
 #pragma pack(pop)
 #endif
 

+ 2 - 2
lib/Runtime/Types/TypePath.h

@@ -63,13 +63,13 @@ public:
     public:
         // This is the space between the end of the TypePath and the allocation granularity that can be used for assignments too.
 #ifdef SUPPORT_FIXED_FIELDS_ON_PATH_TYPES
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
 #define TYPE_PATH_ALLOC_GRANULARITY_GAP 0
 #else
 #define TYPE_PATH_ALLOC_GRANULARITY_GAP 2
 #endif
 #else
-#if defined(_M_X64_OR_ARM64)
+#if defined(TARGET_64)
 #define TYPE_PATH_ALLOC_GRANULARITY_GAP 1
 #else
 #define TYPE_PATH_ALLOC_GRANULARITY_GAP 3