Browse Source

[MERGE #193] Type consistency: Use uintptr_t / intptr_t instead of uintptr / intptr

Merge pull request #193 from obastemur:fix_ptr_t
Fixes https://github.com/Microsoft/ChakraCore/issues/168

Replaces all instances of `intptr`/`uintptr` with `intptr_t`/`uintptr_t`
Jianchun Xu 10 years ago
parent
commit
62b6debbd4

+ 2 - 2
lib/Backend/Lower.cpp

@@ -18248,7 +18248,7 @@ Lowerer::GenerateFunctionTypeFromFixedFunctionObject(IR::Instr *insertInstrPt, I
         IR::AddrOpnd* functionObjAddrOpnd = functionObjOpnd->AsAddrOpnd();
         // functionTypeRegOpnd = MOV [fixed function address + type offset]
         functionObjAddrOpnd->m_address;
-        functionTypeOpnd = IR::MemRefOpnd::New((void *)((intptr)functionObjAddrOpnd->m_address + Js::RecyclableObject::GetOffsetOfType()), TyMachPtr, this->m_func,
+        functionTypeOpnd = IR::MemRefOpnd::New((void *)((intptr_t)functionObjAddrOpnd->m_address + Js::RecyclableObject::GetOffsetOfType()), TyMachPtr, this->m_func,
             IR::AddrOpndKindDynamicObjectTypeRef);
     }
     else
@@ -22026,7 +22026,7 @@ Lowerer::LowerLdEnv(IR::Instr * instr)
     {
         Assert(functionObjOpnd->IsAddrOpnd());
         IR::AddrOpnd* functionObjAddrOpnd = functionObjOpnd->AsAddrOpnd();
-        IR::MemRefOpnd* functionEnvMemRefOpnd = IR::MemRefOpnd::New((void *)((intptr)functionObjAddrOpnd->m_address + Js::ScriptFunction::GetOffsetOfEnvironment()),
+        IR::MemRefOpnd* functionEnvMemRefOpnd = IR::MemRefOpnd::New((void *)((intptr_t)functionObjAddrOpnd->m_address + Js::ScriptFunction::GetOffsetOfEnvironment()),
             TyMachPtr, this->m_func, IR::AddrOpndKindDynamicFunctionEnvironmentRef);
         instr->SetSrc1(functionEnvMemRefOpnd);
     }

+ 2 - 2
lib/Backend/Opnd.cpp

@@ -3255,10 +3255,10 @@ Opnd::GetAddrDescription(__out_ecount(count) wchar_t *const description, const s
         case IR::AddrOpndKindDynamicObjectTypeRef:
             DumpAddress(address, printToConsole, skipMaskedAddress);
             {
-                Js::RecyclableObject * dynamicObject = (Js::RecyclableObject *)((intptr)address - Js::RecyclableObject::GetOffsetOfType());
+                Js::RecyclableObject * dynamicObject = (Js::RecyclableObject *)((intptr_t)address - Js::RecyclableObject::GetOffsetOfType());
                 if (Js::JavascriptFunction::Is(dynamicObject))
                 {
-                    DumpFunctionInfo(&buffer, &n, Js::JavascriptFunction::FromVar((void *)((intptr)address - Js::RecyclableObject::GetOffsetOfType()))->GetFunctionInfo(),
+                    DumpFunctionInfo(&buffer, &n, Js::JavascriptFunction::FromVar((void *)((intptr_t)address - Js::RecyclableObject::GetOffsetOfType()))->GetFunctionInfo(),
                         printToConsole, L"FunctionObjectTypeRef");
                 }
                 else

+ 1 - 1
lib/Backend/amd64/LinearScanMD.cpp

@@ -103,7 +103,7 @@ void
 LinearScanMD::LegalizeConstantUse(IR::Instr * instr, IR::Opnd * opnd)
 {
     Assert(opnd->IsAddrOpnd() || opnd->IsIntConstOpnd());
-    intptr value = opnd->IsAddrOpnd() ? (intptr)opnd->AsAddrOpnd()->m_address : opnd->AsIntConstOpnd()->GetValue();
+    intptr_t value = opnd->IsAddrOpnd() ? (intptr_t)opnd->AsAddrOpnd()->m_address : opnd->AsIntConstOpnd()->GetValue();
     if (value == 0
         && instr->m_opcode == Js::OpCode::MOV
         && !instr->GetDst()->IsRegOpnd()

+ 2 - 2
lib/Backend/i386/LowererMDArch.cpp

@@ -922,7 +922,7 @@ LowererMDArch::LowerAsmJsLdElemHelper(IR::Instr * instr, bool isSimdLoad /*= fal
         Js::Var* module = (Js::Var*)m_func->m_workItem->GetEntryPoint()->GetModuleAddress();
         Js::ArrayBuffer* arrayBuffer = *(Js::ArrayBuffer**)(module + Js::AsmJsModuleMemory::MemoryTableBeginOffset);
         Assert(arrayBuffer);
-        src1->AsIndirOpnd()->SetOffset((uintptr)arrayBuffer->GetBuffer(), true);
+        src1->AsIndirOpnd()->SetOffset((uintptr_t)arrayBuffer->GetBuffer(), true);
     }
 
     if (isSimdLoad)
@@ -998,7 +998,7 @@ LowererMDArch::LowerAsmJsStElemHelper(IR::Instr * instr, bool isSimdStore /*= fa
         Js::Var* module = (Js::Var*)m_func->m_workItem->GetEntryPoint()->GetModuleAddress();
         Js::ArrayBuffer* arrayBuffer = *(Js::ArrayBuffer**)(module + Js::AsmJsModuleMemory::MemoryTableBeginOffset);
         Assert(arrayBuffer);
-        dst->AsIndirOpnd()->SetOffset((uintptr)arrayBuffer->GetBuffer(), true);
+        dst->AsIndirOpnd()->SetOffset((uintptr_t)arrayBuffer->GetBuffer(), true);
     }
     return doneLabel;
 }

+ 3 - 3
lib/Runtime/Base/FunctionBody.h

@@ -447,7 +447,7 @@ namespace Js
         void * nativeAddress;
         ptrdiff_t codeSize;
         bool isAsmJsFunction; // true if entrypoint is for asmjs function
-        uintptr  mModuleAddress; //asm Module address
+        uintptr_t  mModuleAddress; //asm Module address
 
 #ifdef FIELD_ACCESS_STATS
         FieldAccessStatsPtr fieldAccessStats;
@@ -658,14 +658,14 @@ namespace Js
 #endif
 
 #ifndef TEMP_DISABLE_ASMJS
-        void SetModuleAddress(uintptr moduleAddress)
+        void SetModuleAddress(uintptr_t moduleAddress)
         {
             Assert(this->GetIsAsmJSFunction());
             Assert(moduleAddress);
             mModuleAddress = moduleAddress;
         }
 
-        uintptr GetModuleAddress()const
+        uintptr_t GetModuleAddress()const
         {
             Assert(this->GetIsAsmJSFunction());
             Assert(mModuleAddress); // module address should not be null

+ 4 - 4
lib/Runtime/Language/InterpreterStackFrame.cpp

@@ -2832,12 +2832,12 @@ namespace Js
         uint homingAreaSize = 0;
 #endif
 
-        uintptr argAddress = (uintptr)m_inParams;
+        uintptr_t argAddress = (uintptr_t)m_inParams;
         for (ArgSlot i = 0; i < argCount; i++)
         {
 #if _M_X64
             // 3rd Argument should be at the end of the homing area.
-            Assert(i != 3 || argAddress == (uintptr)m_inParams + homingAreaSize);
+            Assert(i != 3 || argAddress == (uintptr_t)m_inParams + homingAreaSize);
             if (i < 3)
             {
                 // for x64 we spill the first 3 floating point args below the rest of the arguments on the stack
@@ -2857,7 +2857,7 @@ namespace Js
                 // IAT xmm1 spill <- floatSpillAddress for arg1
 
                 // floats are spilled as xmmwords
-                uintptr floatSpillAddress = (uintptr)m_inParams - MachPtr * (15 - 2*i);
+                uintptr_t floatSpillAddress = (uintptr_t)m_inParams - MachPtr * (15 - 2*i);
 
                 if (info->GetArgType(i).isInt())
                 {
@@ -2889,7 +2889,7 @@ namespace Js
                     // If we have simd arguments, the homing area in m_inParams can be larger than 3 64-bit slots. This is because SIMD values are unboxed there too.
                     // After unboxing, the homing area is overwritten by rdx, r8 and r9, and we read/skip 64-bit slots from the homing area (argAddress += MachPtr).
                     // After the last argument of the 3 is read, we need to advance argAddress to skip over the possible extra space and to the start of the rest of the arguments.
-                    argAddress = (uintptr)m_inParams + homingAreaSize;
+                    argAddress = (uintptr_t)m_inParams + homingAreaSize;
                 }
                 else
                 {

+ 2 - 2
lib/Runtime/Language/JavascriptNativeOperators.cpp

@@ -8,12 +8,12 @@
 namespace Js
 {
 #if ENABLE_NATIVE_CODEGEN
-    void * JavascriptNativeOperators::Op_SwitchStringLookUp(JavascriptString* str, Js::BranchDictionaryWrapper<JavascriptString*>* branchTargets, uintptr funcStart, uintptr funcEnd)
+    void * JavascriptNativeOperators::Op_SwitchStringLookUp(JavascriptString* str, Js::BranchDictionaryWrapper<JavascriptString*>* branchTargets, uintptr_t funcStart, uintptr_t funcEnd)
     {
         void* defaultTarget = branchTargets->defaultTarget;
         Js::BranchDictionaryWrapper<JavascriptString*>::BranchDictionary& stringDictionary = branchTargets->dictionary;
         void* target = stringDictionary.Lookup(str, defaultTarget);
-        uintptr utarget = (uintptr)target;
+        uintptr_t utarget = (uintptr_t)target;
 
         if ((utarget - funcStart) > (funcEnd - funcStart))
         {

+ 1 - 1
lib/Runtime/Language/JavascriptNativeOperators.h

@@ -29,7 +29,7 @@ namespace Js
     class JavascriptNativeOperators
     {
     public:
-        static void * Op_SwitchStringLookUp(JavascriptString* str, Js::BranchDictionaryWrapper<Js::JavascriptString*>* stringDictionary, uintptr funcStart, uintptr funcEnd);
+        static void * Op_SwitchStringLookUp(JavascriptString* str, Js::BranchDictionaryWrapper<Js::JavascriptString*>* stringDictionary, uintptr_t funcStart, uintptr_t funcEnd);
     };
 #endif
 };

+ 6 - 6
lib/Runtime/Language/TaggedInt.inl

@@ -29,10 +29,10 @@ namespace Js
 #if INT32VAR
     __inline bool TaggedInt::Is(Var aValue)
     {
-        bool result = (((uintptr) aValue) >> VarTag_Shift) == AtomTag;
+        bool result = (((uintptr_t) aValue) >> VarTag_Shift) == AtomTag;
         if(result)
         {
-            Assert((uintptr)aValue >> 32 == (AtomTag << 16));
+            Assert((uintptr_t)aValue >> 32 == (AtomTag << 16));
         }
         return result;
     }
@@ -86,18 +86,18 @@ namespace Js
     {
         //
         // To convert to an var we first cast to uint32 to lose the signedness and then
-        // extend it to a 64-bit uintptr before OR'ing the 64-bit atom tag.
+        // extend it to a 64-bit uintptr_t before OR'ing the 64-bit atom tag.
         //
 
         AssertMsg(!IsOverflow(nValue), "Ensure no information loss from conversion");
 
-        return reinterpret_cast<Var>(((uintptr)(uint32)nValue) | AtomTag_IntPtr);
+        return reinterpret_cast<Var>(((uintptr_t)(uint32)nValue) | AtomTag_IntPtr);
     }
 
 #else
     __inline bool TaggedInt::Is(const Var aValue)
     {
-        return (((uintptr) aValue) & AtomTag) == AtomTag_IntPtr;
+        return (((uintptr_t) aValue) & AtomTag) == AtomTag_IntPtr;
     }
 
     __inline bool TaggedInt::IsPair(Var aLeft, Var aRight)
@@ -110,7 +110,7 @@ namespace Js
         //   short-circuit evaluation.
         //
 
-        return (((uintptr) aLeft) & ((uintptr) aRight) & AtomTag) == AtomTag_IntPtr;
+        return (((uintptr_t) aLeft) & ((uintptr_t) aRight) & AtomTag) == AtomTag_IntPtr;
     }
 
     __inline int32 TaggedInt::ToInt32(Var aValue)

+ 1 - 1
lib/Runtime/Library/JavascriptFunction.cpp

@@ -2004,7 +2004,7 @@ LABEL1:
         {
             // some extra checks for asm.js because we have slightly more information that we can validate
             Js::EntryPointInfo* entryPointInfo = (Js::EntryPointInfo*)funcBody->GetDefaultEntryPointInfo();
-            uintptr moduleMemory = entryPointInfo->GetModuleAddress();
+            uintptr_t moduleMemory = entryPointInfo->GetModuleAddress();
             if (!moduleMemory)
             {
                 return EXCEPTION_CONTINUE_SEARCH;

+ 1 - 1
lib/Runtime/Library/JavascriptLibrary.h

@@ -980,7 +980,7 @@ namespace Js
         CharStringCache& GetCharStringCache() { return charStringCache;  }
         static JavascriptLibrary * FromCharStringCache(CharStringCache * cache)
         {
-            return (JavascriptLibrary *)((uintptr)cache - offsetof(JavascriptLibrary, charStringCache));
+            return (JavascriptLibrary *)((uintptr_t)cache - offsetof(JavascriptLibrary, charStringCache));
         }
 
         bool GetArrayObjectHasUserDefinedSpecies() const { return arrayObjectHasUserDefinedSpecies; }

+ 2 - 2
lib/Runtime/Library/JavascriptWeakMap.h

@@ -49,8 +49,8 @@ namespace Js
         WeakMapKeyMap* GetWeakMapKeyMapFromKey(DynamicObject* key) const;
         WeakMapKeyMap* AddWeakMapKeyMapToKey(DynamicObject* key);
 
-        WeakMapId GetWeakMapId() const { return (void*)(((uintptr)this) | 1); }
-        static JavascriptWeakMap* GetWeakMapFromId(WeakMapId id) { return reinterpret_cast<JavascriptWeakMap*>((uintptr)id & (~1)); }
+        WeakMapId GetWeakMapId() const { return (void*)(((uintptr_t)this) | 1); }
+        static JavascriptWeakMap* GetWeakMapFromId(WeakMapId id) { return reinterpret_cast<JavascriptWeakMap*>((uintptr_t)id & (~1)); }
 
         bool KeyMapGet(WeakMapKeyMap* map, Var* value) const;
 

+ 3 - 3
lib/Runtime/Library/PropertyString.h

@@ -16,7 +16,7 @@ namespace Js
                 uint16 preventdataSlotIndexFalseRef;
                 uint16 dataSlotIndex;
             };
-            intptr ptrSlot1;
+            intptr_t ptrSlot1;
         };
         union
         {
@@ -26,9 +26,9 @@ namespace Js
                 bool isInlineSlot;
                 bool isStoreFieldEnabled;
             };
-            intptr ptrSlot2;
+            intptr_t ptrSlot2;
         };
-        intptr blank;
+        intptr_t blank;
     };
 
     CompileAssert(sizeof(PropertyCache) == sizeof(InlineCacheAllocator::CacheLayout));

+ 13 - 13
lib/Runtime/RuntimeCommon.h

@@ -128,26 +128,26 @@ namespace Js
     typedef Var(*ExternalMethod)(RecyclableObject*, CallInfo, Var*);
 
 
-    const uintptr AtomTag_Object    = 0x0;
+    const uintptr_t AtomTag_Object    = 0x0;
 
 #if INT32VAR
     // The 49th bit is set in this representation
-    const int32 VarTag_Shift        = 48;
-    const uintptr AtomTag_IntPtr    = (((uintptr)0x1i64) << VarTag_Shift);
-    const int32 AtomTag_Int32       = 0x0;     // lower 32-bits of a tagged integer
-    const uintptr AtomTag           = 0x1;
-    const int32 AtomTag_Multiply    = 1;
-    const int32 AtomTag_Pair        = 0x00010001;  // Pair of tags
+    const int32 VarTag_Shift          = 48;
+    const uintptr_t AtomTag_IntPtr    = (((uintptr_t)0x1i64) << VarTag_Shift);
+    const int32 AtomTag_Int32         = 0x0;     // lower 32-bits of a tagged integer
+    const uintptr_t AtomTag           = 0x1;
+    const int32 AtomTag_Multiply      = 1;
+    const int32 AtomTag_Pair          = 0x00010001;  // Pair of tags
 #else
-    const uintptr AtomTag_IntPtr     = 0x1;
-    const int32 AtomTag_Int32        = 0x1;    // lower 32-bits of a tagged integer
-    const uintptr AtomTag            = 0x1;
-    const int32 VarTag_Shift         = 1;
-    const int32 AtomTag_Multiply     = 1 << VarTag_Shift;
+    const uintptr_t AtomTag_IntPtr    = 0x1;
+    const int32 AtomTag_Int32         = 0x1;    // lower 32-bits of a tagged integer
+    const uintptr_t AtomTag           = 0x1;
+    const int32 VarTag_Shift          = 1;
+    const int32 AtomTag_Multiply      = 1 << VarTag_Shift;
 #endif
 
 #if FLOATVAR
-    const uint64 FloatTag_Value      = 0xFFFCull << 48;
+    const uint64 FloatTag_Value       = 0xFFFCull << 48;
 #endif
     template <bool IsPrototypeTemplate> class NullTypeHandler;
 

+ 2 - 2
lib/Runtime/Types/RecyclableObject.inl

@@ -12,14 +12,14 @@ namespace Js
     {
         AssertMsg(aValue != nullptr, "RecyclableObject::Is aValue is null");
 
-        return (((uintptr)aValue) >> VarTag_Shift) == 0;
+        return (((uintptr_t)aValue) >> VarTag_Shift) == 0;
     }
 #else
     inline bool RecyclableObject::Is(Var aValue)
     {
         AssertMsg(aValue != nullptr, "RecyclableObject::Is aValue is null");
 
-        return (((uintptr)aValue) & AtomTag) == AtomTag_Object;
+        return (((uintptr_t)aValue) & AtomTag) == AtomTag_Object;
     }
 #endif
 

+ 3 - 3
lib/common/BackEndAPI.h

@@ -32,9 +32,9 @@ class StackSym;
 class Func;
 struct InlinedFrameLayout;
 
-typedef intptr IntConstType;
-typedef uintptr  UIntConstType;
-typedef IntMath<intptr>::Type IntConstMath;
+typedef intptr_t IntConstType;
+typedef uintptr_t  UIntConstType;
+typedef IntMath<intptr_t>::Type IntConstMath;
 typedef double  FloatConstType;
 
 #include "EmitBuffer.h"

+ 8 - 8
lib/common/Memory/ArenaAllocator.cpp

@@ -955,7 +955,7 @@ void * InlineCacheFreeListPolicy::Allocate(void * policy, size_t size)
 
     if (NULL != freeObject)
     {
-        freeObjectLists[index] = reinterpret_cast<FreeObject *>(reinterpret_cast<intptr>(freeObject->next) & ~InlineCacheFreeListTag);
+        freeObjectLists[index] = reinterpret_cast<FreeObject *>(reinterpret_cast<intptr_t>(freeObject->next) & ~InlineCacheFreeListTag);
 
 #ifdef ARENA_MEMORY_VERIFY
         // Make sure the next pointer bytes are also DbgFreeMemFill-ed, before we give them out.
@@ -974,7 +974,7 @@ void * InlineCacheFreeListPolicy::Free(void * policy, void * object, size_t size
     FreeObject * freeObject = reinterpret_cast<FreeObject *>(object);
     size_t index = (size >> InlineCacheAllocatorInfo::ObjectAlignmentBitShift) - 1;
 
-    freeObject->next = reinterpret_cast<FreeObject *>(reinterpret_cast<intptr>(freeObjectLists[index]) | InlineCacheFreeListTag);
+    freeObject->next = reinterpret_cast<FreeObject *>(reinterpret_cast<intptr_t>(freeObjectLists[index]) | InlineCacheFreeListTag);
     freeObjectLists[index] = freeObject;
     return policy;
 }
@@ -1193,16 +1193,16 @@ bool InlineCacheAllocator::IsDeadWeakRef(Recycler* recycler, void* ptr)
 
 bool InlineCacheAllocator::CacheHasDeadWeakRefs(Recycler* recycler, CacheLayout* cache)
 {
-    for (intptr* curWeakRefPtr = cache->weakRefs; curWeakRefPtr < &cache->strongRef; curWeakRefPtr++)
+    for (intptr_t* curWeakRefPtr = cache->weakRefs; curWeakRefPtr < &cache->strongRef; curWeakRefPtr++)
     {
-        intptr curWeakRef = *curWeakRefPtr;
+        intptr_t curWeakRef = *curWeakRefPtr;
 
         if (curWeakRef == 0)
         {
             continue;
         }
 
-        curWeakRef &= ~(intptr)InlineCacheAuxSlotTypeTag;
+        curWeakRef &= ~(intptr_t)InlineCacheAuxSlotTypeTag;
 
         if ((curWeakRef & (HeapConstants::ObjectGranularity - 1)) != 0)
         {
@@ -1273,16 +1273,16 @@ bool InlineCacheAllocator::HasNoDeadWeakRefs(Recycler* recycler)
 
 void InlineCacheAllocator::ClearCacheIfHasDeadWeakRefs(Recycler* recycler, CacheLayout* cache)
 {
-    for (intptr* curWeakRefPtr = cache->weakRefs; curWeakRefPtr < &cache->strongRef; curWeakRefPtr++)
+    for (intptr_t* curWeakRefPtr = cache->weakRefs; curWeakRefPtr < &cache->strongRef; curWeakRefPtr++)
     {
-        intptr curWeakRef = *curWeakRefPtr;
+        intptr_t curWeakRef = *curWeakRefPtr;
 
         if (curWeakRef == 0)
         {
             continue;
         }
 
-        curWeakRef &= ~(intptr)InlineCacheAuxSlotTypeTag;
+        curWeakRef &= ~(intptr_t)InlineCacheAuxSlotTypeTag;
 
         if ((curWeakRef & (HeapConstants::ObjectGranularity - 1)) != 0)
         {

+ 6 - 6
lib/common/Memory/ArenaAllocator.h

@@ -557,13 +557,13 @@ class InlineCacheAllocatorInfo
 public:
     struct CacheLayout
     {
-        intptr weakRefs[3];
-        intptr strongRef;
+        intptr_t weakRefs[3];
+        intptr_t strongRef;
     };
 
     struct FreeObject
     {
-        intptr blankSlots[3];
+        intptr_t blankSlots[3];
         FreeObject * next;
     };
 
@@ -692,8 +692,8 @@ class InlineCacheAllocator : public ArenaAllocatorBase<InlineCacheAllocatorTrait
 public:
     struct CacheLayout
     {
-        intptr weakRefs[3];
-        intptr strongRef;
+        intptr_t weakRefs[3];
+        intptr_t strongRef;
     };
 
 #ifdef POLY_INLINE_CACHE_SIZE_STATS
@@ -740,7 +740,7 @@ class IsInstInlineCacheAllocatorInfo
 public:
     struct CacheLayout
     {
-        char bytes[4 * sizeof(intptr)];
+        char bytes[4 * sizeof(intptr_t)];
     };
 
 #if _M_X64 || _M_ARM64

+ 6 - 6
lib/common/Memory/LargeHeapBlock.cpp

@@ -30,7 +30,7 @@ LargeObjectHeader::CalculateCheckSum(LargeObjectHeader* decodedNext, unsigned ch
 LargeObjectHeader*
 LargeObjectHeader::EncodeNext(uint cookie, LargeObjectHeader* next)
 {
-    return (LargeObjectHeader *)((uintptr)next ^ cookie);
+    return (LargeObjectHeader *)((uintptr_t)next ^ cookie);
 }
 
 ushort
@@ -1289,10 +1289,10 @@ LargeHeapBlock::TrimObject(Recycler* recycler, LargeObjectHeader* header, size_t
         char* objectAddress = (char*) header;
         char* objectEndAddress = objectAddress + sizeof(LargeObjectHeader) + header->objectSize;
 
-        uintptr alignmentMask = ~((uintptr) (AutoSystemInfo::PageSize - 1));
+        uintptr_t alignmentMask = ~((uintptr_t) (AutoSystemInfo::PageSize - 1));
 
-        uintptr objectFreeAddress = (uintptr) objectAddress;
-        uintptr objectFreeEndAddress = ((uintptr) objectEndAddress) & alignmentMask;
+        uintptr_t objectFreeAddress = (uintptr_t) objectAddress;
+        uintptr_t objectFreeEndAddress = ((uintptr_t) objectEndAddress) & alignmentMask;
 
         size_t bytesToFree = (objectFreeEndAddress - objectFreeAddress);
 
@@ -1304,9 +1304,9 @@ LargeHeapBlock::TrimObject(Recycler* recycler, LargeObjectHeader* header, size_t
         // The exception is if the original object's size + header size is a multiple of the page size
         Assert(objectAddress == this->address);
         Assert(header->objectIndex == 0);
-        Assert(objectFreeEndAddress <= (uintptr) objectEndAddress);
+        Assert(objectFreeEndAddress <= (uintptr_t) objectEndAddress);
         Assert(objectFreeAddress <= objectFreeEndAddress);
-        Assert(bytesToFree < sizeOfObject + sizeof(LargeObjectHeader) || (uintptr) objectEndAddress == objectFreeEndAddress);
+        Assert(bytesToFree < sizeOfObject + sizeof(LargeObjectHeader) || (uintptr_t) objectEndAddress == objectFreeEndAddress);
 
         // If we actually have something to free, release those pages
         // Move the heap block to start from the new start address

+ 2 - 2
lib/common/common/MathUtil.h

@@ -18,7 +18,7 @@ public:
     template <typename T>
     static T PointerCastToIntegralTruncate(void * pointer)
     {
-        return (T)(uintptr)pointer;
+        return (T)(uintptr_t)pointer;
     }
 
     // Explicit cast to integral. Assert that it doesn't truncate.  Avoids warning C4302 'type cast': truncation
@@ -26,7 +26,7 @@ public:
     static T PointerCastToIntegral(void * pointer)
     {
         T value = PointerCastToIntegralTruncate<T>(pointer);
-        Assert((uintptr)value == (uintptr)pointer);
+        Assert((uintptr_t)value == (uintptr_t)pointer);
         return value;
     }
 

+ 0 - 8
lib/common/core/CommonTypedefs.h

@@ -19,14 +19,6 @@ typedef unsigned __int16 uint16;
 typedef unsigned __int32 uint32;
 typedef unsigned __int64 uint64;
 
-#if defined (_WIN64)
-typedef __int64 intptr;
-typedef unsigned __int64 uintptr;
-#else
-typedef __int32 intptr;
-typedef unsigned __int32 uintptr;
-#endif
-
 // charcount_t represents a count of characters in a JavascriptString
 // It is unsigned and the maximum value is (INT_MAX-1)
 typedef uint32 charcount_t;