Browse Source

Linux build passes (built with bash on Windows)
can't run yet because of an assertion on _cardTable

Lei Shi 9 years ago
parent
commit
800c5c6116

+ 1 - 0
lib/Common/Memory/CMakeLists.txt

@@ -22,6 +22,7 @@ add_library (Chakra.Common.Memory OBJECT
     MarkContext.cpp
     MemoryLogger.cpp
     MemoryTracking.cpp
+    MemUtils.cpp
     PageAllocator.cpp
     Recycler.cpp
     RecyclerHeuristic.cpp

+ 1 - 1
lib/Common/Memory/MemUtils.cpp

@@ -47,7 +47,7 @@ Memory::ChakraMemCopy(__bcount(sizeInBytes) void *dst, size_t sizeInBytes, __in_
     {
         if (!WriteProcessMemory(processHandle, dst, src, count, NULL))
         {
-            wprintf(L"FATAL ERROR: WriteProcessMemory failed, GLE: %d\n", GetLastError());
+            Output::Print(_u("FATAL ERROR: WriteProcessMemory failed, GLE: %d\n"), GetLastError());
             Js::Throw::FatalInternalError();
         }
     }

+ 4 - 4
lib/Common/Memory/PageAllocator.cpp

@@ -190,7 +190,7 @@ PageSegmentBase<T>::PageSegmentBase(PageAllocatorBase<T> * allocator, bool commi
 
 template<typename T>
 PageSegmentBase<T>::PageSegmentBase(PageAllocatorBase<T> * allocator, void* address, uint pageCount, uint committedCount) :
-    SegmentBase(allocator, allocator->maxAllocPageCount), decommitPageCount(0), freePageCount(0)
+    SegmentBase<T>(allocator, allocator->maxAllocPageCount), decommitPageCount(0), freePageCount(0)
 {
     this->address = (char*)address;
     this->segmentPageCount = pageCount;
@@ -345,7 +345,7 @@ PageSegmentBase<TVirtualAlloc>::AllocDecommitPages(uint pageCount, T freePages,
                 }
             }
 
-            void * ret = GetAllocator()->GetVirtualAllocator()->Alloc(pages, pageCount * AutoSystemInfo::PageSize, MEM_COMMIT, PAGE_READWRITE, this->IsInCustomHeapAllocator(), this->allocator->processHandle);
+            void * ret = this->GetAllocator()->GetVirtualAllocator()->Alloc(pages, pageCount * AutoSystemInfo::PageSize, MEM_COMMIT, PAGE_READWRITE, this->IsInCustomHeapAllocator(), this->allocator->processHandle);
             if (ret != nullptr)
             {
                 Assert(ret == pages);
@@ -463,7 +463,7 @@ PageSegmentBase<T>::DecommitPages(__in void * address, uint pageCount)
     if (!onlyUpdateState)
     {
 #pragma warning(suppress: 6250)
-        GetAllocator()->GetVirtualAllocator()->Free(address, pageCount * AutoSystemInfo::PageSize, MEM_DECOMMIT, this->allocator->processHandle);
+        this->GetAllocator()->GetVirtualAllocator()->Free(address, pageCount * AutoSystemInfo::PageSize, MEM_DECOMMIT, this->allocator->processHandle);
     }
 
     Assert(decommitPageCount == (uint)this->GetCountOfDecommitPages());
@@ -484,7 +484,7 @@ PageSegmentBase<T>::DecommitFreePages(size_t pageToDecommit)
             this->ClearBitInFreePagesBitVector(i);
             this->SetBitInDecommitPagesBitVector(i);
 #pragma warning(suppress: 6250)
-            GetAllocator()->GetVirtualAllocator()->Free(currentAddress, AutoSystemInfo::PageSize, MEM_DECOMMIT, this->allocator->processHandle);
+            this->GetAllocator()->GetVirtualAllocator()->Free(currentAddress, AutoSystemInfo::PageSize, MEM_DECOMMIT, this->allocator->processHandle);
             decommitCount++;
         }
         currentAddress += AutoSystemInfo::PageSize;

+ 55 - 49
lib/JITIDL/JITTypes.h

@@ -19,15 +19,21 @@ import "wtypes.idl";
 #endif
 
 #if defined(_M_X64) && defined(__midl)
-#define IDL_PAD1(num) byte struct_pad_##num
-#define IDL_PAD2(num) short struct_pad_##num
-#define IDL_PAD4(num) int struct_pad_##num
+#define IDL_PAD1(num) byte struct_pad_##num;
+#define IDL_PAD2(num) short struct_pad_##num;
+#define IDL_PAD4(num) int struct_pad_##num;
 #else
 #define IDL_PAD1(num)
 #define IDL_PAD2(num)
 #define IDL_PAD4(num)
 #endif
 
+#ifndef __midl
+#ifndef _MSC_VER
+typedef unsigned char boolean;
+#endif
+#endif
+
 // TODO: OOP JIT, how do we make this better?
 const int VTABLE_COUNT = 47;
 const int EQUIVALENT_TYPE_CACHE_SIZE_IDL = 8;
@@ -39,8 +45,8 @@ typedef struct TypeHandlerIDL
 
     unsigned short inlineSlotCapacity;
     unsigned short offsetOfInlineSlots;
-    IDL_PAD2(0);
-    IDL_PAD4(1);
+    IDL_PAD2(0)
+    IDL_PAD4(1)
     int slotCapacity;
 } TypeHandlerIDL;
 
@@ -48,7 +54,7 @@ typedef struct TypeIDL
 {
     unsigned char flags;
     boolean isShared;
-    IDL_PAD2(0);
+    IDL_PAD2(0)
     int typeId;
 
     CHAKRA_PTR libAddr;
@@ -63,9 +69,9 @@ typedef struct TypeIDL
 typedef struct EquivalentTypeSetIDL
 {
     boolean sortedAndDuplicatesRemoved;
-    IDL_PAD1(0);
+    IDL_PAD1(0)
     unsigned short count;
-    IDL_PAD4(1);
+    IDL_PAD4(1)
     IDL_DEF([size_is(count)]) TypeIDL ** types;
 } EquivalentTypeSetIDL;
 
@@ -90,10 +96,10 @@ typedef struct JITTimeConstructorCacheIDL
 
     short inlineSlotCount;
 
-    IDL_PAD2(0);
+    IDL_PAD2(0)
     int slotCount;
 
-    IDL_PAD4(1);
+    IDL_PAD4(1)
     TypeIDL type;
 
     CHAKRA_PTR runtimeCacheAddr;
@@ -104,13 +110,13 @@ typedef struct JITTimeConstructorCacheIDL
 typedef struct ObjTypeSpecFldIDL
 {
     boolean inUse;
-    IDL_PAD1(0);
+    IDL_PAD1(0)
     // TODO: OOP JIT we may want to copy some of the data in these pointers
     unsigned short flags;
     unsigned short slotIndex;
     unsigned short fixedFieldCount;
     unsigned short fixedFieldInfoArraySize; // 1 (when fixedFieldCount is 0) or fixedFieldCount
-    IDL_PAD2(1);
+    IDL_PAD2(1)
     int propertyId;
     int typeId;
     unsigned int id;
@@ -125,10 +131,10 @@ typedef struct ObjTypeSpecFldIDL
 typedef struct PinnedTypeRefsIDL
 {
     boolean isOOPJIT;// REVIEW: remove this
-    IDL_PAD1(0);
-    IDL_PAD2(1);
+    IDL_PAD1(0)
+    IDL_PAD2(1)
     unsigned int count;
-    IDL_DEF([size_is(count)]) CHAKRA_PTR typeRefs[*];
+    IDL_DEF([size_is(count)]) CHAKRA_PTR typeRefs[IDL_DEF(*)];
 
 } PinnedTypeRefsIDL;
 
@@ -140,8 +146,8 @@ typedef struct BVUnitIDL
 typedef struct BVFixedIDL
 {
     unsigned int len;
-    IDL_PAD4(0);
-    IDL_DEF([size_is(((len - 1) >> BV_SHIFT) + 1)]) BVUnitIDL data[*];
+    IDL_PAD4(0)
+    IDL_DEF([size_is(((len - 1) >> BV_SHIFT) + 1)]) BVUnitIDL data[IDL_DEF(*)];
 } BVFixedIDL;
 
 typedef struct CallSiteIDL
@@ -157,7 +163,7 @@ typedef struct ThisIDL
 {
     unsigned short valueType;
     byte thisType;
-    IDL_PAD1(0);
+    IDL_PAD1(0)
 } ThisIDL;
 
 typedef struct FldIDL
@@ -170,11 +176,11 @@ typedef struct FldIDL
 typedef struct ArrayCallSiteIDL
 {
     byte bits;
-    IDL_PAD1(0);
-    IDL_PAD2(1);
+    IDL_PAD1(0)
+    IDL_PAD2(1)
     unsigned int functionNumber;
     unsigned short callSiteNumber;
-    IDL_PAD2(2);
+    IDL_PAD2(2)
 } ArrayCallSiteIDL;
 
 typedef struct LdElemIDL
@@ -193,7 +199,7 @@ typedef struct StElemIDL
 typedef struct ProfileDataIDL
 {
     byte implicitCallFlags;
-    IDL_PAD1(0);
+    IDL_PAD1(0)
 
     ThisIDL thisData;
 
@@ -247,9 +253,9 @@ typedef struct ThreadContextDataIDL
 {
     boolean isThreadBound;
 
-    IDL_PAD1(0);
-    IDL_PAD2(1);
-    IDL_PAD4(2);
+    IDL_PAD1(0)
+    IDL_PAD2(1)
+    IDL_PAD4(2)
     CHAKRA_PTR processHandle;
     CHAKRA_PTR chakraBaseAddress;
     CHAKRA_PTR crtBaseAddress;
@@ -272,7 +278,7 @@ typedef struct ScriptContextDataIDL
     boolean isRecyclerVerifyEnabled;
     boolean recyclerAllowNativeCodeBumpAllocation;
     boolean isSIMDEnabled;
-    IDL_PAD1(0);
+    IDL_PAD1(0)
     unsigned int recyclerVerifyPad;
     CHAKRA_PTR vtableAddresses[VTABLE_COUNT];
 
@@ -311,7 +317,7 @@ typedef struct SmallSpanSequenceIDL
     int baseValue;
     unsigned int statementLength;
     IDL_DEF([size_is(statementLength)]) unsigned int * statementBuffer;
-    IDL_PAD4(1);
+    IDL_PAD4(1)
     unsigned int actualOffsetLength; // REVIEW: are lengths the same?
     IDL_DEF([size_is(actualOffsetLength)]) unsigned int * actualOffsetList;
 } SmallSpanSequenceIDL;
@@ -320,7 +326,7 @@ typedef struct JITLoopHeaderIDL
 {
     boolean isNested;
     boolean isInTry;
-    IDL_PAD2(0);
+    IDL_PAD2(0)
     unsigned int interpretCount;
     unsigned int startOffset;
     unsigned int endOffset;
@@ -332,7 +338,7 @@ typedef struct AsmJsDataIDL
     boolean usesHeapBuffer;
     unsigned short argByteSize;
     unsigned short argCount;
-    IDL_PAD2(0);
+    IDL_PAD2(0)
     int retType;
     int intConstCount;
     int doubleConstCount;
@@ -362,15 +368,15 @@ typedef struct PropertyRecordIDL
     boolean isNumeric;
     boolean isBound;
     boolean isSymbol;
-    IDL_PAD1(0);
+    IDL_PAD1(0)
     unsigned int byteCount;
-    IDL_DEF([size_is(byteCount + sizeof(wchar_t) + (isNumeric ? sizeof(unsigned int) : 0))]) byte buffer[*];
+    IDL_DEF([size_is(byteCount + sizeof(wchar_t) + (isNumeric ? sizeof(unsigned int) : 0))]) byte buffer[IDL_DEF(*)];
 } PropertyRecordIDL;
 
 typedef struct FunctionJITRuntimeIDL
 {
     unsigned int clonedCacheCount;
-    IDL_PAD4(0);
+    IDL_PAD4(0)
     IDL_DEF([size_is(clonedCacheCount)]) CHAKRA_PTR * clonedInlineCaches;
 } FunctionJITRuntimeIDL;
 
@@ -381,7 +387,7 @@ typedef struct PropertyIdArrayIDL
     boolean hadDuplicates;
     boolean has__proto__;
     boolean hasNonSimpleParams;
-    IDL_DEF([size_is(count + extraSlotCount)]) int elements[*];
+    IDL_DEF([size_is(count + extraSlotCount)]) int elements[IDL_DEF(*)];
 } PropertyIdArrayIDL;
 
 
@@ -414,7 +420,7 @@ typedef struct RecyclableObjectIDL
 typedef struct ConstTableContentIDL
 {
     unsigned int count;
-    IDL_PAD4(0);
+    IDL_PAD4(0)
     IDL_DEF([size_is(count)]) RecyclableObjectIDL** content;
 } ConstTableContentIDL;
 
@@ -450,7 +456,7 @@ typedef struct FunctionBodyDataIDL
     unsigned short argUsedForBranch;
     unsigned short profiledCallSiteCount;
 
-    IDL_PAD2(0);
+    IDL_PAD2(0)
 
     unsigned int funcNumber;
     unsigned int sourceContextId;
@@ -485,7 +491,7 @@ typedef struct FunctionBodyDataIDL
     unsigned int auxDataCount;
     unsigned int auxContextDataCount;
 
-    IDL_PAD4(1);
+    IDL_PAD4(1)
 
     SmallSpanSequenceIDL statementMap;
 
@@ -533,7 +539,7 @@ typedef struct FunctionJITTimeDataIDL
 {
     boolean isAggressiveInliningEnabled;
     boolean isInlined;
-    IDL_PAD2(0);
+    IDL_PAD2(0)
     unsigned int localFuncId;
     FunctionBodyDataIDL * bodyData; // TODO: oop jit, can these repeat, should we share?
 
@@ -552,7 +558,7 @@ typedef struct FunctionJITTimeDataIDL
 
     IDL_DEF([size_is(ldFldInlineeCount)]) struct FunctionJITTimeDataIDL ** ldFldInlinees;
 
-    IDL_PAD4(1);
+    IDL_PAD4(1)
     unsigned int objTypeSpecFldInfoCount;
     IDL_DEF([size_is(objTypeSpecFldInfoCount)]) ObjTypeSpecFldIDL * objTypeSpecFldInfoArray;
 
@@ -581,8 +587,8 @@ typedef struct XProcNumberPageSegment
 typedef struct PolymorphicInlineCacheIDL
 {
     unsigned short size;
-    IDL_PAD2(0);
-    IDL_PAD4(1);
+    IDL_PAD2(0)
+    IDL_PAD4(1)
     CHAKRA_PTR addr;
     CHAKRA_PTR inlineCachesAddr;
 } PolymorphicInlineCacheIDL;
@@ -636,15 +642,15 @@ typedef struct NativeDataFixupRecord
     unsigned int index;
     unsigned int length;
     unsigned int startOffset;
-    IDL_PAD4(0);
+    IDL_PAD4(0)
     struct NativeDataFixupEntry* updateList;
 } NativeDataFixupRecord;
 
 typedef struct NativeDataFixupTable
 {
     unsigned int count;
-    IDL_PAD4(0);
-    IDL_DEF([size_is(count)]) NativeDataFixupRecord fixupRecords[*];
+    IDL_PAD4(0)
+    IDL_DEF([size_is(count)]) NativeDataFixupRecord fixupRecords[IDL_DEF(*)];
 } NativeDataFixupTable;
 
 
@@ -674,8 +680,8 @@ typedef struct EquivalentTypeGuardIDL
 typedef struct EquivalentTypeGuardOffsets
 {
     unsigned int count;
-    IDL_PAD4(0);
-    IDL_DEF([size_is(count)]) EquivalentTypeGuardIDL guards[*];
+    IDL_PAD4(0)
+    IDL_DEF([size_is(count)]) EquivalentTypeGuardIDL guards[IDL_DEF(*)];
 
 } EquivalentTypeGuardOffsets;
 
@@ -684,21 +690,21 @@ typedef struct TypeGuardTransferEntryIDL
     unsigned int propId;
     unsigned int guardsCount;
     struct TypeGuardTransferEntryIDL* next;
-    IDL_DEF([size_is(guardsCount)]) int guardOffsets[*];
+    IDL_DEF([size_is(guardsCount)]) int guardOffsets[IDL_DEF(*)];
 } TypeGuardTransferEntryIDL;
 
 typedef struct  CtorCacheTransferEntryIDL
 {
     unsigned int propId;
     unsigned int cacheCount;
-    IDL_DEF([size_is(cacheCount)]) CHAKRA_PTR caches[*];
+    IDL_DEF([size_is(cacheCount)]) CHAKRA_PTR caches[IDL_DEF(*)];
 }  CtorCacheTransferEntryIDL;
 
 typedef struct NativeDataBuffer
 {
     unsigned int len;
     unsigned int unused;
-    IDL_DEF([size_is(len)]) byte data[*];
+    IDL_DEF([size_is(len)]) byte data[IDL_DEF(*)];
 } NativeDataBuffer;
 
 // Fields that JIT modifies
@@ -716,7 +722,7 @@ typedef struct JITOutputIDL
 
     boolean hasJittedStackClosure;
 
-    IDL_PAD1(0);
+    IDL_PAD1(0)
 
     unsigned short pdataCount;
     unsigned short xdataSize;

+ 1 - 0
lib/Jsrt/CMakeLists.txt

@@ -39,6 +39,7 @@ add_subdirectory(Core)
   
 target_include_directories (
     Chakra.Jsrt PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}
+    ../JITIDL
     ../Runtime
     ../Runtime/Base
     ../Runtime/Debug

+ 1 - 0
lib/Jsrt/Core/CMakeLists.txt

@@ -4,6 +4,7 @@ add_library (Chakra.Jsrt.Core OBJECT
 
 target_include_directories (
     Chakra.Jsrt.Core PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}
+    ../../JITIDL
     ../../Runtime
     ../../Parser
     ../

+ 1 - 0
lib/Parser/CMakeLists.txt

@@ -28,6 +28,7 @@ add_library (Chakra.Parser OBJECT
 
 target_include_directories (
     Chakra.Parser PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}
+    ../JITIDL
     ../Common
     ../Backend
     ../Runtime

+ 1 - 0
lib/Runtime/Base/CMakeLists.txt

@@ -24,6 +24,7 @@ add_library (Chakra.Runtime.Base OBJECT
     TempArenaAllocatorObject.cpp
     TestEtwEventSink.cpp
     ThreadBoundThreadContextManager.cpp
+    ThreadContextInfo.cpp
     ThreadContext.cpp
     ThreadContextTlsEntry.cpp
     ThreadServiceWrapperBase.cpp

+ 2 - 1
lib/Runtime/Base/FunctionBody.cpp

@@ -9110,7 +9110,6 @@ namespace Js
             {
                 this->constructorCaches->Clear();
             }
-#endif
 
 #if defined(_M_X64) || defined(_M_ARM32_OR_ARM64)
             if (this->xdataInfo != nullptr)
@@ -9120,6 +9119,8 @@ namespace Js
                 this->xdataInfo = nullptr;
             }
 #endif
+#endif
+
             // This is how we set the CleanedUp state
             this->workItem = nullptr;
             this->nativeAddress = nullptr;

+ 12 - 2
lib/Runtime/Base/FunctionBody.h

@@ -175,10 +175,12 @@ namespace Js
         void SetHasFixedValue() { this->hasFixedValue = true; }
         bool HasFixedValue() const { return this->hasFixedValue; }
 
+#if ENABLE_NATIVE_CODEGEN
         void Fixup(NativeCodeData::DataChunk* chunkList)
         {
             Assert(false); // not implemented yet
         }
+#endif
     };
 
     class JitEquivalentTypeGuard : public JitIndexedPropertyGuard
@@ -214,10 +216,12 @@ namespace Js
             this->cache = cache;
         }
 
+#if ENABLE_NATIVE_CODEGEN
         void Fixup(NativeCodeData::DataChunk* chunkList)
         {
             // cache will be recycler allocated pointer
         }
+#endif
     };
 
 #pragma region Inline Cache Info class declarations
@@ -409,7 +413,9 @@ namespace Js
             // swap the cache associated with each guard from the heap to the recycler (so the types in the cache are kept alive).
             JitEquivalentTypeGuard** equivalentTypeGuards;
             Js::PropertyId* lazyBailoutProperties;
+#if ENABLE_NATIVE_CODEGEN
             NativeCodeData* jitTransferRawData;
+#endif
             EquivalentTypeGuardOffsets* equivalentTypeGuardOffsets;
             TypeGuardTransferData typeGuardTransferData;
             CtorCacheTransferData ctorCacheTransferData;
@@ -425,7 +431,9 @@ namespace Js
                 equivalentTypeGuardCount(0), equivalentTypeGuards(nullptr), jitTransferRawData(nullptr),
                 falseReferencePreventionBit(true), isReady(false), lazyBailoutProperties(nullptr), lazyBailoutPropertyCount(0){}
 
+#if ENABLE_NATIVE_CODEGEN
             void SetRawData(NativeCodeData* rawData) { jitTransferRawData = rawData; }
+#endif
             void AddJitTimeTypeRef(void* typeRef, Recycler* recycler);
 
             int GetRuntimeTypeRefCount() { return this->runtimeTypeRefs ? this->runtimeTypeRefs->count : 0; }
@@ -463,9 +471,10 @@ namespace Js
         private:
             void EnsureJitTimeTypeRefs(Recycler* recycler);
         };
-
+#if ENABLE_NATIVE_CODEGEN
         NativeCodeData * inProcJITNaticeCodedata;
         char* nativeDataBuffer;
+#endif
         CodeGenNumberChunk * numberChunks;
 
         SmallSpanSequence *nativeThrowSpanSequence;
@@ -525,11 +534,12 @@ namespace Js
         };
         uint frameHeight;
 
+#if ENABLE_NATIVE_CODEGEN
         char** GetNativeDataBufferRef() { return &nativeDataBuffer; }
         char* GetNativeDataBuffer() { return nativeDataBuffer; }
         void SetInProcJITNativeCodeData(NativeCodeData* nativeCodeData) { inProcJITNaticeCodedata = nativeCodeData; }
-
         void SetNumberChunks(CodeGenNumberChunk * chunks) { numberChunks = chunks; }
+#endif
 
     private:
 #if ENABLE_NATIVE_CODEGEN

+ 9 - 0
lib/Runtime/Base/ScriptContext.cpp

@@ -326,7 +326,10 @@ namespace Js
         intConstPropsOnGlobalObject = Anew(GeneralAllocator(), PropIdSetForConstProp, GeneralAllocator());
         intConstPropsOnGlobalUserObject = Anew(GeneralAllocator(), PropIdSetForConstProp, GeneralAllocator());
 
+#if ENABLE_NATIVE_CODEGEN
         m_domFastPathHelperMap = HeapNew(JITDOMFastPathHelperMap, &HeapAllocator::Instance, 17);
+#endif
+
         this->debugContext = HeapNew(DebugContext, this);
     }
 
@@ -378,6 +381,7 @@ namespace Js
         // Take etw rundown lock on this thread context. We are going to change/destroy this scriptContext.
         AutoCriticalSection autocs(GetThreadContext()->GetEtwRundownCriticalSection());
 
+#if ENABLE_NATIVE_CODEGEN
         if (m_domFastPathHelperMap != nullptr)
         {
             HeapDelete(m_domFastPathHelperMap);
@@ -387,6 +391,7 @@ namespace Js
             Assert(JITManager::GetJITManager()->IsOOPJITEnabled());
             JITManager::GetJITManager()->CleanupScriptContext(m_remoteScriptContextAddr);
         }
+#endif
 
         // TODO: Can we move this on Close()?
         ClearHostScriptContext();
@@ -4412,6 +4417,7 @@ void ScriptContext::RegisterPrototypeChainEnsuredToHaveOnlyWritableDataPropertie
         }
     }
 
+#if ENABLE_NATIVE_CODEGEN
     void ScriptContext::InitializeRemoteScriptContext()
     {
         Assert(JITManager::GetJITManager()->IsOOPJITEnabled());
@@ -4465,6 +4471,7 @@ void ScriptContext::RegisterPrototypeChainEnsuredToHaveOnlyWritableDataPropertie
         }
         JITManager::GetJITManager()->InitializeScriptContext(&contextData, &m_remoteScriptContextAddr);
     }
+#endif
 
     intptr_t ScriptContext::GetNullAddr() const
     {
@@ -4621,6 +4628,7 @@ void ScriptContext::RegisterPrototypeChainEnsuredToHaveOnlyWritableDataPropertie
         return (intptr_t)this;
     }
 
+#if ENABLE_NATIVE_CODEGEN
     void ScriptContext::AddToDOMFastPathHelperMap(intptr_t funcInfoAddr, IR::JnHelperMethod helper)
     {
         m_domFastPathHelperMap->Add(funcInfoAddr, helper);
@@ -4637,6 +4645,7 @@ void ScriptContext::RegisterPrototypeChainEnsuredToHaveOnlyWritableDataPropertie
         Assert(found);
         return helper;
     }
+#endif
 
     intptr_t ScriptContext::GetVTableAddress(VTableValue vtableType) const
     {

+ 6 - 0
lib/Runtime/Base/ScriptContext.h

@@ -442,7 +442,9 @@ namespace Js
             return isInvalidatedForHostObjects;
         }
 
+#if ENABLE_NATIVE_CODEGEN
         void InitializeRemoteScriptContext();
+#endif
 
 #ifdef ENABLE_JS_ETW
         void EmitStackTraceEvent(__in UINT64 operationID, __in USHORT maxFrameCount, bool emitV2AsyncStackEvent);
@@ -1686,8 +1688,10 @@ private:
         virtual bool IsPRNGSeeded() const override;
         virtual intptr_t GetBuiltinFunctionsBaseAddr() const override;
 
+#if ENABLE_NATIVE_CODEGEN
         virtual void AddToDOMFastPathHelperMap(intptr_t funcInfoAddr, IR::JnHelperMethod helper) override;
         virtual IR::JnHelperMethod GetDOMFastPathHelper(intptr_t funcInfoAddr) override;
+#endif
 
         virtual intptr_t GetAddr() const override;
 
@@ -1709,7 +1713,9 @@ private:
     private:
         BuiltInLibraryFunctionMap* builtInLibraryFunctions;
 
+#if ENABLE_NATIVE_CODEGEN
         JITDOMFastPathHelperMap * m_domFastPathHelperMap;
+#endif
 
 #ifdef RECYCLER_PERF_COUNTERS
         size_t bindReferenceCount;

+ 4 - 2
lib/Runtime/Base/ScriptContextInfo.h

@@ -46,12 +46,14 @@ public:
     virtual bool IsRecyclerVerifyEnabled() const = 0;
     virtual uint GetRecyclerVerifyPad() const = 0;
 
+    virtual Js::Var* GetModuleExportSlotArrayAddress(uint moduleIndex, uint slotIndex) = 0;
+
+#if ENABLE_NATIVE_CODEGEN
     virtual void AddToDOMFastPathHelperMap(intptr_t funcInfoAddr, IR::JnHelperMethod helper) = 0;
     virtual IR::JnHelperMethod GetDOMFastPathHelper(intptr_t funcInfoAddr) = 0;
 
-    virtual Js::Var* GetModuleExportSlotArrayAddress(uint moduleIndex, uint slotIndex) = 0;
-
     typedef JsUtil::BaseDictionary<intptr_t, IR::JnHelperMethod, HeapAllocator, PowerOf2SizePolicy,
         DefaultComparer, JsUtil::SimpleDictionaryEntry, JsUtil::AsymetricResizeLock> JITDOMFastPathHelperMap;
+#endif
 
 };

+ 10 - 0
lib/Runtime/Base/ThreadContext.cpp

@@ -319,11 +319,13 @@ ThreadContext::GetThreadStackLimitAddr() const
     return (intptr_t)GetAddressOfStackLimitForCurrentThread();
 }
 
+#if ENABLE_NATIVE_CODEGEN
 intptr_t
 ThreadContext::GetSimdTempAreaAddr(uint8 tempIndex) const
 {
     return (intptr_t)&X86_TEMP_SIMD[tempIndex];
 }
+#endif
 
 intptr_t 
 ThreadContext::GetDisableImplicitFlagsAddr() const
@@ -1091,11 +1093,14 @@ ThreadContext::AddPropertyRecordInternal(const Js::PropertyRecord * propertyReco
     // Add to the map
     m_propertyMap->Add(propertyRecord);
 
+#if ENABLE_NATIVE_CODEGEN
     // add to OOP JIT process if the context has already been initialized
     if (JITManager::GetJITManager()->IsOOPJITEnabled() && m_remoteThreadContextInfo)
     {
         JITManager::GetJITManager()->AddPropertyRecord(m_remoteThreadContextInfo, (PropertyRecordIDL*)propertyRecord);
     }
+#endif
+
     PropertyRecordTrace(_u("Added property '%s' at 0x%08x, pid = %d\n"), propertyName, propertyRecord, propertyId);
 
     // Do not store the pid for symbols in the direct property name table.
@@ -1910,6 +1915,7 @@ ThreadContext::IsInAsyncHostOperation() const
 }
 #endif
 
+#if ENABLE_NATIVE_CODEGEN
 void
 ThreadContext::SetJITConnectionInfo(HANDLE processHandle, void* serverSecurityDescriptor, UUID connectionId)
 {
@@ -1957,6 +1963,8 @@ ThreadContext::SetJITConnectionInfo(HANDLE processHandle, void* serverSecurityDe
         });
     }
 }
+#endif
+
 #if ENABLE_TTD
 
 bool ThreadContext::IsTTDInitialized() const
@@ -2177,10 +2185,12 @@ void ThreadContext::SetWellKnownHostTypeId(WellKnownHostType wellKnownType, Js::
     if (WellKnownHostType_HTMLAllCollection == wellKnownType)
     {
         this->wellKnownHostTypeHTMLAllCollectionTypeId = typeId;
+#if ENABLE_NATIVE_CODEGEN
         if (this->m_remoteThreadContextInfo != 0)
         {
             JITManager::GetJITManager()->SetWellKnownHostTypeId(this->m_remoteThreadContextInfo, (int)typeId);
         }
+#endif
     }
 }
 

+ 8 - 1
lib/Runtime/Base/ThreadContext.h

@@ -534,13 +534,17 @@ private:
     intptr_t m_remoteThreadContextInfo;
     intptr_t m_prereservedRegionAddr;
 
+#if ENABLE_NATIVE_CODEGEN
 public:
+
     void SetJITConnectionInfo(HANDLE processHandle, void* serverSecurityDescriptor, UUID connectionId);
 
     intptr_t GetRemoteThreadContextAddr() const
     {
         return m_remoteThreadContextInfo;
     }
+#endif
+
 private:
     typedef JsUtil::BaseDictionary<uint, Js::SourceDynamicProfileManager*, Recycler, PowerOf2SizePolicy> SourceDynamicProfileManagerMap;
     typedef JsUtil::BaseDictionary<const char16*, const Js::PropertyRecord*, Recycler, PowerOf2SizePolicy> SymbolRegistrationMap;
@@ -1057,11 +1061,12 @@ public:
 
     void ShutdownThreads()
     {
+#if ENABLE_NATIVE_CODEGEN
         if (JITManager::GetJITManager()->IsOOPJITEnabled())
         {
             JITManager::GetJITManager()->CleanupThreadContext(m_remoteThreadContextInfo);
         }
-#if ENABLE_NATIVE_CODEGEN
+
         if (jobProcessor)
         {
             jobProcessor->Close();
@@ -1279,7 +1284,9 @@ public:
 
     virtual intptr_t GetThreadStackLimitAddr() const override;
 
+#if ENABLE_NATIVE_CODEGEN
     virtual intptr_t GetSimdTempAreaAddr(uint8 tempIndex) const override;
+#endif
 
     virtual intptr_t GetDisableImplicitFlagsAddr() const override;
     virtual intptr_t GetImplicitCallFlagsAddr() const override;

+ 6 - 2
lib/Runtime/Base/ThreadContextInfo.cpp

@@ -12,6 +12,7 @@ ThreadContextInfo::ThreadContextInfo() :
 {
 }
 
+#if ENABLE_NATIVE_CODEGEN
 intptr_t
 ThreadContextInfo::GetNullFrameDisplayAddr() const
 {
@@ -287,6 +288,7 @@ ThreadContextInfo::GetStringMatchNameAddr() const
 {
     return SHIFT_ADDR(this, Js::Constants::StringMatch);
 }
+#endif
 
 bool
 ThreadContextInfo::IsAllJITCodeInPreReservedRegion() const
@@ -331,13 +333,13 @@ ThreadContextInfo::IsCFGEnabled()
 void
 ThreadContextInfo::BeginJIT()
 {
-    InterlockedExchangeAdd(&m_activeJITCount, 1);
+    InterlockedExchangeAdd(&m_activeJITCount, (uint)1);
 }
 
 void
 ThreadContextInfo::EndJIT()
 {
-    InterlockedExchangeSubtract(&m_activeJITCount, 1);
+    InterlockedExchangeSubtract(&m_activeJITCount, (uint)1);
 }
 
 bool
@@ -346,6 +348,7 @@ ThreadContextInfo::IsJITActive()
     return m_activeJITCount != 0;
 }
 
+#if ENABLE_NATIVE_CODEGEN
 intptr_t SHIFT_ADDR(const ThreadContextInfo*const context, intptr_t address)
 {
     Assert(AutoSystemInfo::Data.IsJscriptModulePointer((void*)address));
@@ -361,3 +364,4 @@ intptr_t SHIFT_CRT_ADDR(const ThreadContextInfo*const context, intptr_t address)
     }
     return (intptr_t)address + context->GetCRTBaseAddressDifference();
 }
+#endif

+ 10 - 3
lib/Runtime/Base/ThreadContextInfo.h

@@ -11,6 +11,7 @@ class ThreadContextInfo
 public:
     ThreadContextInfo();
 
+#if ENABLE_NATIVE_CODEGEN
     intptr_t GetNullFrameDisplayAddr() const;
     intptr_t GetStrictNullFrameDisplayAddr() const;
 
@@ -61,6 +62,7 @@ public:
 
     intptr_t GetStringReplaceNameAddr() const;
     intptr_t GetStringMatchNameAddr() const;
+#endif
 
     void ResetIsAllJITCodeInPreReservedRegion();
     bool IsAllJITCodeInPreReservedRegion() const;
@@ -73,11 +75,14 @@ public:
 
     virtual intptr_t GetThreadStackLimitAddr() const = 0;
 
-    virtual intptr_t GetSimdTempAreaAddr(uint8 tempIndex) const = 0;
-
     virtual intptr_t GetDisableImplicitFlagsAddr() const = 0;
     virtual intptr_t GetImplicitCallFlagsAddr() const = 0;
+
+#if ENABLE_NATIVE_CODEGEN
+    virtual intptr_t GetSimdTempAreaAddr(uint8 tempIndex) const = 0;
     virtual intptr_t GetBailOutRegisterSaveSpaceAddr() const = 0;
+    virtual PreReservedVirtualAllocWrapper * GetPreReservedVirtualAllocator() = 0;
+#endif
 
     virtual intptr_t GetDebuggingFlagsAddr() const = 0;
     virtual intptr_t GetDebugStepTypeAddr() const = 0;
@@ -86,7 +91,6 @@ public:
 
     virtual ptrdiff_t GetChakraBaseAddressDifference() const = 0;
     virtual ptrdiff_t GetCRTBaseAddressDifference() const = 0;
-    virtual PreReservedVirtualAllocWrapper * GetPreReservedVirtualAllocator() = 0;
 
     virtual Js::PropertyRecord const * GetPropertyRecord(Js::PropertyId propertyId) = 0;
 
@@ -111,6 +115,7 @@ private:
     
 };
 
+#if ENABLE_NATIVE_CODEGEN
 // TODO: OOP JIT, is there any issue when crossing over 2^31/2^63?
 template<typename T>
 intptr_t SHIFT_ADDR(const ThreadContextInfo*const context, T* address)
@@ -130,5 +135,7 @@ intptr_t SHIFT_CRT_ADDR(const ThreadContextInfo*const context, T* address)
     return (intptr_t)address + context->GetCRTBaseAddressDifference();
 }
 
+
 intptr_t SHIFT_ADDR(const ThreadContextInfo*const context, intptr_t address);
 intptr_t SHIFT_CRT_ADDR(const ThreadContextInfo*const context, intptr_t address);
+#endif

+ 28 - 32
lib/Runtime/ByteCode/ByteCodeSerializer.cpp

@@ -3343,48 +3343,44 @@ public:
         bool isPropertyIdArrayAvailable = false;
         current = ReadBool(current, &isPropertyIdArrayAvailable);
 
-        if (!isPropertyIdArrayAvailable)
+        if (isPropertyIdArrayAvailable)
         {
-            goto Done;
-        }
-
-        uint32 count = 0;
-        current = ReadUInt32(current, &count);
+            uint32 count = 0;
+            current = ReadUInt32(current, &count);
 
-        byte extraSlotCount = 0;
-        current = ReadByte(current, &extraSlotCount);
+            byte extraSlotCount = 0;
+            current = ReadByte(current, &extraSlotCount);
 
-        PropertyIdArray * propIds = function->AllocatePropertyIdArrayForFormals((extraSlotCount + count) * sizeof(PropertyId),count, extraSlotCount);
-        propIds->count = count;
-        
+            PropertyIdArray * propIds = function->AllocatePropertyIdArrayForFormals((extraSlotCount + count) * sizeof(PropertyId), count, extraSlotCount);
+            propIds->count = count;
 
-        bool hadDuplicates = false;
-        current = ReadBool(current, &hadDuplicates);
-        propIds->hadDuplicates = hadDuplicates;
+            bool hadDuplicates = false;
+            current = ReadBool(current, &hadDuplicates);
+            propIds->hadDuplicates = hadDuplicates;
 
-        bool has__proto__ = false;
-        current = ReadBool(current, &has__proto__);
-        propIds->has__proto__ = has__proto__;
+            bool has__proto__ = false;
+            current = ReadBool(current, &has__proto__);
+            propIds->has__proto__ = has__proto__;
 
-        bool hasNonSimpleParams = false;
-        current = ReadBool(current, &hasNonSimpleParams);
-        propIds->hasNonSimpleParams = hasNonSimpleParams;
+            bool hasNonSimpleParams = false;
+            current = ReadBool(current, &hasNonSimpleParams);
+            propIds->hasNonSimpleParams = hasNonSimpleParams;
 
-        int id = 0;
-        for (uint i = 0; i < propIds->count; ++i)
-        {
-            current = ReadInt32(current, &id);
-            PropertyId propertyId = function->GetByteCodeCache()->LookupPropertyId(id);
-            propIds->elements[i] = propertyId;
-        }
+            int id = 0;
+            for (uint i = 0; i < propIds->count; ++i)
+            {
+                current = ReadInt32(current, &id);
+                PropertyId propertyId = function->GetByteCodeCache()->LookupPropertyId(id);
+                propIds->elements[i] = propertyId;
+            }
 
-        for (int i = 0; i < extraSlotCount; ++i)
-        {
-            current = ReadInt32(current, &id);
-            propIds->elements[propIds->count + i] = id;
+            for (int i = 0; i < extraSlotCount; ++i)
+            {
+                current = ReadInt32(current, &id);
+                propIds->elements[propIds->count + i] = id;
+            }
         }
 
-    Done:
 #ifdef BYTE_CODE_MAGIC_CONSTANTS
         current = ReadInt32(current, &constant);
         Assert(constant == magicEndOfPropIdsOfFormals);

+ 1 - 0
lib/Runtime/CMakeLists.txt

@@ -2,6 +2,7 @@ project(CHAKRA_RUNTIME)
 
 include_directories(
     .
+    ../JITIDL
     ../Common
     ../Backend
     ../Parser

+ 1 - 1
lib/Runtime/Language/CMakeLists.txt

@@ -31,7 +31,7 @@ add_library (Chakra.Runtime.Language OBJECT
     JavascriptOperators.cpp
     JavascriptStackWalker.cpp
     ProfilingHelpers.cpp
-    ReadOnlyDynamicProfileInfo.cpp
+    #ReadOnlyDynamicProfileInfo.cpp
     RuntimeLanguagePch.cpp
     # SimdBool16x8Operation.cpp
     # SimdBool16x8OperationX86X64.cpp

+ 2 - 0
lib/Runtime/Language/EHBailoutData.h

@@ -24,10 +24,12 @@ namespace Js
             this->child = nullptr;
         }
 
+#if ENABLE_NATIVE_CODEGEN
         void Fixup(NativeCodeData::DataChunk* chunkList)
         {
             FixupNativeDataPointer(parent, chunkList);
             FixupNativeDataPointer(child, chunkList);
         }
+#endif
     };
 }

+ 2 - 0
lib/Runtime/Language/SimdUtils.h

@@ -145,8 +145,10 @@ const _x86_SIMDValue X86_4LANES_MASKS[]     = {{ 0xffffffff, 0x00000000, 0x00000
 
 #pragma warning(pop)
 
+#if ENABLE_NATIVE_CODEGEN && defined(ENABLE_SIMDJS)
 // auxiliary SIMD values in memory to help JIT'ed code. E.g. used for Int8x16 shuffle. 
 extern _x86_SIMDValue X86_TEMP_SIMD[];
+#endif
 
 typedef _x86_SIMDValue X86SIMDValue;
 CompileAssert(sizeof(X86SIMDValue) == 16);

+ 2 - 0
lib/Runtime/Language/SourceTextModuleRecord.cpp

@@ -804,6 +804,7 @@ namespace Js
                 localSlotCount = 0;
             }
 
+#if ENABLE_NATIVE_CODEGEN
             if (JITManager::GetJITManager()->IsOOPJITEnabled())
             {
                 JITManager::GetJITManager()->AddModuleRecordInfo(
@@ -811,6 +812,7 @@ namespace Js
                     this->GetModuleId(),
                     (intptr_t)this->GetLocalExportSlots());
             }
+#endif
         }
     }
 

+ 2 - 2
lib/Runtime/Language/amd64/StackFrame.SystemV.cpp

@@ -29,7 +29,7 @@ Amd64StackFrame::InitializeByFrameId(void * frame, ScriptContext* scriptContext)
 
     this->stackCheckCodeHeight =
         scriptContext->GetThreadContext()->DoInterruptProbe() ? stackCheckCodeHeightWithInterruptProbe
-        : scriptContext->GetThreadContext()->GetIsThreadBound() ? stackCheckCodeHeightThreadBound
+        : scriptContext->GetThreadContext()->IsThreadBound() ? stackCheckCodeHeightThreadBound
         : stackCheckCodeHeightNotThreadBound;
 
     return Next();
@@ -47,7 +47,7 @@ Amd64StackFrame::InitializeByReturnAddress(void * returnAddress, ScriptContext*
 
     this->stackCheckCodeHeight =
         scriptContext->GetThreadContext()->DoInterruptProbe() ? stackCheckCodeHeightWithInterruptProbe
-        : scriptContext->GetThreadContext()->GetIsThreadBound() ? stackCheckCodeHeightThreadBound
+        : scriptContext->GetThreadContext()->IsThreadBound() ? stackCheckCodeHeightThreadBound
         : stackCheckCodeHeightNotThreadBound;
 
     while (Next())

+ 2 - 0
lib/Runtime/Library/JavascriptLibrary.cpp

@@ -6514,10 +6514,12 @@ namespace Js
     void JavascriptLibrary::SetIsPRNGSeeded(bool val)
     {
         this->isPRNGSeeded = val;
+#if ENABLE_NATIVE_CODEGEN
         if (JITManager::GetJITManager()->IsOOPJITEnabled())
         {
             JITManager::GetJITManager()->SetIsPRNGSeeded(GetScriptContext()->GetRemoteScriptAddr(), val);
         }
+#endif
     }
     INT_PTR* JavascriptLibrary::GetVTableAddresses()
     {

+ 1 - 1
lib/Runtime/Library/MathLibrary.cpp

@@ -12,7 +12,7 @@
 #pragma intrinsic(_mm_round_sd)
 #endif
 
-const LPCWSTR UCrtC99MathApis::LibraryName = L"api-ms-win-crt-math-l1-1-0.dll";
+const LPCWSTR UCrtC99MathApis::LibraryName = _u("api-ms-win-crt-math-l1-1-0.dll");
 
 void UCrtC99MathApis::Ensure()
 {

+ 5 - 0
lib/Runtime/Runtime.h

@@ -363,7 +363,11 @@ enum tagDEBUG_EVENT_INFO_TYPE
 #define DBGPROP_ATTRIB_VALUE_PENDING_MUTATION 0x10000000
 #endif
 
+#ifdef _MSC_VER
 #include "JITClient.h"
+#else
+#include "JITTypes.h"
+#endif
 
 #include "Base/SourceHolder.h"
 #include "Base/Utf8SourceInfo.h"
@@ -374,6 +378,7 @@ enum tagDEBUG_EVENT_INFO_TYPE
 #include "Base/CallInfo.h"
 #include "Language/ExecutionMode.h"
 #include "Types/TypeId.h"
+
 #include "BackendApi.h"
 #include "DetachedStateBase.h"
 

+ 39 - 0
pal/inc/pal.h

@@ -3828,6 +3828,16 @@ VirtualAlloc(
          IN DWORD flAllocationType,
          IN DWORD flProtect);
 
+PALIMPORT
+LPVOID
+PALAPI
+VirtualAllocEx(
+         IN HANDLE hProcess,
+         IN LPVOID lpAddress,
+         IN SIZE_T dwSize,
+         IN DWORD flAllocationType,
+         IN DWORD flProtect);
+
 PALIMPORT
 BOOL
 PALAPI
@@ -3836,6 +3846,16 @@ VirtualFree(
         IN SIZE_T dwSize,
         IN DWORD dwFreeType);
 
+
+PALIMPORT
+BOOL
+PALAPI
+VirtualFreeEx(
+        IN HANDLE hProcess,
+        IN LPVOID lpAddress,
+        IN SIZE_T dwSize,
+        IN DWORD dwFreeType);
+
 PALIMPORT
 BOOL
 PALAPI
@@ -3845,6 +3865,16 @@ VirtualProtect(
            IN DWORD flNewProtect,
            OUT PDWORD lpflOldProtect);
 
+PALIMPORT
+BOOL
+PALAPI
+VirtualProtectEx(
+           IN HANDLE hProcess,
+           IN LPVOID lpAddress,
+           IN SIZE_T dwSize,
+           IN DWORD flNewProtect,
+           OUT PDWORD lpflOldProtect);
+
 typedef struct _MEMORYSTATUSEX {
   DWORD     dwLength;
   DWORD     dwMemoryLoad;
@@ -3881,6 +3911,15 @@ VirtualQuery(
          OUT PMEMORY_BASIC_INFORMATION lpBuffer,
          IN SIZE_T dwLength);
 
+PALIMPORT
+SIZE_T
+PALAPI
+VirtualQueryEx(
+         IN HANDLE hProcess,
+         IN LPCVOID lpAddress,
+         OUT PMEMORY_BASIC_INFORMATION lpBuffer,
+         IN SIZE_T dwLength);
+
 PALIMPORT
 BOOL
 PALAPI

+ 190 - 147
pal/src/map/virtual.cpp

@@ -1,6 +1,6 @@
 //
 // Copyright (c) Microsoft. All rights reserved.
-// Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
 //
 
 /*++
@@ -113,7 +113,7 @@ static ExecutableMemoryAllocator g_executableMemoryAllocator PAL_GLOBAL;
 /*++
 Function:
     VIRTUALInitialize()
-    
+
     Initializes this section's critical section.
 
 Return value:
@@ -174,7 +174,7 @@ void VIRTUALCleanup()
         InternalFree(pTempEntry );
     }
     pVirtualMemory = NULL;
-    
+
 #if MMAP_IGNORES_HINT
     // Clean up the free list.
     pFreeBlock = pFreeMemory;
@@ -188,7 +188,7 @@ void VIRTUALCleanup()
         InternalFree(pTempFreeBlock);
     }
     pFreeMemory = NULL;
-    gBackingBaseAddress = MAP_FAILED;   
+    gBackingBaseAddress = MAP_FAILED;
 #endif  // MMAP_IGNORES_HINT
 
 #if RESERVE_FROM_BACKING_FILE
@@ -245,12 +245,12 @@ static BOOL VIRTUALIsPageCommitted( SIZE_T nBitToRetrieve, CONST PCMI pInformati
         ERROR( "pInformation was NULL!\n" );
         return FALSE;
     }
-    
+
     nByteOffset = nBitToRetrieve / CHAR_BIT;
     nBitOffset = nBitToRetrieve % CHAR_BIT;
 
     byteMask = 1 << nBitOffset;
-    
+
     if ( pInformation->pAllocState[ nByteOffset ] & byteMask )
     {
         return TRUE;
@@ -283,12 +283,12 @@ static BOOL VIRTUALIsPageDirty( SIZE_T nBitToRetrieve, CONST PCMI pInformation )
         ERROR( "pInformation was NULL!\n" );
         return FALSE;
     }
-    
+
     nByteOffset = nBitToRetrieve / CHAR_BIT;
     nBitOffset = nBitToRetrieve % CHAR_BIT;
 
     byteMask = 1 << nBitOffset;
-    
+
     if ( pInformation->pDirtyPages[ nByteOffset ] & byteMask )
     {
         return TRUE;
@@ -304,7 +304,7 @@ static BOOL VIRTUALIsPageDirty( SIZE_T nBitToRetrieve, CONST PCMI pInformation )
  *
  *  VIRTUALGetAllocationType
  *
- *      IN SIZE_T Index - The page within the range to retrieve 
+ *      IN SIZE_T Index - The page within the range to retrieve
  *                      the state for.
  *
  *      IN pInformation - The virtual memory object.
@@ -334,18 +334,18 @@ static INT VIRTUALGetAllocationType( SIZE_T Index, CONST PCMI pInformation )
  *
  *  Returns TRUE on success, FALSE otherwise.
  *  Turn on/off memory staus bits.
- *         
+ *
  */
-static BOOL VIRTUALSetPageBits ( UINT nStatus, SIZE_T nStartingBit, 
+static BOOL VIRTUALSetPageBits ( UINT nStatus, SIZE_T nStartingBit,
                                  SIZE_T nNumberOfBits, BYTE * pBitArray )
 {
-    /* byte masks for optimized modification of partial bytes (changing less 
-       than 8 bits in a single byte). note that bits are treated in little 
-       endian order : value 1 is bit 0; value 128 is bit 7. in the binary 
+    /* byte masks for optimized modification of partial bytes (changing less
+       than 8 bits in a single byte). note that bits are treated in little
+       endian order : value 1 is bit 0; value 128 is bit 7. in the binary
        representations below, bit 0 is on the right */
 
-    /* start masks : for modifying bits >= n while preserving bits < n. 
-       example : if nStartignBit%8 is 3, then bits 0, 1, 2 remain unchanged 
+    /* start masks : for modifying bits >= n while preserving bits < n.
+       example : if nStartignBit%8 is 3, then bits 0, 1, 2 remain unchanged
        while bits 3..7 are changed; startmasks[3] can be used for this.  */
     static const BYTE startmasks[8] = {
       0xff, /* start at 0 : 1111 1111 */
@@ -358,8 +358,8 @@ static BOOL VIRTUALSetPageBits ( UINT nStatus, SIZE_T nStartingBit,
     0x80  /* start at 7 : 1000 0000 */
     };
 
-    /* end masks : for modifying bits <= n while preserving bits > n. 
-       example : if the last bit to change is 5, then bits 6 & 7 stay unchanged 
+    /* end masks : for modifying bits <= n while preserving bits > n.
+       example : if the last bit to change is 5, then bits 6 & 7 stay unchanged
        while bits 1..5 are changed; endmasks[5] can be used for this.  */
     static const BYTE endmasks[8] = {
       0x01, /* end at 0 : 0000 0001 */
@@ -371,14 +371,14 @@ static BOOL VIRTUALSetPageBits ( UINT nStatus, SIZE_T nStartingBit,
       0x7f, /* end at 6 : 0111 1111 */
       0xff  /* end at 7 : 1111 1111 */
     };
-    /* last example : if only the middle of a byte must be changed, both start 
-       and end masks can be combined (bitwise AND) to obtain the correct mask. 
-       if we want to change bits 2 to 4 : 
+    /* last example : if only the middle of a byte must be changed, both start
+       and end masks can be combined (bitwise AND) to obtain the correct mask.
+       if we want to change bits 2 to 4 :
        startmasks[2] : 0xfc   1111 1100  (change 2,3,4,5,6,7)
        endmasks[4]:    0x1f   0001 1111  (change 0,1,2,3,4)
        bitwise AND :   0x1c   0001 1100  (change 2,3,4)
     */
-    
+
     BYTE byte_mask;
     SIZE_T nLastBit;
     SIZE_T nFirstByte;
@@ -386,8 +386,8 @@ static BOOL VIRTUALSetPageBits ( UINT nStatus, SIZE_T nStartingBit,
     SIZE_T nFullBytes;
 
     TRACE( "VIRTUALSetPageBits( nStatus = %d, nStartingBit = %d, "
-           "nNumberOfBits = %d, pBitArray = 0x%p )\n", 
-           nStatus, nStartingBit, nNumberOfBits, pBitArray ); 
+           "nNumberOfBits = %d, pBitArray = 0x%p )\n",
+           nStatus, nStartingBit, nNumberOfBits, pBitArray );
 
     if ( 0 == nNumberOfBits )
     {
@@ -404,7 +404,7 @@ static BOOL VIRTUALSetPageBits ( UINT nStatus, SIZE_T nStartingBit,
     {
         byte_mask = startmasks[nStartingBit % 8];
 
-        /* if 1st byte is the only changing byte, combine endmask to preserve 
+        /* if 1st byte is the only changing byte, combine endmask to preserve
            trailing bits (see 3rd example above) */
         if( nLastByte == nFirstByte)
         {
@@ -414,7 +414,7 @@ static BOOL VIRTUALSetPageBits ( UINT nStatus, SIZE_T nStartingBit,
         /* byte_mask contains 1 for bits to change, 0 for bits to leave alone */
         if(0 == nStatus)
         {
-            /* bits to change must be set to 0 : invert byte_mask (giving 0 for 
+            /* bits to change must be set to 0 : invert byte_mask (giving 0 for
                bits to change), use bitwise AND */
             pBitArray[nFirstByte] &= ~byte_mask;
         }
@@ -449,7 +449,7 @@ static BOOL VIRTUALSetPageBits ( UINT nStatus, SIZE_T nStartingBit,
     /* byte_mask contains 1 for bits to change, 0 for bits to leave alone */
     if(0 == nStatus)
     {
-        /* bits to change must be set to 0 : invert byte_mask (giving 0 for 
+        /* bits to change must be set to 0 : invert byte_mask (giving 0 for
            bits to change), use bitwise AND */
         pBitArray[nLastByte] &= ~byte_mask;
     }
@@ -474,14 +474,14 @@ static BOOL VIRTUALSetPageBits ( UINT nStatus, SIZE_T nStartingBit,
  *
  *  Returns TRUE on success, FALSE otherwise.
  *  Turn bit on to indicate committed, turn bit off to indicate reserved.
- *         
+ *
  */
-static BOOL VIRTUALSetAllocState( UINT nAction, SIZE_T nStartingBit, 
+static BOOL VIRTUALSetAllocState( UINT nAction, SIZE_T nStartingBit,
                            SIZE_T nNumberOfBits, CONST PCMI pInformation )
 {
     TRACE( "VIRTUALSetAllocState( nAction = %d, nStartingBit = %d, "
-           "nNumberOfBits = %d, pStateArray = 0x%p )\n", 
-           nAction, nStartingBit, nNumberOfBits, pInformation ); 
+           "nNumberOfBits = %d, pStateArray = 0x%p )\n",
+           nAction, nStartingBit, nNumberOfBits, pInformation );
 
     if ( !pInformation )
     {
@@ -489,7 +489,7 @@ static BOOL VIRTUALSetAllocState( UINT nAction, SIZE_T nStartingBit,
         return FALSE;
     }
 
-    return VIRTUALSetPageBits((MEM_COMMIT == nAction) ? 1 : 0, nStartingBit, 
+    return VIRTUALSetPageBits((MEM_COMMIT == nAction) ? 1 : 0, nStartingBit,
                               nNumberOfBits, pInformation->pAllocState);
 }
 
@@ -506,14 +506,14 @@ static BOOL VIRTUALSetAllocState( UINT nAction, SIZE_T nStartingBit,
  *
  *  Returns TRUE on success, FALSE otherwise.
  *  Turns bit(s) on/off bit to indicate dirty page(s)
- *         
+ *
  */
-static BOOL VIRTUALSetDirtyPages( UINT nStatus, SIZE_T nStartingBit, 
+static BOOL VIRTUALSetDirtyPages( UINT nStatus, SIZE_T nStartingBit,
                            SIZE_T nNumberOfBits, CONST PCMI pInformation )
 {
     TRACE( "VIRTUALSetDirtyPages( nStatus = %d, nStartingBit = %d, "
-           "nNumberOfBits = %d, pStateArray = 0x%p )\n", 
-           nStatus, nStartingBit, nNumberOfBits, pInformation ); 
+           "nNumberOfBits = %d, pStateArray = 0x%p )\n",
+           nStatus, nStartingBit, nNumberOfBits, pInformation );
 
     if ( !pInformation )
     {
@@ -521,7 +521,7 @@ static BOOL VIRTUALSetDirtyPages( UINT nStatus, SIZE_T nStartingBit,
         return FALSE;
     }
 
-    return VIRTUALSetPageBits(nStatus, nStartingBit, 
+    return VIRTUALSetPageBits(nStatus, nStartingBit,
                               nNumberOfBits, pInformation->pDirtyPages);
 }
 #endif // MMAP_DOESNOT_ALLOW_REMAP
@@ -535,14 +535,14 @@ static BOOL VIRTUALSetDirtyPages( UINT nStatus, SIZE_T nStartingBit,
  *
  *          Returns the PCMI if found, NULL otherwise.
  */
-static PCMI VIRTUALFindRegionInformation( IN UINT_PTR address ) 
+static PCMI VIRTUALFindRegionInformation( IN UINT_PTR address )
 {
     PCMI pEntry = NULL;
-    
+
     TRACE( "VIRTUALFindRegionInformation( %#x )\n", address );
 
     pEntry = pVirtualMemory;
-    
+
     while( pEntry )
     {
         if ( pEntry->startBoundary > address )
@@ -551,11 +551,11 @@ static PCMI VIRTUALFindRegionInformation( IN UINT_PTR address )
             pEntry = NULL;
             break;
         }
-        if ( pEntry->startBoundary + pEntry->memSize > address ) 
+        if ( pEntry->startBoundary + pEntry->memSize > address )
         {
             break;
         }
-        
+
         pEntry = pEntry->pNext;
     }
     return pEntry;
@@ -572,7 +572,7 @@ BOOL VIRTUALOwnedRegion( IN UINT_PTR address )
 {
     PCMI pEntry = NULL;
     CPalThread * pthrCurrent = InternalGetCurrentThread();
-    
+
     InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);
     pEntry = VIRTUALFindRegionInformation( address );
     InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec);
@@ -584,15 +584,15 @@ BOOL VIRTUALOwnedRegion( IN UINT_PTR address )
 Function :
 
     VIRTUALReleaseMemory
-    
+
     Removes a PCMI entry from the list.
-    
+
     Returns true on success. FALSE otherwise.
 --*/
 static BOOL VIRTUALReleaseMemory( PCMI pMemoryToBeReleased )
 {
     BOOL bRetVal = TRUE;
-    
+
     if ( !pMemoryToBeReleased )
     {
         ASSERT( "Invalid pointer.\n" );
@@ -615,7 +615,7 @@ static BOOL VIRTUALReleaseMemory( PCMI pMemoryToBeReleased )
         {
             pMemoryToBeReleased->pLast->pNext = pMemoryToBeReleased->pNext;
         }
-        
+
         if ( pMemoryToBeReleased->pNext )
         {
             pMemoryToBeReleased->pNext->pLast = pMemoryToBeReleased->pLast;
@@ -630,7 +630,7 @@ static BOOL VIRTUALReleaseMemory( PCMI pMemoryToBeReleased )
 
     InternalFree( pMemoryToBeReleased->pAllocState );
     pMemoryToBeReleased->pAllocState = NULL;
-    
+
     InternalFree( pMemoryToBeReleased->pProtectionState );
     pMemoryToBeReleased->pProtectionState = NULL;
 
@@ -646,7 +646,7 @@ static BOOL VIRTUALReleaseMemory( PCMI pMemoryToBeReleased )
 }
 
 /****
- *  VIRTUALConvertWinFlags() - 
+ *  VIRTUALConvertWinFlags() -
  *          Converts win32 protection flags to
  *          internal VIRTUAL flags.
  *
@@ -675,7 +675,7 @@ static BYTE VIRTUALConvertWinFlags( IN DWORD flProtect )
     case PAGE_EXECUTE_READWRITE:
         MemAccessControl = VIRTUAL_EXECUTE_READWRITE;
         break;
-    
+
     default :
         MemAccessControl = 0;
         ERROR( "Incorrect or no protection flags specified.\n" );
@@ -684,7 +684,7 @@ static BYTE VIRTUALConvertWinFlags( IN DWORD flProtect )
     return MemAccessControl;
 }
 /****
- *  VIRTUALConvertVirtualFlags() - 
+ *  VIRTUALConvertVirtualFlags() -
  *              Converts internal virtual protection
  *              flags to their win32 counterparts.
  */
@@ -770,7 +770,7 @@ static void VIRTUALDisplayList( void  )
         count++;
         p = p->pNext;
     }
-    
+
     InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec);
 }
 #endif
@@ -781,7 +781,7 @@ static void VIRTUALDisplayList( void  )
  *      Stores the allocation information in the linked list.
  *      NOTE: The caller must own the critical section.
  */
-static BOOL VIRTUALStoreAllocationInfo( 
+static BOOL VIRTUALStoreAllocationInfo(
             IN UINT_PTR startBoundary,      /* Start of the region. */
             IN SIZE_T memSize,            /* Size of the region. */
             IN DWORD flAllocationType,  /* Allocation Types. */
@@ -798,32 +798,32 @@ static BOOL VIRTUALStoreAllocationInfo(
         bRetVal =  FALSE;
         goto done;
     }
-    
+
     if ( !(pNewEntry = ( PCMI )InternalMalloc( sizeof( *pNewEntry )) ) )
     {
         ERROR( "Unable to allocate memory for the structure.\n");
         bRetVal =  FALSE;
         goto done;
     }
-    
+
     pNewEntry->startBoundary    = startBoundary;
     pNewEntry->memSize          = memSize;
     pNewEntry->allocationType   = flAllocationType;
     pNewEntry->accessProtection = flProtection;
-    
+
     nBufferSize = memSize / VIRTUAL_PAGE_SIZE / CHAR_BIT;
     if ( ( memSize / VIRTUAL_PAGE_SIZE ) % CHAR_BIT != 0 )
     {
         nBufferSize++;
     }
-    
+
     pNewEntry->pAllocState      = (BYTE*)InternalMalloc( nBufferSize  );
     pNewEntry->pProtectionState = (BYTE*)InternalMalloc( (memSize / VIRTUAL_PAGE_SIZE)  );
 #if MMAP_DOESNOT_ALLOW_REMAP
     pNewEntry->pDirtyPages  = (BYTE*)InternalMalloc( nBufferSize );
-#endif // 
+#endif //
 
-    if ( pNewEntry->pAllocState && pNewEntry->pProtectionState 
+    if ( pNewEntry->pAllocState && pNewEntry->pProtectionState
 #if MMAP_DOESNOT_ALLOW_REMAP
         && pNewEntry->pDirtyPages
 #endif // MMAP_DOESNOT_ALLOW_REMAP
@@ -846,39 +846,39 @@ static BOOL VIRTUALStoreAllocationInfo(
 #if MMAP_DOESNOT_ALLOW_REMAP
         if (pNewEntry->pDirtyPages) InternalFree( pNewEntry->pDirtyPages );
         pNewEntry->pDirtyPages = NULL;
-#endif // 
+#endif //
 
         if (pNewEntry->pProtectionState) InternalFree( pNewEntry->pProtectionState );
         pNewEntry->pProtectionState = NULL;
-        
+
         if (pNewEntry->pAllocState) InternalFree( pNewEntry->pAllocState );
         pNewEntry->pAllocState = NULL;
 
         InternalFree( pNewEntry );
         pNewEntry = NULL;
-        
+
         goto done;
     }
-    
+
     pMemInfo = pVirtualMemory;
 
     if ( pMemInfo && pMemInfo->startBoundary < startBoundary )
     {
         /* Look for the correct insert point */
         TRACE( "Looking for the correct insert location.\n");
-        while ( pMemInfo->pNext && ( pMemInfo->pNext->startBoundary < startBoundary ) ) 
+        while ( pMemInfo->pNext && ( pMemInfo->pNext->startBoundary < startBoundary ) )
         {
             pMemInfo = pMemInfo->pNext;
         }
-        
+
         pNewEntry->pNext = pMemInfo->pNext;
         pNewEntry->pLast = pMemInfo;
-        
-        if ( pNewEntry->pNext ) 
+
+        if ( pNewEntry->pNext )
         {
             pNewEntry->pNext->pLast = pNewEntry;
         }
-        
+
         pMemInfo->pNext = pNewEntry;
     }
     else
@@ -887,12 +887,12 @@ static BOOL VIRTUALStoreAllocationInfo(
         /* This is the first entry in the list. */
         pNewEntry->pNext = pMemInfo;
         pNewEntry->pLast = NULL;
-        
-        if ( pNewEntry->pNext ) 
+
+        if ( pNewEntry->pNext )
         {
             pNewEntry->pNext->pLast = pNewEntry;
         }
-        
+
         pVirtualMemory = pNewEntry ;
     }
 done:
@@ -926,7 +926,7 @@ static LPVOID VIRTUALReserveMemory(
     // page-aligned and at multiples of the page size.
     StartBoundary = (UINT_PTR)lpAddress & ~BOUNDARY_64K;
     /* Add the sizes, and round down to the nearest page boundary. */
-    MemSize = ( ((UINT_PTR)lpAddress + dwSize + VIRTUAL_PAGE_MASK) & ~VIRTUAL_PAGE_MASK ) - 
+    MemSize = ( ((UINT_PTR)lpAddress + dwSize + VIRTUAL_PAGE_MASK) & ~VIRTUAL_PAGE_MASK ) -
                StartBoundary;
 
     InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);
@@ -1098,7 +1098,7 @@ static LPVOID VIRTUALCommitMemory(
     {
         StartBoundary = (UINT_PTR)lpAddress & ~VIRTUAL_PAGE_MASK;
         /* Add the sizes, and round down to the nearest page boundary. */
-        MemSize = ( ((UINT_PTR)lpAddress + dwSize + VIRTUAL_PAGE_MASK) & ~VIRTUAL_PAGE_MASK ) - 
+        MemSize = ( ((UINT_PTR)lpAddress + dwSize + VIRTUAL_PAGE_MASK) & ~VIRTUAL_PAGE_MASK ) -
                   StartBoundary;
     }
     else
@@ -1108,25 +1108,25 @@ static LPVOID VIRTUALCommitMemory(
 
     /* See if we have already reserved this memory. */
     pInformation = VIRTUALFindRegionInformation( StartBoundary );
-    
+
     if ( !pInformation )
     {
         /* According to the new MSDN docs, if MEM_COMMIT is specified,
         and the memory is not reserved, you reserve and then commit.
         */
-        LPVOID pReservedMemory = 
-                VIRTUALReserveMemory( pthrCurrent, lpAddress, dwSize, 
+        LPVOID pReservedMemory =
+                VIRTUALReserveMemory( pthrCurrent, lpAddress, dwSize,
                                       flAllocationType, flProtect );
-        
+
         TRACE( "Reserve and commit the memory!\n " );
 
         if ( pReservedMemory )
         {
             /* Re-align the addresses and try again to find the memory. */
             StartBoundary = (UINT_PTR)pReservedMemory & ~VIRTUAL_PAGE_MASK;
-            MemSize = ( ((UINT_PTR)pReservedMemory + dwSize + VIRTUAL_PAGE_MASK) 
+            MemSize = ( ((UINT_PTR)pReservedMemory + dwSize + VIRTUAL_PAGE_MASK)
                         & ~VIRTUAL_PAGE_MASK ) - StartBoundary;
-            
+
             pInformation = VIRTUALFindRegionInformation( StartBoundary );
 
             if ( !pInformation )
@@ -1146,19 +1146,19 @@ static LPVOID VIRTUALCommitMemory(
             goto done;
         }
     }
-               
+
     TRACE( "Committing the memory now..\n");
-    
+
     // Pages that aren't already committed need to be committed. Pages that
     // are committed don't need to be committed, but they might need to have
     // their permissions changed.
     // To get this right, we find runs of pages with similar states and
-    // permissions. If a run is not committed, we commit it and then set 
-    // its permissions. If a run is committed but has different permissions 
-    // from what we're trying to set, we set its permissions. Finally, 
-    // if a run is already committed and has the right permissions, 
+    // permissions. If a run is not committed, we commit it and then set
+    // its permissions. If a run is committed but has different permissions
+    // from what we're trying to set, we set its permissions. Finally,
+    // if a run is already committed and has the right permissions,
     // we don't need to do anything to it.
-    
+
     totalPages = MemSize / VIRTUAL_PAGE_SIZE;
     runStart = (StartBoundary - pInformation->startBoundary) /
                 VIRTUAL_PAGE_SIZE;   // Page index
@@ -1219,7 +1219,7 @@ static LPVOID VIRTUALCommitMemory(
                         // This page is being recommitted after being decommitted,
                         // therefore the memory needs to be cleared
                         memset (temp, 0, VIRTUAL_PAGE_SIZE);
-                    } 
+                    }
 
                     temp += VIRTUAL_PAGE_SIZE;
                 }
@@ -1259,7 +1259,7 @@ static LPVOID VIRTUALCommitMemory(
                 goto error;
             }
         }
-        
+
         runStart = index;
         runLength = 1;
         allocationType = curAllocationType;
@@ -1300,22 +1300,22 @@ Function:
     VIRTUALReserveFromBackingFile
 
     Locates a reserved but unallocated block of memory in the free list.
-    
+
     If addr is not zero, this will only find a block that starts at addr
     and is at least large enough to hold the requested size.
-    
+
     If addr is zero, this finds the first block of memory in the free list
     of the right size.
-    
+
     Once the block is located, it is split if necessary to allocate only
     the requested size. The function then calls mmap() with MAP_FIXED to
     map the located block at its address on an anonymous fd.
-    
+
     This function requires that length be a multiple of the page size. If
     length is not a multiple of the page size, subsequently allocated blocks
     may be allocated on addresses that are not page-size-aligned, which is
     invalid.
-    
+
     Returns the base address of the mapped block, or MAP_FAILED if no
     suitable block exists or mapping fails.
 --*/
@@ -1325,7 +1325,7 @@ static void *VIRTUALReserveFromBackingFile(UINT_PTR addr, size_t length)
     FREE_BLOCK *prev;
     FREE_BLOCK *temp;
     char *returnAddress;
-    
+
     block = NULL;
     prev = NULL;
     for(temp = pFreeMemory; temp != NULL; temp = temp->next)
@@ -1353,7 +1353,7 @@ static void *VIRTUALReserveFromBackingFile(UINT_PTR addr, size_t length)
         // No acceptable page exists.
         return MAP_FAILED;
     }
-    
+
     // Grab the return address before we adjust the free list.
     if (addr == 0)
     {
@@ -1422,7 +1422,7 @@ Function:
 
     Adds the given block to our free list. Coalesces the list if necessary.
     The block should already have been mapped back onto the backing file.
-    
+
     Returns TRUE if the block was added to the free list.
 --*/
 static BOOL VIRTUALAddToFreeList(const PCMI pMemoryToBeReleased)
@@ -1430,8 +1430,8 @@ static BOOL VIRTUALAddToFreeList(const PCMI pMemoryToBeReleased)
     FREE_BLOCK *temp;
     FREE_BLOCK *lastBlock;
     FREE_BLOCK *newBlock;
-    BOOL coalesced; 
-    
+    BOOL coalesced;
+
     lastBlock = NULL;
     for(temp = pFreeMemory; temp != NULL; temp = temp->next)
     {
@@ -1449,11 +1449,11 @@ static BOOL VIRTUALAddToFreeList(const PCMI pMemoryToBeReleased)
         }
         lastBlock = temp;
     }
-    
+
     // Check to see if we're going to coalesce blocks before we
     // allocate anything.
     coalesced = FALSE;
-    
+
     // First, are we coalescing with the next block?
     if (temp != NULL)
     {
@@ -1489,13 +1489,13 @@ static BOOL VIRTUALAddToFreeList(const PCMI pMemoryToBeReleased)
             coalesced = TRUE;
         }
     }
-    
+
     // If we coalesced anything, we're done.
     if (coalesced)
     {
         return TRUE;
     }
-    
+
     // At this point we know we're not coalescing anything and we need
     // a new block.
     newBlock = (FREE_BLOCK *) InternalMalloc(sizeof(FREE_BLOCK));
@@ -1528,10 +1528,10 @@ Function:
     Ensures that we have a set of pages that correspond to a backing file.
     We use the PAL as the backing file merely because we're pretty confident
     it exists.
-    
+
     When the backing file hasn't been created, we create it, mmap pages
     onto it, and create the free list.
-    
+
     Returns TRUE if we could locate our backing file, open it, mmap
     pages onto it, and create the free list. Does nothing if we already
     have a mapping.
@@ -1540,9 +1540,9 @@ static BOOL VIRTUALGetBackingFile(CPalThread *pthrCurrent)
 {
     BOOL result = FALSE;
     char palName[MAX_PATH_FNAME];
-    
+
     InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);
-    
+
     if (gBackingFile != -1)
     {
         result = TRUE;
@@ -1608,6 +1608,17 @@ done:
 }
 #endif // RESERVE_FROM_BACKING_FILE
 
+LPVOID
+PALAPI
+VirtualAllocEx(
+         IN HANDLE hProcess,
+         IN LPVOID lpAddress,       /* Region to reserve or commit */
+         IN SIZE_T dwSize,          /* Size of Region */
+         IN DWORD flAllocationType, /* Type of allocation */
+         IN DWORD flProtect)        /* Type of access protection */
+{
+    return VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
+}
 /*++
 Function:
   VirtualAlloc
@@ -1615,7 +1626,7 @@ Function:
 Note:
   MEM_TOP_DOWN, MEM_PHYSICAL, MEM_WRITE_WATCH are not supported.
   Unsupported flags are ignored.
-  
+
   Page size on i386 is set to 4k.
 
 See MSDN doc.
@@ -1663,13 +1674,13 @@ VirtualAlloc(
     {
         WARN( "Ignoring the allocation flag MEM_TOP_DOWN.\n" );
     }
-    
+
 #if RESERVE_FROM_BACKING_FILE
     // Make sure we have memory to map before we try to use it.
     VIRTUALGetBackingFile(pthrCurrent);
 #endif  // RESERVE_FROM_BACKING_FILE
 
-    if ( flAllocationType & MEM_RESERVE ) 
+    if ( flAllocationType & MEM_RESERVE )
     {
         InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);
         pRetVal = VIRTUALReserveMemory( pthrCurrent, lpAddress, dwSize, flAllocationType, flProtect );
@@ -1688,18 +1699,18 @@ VirtualAlloc(
         if ( pRetVal != NULL )
         {
             /* We are reserving and committing. */
-            pRetVal = VIRTUALCommitMemory( pthrCurrent, pRetVal, dwSize, 
-                                    flAllocationType, flProtect );    
+            pRetVal = VIRTUALCommitMemory( pthrCurrent, pRetVal, dwSize,
+                                    flAllocationType, flProtect );
         }
         else
         {
             /* Just a commit. */
-            pRetVal = VIRTUALCommitMemory( pthrCurrent, lpAddress, dwSize, 
+            pRetVal = VIRTUALCommitMemory( pthrCurrent, lpAddress, dwSize,
                                     flAllocationType, flProtect );
         }
         InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec);
-    }                      
-    
+    }
+
 done:
 #if defined _DEBUG
     VIRTUALDisplayList();
@@ -1709,6 +1720,16 @@ done:
     return pRetVal;
 }
 
+BOOL
+PALAPI
+VirtualFreeEx(
+        IN HANDLE hProcess,
+        IN LPVOID lpAddress,    /* Address of region. */
+        IN SIZE_T dwSize,       /* Size of region. */
+        IN DWORD dwFreeType )   /* Operation type. */
+{
+    return VirtualFree(lpAddress, dwSize, dwFreeType);
+}
 
 /*++
 Function:
@@ -1771,12 +1792,12 @@ VirtualFree(
             bRetVal = FALSE;
             goto VirtualFreeExit;
         }
-        /* 
+        /*
          * A two byte range straddling 2 pages caues both pages to be either
-         * released or decommitted. So round the dwSize up to the next page 
+         * released or decommitted. So round the dwSize up to the next page
          * boundary and round the lpAddress down to the next page boundary.
          */
-        MemSize = (((UINT_PTR)(dwSize) + ((UINT_PTR)(lpAddress) & VIRTUAL_PAGE_MASK) 
+        MemSize = (((UINT_PTR)(dwSize) + ((UINT_PTR)(lpAddress) & VIRTUAL_PAGE_MASK)
                     + VIRTUAL_PAGE_MASK) & ~VIRTUAL_PAGE_MASK);
 
         StartBoundary = (UINT_PTR)lpAddress & ~VIRTUAL_PAGE_MASK;
@@ -1791,11 +1812,11 @@ VirtualFree(
             goto VirtualFreeExit;
         }
 
-        TRACE( "Un-committing the following page(s) %d to %d.\n", 
+        TRACE( "Un-committing the following page(s) %d to %d.\n",
                StartBoundary, MemSize );
 
 #if MMAP_DOESNOT_ALLOW_REMAP
-        // if no double mapping is supported, 
+        // if no double mapping is supported,
         // just mprotect the memory with no access
         if (mprotect((LPVOID)StartBoundary, MemSize, PROT_NONE) == 0)
 #else // MMAP_DOESNOT_ALLOW_REMAP
@@ -1829,16 +1850,16 @@ VirtualFree(
 
             /* We can now commit this memory by calling VirtualAlloc().*/
             index = (StartBoundary - pUnCommittedMem->startBoundary) / VIRTUAL_PAGE_SIZE;
-            
+
             nNumOfPagesToChange = MemSize / VIRTUAL_PAGE_SIZE;
-            VIRTUALSetAllocState( MEM_RESERVE, index, 
-                                  nNumOfPagesToChange, pUnCommittedMem ); 
+            VIRTUALSetAllocState( MEM_RESERVE, index,
+                                  nNumOfPagesToChange, pUnCommittedMem );
 #if MMAP_DOESNOT_ALLOW_REMAP
-            VIRTUALSetDirtyPages( 1, index, 
-                                  nNumOfPagesToChange, pUnCommittedMem ); 
+            VIRTUALSetDirtyPages( 1, index,
+                                  nNumOfPagesToChange, pUnCommittedMem );
 #endif // MMAP_DOESNOT_ALLOW_REMAP
 
-            goto VirtualFreeExit;    
+            goto VirtualFreeExit;
         }
         else
         {
@@ -1848,12 +1869,12 @@ VirtualFree(
             goto VirtualFreeExit;
         }
     }
-    
+
     if ( dwFreeType & MEM_RELEASE )
     {
-        PCMI pMemoryToBeReleased = 
+        PCMI pMemoryToBeReleased =
             VIRTUALFindRegionInformation( (UINT_PTR)lpAddress );
-        
+
         if ( !pMemoryToBeReleased )
         {
             ERROR( "lpAddress must be the base address returned by VirtualAlloc.\n" );
@@ -1869,9 +1890,9 @@ VirtualFree(
             goto VirtualFreeExit;
         }
 
-        TRACE( "Releasing the following memory %d to %d.\n", 
+        TRACE( "Releasing the following memory %d to %d.\n",
                pMemoryToBeReleased->startBoundary, pMemoryToBeReleased->memSize );
-        
+
 #if (MMAP_IGNORES_HINT && !MMAP_DOESNOT_ALLOW_REMAP)
         if (mmap((void *) pMemoryToBeReleased->startBoundary,
                  pMemoryToBeReleased->memSize, PROT_NONE,
@@ -1879,7 +1900,7 @@ VirtualFree(
                  (char *) pMemoryToBeReleased->startBoundary -
                  (char *) gBackingBaseAddress) != MAP_FAILED)
 #else   // MMAP_IGNORES_HINT && !MMAP_DOESNOT_ALLOW_REMAP
-        if ( munmap( (LPVOID)pMemoryToBeReleased->startBoundary, 
+        if ( munmap( (LPVOID)pMemoryToBeReleased->startBoundary,
                      pMemoryToBeReleased->memSize ) == 0 )
 #endif  // MMAP_IGNORES_HINT && !MMAP_DOESNOT_ALLOW_REMAP
         {
@@ -1914,6 +1935,17 @@ VirtualFreeExit:
     return bRetVal;
 }
 
+BOOL
+PALAPI
+VirtualProtectEx(
+           IN HANDLE hProcess,          /* Ignored: body never reads it -- PAL shim assumes the current process; TODO(review) confirm no cross-process caller */
+           IN LPVOID lpAddress,         /* Address of region whose protection changes. */
+           IN SIZE_T dwSize,            /* Size of region. */
+           IN DWORD flNewProtect,       /* New protection flags (PAGE_*). */
+           OUT PDWORD lpflOldProtect)   /* Receives previous protection. */
+{
+    return VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect); /* thin delegate to in-process VirtualProtect */
+}
 
 /*++
 Function:
@@ -1945,7 +1977,7 @@ VirtualProtect(
 
     pthrCurrent = InternalGetCurrentThread();
     InternalEnterCriticalSection(pthrCurrent, &virtual_critsec);
-    
+
     StartBoundary = (UINT_PTR)lpAddress & ~VIRTUAL_PAGE_MASK;
     MemSize = (((UINT_PTR)(dwSize) + ((UINT_PTR)(lpAddress) & VIRTUAL_PAGE_MASK)
                 + VIRTUAL_PAGE_MASK) & ~VIRTUAL_PAGE_MASK);
@@ -1973,14 +2005,14 @@ VirtualProtect(
         Index = OffSet = StartBoundary - pEntry->startBoundary == 0 ?
              0 : ( StartBoundary - pEntry->startBoundary ) / VIRTUAL_PAGE_SIZE;
         NumberOfPagesToChange = MemSize / VIRTUAL_PAGE_SIZE;
-        
+
         TRACE( "Number of pages to check %d, starting page %d \n",
                NumberOfPagesToChange, Index );
-    
+
         for ( ; Index < NumberOfPagesToChange; Index++  )
         {
             if ( !VIRTUALIsPageCommitted( Index, pEntry ) )
-            {     
+            {
                 ERROR( "You can only change the protection attributes"
                        " on committed memory.\n" )
                 SetLastError( ERROR_INVALID_ADDRESS );
@@ -1989,7 +2021,7 @@ VirtualProtect(
         }
     }
 
-    if ( 0 == mprotect( (LPVOID)StartBoundary, MemSize, 
+    if ( 0 == mprotect( (LPVOID)StartBoundary, MemSize,
                    W32toUnixAccessControl( flNewProtect ) ) )
     {
         /* Reset the access protection. */
@@ -2004,8 +2036,8 @@ VirtualProtect(
         {
             *lpflOldProtect =
                 VIRTUALConvertVirtualFlags( pEntry->pProtectionState[ OffSet ] );
-            
-            memset( pEntry->pProtectionState + OffSet, 
+
+            memset( pEntry->pProtectionState + OffSet,
                     VIRTUALConvertWinFlags( flNewProtect ),
                     NumberOfPagesToChange );
         }
@@ -2123,7 +2155,7 @@ static void VM_ALLOCATE_VirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATIO
 #endif
 
     vm_address = (vm_address_t)lpAddress;
-#ifdef BIT64    
+#ifdef BIT64
     MachRet = vm_region_64(
 #else
     MachRet = vm_region(
@@ -2172,13 +2204,24 @@ static void VM_ALLOCATE_VirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATIO
     }
     else
     {
-        // What should this be?  It's either MEM_MAPPED or MEM_IMAGE, but without an image list, 
+        // What should this be?  It's either MEM_MAPPED or MEM_IMAGE, but without an image list,
         // we can't determine which one it is.
         lpBuffer->Type = MEM_MAPPED;
     }
 }
 #endif // HAVE_VM_ALLOCATE
 
+SIZE_T
+PALAPI
+VirtualQueryEx(
+    IN HANDLE hProcess,                     /* Ignored: body never reads it -- PAL shim assumes the current process; TODO(review) confirm no cross-process caller */
+    IN LPCVOID lpAddress,                   /* Address to query. */
+    OUT PMEMORY_BASIC_INFORMATION lpBuffer, /* Receives region information. */
+    IN SIZE_T dwLength)                     /* Size of lpBuffer in bytes. */
+{
+    return VirtualQuery(lpAddress, lpBuffer, dwLength); /* thin delegate to in-process VirtualQuery */
+}
+
 /*++
 Function:
   VirtualQuery
@@ -2253,10 +2296,10 @@ VirtualQuery(
         /* Can't find a match, or no list present. */
         /* Next, looking for this region in file maps */
         if (!MAPGetRegionInfo((LPVOID)StartBoundary, lpBuffer))
-        { 
+        {
             // When all else fails, call vm_region() if it's available.
 
-            // Initialize the State to be MEM_FREE, in which case AllocationBase, AllocationProtect, 
+            // Initialize the State to be MEM_FREE, in which case AllocationBase, AllocationProtect,
             // Protect, and Type are all undefined.
             lpBuffer->BaseAddress = (LPVOID)StartBoundary;
             lpBuffer->RegionSize = 0;
@@ -2305,7 +2348,7 @@ VirtualQuery(
 ExitVirtualQuery:
 
     InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec);
-    
+
     LOGEXIT( "VirtualQuery returning %d.\n", sizeof( *lpBuffer ) );
     PERF_EXIT(VirtualQuery);
     return sizeof( *lpBuffer );
@@ -2317,8 +2360,8 @@ Function:
 
 See MSDN doc.
 --*/
-UINT 
-PALAPI 
+UINT
+PALAPI
 GetWriteWatch(
   IN DWORD dwFlags,
   IN PVOID lpBaseAddress,
@@ -2341,8 +2384,8 @@ Function:
 
 See MSDN doc.
 --*/
-UINT 
-PALAPI 
+UINT
+PALAPI
 ResetWriteWatch(
   IN LPVOID lpBaseAddress,
   IN SIZE_T dwRegionSize