PageAllocator.h 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #pragma once
  6. #include "PageAllocatorDefines.h"
  7. #include "Exceptions/ExceptionBase.h"
  8. #ifdef ENABLE_BASIC_TELEMETRY
  9. #include "AllocatorTelemetryStats.h"
  10. #endif
  11. #ifdef PROFILE_MEM
  12. struct PageMemoryData;
  13. #endif
  14. #if !FLOATVAR
  15. class CodeGenNumberThreadAllocator;
  16. struct XProcNumberPageSegmentManager;
  17. #endif
  18. namespace Memory
  19. {
  20. typedef void* FunctionTableHandle;
#if DBG_DUMP
// Emits trace output for guard-page placement when the PrintGuardPageBounds
// configuration flag is enabled.
#define GUARD_PAGE_TRACE(...) \
    if (Js::Configuration::Global.flags.PrintGuardPageBounds) \
    { \
        Output::Print(__VA_ARGS__); \
    }

// Convenience wrappers over PAGE_ALLOC_TRACE_EX(verbose, stats, ...):
// the *_VERBOSE_* forms additionally require the Verbose flag, the
// *_AND_STATS forms also dump allocator statistics, and the *_0 forms exist
// for format strings with no arguments (they pass a dummy "" vararg).
#define PAGE_ALLOC_TRACE(format, ...) PAGE_ALLOC_TRACE_EX(false, false, format, ##__VA_ARGS__)
#define PAGE_ALLOC_VERBOSE_TRACE(format, ...) PAGE_ALLOC_TRACE_EX(true, false, format, ##__VA_ARGS__)
#define PAGE_ALLOC_VERBOSE_TRACE_0(format) PAGE_ALLOC_TRACE_EX(true, false, format, "")
#define PAGE_ALLOC_TRACE_AND_STATS(format, ...) PAGE_ALLOC_TRACE_EX(false, true, format, ##__VA_ARGS__)
#define PAGE_ALLOC_TRACE_AND_STATS_0(format) PAGE_ALLOC_TRACE_EX(false, true, format, "")
#define PAGE_ALLOC_VERBOSE_TRACE_AND_STATS(format, ...) PAGE_ALLOC_TRACE_EX(true, true, format, ##__VA_ARGS__)
#define PAGE_ALLOC_VERBOSE_TRACE_AND_STATS_0(format) PAGE_ALLOC_TRACE_EX(true, true, format, "")

// Core trace macro: gated on the PageAllocator Trace phase flag; prints
// thread/allocator identification, the optional debugName, the caller's
// message, and (when 'stats' is set and the Stats phase flag is on) the
// allocator statistics.  Expands to code referencing `this`,
// pageAllocatorFlagTable and debugName, so it is only usable inside
// page-allocator member functions.
#define PAGE_ALLOC_TRACE_EX(verbose, stats, format, ...) \
    if (this->pageAllocatorFlagTable.Trace.IsEnabled(Js::PageAllocatorPhase)) \
    { \
        if (!verbose || this->pageAllocatorFlagTable.Verbose) \
        { \
            Output::Print(_u("%p : %p> PageAllocator(%p): "), GetCurrentThreadContextId(), ::GetCurrentThreadId(), this); \
            if (debugName != nullptr) \
            { \
                Output::Print(_u("[%s] "), this->debugName); \
            } \
            Output::Print(format, ##__VA_ARGS__); \
            Output::Print(_u("\n")); \
            if (stats && this->pageAllocatorFlagTable.Stats.IsEnabled(Js::PageAllocatorPhase)) \
            { \
                this->DumpStats(); \
            } \
            Output::Flush(); \
        } \
    }
#else
// Non-DBG_DUMP builds: all page-allocator tracing compiles away to nothing.
#define PAGE_ALLOC_TRACE(format, ...)
#define PAGE_ALLOC_VERBOSE_TRACE(format, ...)
#define PAGE_ALLOC_VERBOSE_TRACE_0(format)
#define PAGE_ALLOC_TRACE_AND_STATS(format, ...)
#define PAGE_ALLOC_VERBOSE_TRACE_AND_STATS(format, ...)
#define PAGE_ALLOC_TRACE_AND_STATS_0(format)
#define PAGE_ALLOC_VERBOSE_TRACE_AND_STATS_0(format)
#endif
// Pages reserved per page segment for the secondary (xdata) allocator.
#ifdef TARGET_64
#define XDATA_RESERVE_PAGE_COUNT (2) // Number of pages per page segment (32 pages) reserved for xdata.
#else
#define XDATA_RESERVE_PAGE_COUNT (0) // ARM uses the heap, so it's not required.
#endif
  67. //
  68. // Allocation done by the secondary allocator
  69. //
  70. struct SecondaryAllocation
  71. {
  72. BYTE* address; // address of the allocation by the secondary allocator
  73. SecondaryAllocation() : address(nullptr)
  74. {
  75. }
  76. };
#if defined(TARGET_64)
// Unwind metadata kept per secondary (xdata) allocation on 64-bit targets:
// the RUNTIME_FUNCTION entry plus the registered function-table handle.
struct XDataInfo
{
    RUNTIME_FUNCTION pdata;
    FunctionTableHandle functionTable;
};
#elif defined(_M_ARM)
// ARM32 variant: tracks pdata entry count and xdata size instead of a
// RUNTIME_FUNCTION record.
struct XDataInfo
{
    ushort pdataCount;
    ushort xdataSize;
    FunctionTableHandle functionTable;
};
#endif
//
// For every page segment a page allocator can create a secondary allocator which can have a specified
// number of pages reserved for secondary allocations. These pages are always reserved at the end of the
// segment. The PageAllocator itself cannot allocate from the region demarcated for the secondary allocator.
// Currently this is used for xdata allocations.
//
class SecondaryAllocator
{
public:
    // Allocate unwind data for the function at [functionStart, functionStart + functionSize);
    // the result is returned through 'xdata'. Returns false on failure.
    virtual bool Alloc(ULONG_PTR functionStart, DWORD functionSize, DECLSPEC_GUARD_OVERFLOW ushort pdataCount, DECLSPEC_GUARD_OVERFLOW ushort xdataSize, SecondaryAllocation* xdata) = 0;
    // Return a previous allocation to the reserved region.
    virtual void Release(const SecondaryAllocation& allocation) = 0;
    // Destroy this allocator (implementations own the deletion policy).
    virtual void Delete() = 0;
    // True while the reserved region can still satisfy new allocations.
    virtual bool CanAllocate() = 0;
    virtual ~SecondaryAllocator() {};
};
class PageAllocatorBaseCommon;

// Non-template base shared by all segment types; holds the back-pointer to
// the owning page allocator.
class SegmentBaseCommon
{
    // Protected ctor/dtor: disable creating an instance of SegmentBaseCommon directly
protected:
    SegmentBaseCommon(PageAllocatorBaseCommon* allocator);
    ~SegmentBaseCommon() {}
protected:
    // Page allocator this segment belongs to (set at construction).
    PageAllocatorBaseCommon* allocator;
public:
    bool IsInPreReservedHeapPageAllocator() const;
};
/*
 * A segment is a collection of pages. A page corresponds to the concept of an
 * OS memory page. Segments allocate memory using the OS VirtualAlloc call.
 * It'll allocate the pageCount * page size number of bytes, the latter being
 * a system-wide constant.
 */
template<typename TVirtualAlloc>
class SegmentBase: public SegmentBaseCommon
{
    PREVENT_STANDALONE_HEAPINSTANCE();
public:
    SegmentBase(PageAllocatorBase<TVirtualAlloc> * allocator, DECLSPEC_GUARD_OVERFLOW size_t pageCount, bool enableWriteBarrier);
    ~SegmentBase();

    // Total pages in the segment, including any reserved for the secondary allocator.
    size_t GetPageCount() const { return segmentPageCount; }

    // Some pages are reserved upfront for secondary allocations
    // which are done by a secondary allocator as opposed to the PageAllocator
    size_t GetAvailablePageCount() const { return segmentPageCount - secondaryAllocPageCount; }

    // The secondary-allocation region sits at the end of the segment, right
    // after the pages available to the page allocator itself.
    char* GetSecondaryAllocStartAddress() const { return (this->address + GetAvailablePageCount() * AutoSystemInfo::PageSize); }
    uint GetSecondaryAllocSize() const { return this->secondaryAllocPageCount * AutoSystemInfo::PageSize; }
    uint GetSecondaryAllocPageCount() const { return this->secondaryAllocPageCount; }

    char* GetAddress() const { return address; }
    // End of the region usable by the page allocator (start of the secondary region).
    char* GetEndAddress() const { return GetSecondaryAllocStartAddress(); }

    // Requires a secondary allocator to be present.
    bool CanAllocSecondary() { Assert(secondaryAllocator); return secondaryAllocator->CanAllocate(); }

    PageAllocatorBase<TVirtualAlloc>* GetAllocator() const
    {
        return static_cast<PageAllocatorBase<TVirtualAlloc>*>(allocator);
    }

    bool Initialize(DWORD allocFlags, bool excludeGuardPages);

#if DBG
    bool IsPageSegment() const
    {
        return isPageSegment;
    }
#endif

    // True when 'address' falls inside the pages owned by the page allocator
    // (the secondary-allocation region is excluded).
    bool IsInSegment(void* address) const
    {
        void* start = static_cast<void*>(GetAddress());
        void* end = static_cast<void*>(GetEndAddress());
        return (address >= start && address < end);
    }

    bool IsInCustomHeapAllocator() const
    {
        return this->GetAllocator()->type == PageAllocatorType::PageAllocatorType_CustomHeap;
    }

    SecondaryAllocator* GetSecondaryAllocator() { return secondaryAllocator; }

#if defined(TARGET_64) && defined(RECYCLER_WRITE_BARRIER)
    bool IsWriteBarrierAllowed()
    {
        return isWriteBarrierAllowed;
    }
    bool IsWriteBarrierEnabled()
    {
        return this->isWriteBarrierEnabled;
    }
#endif

protected:
    // Allocations at or above this byte threshold get guard pages added.
#if TARGET_32
    static const uint VirtualAllocThreshold = 524288; // 512kb As per spec
#else // TARGET_64
    static const uint VirtualAllocThreshold = 1048576; // 1MB As per spec : when we cross this threshold of bytes, we should add guard pages
#endif
    static const uint maxGuardPages = 15;
    static const uint minGuardPages = 1;

    SecondaryAllocator* secondaryAllocator;
    char * address;              // base address of the reservation
    size_t segmentPageCount;     // total page count (data + secondary region)
    uint trailingGuardPageCount;
    uint leadingGuardPageCount;
    uint secondaryAllocPageCount;
    // Note: declared unconditionally even though the accessors above are
    // compiled only under TARGET_64 && RECYCLER_WRITE_BARRIER.
    bool isWriteBarrierAllowed : 1;
    bool isWriteBarrierEnabled : 1;
#if DBG
    bool isPageSegment : 1;
#endif
};
/*
 * Page Segments allows a client to deal with virtual memory on a page level
 * unlike Segment, which gives you access on a segment basis. Pages managed
 * by the page segment are initially in a "free list", and have the no access
 * bit set on them. When a client wants pages, we get them from the free list
 * and commit them into memory. When the client no longer needs those pages,
 * we simply decommit them- this means that the pages are still reserved for
 * the process but are not a part of its working set and has no physical
 * storage associated with it.
 */
template<typename TVirtualAlloc>
class PageSegmentBase : public SegmentBase<TVirtualAlloc>
{
    PREVENT_STANDALONE_HEAPINSTANCE();
    typedef SegmentBase<TVirtualAlloc> Base;
public:
    PageSegmentBase(PageAllocatorBase<TVirtualAlloc> * allocator, bool committed, bool allocated, bool enableWriteBarrier);
    PageSegmentBase(PageAllocatorBase<TVirtualAlloc> * allocator, void* address, uint pageCount, uint committedCount, bool enableWriteBarrier);

    // Maximum possible size of a PageSegment; may be smaller.
    static const uint MaxDataPageCount = 256; // 1 MB
    static const uint MaxGuardPageCount = 16;
    static const uint MaxPageCount = MaxDataPageCount + MaxGuardPageCount; // 272 Pages

    // One bit per page; sized for the largest possible segment.
    typedef BVStatic<MaxPageCount> PageBitVector;

    // Narrowing wrapper over the base class count; a page segment never
    // exceeds MaxPageCount, so the value must fit in a uint.
    uint GetAvailablePageCount() const
    {
        size_t availablePageCount = Base::GetAvailablePageCount();
        Assert(availablePageCount < MAXUINT32);
        return static_cast<uint>(availablePageCount);
    }

    void Prime();
#ifdef PAGEALLOCATOR_PROTECT_FREEPAGE
    bool Initialize(DWORD allocFlags, bool excludeGuardPages);
#endif
    uint GetFreePageCount() const { return freePageCount; }
    uint GetDecommitPageCount() const { return decommitPageCount; }

    static bool IsAllocationPageAligned(__in char* address, size_t pageCount, uint *nextIndex = nullptr);

    // Allocation entry points; 'notPageAligned' selects whether the result
    // may start at an arbitrary page within the segment.
    template <typename T, bool notPageAligned>
    char * AllocDecommitPages(DECLSPEC_GUARD_OVERFLOW uint pageCount, T freePages, T decommitPages);
    template <bool notPageAligned>
    char * AllocPages(DECLSPEC_GUARD_OVERFLOW uint pageCount);

    void ReleasePages(__in void * address, uint pageCount);
    template <bool onlyUpdateState>
    void DecommitPages(__in void * address, uint pageCount);

    // ---- free-pages bit vector accessors (one bit per free page) ----
    uint GetCountOfFreePages() const;
    uint GetNextBitInFreePagesBitVector(uint index) const;
    BOOLEAN TestRangeInFreePagesBitVector(uint index, uint pageCount) const;
    BOOLEAN TestInFreePagesBitVector(uint index) const;
    void ClearAllInFreePagesBitVector();
    void ClearRangeInFreePagesBitVector(uint index, uint pageCount);
    void SetRangeInFreePagesBitVector(uint index, uint pageCount);
    void ClearBitInFreePagesBitVector(uint index);

    // ---- decommit-pages bit vector accessors (one bit per decommitted page) ----
    uint GetCountOfDecommitPages() const;
    BOOLEAN TestInDecommitPagesBitVector(uint index) const;
    BOOLEAN TestRangeInDecommitPagesBitVector(uint index, uint pageCount) const;
    void SetRangeInDecommitPagesBitVector(uint index, uint pageCount);
    void SetBitInDecommitPagesBitVector(uint index);
    void ClearRangeInDecommitPagesBitVector(uint index, uint pageCount);

    template <bool notPageAligned>
    char * DoAllocDecommitPages(DECLSPEC_GUARD_OVERFLOW uint pageCount);
    uint GetMaxPageCount();

    size_t DecommitFreePages(size_t pageToDecommit);

    // Empty == every available page is on the free list.
    bool IsEmpty() const
    {
        return this->freePageCount == this->GetAvailablePageCount();
    }

    //
    // If a segment has decommitted pages - then it's not considered full as allocations can take place from it
    // However, if secondary allocations cannot be made from it - it's considered full nonetheless
    //
    bool IsFull() const
    {
        return (this->freePageCount == 0 && !ShouldBeInDecommittedList()) ||
            (this->secondaryAllocator != nullptr && !this->secondaryAllocator->CanAllocate());
    }
    bool IsAllDecommitted() const
    {
        return this->GetAvailablePageCount() == this->decommitPageCount;
    }
    bool ShouldBeInDecommittedList() const
    {
        return this->decommitPageCount != 0;
    }

    // True when the whole [address, address + pageCount pages) range is
    // currently free or decommitted. 'address' must lie inside this segment.
    bool IsFreeOrDecommitted(void* address, uint pageCount) const
    {
        Assert(this->IsInSegment(address));
        uint base = GetBitRangeBase(address);
        return this->TestRangeInDecommitPagesBitVector(base, pageCount) || this->TestRangeInFreePagesBitVector(base, pageCount);
    }
    bool IsFreeOrDecommitted(void* address) const
    {
        Assert(this->IsInSegment(address));
        uint base = GetBitRangeBase(address);
        return this->TestInDecommitPagesBitVector(base) || this->TestInFreePagesBitVector(base);
    }

    // Union of the free and decommit bit vectors: every page not handed out.
    PageBitVector GetUnAllocatedPages() const
    {
        PageBitVector unallocPages = freePages;
        unallocPages.Or(&decommitPages);
        return unallocPages;
    }

    void ChangeSegmentProtection(DWORD protectFlags, DWORD expectedOldProtectFlags);

    //---------- Private members ---------------/
private:
    void DecommitFreePagesInternal(uint index, uint pageCount);

    // Bit index of the page containing 'address' (offset from segment base
    // divided by the page size).
    uint GetBitRangeBase(void* address) const
    {
        uint base = ((uint)(((char *)address) - this->address)) / AutoSystemInfo::PageSize;
        return base;
    }

    PageBitVector freePages;      // pages ready to hand out
    PageBitVector decommitPages;  // pages reserved but decommitted
    uint freePageCount;           // population count of freePages
    uint decommitPageCount;       // population count of decommitPages
};
  307. template<typename TVirtualAlloc = VirtualAllocWrapper>
  308. class HeapPageAllocator;
/*
 * A Page Allocation is an allocation made by a page allocator
 * This has a base address, and tracks the number of pages that
 * were allocated from the segment
 */
class PageAllocation
{
public:
    // This header lives at the start of the allocation; usable memory begins
    // immediately after it.
    char * GetAddress() const { return ((char *)this) + sizeof(PageAllocation); }
    // Usable size: the full page run minus this header.
    size_t GetSize() const { return pageCount * AutoSystemInfo::PageSize - sizeof(PageAllocation); }
    size_t GetPageCount() const { return pageCount; }
    void* GetSegment() const { return segment; }

private:
    size_t pageCount;   // total pages in the run, including the header's page
    void * segment;     // segment the pages came from (stored untyped)

    // Only the page allocators may construct/populate this header.
    friend class PageAllocatorBase<VirtualAllocWrapper>;
#if ENABLE_NATIVE_CODEGEN
    friend class PageAllocatorBase<PreReservedVirtualAllocWrapper>;
#endif
#if ENABLE_OOP_NATIVE_CODEGEN
    friend class PageAllocatorBase<SectionAllocWrapper>;
    friend class PageAllocatorBase<PreReservedSectionAllocWrapper>;
#endif
    friend class HeapPageAllocator<>;
};
  334. class MemoryOperationLastError
  335. {
  336. public:
  337. static void RecordLastError()
  338. {
  339. #if ENABLE_OOP_NATIVE_CODEGEN
  340. if (MemOpLastError == S_OK)
  341. {
  342. MemOpLastError = HRESULT_FROM_WIN32(::GetLastError());
  343. }
  344. #endif
  345. }
  346. static void RecordError(HRESULT error)
  347. {
  348. #if ENABLE_OOP_NATIVE_CODEGEN
  349. if (MemOpLastError == S_OK)
  350. {
  351. MemOpLastError = error;
  352. }
  353. #endif
  354. }
  355. static void ClearLastError()
  356. {
  357. #if ENABLE_OOP_NATIVE_CODEGEN
  358. MemOpLastError = S_OK;
  359. #endif
  360. }
  361. static HRESULT GetLastError()
  362. {
  363. #if ENABLE_OOP_NATIVE_CODEGEN
  364. return MemOpLastError;
  365. #else
  366. return S_OK;
  367. #endif
  368. }
  369. #if ENABLE_OOP_NATIVE_CODEGEN
  370. private:
  371. THREAD_LOCAL static HRESULT MemOpLastError;
  372. #endif
  373. };
// Non-template base of all page allocators: stores the (type-erased) virtual
// allocator pointer plus a tag identifying which wrapper type it really is.
class PageAllocatorBaseCommon
{
protected:
    // Disable create instance of PageAllocatorBaseCommon directly
    PageAllocatorBaseCommon() :
        virtualAllocator(nullptr),
        allocatorType(AllocatorType::VirtualAlloc)
    {}
    ~PageAllocatorBaseCommon() {}
public:
    // Which TVirtualAlloc wrapper a concrete allocator instance uses.
    enum class AllocatorType
    {
        VirtualAlloc,
#if ENABLE_NATIVE_CODEGEN
        PreReservedVirtualAlloc,
#endif
#if ENABLE_OOP_NATIVE_CODEGEN
        SectionAlloc,
        PreReservedSectionAlloc
#endif
    };

    // Compile-time mapping from wrapper type to its tag (specialized below).
    template<typename TVirtualAlloc>
    static AllocatorType GetAllocatorType();
    AllocatorType GetAllocatorType() const { return this->allocatorType; }

protected:
    void* virtualAllocator;      // the TVirtualAlloc wrapper, stored untyped
    AllocatorType allocatorType; // tag describing virtualAllocator's real type
};
// Specializations pairing each virtual-alloc wrapper type with its
// AllocatorType tag, mirroring the enum's conditional members.
template<> inline PageAllocatorBaseCommon::AllocatorType PageAllocatorBaseCommon::GetAllocatorType<VirtualAllocWrapper>() { return AllocatorType::VirtualAlloc; };
#if ENABLE_NATIVE_CODEGEN
template<> inline PageAllocatorBaseCommon::AllocatorType PageAllocatorBaseCommon::GetAllocatorType<PreReservedVirtualAllocWrapper>() { return AllocatorType::PreReservedVirtualAlloc; };
#endif
#if ENABLE_OOP_NATIVE_CODEGEN
template<> inline PageAllocatorBaseCommon::AllocatorType PageAllocatorBaseCommon::GetAllocatorType<SectionAllocWrapper>() { return AllocatorType::SectionAlloc; };
template<> inline PageAllocatorBaseCommon::AllocatorType PageAllocatorBaseCommon::GetAllocatorType<PreReservedSectionAllocWrapper>() { return AllocatorType::PreReservedSectionAlloc; };
#endif
  410. /*
  411. * This allocator is responsible for allocating and freeing pages. It does
  412. * so by virtue of allocating segments for groups of pages, and then handing
  413. * out memory in these segments. It's also responsible for free segments.
  414. * This class also controls the idle decommit thread, which decommits pages
  415. * when they're no longer needed
  416. */
  417. template<typename TVirtualAlloc, typename TSegment, typename TPageSegment>
  418. class PageAllocatorBase: public PageAllocatorBaseCommon
  419. {
  420. #if !FLOATVAR
  421. friend class ::CodeGenNumberThreadAllocator;
  422. friend struct ::XProcNumberPageSegmentManager;
  423. #endif
  424. // Allowing recycler to report external memory allocation.
  425. friend class HeapInfo;
  426. public:
  427. static uint const DefaultMaxFreePageCount = 0x400; // 4 MB
  428. static uint const DefaultLowMaxFreePageCount = 0x100; // 1 MB for low-memory process
  429. static uint const MinPartialDecommitFreePageCount = 0x1000; // 16 MB
  430. static uint const DefaultMaxAllocPageCount = 32; // 128K
  431. static uint const DefaultSecondaryAllocPageCount = 0;
  432. static size_t GetProcessUsedBytes();
  433. static size_t GetAndResetMaxUsedBytes();
  434. // xplat TODO: implement a platform agnostic version of interlocked linked lists
  435. #if ENABLE_BACKGROUND_PAGE_FREEING
// A run of pages handed to the background thread for freeing/zeroing.
// On Win32-SLIST builds entries chain through the interlocked SLIST header;
// otherwise through the intrusive Next pointer under a critical section.
struct FreePageEntry
#if SUPPORT_WIN32_SLIST
    : public SLIST_ENTRY
#endif
{
#if !SUPPORT_WIN32_SLIST
    FreePageEntry* Next;
#endif
    PageSegmentBase<TVirtualAlloc> * segment; // segment the pages belong to
    uint pageCount;                           // number of pages in the run
};
// Stack of FreePageEntry runs released on a background thread. Uses the
// lock-free Win32 SLIST when available, otherwise a singly linked list
// protected by the critical section below.
struct BackgroundPageQueue
{
#if SUPPORT_WIN32_SLIST
    SLIST_HEADER bgFreePageList;
#else
    FreePageEntry* bgFreePageList;
#endif
    CriticalSection backgroundPageQueueCriticalSection;
#if DBG
    bool isZeroPageQueue; // distinguishes this queue from a ZeroPageQueue in asserts
#endif

    BackgroundPageQueue()
#if !SUPPORT_WIN32_SLIST
        :bgFreePageList(nullptr)
#endif
    {
#if SUPPORT_WIN32_SLIST
        ::InitializeSListHead(&bgFreePageList);
#endif
        DebugOnly(this->isZeroPageQueue = false);
    }

    // Pop one entry; returns nullptr when the queue is empty.
    FreePageEntry* PopFreePageEntry()
    {
#if SUPPORT_WIN32_SLIST
        return (FreePageEntry *)::InterlockedPopEntrySList(&bgFreePageList);
#else
        AutoCriticalSection autoCS(&backgroundPageQueueCriticalSection);
        FreePageEntry* head = bgFreePageList;
        if (head)
        {
            bgFreePageList = bgFreePageList->Next;
        }
        return head;
#endif
    }

    // Push an entry onto the queue.
    void PushFreePageEntry(FreePageEntry* entry)
    {
#if SUPPORT_WIN32_SLIST
        ::InterlockedPushEntrySList(&bgFreePageList, entry);
#else
        AutoCriticalSection autoCS(&backgroundPageQueueCriticalSection);
        entry->Next = bgFreePageList;
        bgFreePageList = entry;
#endif
    }
};
#if ENABLE_BACKGROUND_PAGE_ZEROING
// Extends BackgroundPageQueue with a second list of page runs awaiting
// zeroing by the background thread.
struct ZeroPageQueue : BackgroundPageQueue
{
#if SUPPORT_WIN32_SLIST
    SLIST_HEADER pendingZeroPageList;
#else
    FreePageEntry* pendingZeroPageList;
#endif

    ZeroPageQueue()
#if !SUPPORT_WIN32_SLIST
        :BackgroundPageQueue(), pendingZeroPageList(nullptr)
#endif
    {
#if SUPPORT_WIN32_SLIST
        ::InitializeSListHead(&pendingZeroPageList);
#endif
        DebugOnly(this->isZeroPageQueue = true);
    }

    // Pop one pending-zero entry; nullptr when the list is empty.
    FreePageEntry* PopZeroPageEntry()
    {
#if SUPPORT_WIN32_SLIST
        return (FreePageEntry *)::InterlockedPopEntrySList(&pendingZeroPageList);
#else
        AutoCriticalSection autoCS(&this->backgroundPageQueueCriticalSection);
        FreePageEntry* head = pendingZeroPageList;
        if (head)
        {
            pendingZeroPageList = pendingZeroPageList->Next;
        }
        return head;
#endif
    }

    // Push an entry onto the pending-zero list.
    void PushZeroPageEntry(FreePageEntry* entry)
    {
#if SUPPORT_WIN32_SLIST
        ::InterlockedPushEntrySList(&pendingZeroPageList, entry);
#else
        AutoCriticalSection autoCS(&this->backgroundPageQueueCriticalSection);
        entry->Next = pendingZeroPageList;
        pendingZeroPageList = entry;
#endif
    }

    // Number of pending entries, truncated to 16 bits (matching the SLIST API).
    USHORT QueryDepth()
    {
#if SUPPORT_WIN32_SLIST
        return QueryDepthSList(&pendingZeroPageList);
#else
        AutoCriticalSection autoCS(&this->backgroundPageQueueCriticalSection);
        FreePageEntry* head = pendingZeroPageList;
        size_t count = 0;
        while (head)
        {
            head = head->Next;
            count++;
        }
        // If the specified singly linked list contains more than 65535 entries, QueryDepthSList returns the number of entries in the list modulo 65535
        // NOTE(review): the doc sentence above says "modulo 65535" while the
        // code uses % 65536 (plain 16-bit truncation); the SLIST depth field
        // is 16 bits, so % 65536 appears intended — confirm against current
        // QueryDepthSList documentation.
        return (USHORT)(count % 65536);
#endif
    }
};
#endif
  554. #endif
  555. PageAllocatorBase(AllocationPolicyManager * policyManager,
  556. Js::ConfigFlagsTable& flags = Js::Configuration::Global.flags,
  557. PageAllocatorType type = PageAllocatorType_Max,
  558. uint maxFreePageCount = DefaultMaxFreePageCount,
  559. bool zeroPages = false,
  560. #if ENABLE_BACKGROUND_PAGE_FREEING
  561. BackgroundPageQueue * backgroundPageQueue = nullptr,
  562. #endif
  563. uint maxAllocPageCount = DefaultMaxAllocPageCount,
  564. uint secondaryAllocPageCount = DefaultSecondaryAllocPageCount,
  565. bool stopAllocationOnOutOfMemory = false,
  566. bool excludeGuardPages = false,
  567. HANDLE processHandle = GetCurrentProcess(),
  568. bool enableWriteBarrier = false
  569. );
  570. ~PageAllocatorBase();
  571. bool IsClosed() const { return isClosed; }
  572. void Close() { Assert(!isClosed); isClosed = true; }
  573. AllocationPolicyManager * GetAllocationPolicyManager() const { return policyManager; }
  574. uint GetMaxAllocPageCount();
  575. //VirtualAllocator APIs
  576. TVirtualAlloc * GetVirtualAllocator() const;
  577. PageAllocation * AllocPagesForBytes(DECLSPEC_GUARD_OVERFLOW size_t requestedBytes);
  578. PageAllocation * AllocAllocation(DECLSPEC_GUARD_OVERFLOW size_t pageCount);
  579. void ReleaseAllocation(PageAllocation * allocation);
  580. void ReleaseAllocationNoSuspend(PageAllocation * allocation);
  581. char * Alloc(size_t * pageCount, TSegment ** segment);
  582. void Release(void * address, size_t pageCount, void * segment);
  583. char * AllocPages(DECLSPEC_GUARD_OVERFLOW uint pageCount, TPageSegment ** pageSegment);
  584. char * AllocPagesPageAligned(DECLSPEC_GUARD_OVERFLOW uint pageCount, TPageSegment ** pageSegment);
  585. void ReleasePages(__in void * address, uint pageCount, __in void * pageSegment);
  586. #if ENABLE_BACKGROUND_PAGE_FREEING
  587. void BackgroundReleasePages(void * address, uint pageCount, TPageSegment * pageSegment);
  588. #endif
  589. void MemSetLocal(_In_ void *dst, int val, size_t sizeInBytes);
  590. // Decommit
  591. void DecommitNow(bool all = true);
  592. void SuspendIdleDecommit();
  593. void ResumeIdleDecommit();
  594. #if ENABLE_BACKGROUND_PAGE_ZEROING
  595. void StartQueueZeroPage();
  596. void StopQueueZeroPage();
  597. void ZeroQueuedPages();
  598. void BackgroundZeroQueuedPages();
  599. #endif
  600. #if ENABLE_BACKGROUND_PAGE_FREEING
  601. void FlushBackgroundPages();
  602. #endif
  603. bool DisableAllocationOutOfMemory() const { return disableAllocationOutOfMemory; }
  604. void ResetDisableAllocationOutOfMemory() { disableAllocationOutOfMemory = false; }
  605. #ifdef RECYCLER_MEMORY_VERIFY
  606. void EnableVerify() { verifyEnabled = true; }
  607. #endif
  608. #if defined(RECYCLER_NO_PAGE_REUSE) || defined(ARENA_MEMORY_VERIFY)
  609. void ReenablePageReuse() { Assert(disablePageReuse); disablePageReuse = false; }
  610. bool DisablePageReuse() { bool wasDisablePageReuse = disablePageReuse; disablePageReuse = true; return wasDisablePageReuse; }
  611. bool IsPageReuseDisabled() { return disablePageReuse; }
  612. #endif
  613. #if DBG
  614. #if ENABLE_BACKGROUND_PAGE_ZEROING
  615. bool HasZeroQueuedPages() const;
  616. #endif
  617. virtual void SetDisableThreadAccessCheck() { disableThreadAccessCheck = true;}
  618. virtual void SetEnableThreadAccessCheck() { disableThreadAccessCheck = false; }
  619. virtual bool IsIdleDecommitPageAllocator() const { return false; }
  620. virtual bool HasMultiThreadAccess() const { return false; }
// DBG-only ownership check: is the calling thread allowed to touch this
// allocator? True when checks are disabled, when no owner is recorded
// (JIT thread after close), when the caller is the registered concurrent
// thread, or when the caller is the owning thread context's thread.
bool ValidThreadAccess()
{
    DWORD currentThreadId = ::GetCurrentThreadId();
    return disableThreadAccessCheck ||
        (this->concurrentThreadId == -1 && this->threadContextHandle == NULL) || // JIT thread after close
        (this->concurrentThreadId != -1 && this->concurrentThreadId == currentThreadId) ||
        this->threadContextHandle == GetCurrentThreadContextId();
}
  629. virtual void UpdateThreadContextHandle(ThreadContextId updatedThreadContextHandle) { threadContextHandle = updatedThreadContextHandle; }
  630. void SetConcurrentThreadId(DWORD threadId) { this->concurrentThreadId = threadId; }
  631. void ClearConcurrentThreadId() { this->concurrentThreadId = (DWORD)-1; }
  632. DWORD GetConcurrentThreadId() { return this->concurrentThreadId; }
  633. DWORD HasConcurrentThreadId() { return this->concurrentThreadId != -1; }
  634. #endif
  635. bool IsWriteWatchEnabled()
  636. {
  637. return (allocFlags & MEM_WRITE_WATCH) == MEM_WRITE_WATCH;
  638. }
  639. #if DBG_DUMP
  640. char16 const * debugName;
  641. #endif
  642. #ifdef ENABLE_BASIC_TELEMETRY
  643. AllocatorDecommitStats* GetDecommitStats() { return this->decommitStats; }
  644. void SetDecommitStats(AllocatorDecommitStats* val) { this->decommitStats = val; }
  645. #endif
  646. protected:
  647. void InitVirtualAllocator(TVirtualAlloc * virtualAllocator);
  648. TSegment * AllocSegment(DECLSPEC_GUARD_OVERFLOW size_t pageCount);
  649. void ReleaseSegment(TSegment * segment);
  650. template <bool doPageAlign>
  651. char * AllocInternal(size_t * pageCount, TSegment ** segment);
  652. template <bool notPageAligned>
  653. char * SnailAllocPages(DECLSPEC_GUARD_OVERFLOW uint pageCount, TPageSegment ** pageSegment);
  654. void OnAllocFromNewSegment(DECLSPEC_GUARD_OVERFLOW uint pageCount, __in void* pages, TSegment* segment);
  655. template <bool notPageAligned>
  656. char * TryAllocFreePages(DECLSPEC_GUARD_OVERFLOW uint pageCount, TPageSegment ** pageSegment);
  657. #if ENABLE_BACKGROUND_PAGE_FREEING
  658. char * TryAllocFromZeroPagesList(DECLSPEC_GUARD_OVERFLOW uint pageCount, TPageSegment ** pageSegment, BackgroundPageQueue* bgPageQueue, bool isPendingZeroList);
  659. #endif
  660. char * TryAllocFromZeroPages(DECLSPEC_GUARD_OVERFLOW uint pageCount, TPageSegment ** pageSegment);
  661. template <bool notPageAligned>
  662. char * TryAllocDecommittedPages(DECLSPEC_GUARD_OVERFLOW uint pageCount, TPageSegment ** pageSegment);
  663. DListBase<TPageSegment> * GetSegmentList(TPageSegment * segment);
  664. void TransferSegment(TPageSegment * segment, DListBase<TPageSegment> * fromSegmentList);
  665. void FillAllocPages(__in void * address, uint pageCount);
  666. void FillFreePages(__in void * address, uint pageCount);
  667. bool IsPageSegment(TSegment* segment)
  668. {
  669. return segment->GetAvailablePageCount() <= maxAllocPageCount;
  670. }
#if DBG_DUMP
    virtual void DumpStats() const;
#endif
    // Create a new page segment and insert it into the given list.
    TPageSegment * AddPageSegment(DListBase<TPageSegment>& segmentList);
    static TPageSegment * AllocPageSegment(
        DListBase<TPageSegment>& segmentList,
        PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment> * pageAllocator,
        void* address, uint pageCount, uint committedCount, bool enableWriteBarrier);
    static TPageSegment * AllocPageSegment(
        DListBase<TPageSegment>& segmentList,
        PageAllocatorBase<TVirtualAlloc, TSegment, TPageSegment> * pageAllocator,
        bool committed, bool allocated, bool enableWriteBarrier);

    // Zero Pages
#if ENABLE_BACKGROUND_PAGE_ZEROING
    // Hand a freed page range to the background zeroing queue.
    void AddPageToZeroQueue(__in void * address, uint pageCount, __in TPageSegment * pageSegment);
    bool HasZeroPageQueue() const;
#endif
    // Whether pages returned to this allocator are zeroed before reuse.
    bool ZeroPages() const { return zeroPages; }
#if ENABLE_BACKGROUND_PAGE_ZEROING
    bool QueueZeroPages() const { return queueZeroPages; }
#endif
#if ENABLE_BACKGROUND_PAGE_FREEING
    FreePageEntry * PopPendingZeroPage();
#endif
#if DBG
    // Debug-only self-consistency check.
    void Check();
    // Suppresses the owning-thread access asserts when set (DBG only).
    bool disableThreadAccessCheck;
#endif
protected:
    // Data
    DListBase<TPageSegment> segments;          // page segments with free pages
    DListBase<TPageSegment> fullSegments;      // page segments with no free pages
    DListBase<TPageSegment> emptySegments;     // page segments that are entirely free
    DListBase<TPageSegment> decommitSegments;  // page segments holding decommitted pages
    DListBase<TSegment> largeSegments;         // segments above the page-segment size

    uint maxAllocPageCount;        // largest page count served from page segments (see IsPageSegment)
    DWORD allocFlags;              // extra allocation flags, e.g. MEM_WRITE_WATCH (see IsWriteWatchEnabled)
    uint maxFreePageCount;         // cap on cached free pages -- presumably triggers decommit; confirm
    size_t freePageCount;          // free pages currently cached across page segments
    uint secondaryAllocPageCount;  // pages set aside per segment for the secondary allocator
    bool isClosed;
    bool stopAllocationOnOutOfMemory;   // when set, a failure latches disableAllocationOutOfMemory (see ReportFailure)
    bool disableAllocationOutOfMemory;  // once set, RequestAlloc refuses all allocation
    bool excludeGuardPages;
    bool enableWriteBarrier;
    AllocationPolicyManager * policyManager;   // optional allocation budget hook; may be null
    Js::ConfigFlagsTable& pageAllocatorFlagTable;

    // zero pages
    bool zeroPages;   // zero pages before reuse (see ZeroPages())
#if ENABLE_BACKGROUND_PAGE_FREEING
    BackgroundPageQueue * backgroundPageQueue;
#if ENABLE_BACKGROUND_PAGE_ZEROING
    bool queueZeroPages;
    bool hasZeroQueuedPages;
#endif
#endif

    // Idle Decommit
    bool isUsed;
    // A flag to indicate we are trying to enter IdleDecommit again and back-off from decommit in DecommitNow. This is to prevent
    // blocking UI thread for too long. We have seen hangs under AppVerifier and believe this may be due to the decommit being slower
    // under AppVerifier. This shouldn't be a problem otherwise.
    bool waitingToEnterIdleDecommit;
#if DBG
    uint idleDecommitBackOffCount;
#endif
    size_t minFreePageCount;
    uint idleDecommitEnterCount;

    // Maintenance of the minFreePageCount bookkeeping used by idle decommit.
    void UpdateMinFreePageCount();
    void ResetMinFreePageCount();
    void ClearMinFreePageCount();
    void AddFreePageCount(uint pageCount);
    static uint GetFreePageLimit() { return 0; }

#if DBG
    size_t debugMinFreePageCount;
    ThreadContextId threadContextHandle;
    DWORD concurrentThreadId;
#endif
#if DBG_DUMP
    size_t decommitPageCount;
#endif
#ifdef RECYCLER_MEMORY_VERIFY
    bool verifyEnabled;
#endif
#if defined(RECYCLER_NO_PAGE_REUSE) || defined(ARENA_MEMORY_VERIFY)
    bool disablePageReuse;
#endif

    friend TSegment;
    friend TPageSegment;
    friend class IdleDecommit;
protected:
  761. virtual bool CreateSecondaryAllocator(TSegment* segment, bool committed, SecondaryAllocator** allocator)
  762. {
  763. *allocator = nullptr;
  764. return true;
  765. }
    // Whether address lies within the given segment's range.
    bool IsAddressInSegment(__in void* address, const TPageSegment& segment);
    bool IsAddressInSegment(__in void* address, const TSegment& segment);

    // Process the pages belong to -- presumably enables out-of-process
    // allocation scenarios; confirm against callers.
    HANDLE processHandle;
private:
    uint GetSecondaryAllocPageCount() const { return this->secondaryAllocPageCount; }
    void IntegrateSegments(DListBase<TPageSegment>& segmentList, uint segmentCount, size_t pageCount);
#if ENABLE_BACKGROUND_PAGE_FREEING
    void QueuePages(void * address, uint pageCount, TPageSegment * pageSegment);
#endif
    template <bool notPageAligned>
    char* AllocPagesInternal(DECLSPEC_GUARD_OVERFLOW uint pageCount, TPageSegment ** pageSegment);
#ifdef PROFILE_MEM
    PageMemoryData * memoryData;
#endif
#ifdef ENABLE_BASIC_TELEMETRY
    AllocatorDecommitStats* decommitStats;   // exposed via Get/SetDecommitStats
#endif

    // Accounting totals, maintained through the Add*/Sub* helpers below.
    size_t usedBytes;
    PageAllocatorType type;   // selects this allocator's perf-counter set
    size_t reservedBytes;
    size_t committedBytes;
    size_t numberOfSegments;
#ifdef PERF_COUNTERS
    // Perf-counter accessors: the per-allocator counters are keyed by this
    // allocator's type; the Total* counters are process-wide.
    PerfCounter::Counter& GetReservedSizeCounter() const
    {
        return PerfCounter::PageAllocatorCounterSet::GetReservedSizeCounter(type);
    }
    PerfCounter::Counter& GetCommittedSizeCounter() const
    {
        return PerfCounter::PageAllocatorCounterSet::GetCommittedSizeCounter(type);
    }
    PerfCounter::Counter& GetUsedSizeCounter() const
    {
        return PerfCounter::PageAllocatorCounterSet::GetUsedSizeCounter(type);
    }
    PerfCounter::Counter& GetTotalReservedSizeCounter() const
    {
        return PerfCounter::PageAllocatorCounterSet::GetTotalReservedSizeCounter();
    }
    PerfCounter::Counter& GetTotalCommittedSizeCounter() const
    {
        return PerfCounter::PageAllocatorCounterSet::GetTotalCommittedSizeCounter();
    }
    PerfCounter::Counter& GetTotalUsedSizeCounter() const
    {
        return PerfCounter::PageAllocatorCounterSet::GetTotalUsedSizeCounter();
    }
#endif
    // Adjust the reserved/committed/used-bytes and segment-count totals.
    void AddReservedBytes(size_t bytes);
    void SubReservedBytes(size_t bytes);
    void AddCommittedBytes(size_t bytes);
    void SubCommittedBytes(size_t bytes);
    void AddUsedBytes(size_t bytes);
    void SubUsedBytes(size_t bytes);
    void AddNumberOfSegments(size_t segmentCount);
    void SubNumberOfSegments(size_t segmentCount);
  822. public:
  823. size_t GetReservedBytes() const { return this->reservedBytes; };
  824. size_t GetCommittedBytes() const { return this->committedBytes; }
  825. size_t GetUsedBytes() const { return this->usedBytes; }
  826. size_t GetNumberOfSegments() const { return this->numberOfSegments; }
  827. private:
  828. bool RequestAlloc(size_t byteCount)
  829. {
  830. if (disableAllocationOutOfMemory)
  831. {
  832. return false;
  833. }
  834. if (policyManager != nullptr)
  835. {
  836. return policyManager->RequestAlloc(byteCount);
  837. }
  838. return true;
  839. }
  840. void ReportExternalAlloc(size_t byteCount)
  841. {
  842. if (policyManager != nullptr)
  843. {
  844. policyManager->RequestAlloc(byteCount, true);
  845. }
  846. }
  847. void ReportFree(size_t byteCount)
  848. {
  849. if (policyManager != nullptr)
  850. {
  851. policyManager->ReportFree(byteCount);
  852. }
  853. }
    // Release every segment in the given list.
    template <typename T>
    void ReleaseSegmentList(DListBase<T> * segmentList);
protected:
    // Instrumentation
    // Logging hooks for segment/page lifecycle events -- presumably feed the
    // PROFILE_MEM / PERF_COUNTERS machinery; confirm in implementation.
    void LogAllocSegment(TSegment * segment);
    void LogAllocSegment(uint segmentCount, size_t pageCount);
    void LogFreeSegment(TSegment * segment);
    void LogFreeDecommittedSegment(TSegment * segment);
    void LogFreePartiallyDecommittedPageSegment(TPageSegment * pageSegment);
    void LogAllocPages(size_t pageCount);
    void LogFreePages(size_t pageCount);
    void LogCommitPages(size_t pageCount);
    void LogRecommitPages(size_t pageCount);
    void LogDecommitPages(size_t pageCount);
  868. void ReportFailure(size_t byteCount)
  869. {
  870. if (this->stopAllocationOnOutOfMemory)
  871. {
  872. this->disableAllocationOutOfMemory = true;
  873. }
  874. if (policyManager != nullptr)
  875. {
  876. policyManager->ReportFailure(byteCount);
  877. }
  878. }
  879. };
// Page allocator specialization adding page protection, decommit tracking,
// and secondary (pdata/xdata unwind-data) allocation -- presumably used for
// jitted-code pages; confirm against callers.
template<typename TVirtualAlloc>
class HeapPageAllocator : public PageAllocatorBase<TVirtualAlloc>
{
    typedef PageAllocatorBase<TVirtualAlloc> Base;
    PREVENT_STANDALONE_HEAPINSTANCE();
public:
    HeapPageAllocator(AllocationPolicyManager * policyManager, bool allocXdata, bool excludeGuardPages, TVirtualAlloc * virtualAllocator, HANDLE processHandle = nullptr);

    // Change protection on pageCount pages at address within segment;
    // desiredOldProtectFlag is the expected previous protection -- see
    // implementation for the exact checking behavior.
    BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag);

    // Secondary allocations within a segment (pdata/xdata function tables).
    bool AllocSecondary(void* segment, ULONG_PTR functionStart, DWORD functionSize, DECLSPEC_GUARD_OVERFLOW ushort pdataCount, DECLSPEC_GUARD_OVERFLOW ushort xdataSize, SecondaryAllocation* allocation);
    bool ReleaseSecondary(const SecondaryAllocation& allocation, void* segment);

    void TrackDecommittedPages(void * address, uint pageCount, __in void* segment);
    void DecommitPages(__in char* address, size_t pageCount = 1);

    // Release pages that have already been decommitted.
    void ReleaseDecommitted(void * address, size_t pageCount, __in void * segment);

    bool IsAddressFromAllocator(__in void* address);

    // Whether xdata secondary allocation was requested at construction.
    bool AllocXdata() { return allocXdata; }
private:
    bool allocXdata;
    void ReleaseDecommittedSegment(__in SegmentBase<TVirtualAlloc>* segment);
#if PDATA_ENABLED
    virtual bool CreateSecondaryAllocator(SegmentBase<TVirtualAlloc>* segment, bool committed, SecondaryAllocator** allocator) override;
#endif
};
  903. }