CustomHeap.h 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #pragma once
  6. namespace Memory
  7. {
  8. #define VerboseHeapTrace(...) { \
  9. OUTPUT_VERBOSE_TRACE(Js::CustomHeapPhase, __VA_ARGS__); \
  10. }
  11. #define HeapTrace(...) { \
  12. Output::Print(__VA_ARGS__); \
  13. Output::Flush(); \
  14. }
  15. namespace CustomHeap
  16. {
// Size buckets backing the heap's free lists (see GetBucketForSize).
// Requests up to Page::MaxAllocationSize are served from a fixed bucket;
// anything larger is tracked in LargeObjectList.
enum BucketId
{
    InvalidBucket = -1, // sentinel: no bucket selected
    SmallObjectList,    // presumably requests below 256 bytes -- confirm against GetBucketForSize
    Bucket256,
    Bucket512,
    Bucket1024,
    Bucket2048,
    Bucket4096,
    LargeObjectList,    // allocations that exceed Page::MaxAllocationSize
    NumBuckets          // count of entries above; not itself a bucket
};
// Maps a requested byte size to the BucketId that serves it (defined out of line).
BucketId GetBucketForSize(DECLSPEC_GUARD_OVERFLOW size_t bytes);
// One page of code memory carved into Alignment-sized (128-byte) chunks.
// Occupancy is tracked in freeBitVector: one bit per chunk, a SET bit means
// the chunk is FREE (the constructor starts with all 32 bits set).
struct Page
{
    bool inFullList;        // page currently lives in the heap's fullPages list
    bool isDecommitted;     // page's memory has been decommitted
    void* segment;          // owning segment (opaque here; callers cast as needed)
    BVUnit freeBitVector;   // set bit == free 128-byte chunk
    char* address;          // start address of the page
    BucketId currentBucket; // bucket this page is currently assigned to

    // True when no free chunk remains (every bit cleared).
    bool HasNoSpace()
    {
        return freeBitVector.IsEmpty();
    }

    // True when every chunk is free, i.e. nothing is allocated from this page.
    bool IsEmpty()
    {
        return freeBitVector.IsFull();
    }

    // Whether this page has a long-enough run of free chunks for the bucket.
    // NOTE(review): the run length (targetBucket + 1) matches the chunk count
    // only for the smallest buckets (1 and 2 chunks); buckets of 512+ bytes
    // need more chunks (512/128 = 4) per Heap::GetChunkSizeForBytes. Verify
    // whether this is an intentional optimistic check or a transcription bug.
    bool CanAllocate(BucketId targetBucket)
    {
        return freeBitVector.FirstStringOfOnes(targetBucket + 1) != BVInvalidIndex;
    }

    Page(__in char* address, void* segment, BucketId bucket):
        address(address),
        segment(segment),
        currentBucket(bucket),
        freeBitVector(0xFFFFFFFF), // all 32 chunks start out free
        isDecommitted(false),
        inFullList(false)
    {
    }

    // Each bit in the bit vector corresponds to 128 bytes of memory
    // This implies that 128 bytes is the smallest allocation possible
    static const uint Alignment = 128;
    static const uint MaxAllocationSize = 4096; // 32 chunks x 128 bytes
};
// A single allocation handed out by the heap.
// Small allocations (size <= Page::MaxAllocationSize) record the Page they
// were carved from; large allocations bypass pages and record their segment
// directly -- hence the union. IsLargeAllocation() selects the active member.
struct Allocation
{
    union
    {
        Page* page; // valid when !IsLargeAllocation()
        struct
        {
            void* segment;      // owning segment of the page-bypassing allocation
            bool isDecommitted; // large allocation's memory has been decommitted
        } largeObjectAllocation; // valid when IsLargeAllocation()
    };

    __field_bcount(size) char* address; // start of the allocated range
    size_t size;                        // allocation size in bytes

    // Large allocations are those too big for a bucketed page.
    bool IsLargeAllocation() const { return size > Page::MaxAllocationSize; }

    // Pages backing a large allocation. Truncating division: assumes size is
    // page-aligned for large allocations -- TODO confirm at the alloc site.
    size_t GetPageCount() const { Assert(this->IsLargeAllocation()); return size / AutoSystemInfo::PageSize; }

#if DBG
    // Initialized to false, this is set to true when the allocation
    // is actually used by the emit buffer manager
    // This is almost always true- it's there only for assertion purposes
    bool isAllocationUsed: 1;
    bool isNotExecutableBecauseOOM: 1;
#endif

#if PDATA_ENABLED
    XDataAllocation xdata; // unwind-data (pdata/xdata) allocation for this code

    // Returns the secondary (XDATA) allocator of whichever segment owns this
    // allocation: the page's segment for small allocations, the recorded
    // segment for large ones.
    XDataAllocator* GetXDataAllocator()
    {
        XDataAllocator* allocator;
        if (!this->IsLargeAllocation())
        {
            allocator = static_cast<XDataAllocator*>(((Segment*)(this->page->segment))->GetSecondaryAllocator());
        }
        else
        {
            allocator = static_cast<XDataAllocator*>(((Segment*) (largeObjectAllocation.segment))->GetSecondaryAllocator());
        }
        return allocator;
    }
#endif
};
// Wrapper for the two HeapPageAllocator with and without the prereserved segment.
// Supports multiple thread access. Require explicit locking (via AutoCriticalSection)
template <typename TAlloc, typename TPreReservedAlloc>
class CodePageAllocators
{
public:
    // VirtualAlloc-backed variant: the regular allocator gets no alloc
    // wrapper (nullptr); only the pre-reserved allocator gets one.
    CodePageAllocators(AllocationPolicyManager * policyManager, bool allocXdata, PreReservedVirtualAllocWrapper * virtualAllocator, HANDLE processHandle) :
        pageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, nullptr, processHandle),
        preReservedHeapAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, virtualAllocator, processHandle),
        cs(4000), // critical-section spin count
        secondaryAllocStateChangedCount(0),
        processHandle(processHandle)
    {
#if DBG
        // This object is used from multiple threads under an explicit lock,
        // so disable the page allocators' single-owner-thread debug checks.
        this->preReservedHeapAllocator.ClearConcurrentThreadId();
        this->pageAllocator.ClearConcurrentThreadId();
#endif
    }

#if _WIN32
    // Section-backed variant (Windows only): the regular allocator also gets
    // an alloc wrapper (sectionAllocator) here.
    CodePageAllocators(AllocationPolicyManager * policyManager, bool allocXdata, SectionAllocWrapper * sectionAllocator, PreReservedSectionAllocWrapper * virtualAllocator, HANDLE processHandle) :
        pageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, sectionAllocator, processHandle),
        preReservedHeapAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, virtualAllocator, processHandle),
        cs(4000),
        secondaryAllocStateChangedCount(0),
        processHandle(processHandle)
    {
#if DBG
        this->preReservedHeapAllocator.ClearConcurrentThreadId();
        this->pageAllocator.ClearConcurrentThreadId();
#endif
    }
#endif

    bool AllocXdata()
    {
        // Simple immutable data access, no need for lock
        return preReservedHeapAllocator.AllocXdata();
    }

    // True when 'segment' belongs to the pre-reserved allocator; used below
    // to route every per-segment operation to the right allocator.
    bool IsPreReservedSegment(void * segment)
    {
        // Simple immutable data access, no need for lock
        Assert(segment);
        return reinterpret_cast<SegmentBaseCommon*>(segment)->IsInPreReservedHeapPageAllocator();
    }

    bool IsInNonPreReservedPageAllocator(__in void *address)
    {
        Assert(this->cs.IsLocked());
        return this->pageAllocator.IsAddressFromAllocator(address);
    }

    // Segment-granularity allocation. Tries the pre-reserved region first
    // (when permitted); on fallback to the regular allocator it clears
    // *isAllJITCodeInPreReservedRegion for jitted-code requests.
    char * Alloc(size_t * pages, void ** segment, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, bool * isAllJITCodeInPreReservedRegion)
    {
        Assert(this->cs.IsLocked());
        char* address = nullptr;
        if (canAllocInPreReservedHeapPageSegment)
        {
            address = this->preReservedHeapAllocator.Alloc(pages, (SegmentBase<TPreReservedAlloc>**)(segment));
        }
        if (address == nullptr)
        {
            if (isAnyJittedCode)
            {
                *isAllJITCodeInPreReservedRegion = false;
            }
            address = this->pageAllocator.Alloc(pages, (SegmentBase<TAlloc>**)segment);
        }
        return address;
    }

    // Map/unmap a remote address range locally -- presumably for the
    // out-of-process case (note processHandle); defined out of line.
    char * AllocLocal(char * remoteAddr, size_t size, void * segment);
    void FreeLocal(char * addr, void * segment);

    // Page-granularity allocation; same pre-reserved-first policy as Alloc.
    char * AllocPages(DECLSPEC_GUARD_OVERFLOW uint pages, void ** pageSegment, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, bool * isAllJITCodeInPreReservedRegion)
    {
        Assert(this->cs.IsLocked());
        char * address = nullptr;
        if (canAllocInPreReservedHeapPageSegment)
        {
            address = this->preReservedHeapAllocator.AllocPages(pages, (PageSegmentBase<TPreReservedAlloc>**)pageSegment);
            if (address == nullptr)
            {
                VerboseHeapTrace(_u("PRE-RESERVE: PreReserved Segment CANNOT be allocated \n"));
            }
        }
        if (address == nullptr) // if no space in Pre-reserved Page Segment, then allocate in regular ones.
        {
            if (isAnyJittedCode)
            {
                *isAllJITCodeInPreReservedRegion = false;
            }
            address = this->pageAllocator.AllocPages(pages, (PageSegmentBase<TAlloc>**)pageSegment);
        }
        else
        {
            VerboseHeapTrace(_u("PRE-RESERVE: Allocing new page in PreReserved Segment \n"));
        }
        return address;
    }

    void ReleasePages(void* pageAddress, uint pageCount, __in void* segment)
    {
        Assert(this->cs.IsLocked());
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            this->GetPreReservedPageAllocator(segment)->ReleasePages(pageAddress, pageCount, segment);
        }
        else
        {
            this->GetPageAllocator(segment)->ReleasePages(pageAddress, pageCount, segment);
        }
    }

    BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag)
    {
        // This is merely a wrapper for VirtualProtect, no need to synchronize, and doesn't touch any data.
        // No need to assert locked.
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            return this->GetPreReservedPageAllocator(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
        }
        else
        {
            return this->GetPageAllocator(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
        }
    }

    void TrackDecommittedPages(void * address, uint pageCount, __in void* segment)
    {
        Assert(this->cs.IsLocked());
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            this->GetPreReservedPageAllocator(segment)->TrackDecommittedPages(address, pageCount, segment);
        }
        else
        {
            this->GetPageAllocator(segment)->TrackDecommittedPages(address, pageCount, segment);
        }
    }

    // Frees a secondary (XDATA) allocation; bumps the change counter when the
    // underlying allocator reports a state change, so heaps can re-scan
    // their "full pages" (see HasSecondaryAllocStateChanged).
    void ReleaseSecondary(const SecondaryAllocation& allocation, void* segment)
    {
        Assert(this->cs.IsLocked());
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            secondaryAllocStateChangedCount += (uint)this->GetPreReservedPageAllocator(segment)->ReleaseSecondary(allocation, segment);
        }
        else
        {
            secondaryAllocStateChangedCount += (uint)this->GetPageAllocator(segment)->ReleaseSecondary(allocation, segment);
        }
    }

    // Compares the caller's snapshot of the change counter with the current
    // value, refreshing the snapshot when they differ.
    // NOTE(review): no lock assertion here although the counter is mutated
    // under the lock in ReleaseSecondary -- presumably callers already hold
    // the lock; confirm at call sites.
    bool HasSecondaryAllocStateChanged(uint * lastSecondaryAllocStateChangedCount)
    {
        if (secondaryAllocStateChangedCount != *lastSecondaryAllocStateChangedCount)
        {
            *lastSecondaryAllocStateChangedCount = secondaryAllocStateChangedCount;
            return true;
        }
        return false;
    }

    void DecommitPages(__in char* address, size_t pageCount, void* segment)
    {
        // This is merely a wrapper for VirtualFree, no need to synchronize, and doesn't touch any data.
        // No need to assert locked.
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            this->GetPreReservedPageAllocator(segment)->DecommitPages(address, pageCount);
        }
        else
        {
            this->GetPageAllocator(segment)->DecommitPages(address, pageCount);
        }
    }

    // Allocates pdata/xdata (unwind info) for a function from the owning
    // segment's secondary allocator.
    bool AllocSecondary(void* segment, ULONG_PTR functionStart, size_t functionSize_t, ushort pdataCount, ushort xdataSize, SecondaryAllocation* allocation)
    {
        Assert(this->cs.IsLocked());
        Assert(functionSize_t <= MAXUINT32); // about to be narrowed to DWORD
        DWORD functionSize = static_cast<DWORD>(functionSize_t);
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            return this->GetPreReservedPageAllocator(segment)->AllocSecondary(segment, functionStart, functionSize, pdataCount, xdataSize, allocation);
        }
        else
        {
            return this->GetPageAllocator(segment)->AllocSecondary(segment, functionStart, functionSize, pdataCount, xdataSize, allocation);
        }
    }

    void Release(void * address, size_t pageCount, void * segment)
    {
        Assert(this->cs.IsLocked());
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            this->GetPreReservedPageAllocator(segment)->Release(address, pageCount, segment);
        }
        else
        {
            this->GetPageAllocator(segment)->Release(address, pageCount, segment);
        }
    }

    void ReleaseDecommitted(void * address, size_t pageCount, __in void * segment)
    {
        Assert(this->cs.IsLocked());
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            this->GetPreReservedPageAllocator(segment)->ReleaseDecommitted(address, pageCount, segment);
        }
        else
        {
            this->GetPageAllocator(segment)->ReleaseDecommitted(address, pageCount, segment);
        }
    }

    // Public so callers can take the lock around multi-call sequences.
    CriticalSection cs;

private:
    // Convenience overload: resolve the allocator from a page's segment.
    // NOTE(review): GetPageAllocator<T> would need a template overload taking
    // void*, but the overload below is a non-template -- this compiles only
    // while never instantiated; confirm whether any caller uses it.
    template<typename T>
    HeapPageAllocator<T>* GetPageAllocator(Page * page)
    {
        AssertMsg(page, "Why is page null?");
        return GetPageAllocator<T>(page->segment);
    }

    HeapPageAllocator<TAlloc>* GetPageAllocator(void * segmentParam)
    {
        SegmentBase<TAlloc> * segment = (SegmentBase<TAlloc>*)segmentParam;
        AssertMsg(segment, "Why is segment null?");
        // The segment must actually be owned by our regular allocator.
        Assert((HeapPageAllocator<TAlloc>*)(segment->GetAllocator()) == &this->pageAllocator);
        return (HeapPageAllocator<TAlloc> *)(segment->GetAllocator());
    }

    HeapPageAllocator<TPreReservedAlloc>* GetPreReservedPageAllocator(void * segmentParam)
    {
        SegmentBase<TPreReservedAlloc> * segment = (SegmentBase<TPreReservedAlloc>*)segmentParam;
        AssertMsg(segment, "Why is segment null?");
        // The segment must actually be owned by our pre-reserved allocator.
        Assert((HeapPageAllocator<TPreReservedAlloc>*)(segment->GetAllocator()) == &this->preReservedHeapAllocator);
        return (HeapPageAllocator<TPreReservedAlloc> *)(segment->GetAllocator());
    }

    HeapPageAllocator<TAlloc> pageAllocator;
    HeapPageAllocator<TPreReservedAlloc> preReservedHeapAllocator;
    HANDLE processHandle;

    // Track the number of times a segment's secondary allocator changed from
    // full back to available, so the CustomHeap knows when to update its
    // "full pages" lists.
    // It is ok to overflow this variable. All we care is if the state has changed.
    // If in the unlikely scenario that we do overflow, then we delay the full pages in CustomHeap from
    // being made available.
    uint secondaryAllocStateChangedCount;
};
// In-process allocators use the plain VirtualAlloc wrappers; the
// section-backed variant is Windows-only.
typedef CodePageAllocators<VirtualAllocWrapper, PreReservedVirtualAllocWrapper> InProcCodePageAllocators;
#if _WIN32
typedef CodePageAllocators<SectionAllocWrapper, PreReservedSectionAllocWrapper> OOPCodePageAllocators;
#endif
/*
 * Simple free-listing based heap allocator
 *
 * Each allocation is tracked using an "Allocation" record
 * Once we alloc, we start assigning chunks sliced from the end of an allocation
 * If we don't have enough to slice off, we push a new allocation record to the record stack, and try to assign from that
 *
 * Single thread only. Requires external locking. (Currently, EmitBufferManager manages the locking)
 */
template <typename TAlloc, typename TPreReservedAlloc>
class Heap
{
public:
    Heap(ArenaAllocator * alloc, CodePageAllocators<TAlloc, TPreReservedAlloc> * codePageAllocators, HANDLE processHandle);

    // Allocates 'bytes' of code memory plus optional pdata/xdata space.
    Allocation* Alloc(DECLSPEC_GUARD_OVERFLOW size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
    void Free(__in Allocation* allocation);
    void DecommitAll();
    void FreeAll();
    bool IsInHeap(__in void* address);

    // A page should be in full list if:
    // 1. It does not have any space
    // 2. Parent segment cannot allocate any more XDATA
    bool ShouldBeInFullList(Page* page)
    {
        return page->HasNoSpace() || (codePageAllocators->AllocXdata() && !((Segment*)(page->segment))->CanAllocSecondary());
    }

    BOOL ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag, __in_opt char* addressInPage = nullptr);
    BOOL ProtectAllocationWithExecuteReadWrite(Allocation *allocation, __in_opt char* addressInPage = nullptr);
    BOOL ProtectAllocationWithExecuteReadOnly(Allocation *allocation, __in_opt char* addressInPage = nullptr);

    ~Heap();

#if DBG_DUMP
    void DumpStats();
#endif

private:
    /**
     * Inline methods
     */
    // Number of Page::Alignment-sized (128-byte) chunks covering 'bytes'.
    // NOTE(review): truncating division -- e.g. 129 yields 1 chunk, not 2;
    // presumably callers only pass bucket-rounded sizes. Confirm.
    inline unsigned int GetChunkSizeForBytes(DECLSPEC_GUARD_OVERFLOW size_t bytes)
    {
        return (bytes > Page::Alignment ? static_cast<unsigned int>(bytes) / Page::Alignment : 1);
    }

    // ceil(bytes / PageSize): pads by one full page, then rounds down.
    // Returns 0 when the padded size overflows (AllocSizeMath::Add reports
    // overflow as (size_t)-1, which is what the check below detects).
    inline size_t GetNumPagesForSize(DECLSPEC_GUARD_OVERFLOW size_t bytes)
    {
        size_t allocSize = AllocSizeMath::Add(bytes, AutoSystemInfo::PageSize);
        if (allocSize == (size_t) -1)
        {
            return 0;
        }
        return ((allocSize - 1)/ AutoSystemInfo::PageSize);
    }

    // Index of the first run of free chunks long enough for 'bytes', or
    // BVInvalidIndex when the page cannot satisfy the request.
    inline BVIndex GetFreeIndexForPage(Page* page, DECLSPEC_GUARD_OVERFLOW size_t bytes)
    {
        unsigned int length = GetChunkSizeForBytes(bytes);
        BVIndex index = page->freeBitVector.FirstStringOfOnes(length);
        return index;
    }

    /**
     * Large object methods
     */
    Allocation* AllocLargeObject(DECLSPEC_GUARD_OVERFLOW size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
    void FreeLargeObject(Allocation* header);
    void FreeLargeObjects();

    // Called during Free
    DWORD EnsurePageWriteable(Page* page);
    // Called when freeing the whole page
    DWORD EnsureAllocationWriteable(Allocation* allocation);
    // Called when freeing only a part of the page
    DWORD EnsureAllocationExecuteWriteable(Allocation* allocation);

    // Reprotects one page to the requested read/write flags and returns the
    // protection to restore afterwards (always PAGE_EXECUTE).
    template<DWORD readWriteFlags>
    DWORD EnsurePageReadWrite(Page* page)
    {
        Assert(!page->isDecommitted);
        this->codePageAllocators->ProtectPages(page->address, 1, page->segment, readWriteFlags, PAGE_EXECUTE);
        return PAGE_EXECUTE;
    }

    // Allocation-level version: large allocations span whole pages, so they
    // go through ProtectAllocation; small ones reuse EnsurePageReadWrite.
    template<DWORD readWriteFlags>
    DWORD EnsureAllocationReadWrite(Allocation* allocation)
    {
        if (allocation->IsLargeAllocation())
        {
            this->ProtectAllocation(allocation, readWriteFlags, PAGE_EXECUTE);
            return PAGE_EXECUTE;
        }
        else
        {
            return EnsurePageReadWrite<readWriteFlags>(allocation->page);
        }
    }

    /**
     * Freeing Methods
     */
    void FreeBuckets(bool freeOnlyEmptyPages);
    void FreeBucket(DListBase<Page>* bucket, bool freeOnlyEmptyPages);
    void FreePage(Page* page);
    bool FreeAllocation(Allocation* allocation);
    void FreeAllocationHelper(Allocation * allocation, BVIndex index, uint length);

#if PDATA_ENABLED
    void FreeXdata(XDataAllocation* xdata, void* segment);
#endif

    void FreeDecommittedBuckets();
    void FreeDecommittedLargeObjects();

    /**
     * Page methods
     */
    Page* AddPageToBucket(Page* page, BucketId bucket, bool wasFull = false);
    bool AllocInPage(Page* page, DECLSPEC_GUARD_OVERFLOW size_t bytes, ushort pdataCount, ushort xdataSize, Allocation ** allocation);
    Page* AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
    Page* FindPageToSplit(BucketId targetBucket, bool findPreReservedHeapPages = false);
    bool UpdateFullPages();
    Page * GetExistingPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment);
    BVIndex GetIndexInPage(__in Page* page, __in char* address);
    bool IsInHeap(DListBase<Page> const buckets[NumBuckets], __in void *address);
    bool IsInHeap(DListBase<Page> const& buckets, __in void *address);
    bool IsInHeap(DListBase<Allocation> const& allocations, __in void *address);

    /**
     * Stats
     */
#if DBG_DUMP
    size_t totalAllocationSize;
    size_t freeObjectSize;
    size_t allocationsSinceLastCompact;
    size_t freesSinceLastCompact;
#endif

    /**
     * Allocator stuff
     */
    CodePageAllocators<TAlloc, TPreReservedAlloc> * codePageAllocators;
    ArenaAllocator* auxiliaryAllocator;

    /*
     * Various tracking lists
     */
    DListBase<Page> buckets[NumBuckets];       // pages with free space, per bucket
    DListBase<Page> fullPages[NumBuckets];     // pages satisfying ShouldBeInFullList
    DListBase<Allocation> largeObjectAllocations;
    DListBase<Page> decommittedPages;
    DListBase<Allocation> decommittedLargeObjects;

    // Snapshot of the CodePageAllocators change counter -- presumably passed
    // to HasSecondaryAllocStateChanged from UpdateFullPages; confirm in .cpp.
    uint lastSecondaryAllocStateChangedCount;
    HANDLE processHandle;
#if DBG
    bool inDtor; // presumably set while ~Heap runs; verify in the destructor
#endif
};
// Heap instantiations mirroring the CodePageAllocators typedefs above;
// the section-backed variant is Windows-only.
typedef Heap<VirtualAllocWrapper, PreReservedVirtualAllocWrapper> InProcHeap;
#if _WIN32
typedef Heap<SectionAllocWrapper, PreReservedSectionAllocWrapper> OOPHeap;
#endif
// Helpers
unsigned int log2(size_t number); // integer log2 -- presumably floor; verify in the definition
// Re-declaration of GetBucketForSize (also declared above); harmless.
BucketId GetBucketForSize(DECLSPEC_GUARD_OVERFLOW size_t bytes);
// Fills 'buffer' with debug-break instruction bytes for the target arch.
void FillDebugBreak(_Out_writes_bytes_all_(byteCount) BYTE* buffer, _In_ size_t byteCount);
  500. } // namespace CustomHeap
  501. } // namespace Memory