CustomHeap.h 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #pragma once
  6. #if ENABLE_NATIVE_CODEGEN || DYNAMIC_INTERPRETER_THUNK
  7. namespace Memory
  8. {
  9. #define VerboseHeapTrace(...) { \
  10. OUTPUT_VERBOSE_TRACE(Js::CustomHeapPhase, __VA_ARGS__); \
  11. }
  12. #define HeapTrace(...) { \
  13. Output::Print(__VA_ARGS__); \
  14. Output::Flush(); \
  15. }
  16. namespace CustomHeap
  17. {
  18. enum BucketId
  19. {
  20. InvalidBucket = -1,
  21. SmallObjectList,
  22. Bucket256,
  23. Bucket512,
  24. Bucket1024,
  25. Bucket2048,
  26. Bucket4096,
  27. LargeObjectList,
  28. NumBuckets
  29. };
  30. BucketId GetBucketForSize(DECLSPEC_GUARD_OVERFLOW size_t bytes);
  31. struct Page
  32. {
  33. bool inFullList;
  34. bool isDecommitted;
  35. void* segment;
  36. BVUnit freeBitVector;
  37. char* address;
  38. BucketId currentBucket;
  39. bool HasNoSpace()
  40. {
  41. return freeBitVector.IsEmpty();
  42. }
  43. bool IsEmpty()
  44. {
  45. return freeBitVector.IsFull();
  46. }
  47. bool CanAllocate(BucketId targetBucket)
  48. {
  49. return freeBitVector.FirstStringOfOnes(1 << targetBucket) != BVInvalidIndex;
  50. }
  51. Page(__in char* address, void* segment, BucketId bucket):
  52. address(address),
  53. segment(segment),
  54. currentBucket(bucket),
  55. freeBitVector(0xFFFFFFFF),
  56. isDecommitted(false),
  57. inFullList(false)
  58. {
  59. }
  60. // Each bit in the bit vector corresponds to 128 bytes of memory
  61. // This implies that 128 bytes is the smallest allocation possible
  62. static const uint MaxAllocationSize = 4096;
  63. static const uint sizePerBit = MaxAllocationSize / 32; // pagesize / freeBitVector bit count
  64. static const uint Alignment = sizePerBit; // 128
  65. };
  66. struct Allocation
  67. {
  68. union
  69. {
  70. Page* page;
  71. struct
  72. {
  73. void* segment;
  74. bool isDecommitted;
  75. } largeObjectAllocation;
  76. };
  77. uintptr_t thunkAddress;
  78. __field_bcount(size) char* address;
  79. size_t size;
  80. bool IsLargeAllocation() const { return size > Page::MaxAllocationSize; }
  81. size_t GetPageCount() const { Assert(this->IsLargeAllocation()); return size / AutoSystemInfo::PageSize; }
  82. #if DBG
  83. // Initialized to false, this is set to true when the allocation
  84. // is actually used by the emit buffer manager
  85. // This is almost always true- it's there only for assertion purposes
  86. bool isAllocationUsed: 1;
  87. bool isNotExecutableBecauseOOM: 1;
  88. #endif
  89. #if PDATA_ENABLED
  90. XDataAllocation xdata;
  91. XDataAllocator* GetXDataAllocator()
  92. {
  93. XDataAllocator* allocator;
  94. if (!this->IsLargeAllocation())
  95. {
  96. allocator = static_cast<XDataAllocator*>(((Segment*)(this->page->segment))->GetSecondaryAllocator());
  97. }
  98. else
  99. {
  100. allocator = static_cast<XDataAllocator*>(((Segment*) (largeObjectAllocation.segment))->GetSecondaryAllocator());
  101. }
  102. return allocator;
  103. }
  104. #endif
  105. };
  106. // Wrapper for the two HeapPageAllocator with and without the prereserved segment.
  107. // Supports multiple thread access. Require explicit locking (via AutoCriticalSection)
  108. template <typename TAlloc, typename TPreReservedAlloc>
  109. class CodePageAllocators
  110. {
  111. public:
  112. CodePageAllocators(AllocationPolicyManager * policyManager, bool allocXdata, PreReservedVirtualAllocWrapper * virtualAllocator, HANDLE processHandle) :
  113. pageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, nullptr, processHandle),
  114. preReservedHeapAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, virtualAllocator, processHandle),
  115. cs(4000),
  116. secondaryAllocStateChangedCount(0),
  117. processHandle(processHandle)
  118. {
  119. #if DBG
  120. this->preReservedHeapAllocator.ClearConcurrentThreadId();
  121. this->pageAllocator.ClearConcurrentThreadId();
  122. #endif
  123. }
  124. #if _WIN32
  125. CodePageAllocators(AllocationPolicyManager * policyManager, bool allocXdata, SectionAllocWrapper * sectionAllocator, PreReservedSectionAllocWrapper * virtualAllocator, HANDLE processHandle) :
  126. pageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, sectionAllocator, processHandle),
  127. preReservedHeapAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, virtualAllocator, processHandle),
  128. cs(4000),
  129. secondaryAllocStateChangedCount(0),
  130. processHandle(processHandle)
  131. {
  132. #if DBG
  133. this->preReservedHeapAllocator.ClearConcurrentThreadId();
  134. this->pageAllocator.ClearConcurrentThreadId();
  135. #endif
  136. }
  137. #endif
  138. bool AllocXdata()
  139. {
  140. // Simple immutable data access, no need for lock
  141. return preReservedHeapAllocator.AllocXdata();
  142. }
  143. bool IsPreReservedSegment(void * segment)
  144. {
  145. // Simple immutable data access, no need for lock
  146. Assert(segment);
  147. return reinterpret_cast<SegmentBaseCommon*>(segment)->IsInPreReservedHeapPageAllocator();
  148. }
  149. bool IsInNonPreReservedPageAllocator(__in void *address)
  150. {
  151. Assert(this->cs.IsLocked());
  152. return this->pageAllocator.IsAddressFromAllocator(address);
  153. }
  154. char * Alloc(size_t * pages, void ** segment, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, bool * isAllJITCodeInPreReservedRegion)
  155. {
  156. Assert(this->cs.IsLocked());
  157. char* address = nullptr;
  158. if (canAllocInPreReservedHeapPageSegment)
  159. {
  160. address = this->preReservedHeapAllocator.Alloc(pages, (SegmentBase<TPreReservedAlloc>**)(segment));
  161. }
  162. if (address == nullptr)
  163. {
  164. if (isAnyJittedCode)
  165. {
  166. *isAllJITCodeInPreReservedRegion = false;
  167. }
  168. address = this->pageAllocator.Alloc(pages, (SegmentBase<TAlloc>**)segment);
  169. }
  170. return address;
  171. }
  172. char * AllocLocal(char * remoteAddr, size_t size, void * segment);
  173. void FreeLocal(char * addr, void * segment);
  174. char * AllocPages(DECLSPEC_GUARD_OVERFLOW uint pages, void ** pageSegment, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, bool * isAllJITCodeInPreReservedRegion)
  175. {
  176. Assert(this->cs.IsLocked());
  177. char * address = nullptr;
  178. if (canAllocInPreReservedHeapPageSegment)
  179. {
  180. address = this->preReservedHeapAllocator.AllocPages(pages, (PageSegmentBase<TPreReservedAlloc>**)pageSegment);
  181. if (address == nullptr)
  182. {
  183. VerboseHeapTrace(_u("PRE-RESERVE: PreReserved Segment CANNOT be allocated \n"));
  184. }
  185. }
  186. if (address == nullptr) // if no space in Pre-reserved Page Segment, then allocate in regular ones.
  187. {
  188. if (isAnyJittedCode)
  189. {
  190. *isAllJITCodeInPreReservedRegion = false;
  191. }
  192. address = this->pageAllocator.AllocPages(pages, (PageSegmentBase<TAlloc>**)pageSegment);
  193. }
  194. else
  195. {
  196. VerboseHeapTrace(_u("PRE-RESERVE: Allocing new page in PreReserved Segment \n"));
  197. }
  198. return address;
  199. }
  200. void ReleasePages(void* pageAddress, uint pageCount, __in void* segment)
  201. {
  202. Assert(this->cs.IsLocked());
  203. Assert(segment);
  204. if (IsPreReservedSegment(segment))
  205. {
  206. this->GetPreReservedPageAllocator(segment)->ReleasePages(pageAddress, pageCount, segment);
  207. }
  208. else
  209. {
  210. this->GetPageAllocator(segment)->ReleasePages(pageAddress, pageCount, segment);
  211. }
  212. }
  213. BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag)
  214. {
  215. // This is merely a wrapper for VirtualProtect, no need to synchornize, and doesn't touch any data.
  216. // No need to assert locked.
  217. Assert(segment);
  218. if (IsPreReservedSegment(segment))
  219. {
  220. return this->GetPreReservedPageAllocator(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
  221. }
  222. else
  223. {
  224. return this->GetPageAllocator(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
  225. }
  226. }
  227. void TrackDecommittedPages(void * address, uint pageCount, __in void* segment)
  228. {
  229. Assert(this->cs.IsLocked());
  230. Assert(segment);
  231. if (IsPreReservedSegment(segment))
  232. {
  233. this->GetPreReservedPageAllocator(segment)->TrackDecommittedPages(address, pageCount, segment);
  234. }
  235. else
  236. {
  237. this->GetPageAllocator(segment)->TrackDecommittedPages(address, pageCount, segment);
  238. }
  239. }
  240. void ReleaseSecondary(const SecondaryAllocation& allocation, void* segment)
  241. {
  242. Assert(this->cs.IsLocked());
  243. Assert(segment);
  244. if (IsPreReservedSegment(segment))
  245. {
  246. secondaryAllocStateChangedCount += (uint)this->GetPreReservedPageAllocator(segment)->ReleaseSecondary(allocation, segment);
  247. }
  248. else
  249. {
  250. secondaryAllocStateChangedCount += (uint)this->GetPageAllocator(segment)->ReleaseSecondary(allocation, segment);
  251. }
  252. }
  253. bool HasSecondaryAllocStateChanged(uint * lastSecondaryAllocStateChangedCount)
  254. {
  255. if (secondaryAllocStateChangedCount != *lastSecondaryAllocStateChangedCount)
  256. {
  257. *lastSecondaryAllocStateChangedCount = secondaryAllocStateChangedCount;
  258. return true;
  259. }
  260. return false;
  261. }
  262. void DecommitPages(__in char* address, size_t pageCount, void* segment)
  263. {
  264. // This is merely a wrapper for VirtualFree, no need to synchornize, and doesn't touch any data.
  265. // No need to assert locked.
  266. Assert(segment);
  267. if (IsPreReservedSegment(segment))
  268. {
  269. this->GetPreReservedPageAllocator(segment)->DecommitPages(address, pageCount);
  270. }
  271. else
  272. {
  273. this->GetPageAllocator(segment)->DecommitPages(address, pageCount);
  274. }
  275. }
  276. bool AllocSecondary(void* segment, ULONG_PTR functionStart, size_t functionSize_t, ushort pdataCount, ushort xdataSize, SecondaryAllocation* allocation)
  277. {
  278. Assert(this->cs.IsLocked());
  279. Assert(functionSize_t <= MAXUINT32);
  280. DWORD functionSize = static_cast<DWORD>(functionSize_t);
  281. Assert(segment);
  282. if (IsPreReservedSegment(segment))
  283. {
  284. return this->GetPreReservedPageAllocator(segment)->AllocSecondary(segment, functionStart, functionSize, pdataCount, xdataSize, allocation);
  285. }
  286. else
  287. {
  288. return this->GetPageAllocator(segment)->AllocSecondary(segment, functionStart, functionSize, pdataCount, xdataSize, allocation);
  289. }
  290. }
  291. void Release(void * address, size_t pageCount, void * segment)
  292. {
  293. Assert(this->cs.IsLocked());
  294. Assert(segment);
  295. if (IsPreReservedSegment(segment))
  296. {
  297. this->GetPreReservedPageAllocator(segment)->Release(address, pageCount, segment);
  298. }
  299. else
  300. {
  301. this->GetPageAllocator(segment)->Release(address, pageCount, segment);
  302. }
  303. }
  304. void ReleaseDecommitted(void * address, size_t pageCount, __in void * segment)
  305. {
  306. Assert(this->cs.IsLocked());
  307. Assert(segment);
  308. if (IsPreReservedSegment(segment))
  309. {
  310. this->GetPreReservedPageAllocator(segment)->ReleaseDecommitted(address, pageCount, segment);
  311. }
  312. else
  313. {
  314. this->GetPageAllocator(segment)->ReleaseDecommitted(address, pageCount, segment);
  315. }
  316. }
  317. CriticalSection cs;
  318. private:
  319. template<typename T>
  320. HeapPageAllocator<T>* GetPageAllocator(Page * page)
  321. {
  322. AssertMsg(page, "Why is page null?");
  323. return GetPageAllocator<T>(page->segment);
  324. }
  325. HeapPageAllocator<TAlloc>* GetPageAllocator(void * segmentParam)
  326. {
  327. SegmentBase<TAlloc> * segment = (SegmentBase<TAlloc>*)segmentParam;
  328. AssertMsg(segment, "Why is segment null?");
  329. Assert((HeapPageAllocator<TAlloc>*)(segment->GetAllocator()) == &this->pageAllocator);
  330. return (HeapPageAllocator<TAlloc> *)(segment->GetAllocator());
  331. }
  332. HeapPageAllocator<TPreReservedAlloc>* GetPreReservedPageAllocator(void * segmentParam)
  333. {
  334. SegmentBase<TPreReservedAlloc> * segment = (SegmentBase<TPreReservedAlloc>*)segmentParam;
  335. AssertMsg(segment, "Why is segment null?");
  336. Assert((HeapPageAllocator<TPreReservedAlloc>*)(segment->GetAllocator()) == &this->preReservedHeapAllocator);
  337. return (HeapPageAllocator<TPreReservedAlloc> *)(segment->GetAllocator());
  338. }
  339. HeapPageAllocator<TAlloc> pageAllocator;
  340. HeapPageAllocator<TPreReservedAlloc> preReservedHeapAllocator;
  341. HANDLE processHandle;
  342. // Track the number of time a segment's secondary allocate change from full to available to allocate.
  343. // So that we know whether CustomHeap to know when to update their "full page"
  344. // It is ok to overflow this variable. All we care is if the state has changed.
  345. // If in the unlikely scenario that we do overflow, then we delay the full pages in CustomHeap from
  346. // being made available.
  347. uint secondaryAllocStateChangedCount;
  348. };
  349. typedef CodePageAllocators<VirtualAllocWrapper, PreReservedVirtualAllocWrapper> InProcCodePageAllocators;
  350. #if _WIN32
  351. typedef CodePageAllocators<SectionAllocWrapper, PreReservedSectionAllocWrapper> OOPCodePageAllocators;
  352. #endif
/*
 * Simple free-listing based heap allocator
 *
 * Each allocation is tracked using a "HeapAllocation" record.
 * Once we alloc, we start assigning chunks sliced from the end of a HeapAllocation.
 * If we don't have enough to slice off, we push a new heap allocation record to the record stack, and try to assign from that.
 *
 * Single threaded only. Requires external locking. (Currently, EmitBufferManager manages the locking.)
 */
  362. template <typename TAlloc, typename TPreReservedAlloc>
  363. class Heap
  364. {
  365. public:
  366. Heap(ArenaAllocator * alloc, CodePageAllocators<TAlloc, TPreReservedAlloc> * codePageAllocators, HANDLE processHandle);
  367. Allocation* Alloc(DECLSPEC_GUARD_OVERFLOW size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
  368. void Free(__in Allocation* allocation);
  369. void DecommitAll();
  370. void FreeAll();
  371. bool IsInHeap(__in void* address);
  372. // A page should be in full list if:
  373. // 1. It does not have any space
  374. // 2. Parent segment cannot allocate any more XDATA
  375. bool ShouldBeInFullList(Page* page)
  376. {
  377. return page->HasNoSpace() || (codePageAllocators->AllocXdata() && !((Segment*)(page->segment))->CanAllocSecondary());
  378. }
  379. BOOL ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag, __in_opt char* addressInPage = nullptr);
  380. BOOL ProtectAllocationWithExecuteReadWrite(Allocation *allocation, __in_opt char* addressInPage = nullptr);
  381. BOOL ProtectAllocationWithExecuteReadOnly(__in Allocation *allocation, __in_opt char* addressInPage = nullptr);
  382. ~Heap();
  383. #if DBG_DUMP
  384. void DumpStats();
  385. #endif
  386. private:
  387. /**
  388. * Inline methods
  389. */
  390. inline unsigned int GetChunkSizeForBytes(DECLSPEC_GUARD_OVERFLOW size_t bytes)
  391. {
  392. return (bytes > Page::Alignment ? static_cast<unsigned int>(bytes) / Page::Alignment : 1);
  393. }
  394. inline size_t GetNumPagesForSize(DECLSPEC_GUARD_OVERFLOW size_t bytes)
  395. {
  396. size_t allocSize = AllocSizeMath::Add(bytes, AutoSystemInfo::PageSize);
  397. if (allocSize == (size_t) -1)
  398. {
  399. return 0;
  400. }
  401. return ((allocSize - 1)/ AutoSystemInfo::PageSize);
  402. }
  403. inline BVIndex GetFreeIndexForPage(Page* page, DECLSPEC_GUARD_OVERFLOW size_t bytes)
  404. {
  405. unsigned int length = GetChunkSizeForBytes(bytes);
  406. BVIndex index = page->freeBitVector.FirstStringOfOnes(length);
  407. return index;
  408. }
  409. /**
  410. * Large object methods
  411. */
  412. Allocation* AllocLargeObject(DECLSPEC_GUARD_OVERFLOW size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
  413. void FreeLargeObject(Allocation* header);
  414. void FreeLargeObjects();
  415. //Called during Free
  416. DWORD EnsurePageWriteable(Page* page);
  417. // this get called when freeing the whole page
  418. DWORD EnsureAllocationWriteable(Allocation* allocation);
  419. // this get called when only freeing a part in the page
  420. DWORD EnsureAllocationExecuteWriteable(Allocation* allocation);
  421. template<DWORD readWriteFlags>
  422. DWORD EnsurePageReadWrite(Page* page)
  423. {
  424. Assert(!page->isDecommitted);
  425. this->codePageAllocators->ProtectPages(page->address, 1, page->segment, readWriteFlags, PAGE_EXECUTE_READ);
  426. return PAGE_EXECUTE_READ;
  427. }
  428. template<DWORD readWriteFlags>
  429. DWORD EnsureAllocationReadWrite(Allocation* allocation)
  430. {
  431. if (allocation->IsLargeAllocation())
  432. {
  433. this->ProtectAllocation(allocation, readWriteFlags, PAGE_EXECUTE_READ);
  434. return PAGE_EXECUTE_READ;
  435. }
  436. else
  437. {
  438. return EnsurePageReadWrite<readWriteFlags>(allocation->page);
  439. }
  440. }
  441. /**
  442. * Freeing Methods
  443. */
  444. void FreeBuckets(bool freeOnlyEmptyPages);
  445. void FreeBucket(DListBase<Page>* bucket, bool freeOnlyEmptyPages);
  446. void FreePage(Page* page);
  447. bool FreeAllocation(Allocation* allocation);
  448. void FreeAllocationHelper(Allocation * allocation, BVIndex index, uint length);
  449. #if PDATA_ENABLED
  450. void FreeXdata(XDataAllocation* xdata, void* segment);
  451. #endif
  452. void FreeDecommittedBuckets();
  453. void FreeDecommittedLargeObjects();
  454. /**
  455. * Page methods
  456. */
  457. Page* AddPageToBucket(Page* page, BucketId bucket, bool wasFull = false);
  458. bool AllocInPage(Page* page, DECLSPEC_GUARD_OVERFLOW size_t bytes, ushort pdataCount, ushort xdataSize, Allocation ** allocation);
  459. Page* AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
  460. Page* FindPageToSplit(BucketId targetBucket, bool findPreReservedHeapPages = false);
  461. bool UpdateFullPages();
  462. Page * GetExistingPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment);
  463. BVIndex GetIndexInPage(__in Page* page, __in char* address);
  464. bool IsInHeap(DListBase<Page> const buckets[NumBuckets], __in void *address);
  465. bool IsInHeap(DListBase<Page> const& buckets, __in void *address);
  466. bool IsInHeap(DListBase<Allocation> const& allocations, __in void *address);
  467. /**
  468. * Stats
  469. */
  470. #if DBG_DUMP
  471. size_t totalAllocationSize;
  472. size_t freeObjectSize;
  473. size_t allocationsSinceLastCompact;
  474. size_t freesSinceLastCompact;
  475. #endif
  476. /**
  477. * Allocator stuff
  478. */
  479. CodePageAllocators<TAlloc, TPreReservedAlloc> * codePageAllocators;
  480. ArenaAllocator* auxiliaryAllocator;
  481. /*
  482. * Various tracking lists
  483. */
  484. DListBase<Page> buckets[NumBuckets];
  485. DListBase<Page> fullPages[NumBuckets];
  486. DListBase<Allocation> largeObjectAllocations;
  487. DListBase<Page> decommittedPages;
  488. DListBase<Allocation> decommittedLargeObjects;
  489. uint lastSecondaryAllocStateChangedCount;
  490. HANDLE processHandle;
  491. #if DBG
  492. bool inDtor;
  493. #endif
  494. };
  495. typedef Heap<VirtualAllocWrapper, PreReservedVirtualAllocWrapper> InProcHeap;
  496. #if _WIN32
  497. typedef Heap<SectionAllocWrapper, PreReservedSectionAllocWrapper> OOPHeap;
  498. #endif
  499. // Helpers
  500. unsigned int log2(size_t number);
  501. BucketId GetBucketForSize(DECLSPEC_GUARD_OVERFLOW size_t bytes);
  502. void FillDebugBreak(_Out_writes_bytes_all_(byteCount) BYTE* buffer, _In_ size_t byteCount);
  503. } // namespace CustomHeap
  504. } // namespace Memory
  505. #endif