CustomHeap.h 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #pragma once
  6. namespace Memory
  7. {
  8. #define VerboseHeapTrace(...) { \
  9. OUTPUT_VERBOSE_TRACE(Js::CustomHeapPhase, __VA_ARGS__); \
  10. }
  11. #define HeapTrace(...) { \
  12. Output::Print(__VA_ARGS__); \
  13. Output::Flush(); \
  14. }
  15. namespace CustomHeap
  16. {
// Size classes for the bucketed code-page allocator. The non-negative values
// index the buckets[] / fullPages[] arrays in Heap (NumBuckets is the array
// length); InvalidBucket is the "no bucket" sentinel.
enum BucketId
{
    InvalidBucket = -1,
    SmallObjectList,    // smallest size class (presumably < 256 bytes — confirm against GetBucketForSize)
    Bucket256,
    Bucket512,
    Bucket1024,
    Bucket2048,
    Bucket4096,
    LargeObjectList,    // allocations too large for any fixed bucket
    NumBuckets
};

// Maps a requested byte count to a BucketId; defined elsewhere in this namespace.
BucketId GetBucketForSize(DECLSPEC_GUARD_OVERFLOW size_t bytes);
  30. struct Page
  31. {
  32. bool inFullList;
  33. bool isDecommitted;
  34. void* segment;
  35. BVUnit freeBitVector;
  36. char* address;
  37. BucketId currentBucket;
  38. bool HasNoSpace()
  39. {
  40. return freeBitVector.IsEmpty();
  41. }
  42. bool IsEmpty()
  43. {
  44. return freeBitVector.IsFull();
  45. }
  46. bool CanAllocate(BucketId targetBucket)
  47. {
  48. return freeBitVector.FirstStringOfOnes(targetBucket + 1) != BVInvalidIndex;
  49. }
  50. Page(__in char* address, void* segment, BucketId bucket):
  51. address(address),
  52. segment(segment),
  53. currentBucket(bucket),
  54. freeBitVector(0xFFFFFFFF),
  55. isDecommitted(false),
  56. inFullList(false)
  57. {
  58. }
  59. // Each bit in the bit vector corresponds to 128 bytes of memory
  60. // This implies that 128 bytes is the smallest allocation possible
  61. static const uint Alignment = 128;
  62. static const uint MaxAllocationSize = 4096;
  63. };
// One allocation handed out by the custom heap.
// Small allocations (size <= Page::MaxAllocationSize) live inside a Page and
// the union's 'page' member is active; large allocations own whole segment
// pages and the union's 'largeObjectAllocation' member is active. Always
// check IsLargeAllocation() before touching the union.
struct Allocation
{
    union
    {
        Page* page;                 // active when !IsLargeAllocation()
        struct
        {
            void* segment;          // active when IsLargeAllocation()
            bool isDecommitted;     // large-object pages have been decommitted
        } largeObjectAllocation;
    };

    uintptr_t thunkAddress;
    __field_bcount(size) char* address; // start of the allocated bytes
    size_t size;                        // allocation size in bytes

    // Large allocations bypass the bucketed pages entirely.
    bool IsLargeAllocation() const { return size > Page::MaxAllocationSize; }
    // Page count for a large allocation; asserts because page-backed (small)
    // allocations don't own whole pages.
    size_t GetPageCount() const { Assert(this->IsLargeAllocation()); return size / AutoSystemInfo::PageSize; }

#if DBG
    // Initialized to false, this is set to true when the allocation
    // is actually used by the emit buffer manager
    // This is almost always true- it's there only for assertion purposes
    bool isAllocationUsed: 1;
    bool isNotExecutableBecauseOOM: 1;
#endif

#if PDATA_ENABLED
    XDataAllocation xdata;

    // Returns the secondary (XDATA) allocator of the segment backing this
    // allocation, selecting the segment through the union's active member.
    XDataAllocator* GetXDataAllocator()
    {
        XDataAllocator* allocator;
        if (!this->IsLargeAllocation())
        {
            allocator = static_cast<XDataAllocator*>(((Segment*)(this->page->segment))->GetSecondaryAllocator());
        }
        else
        {
            allocator = static_cast<XDataAllocator*>(((Segment*) (largeObjectAllocation.segment))->GetSecondaryAllocator());
        }
        return allocator;
    }
#endif
};
// Wrapper for the two HeapPageAllocators, with and without the prereserved segment.
// Supports multiple-thread access. Requires explicit locking (via AutoCriticalSection)
// of the public 'cs' member; most methods assert that the lock is held.
// Each operation routes to pageAllocator or preReservedHeapAllocator based on
// which one owns the given segment (IsPreReservedSegment).
template <typename TAlloc, typename TPreReservedAlloc>
class CodePageAllocators
{
public:
    // Constructor for the virtual-alloc-backed configuration. Regular pages
    // come from 'pageAllocator'; pre-reserved pages from
    // 'preReservedHeapAllocator' (backed by 'virtualAllocator').
    CodePageAllocators(AllocationPolicyManager * policyManager, bool allocXdata, PreReservedVirtualAllocWrapper * virtualAllocator, HANDLE processHandle) :
        pageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, nullptr, processHandle),
        preReservedHeapAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, virtualAllocator, processHandle),
        cs(4000),
        secondaryAllocStateChangedCount(0),
        processHandle(processHandle)
    {
#if DBG
        // Multiple threads may use these allocators (under this->cs), so
        // clear the page allocators' single-thread-ownership debug tracking.
        this->preReservedHeapAllocator.ClearConcurrentThreadId();
        this->pageAllocator.ClearConcurrentThreadId();
#endif
    }
#if _WIN32
    // Constructor for the section-mapping-backed configuration
    // (used with OOPCodePageAllocators — see typedef below this class).
    CodePageAllocators(AllocationPolicyManager * policyManager, bool allocXdata, SectionAllocWrapper * sectionAllocator, PreReservedSectionAllocWrapper * virtualAllocator, HANDLE processHandle) :
        pageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, sectionAllocator, processHandle),
        preReservedHeapAllocator(policyManager, allocXdata, true /*excludeGuardPages*/, virtualAllocator, processHandle),
        cs(4000),
        secondaryAllocStateChangedCount(0),
        processHandle(processHandle)
    {
#if DBG
        this->preReservedHeapAllocator.ClearConcurrentThreadId();
        this->pageAllocator.ClearConcurrentThreadId();
#endif
    }
#endif

    bool AllocXdata()
    {
        // Simple immutable data access, no need for lock
        return preReservedHeapAllocator.AllocXdata();
    }

    // True when 'segment' belongs to the pre-reserved heap page allocator.
    bool IsPreReservedSegment(void * segment)
    {
        // Simple immutable data access, no need for lock
        Assert(segment);
        return reinterpret_cast<SegmentBaseCommon*>(segment)->IsInPreReservedHeapPageAllocator();
    }

    bool IsInNonPreReservedPageAllocator(__in void *address)
    {
        Assert(this->cs.IsLocked());
        return this->pageAllocator.IsAddressFromAllocator(address);
    }

    // Segment-granularity allocation. Tries the pre-reserved allocator first
    // (when permitted), falling back to the regular one; on fallback for
    // jitted code, records that not all JIT code is in the pre-reserved region.
    char * Alloc(size_t * pages, void ** segment, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, bool * isAllJITCodeInPreReservedRegion)
    {
        Assert(this->cs.IsLocked());
        char* address = nullptr;
        if (canAllocInPreReservedHeapPageSegment)
        {
            address = this->preReservedHeapAllocator.Alloc(pages, (SegmentBase<TPreReservedAlloc>**)(segment));
        }
        if (address == nullptr)
        {
            if (isAnyJittedCode)
            {
                *isAllJITCodeInPreReservedRegion = false;
            }
            address = this->pageAllocator.Alloc(pages, (SegmentBase<TAlloc>**)segment);
        }
        return address;
    }

    // Map a remote address range into the local process / release the mapping.
    // Defined out of line.
    char * AllocLocal(char * remoteAddr, size_t size, void * segment);
    void FreeLocal(char * addr, void * segment);

    // Page-granularity allocation; same pre-reserved-first policy as Alloc().
    char * AllocPages(DECLSPEC_GUARD_OVERFLOW uint pages, void ** pageSegment, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, bool * isAllJITCodeInPreReservedRegion)
    {
        Assert(this->cs.IsLocked());
        char * address = nullptr;
        if (canAllocInPreReservedHeapPageSegment)
        {
            address = this->preReservedHeapAllocator.AllocPages(pages, (PageSegmentBase<TPreReservedAlloc>**)pageSegment);
            if (address == nullptr)
            {
                VerboseHeapTrace(_u("PRE-RESERVE: PreReserved Segment CANNOT be allocated \n"));
            }
        }
        if (address == nullptr) // if no space in Pre-reserved Page Segment, then allocate in regular ones.
        {
            if (isAnyJittedCode)
            {
                *isAllJITCodeInPreReservedRegion = false;
            }
            address = this->pageAllocator.AllocPages(pages, (PageSegmentBase<TAlloc>**)pageSegment);
        }
        else
        {
            VerboseHeapTrace(_u("PRE-RESERVE: Allocing new page in PreReserved Segment \n"));
        }
        return address;
    }

    void ReleasePages(void* pageAddress, uint pageCount, __in void* segment)
    {
        Assert(this->cs.IsLocked());
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            this->GetPreReservedPageAllocator(segment)->ReleasePages(pageAddress, pageCount, segment);
        }
        else
        {
            this->GetPageAllocator(segment)->ReleasePages(pageAddress, pageCount, segment);
        }
    }

    BOOL ProtectPages(__in char* address, size_t pageCount, __in void* segment, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag)
    {
        // This is merely a wrapper for VirtualProtect, no need to synchronize, and doesn't touch any data.
        // No need to assert locked.
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            return this->GetPreReservedPageAllocator(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
        }
        else
        {
            return this->GetPageAllocator(segment)->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
        }
    }

    void TrackDecommittedPages(void * address, uint pageCount, __in void* segment)
    {
        Assert(this->cs.IsLocked());
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            this->GetPreReservedPageAllocator(segment)->TrackDecommittedPages(address, pageCount, segment);
        }
        else
        {
            this->GetPageAllocator(segment)->TrackDecommittedPages(address, pageCount, segment);
        }
    }

    // Releases a secondary (XDATA) allocation and bumps the state-changed
    // counter when the owning allocator reports a full->available transition.
    void ReleaseSecondary(const SecondaryAllocation& allocation, void* segment)
    {
        Assert(this->cs.IsLocked());
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            secondaryAllocStateChangedCount += (uint)this->GetPreReservedPageAllocator(segment)->ReleaseSecondary(allocation, segment);
        }
        else
        {
            secondaryAllocStateChangedCount += (uint)this->GetPageAllocator(segment)->ReleaseSecondary(allocation, segment);
        }
    }

    // Compares the caller's last-seen counter with the current one; updates
    // the caller's copy and returns true when the state changed since then.
    bool HasSecondaryAllocStateChanged(uint * lastSecondaryAllocStateChangedCount)
    {
        if (secondaryAllocStateChangedCount != *lastSecondaryAllocStateChangedCount)
        {
            *lastSecondaryAllocStateChangedCount = secondaryAllocStateChangedCount;
            return true;
        }
        return false;
    }

    void DecommitPages(__in char* address, size_t pageCount, void* segment)
    {
        // This is merely a wrapper for VirtualFree, no need to synchronize, and doesn't touch any data.
        // No need to assert locked.
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            this->GetPreReservedPageAllocator(segment)->DecommitPages(address, pageCount);
        }
        else
        {
            this->GetPageAllocator(segment)->DecommitPages(address, pageCount);
        }
    }

    // Allocates pdata/xdata (unwind info) for a function from the segment's
    // secondary allocator.
    bool AllocSecondary(void* segment, ULONG_PTR functionStart, size_t functionSize_t, ushort pdataCount, ushort xdataSize, SecondaryAllocation* allocation)
    {
        Assert(this->cs.IsLocked());
        Assert(functionSize_t <= MAXUINT32);
        DWORD functionSize = static_cast<DWORD>(functionSize_t);
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            return this->GetPreReservedPageAllocator(segment)->AllocSecondary(segment, functionStart, functionSize, pdataCount, xdataSize, allocation);
        }
        else
        {
            return this->GetPageAllocator(segment)->AllocSecondary(segment, functionStart, functionSize, pdataCount, xdataSize, allocation);
        }
    }

    void Release(void * address, size_t pageCount, void * segment)
    {
        Assert(this->cs.IsLocked());
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            this->GetPreReservedPageAllocator(segment)->Release(address, pageCount, segment);
        }
        else
        {
            this->GetPageAllocator(segment)->Release(address, pageCount, segment);
        }
    }

    void ReleaseDecommitted(void * address, size_t pageCount, __in void * segment)
    {
        Assert(this->cs.IsLocked());
        Assert(segment);
        if (IsPreReservedSegment(segment))
        {
            this->GetPreReservedPageAllocator(segment)->ReleaseDecommitted(address, pageCount, segment);
        }
        else
        {
            this->GetPageAllocator(segment)->ReleaseDecommitted(address, pageCount, segment);
        }
    }

    // Public lock guarding this object; callers lock it (AutoCriticalSection)
    // around the methods that assert cs.IsLocked().
    CriticalSection cs;
private:
    template<typename T>
    HeapPageAllocator<T>* GetPageAllocator(Page * page)
    {
        AssertMsg(page, "Why is page null?");
        return GetPageAllocator<T>(page->segment);
    }

    // Recovers the regular page allocator from a segment; asserts that the
    // segment really belongs to this->pageAllocator.
    HeapPageAllocator<TAlloc>* GetPageAllocator(void * segmentParam)
    {
        SegmentBase<TAlloc> * segment = (SegmentBase<TAlloc>*)segmentParam;
        AssertMsg(segment, "Why is segment null?");
        Assert((HeapPageAllocator<TAlloc>*)(segment->GetAllocator()) == &this->pageAllocator);
        return (HeapPageAllocator<TAlloc> *)(segment->GetAllocator());
    }

    // Same as above, for segments owned by the pre-reserved allocator.
    HeapPageAllocator<TPreReservedAlloc>* GetPreReservedPageAllocator(void * segmentParam)
    {
        SegmentBase<TPreReservedAlloc> * segment = (SegmentBase<TPreReservedAlloc>*)segmentParam;
        AssertMsg(segment, "Why is segment null?");
        Assert((HeapPageAllocator<TPreReservedAlloc>*)(segment->GetAllocator()) == &this->preReservedHeapAllocator);
        return (HeapPageAllocator<TPreReservedAlloc> *)(segment->GetAllocator());
    }

    HeapPageAllocator<TAlloc> pageAllocator;
    HeapPageAllocator<TPreReservedAlloc> preReservedHeapAllocator;
    HANDLE processHandle;

    // Tracks the number of times a segment's secondary allocator changed from
    // full back to available, so CustomHeap knows when to re-examine its
    // "full page" lists.
    // It is ok to overflow this variable. All we care about is whether the
    // state has changed. In the unlikely scenario that we do overflow, we
    // merely delay the full pages in CustomHeap from being made available.
    uint secondaryAllocStateChangedCount;
};
// In-process configuration: plain virtual-alloc wrappers.
typedef CodePageAllocators<VirtualAllocWrapper, PreReservedVirtualAllocWrapper> InProcCodePageAllocators;
#if _WIN32
// Section-mapping configuration (OOP prefix suggests out-of-process JIT — see
// the section-allocator constructor above).
typedef CodePageAllocators<SectionAllocWrapper, PreReservedSectionAllocWrapper> OOPCodePageAllocators;
#endif
/*
 * Simple free-listing based heap allocator
 *
 * Each allocation is tracked using a "HeapAllocation" record.
 * Once we alloc, we start assigning chunks sliced from the end of a HeapAllocation.
 * If we don't have enough to slice off, we push a new heap allocation record onto the record stack and try to assign from that.
 *
 * Single thread only. Requires external locking. (Currently, EmitBufferManager manages the locking.)
 */
// Bucketed executable-code heap built on top of CodePageAllocators.
// Small allocations are carved out of Pages grouped by BucketId; allocations
// larger than Page::MaxAllocationSize go on the large-object list.
template <typename TAlloc, typename TPreReservedAlloc>
class Heap
{
public:
    Heap(ArenaAllocator * alloc, CodePageAllocators<TAlloc, TPreReservedAlloc> * codePageAllocators, HANDLE processHandle);

    Allocation* Alloc(DECLSPEC_GUARD_OVERFLOW size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
    void Free(__in Allocation* allocation);
    void DecommitAll();
    void FreeAll();
    bool IsInHeap(__in void* address);

    // A page should be in full list if:
    // 1. It does not have any space
    // 2. Parent segment cannot allocate any more XDATA
    bool ShouldBeInFullList(Page* page)
    {
        return page->HasNoSpace() || (codePageAllocators->AllocXdata() && !((Segment*)(page->segment))->CanAllocSecondary());
    }

    BOOL ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag, __in_opt char* addressInPage = nullptr);
    BOOL ProtectAllocationWithExecuteReadWrite(Allocation *allocation, __in_opt char* addressInPage = nullptr);
    BOOL ProtectAllocationWithExecuteReadOnly(__in Allocation *allocation, __in_opt char* addressInPage = nullptr);

    ~Heap();

#if DBG_DUMP
    void DumpStats();
#endif

private:
    /**
     * Inline methods
     */
    // Number of 128-byte chunks needed for 'bytes'. NOTE(review): for
    // bytes > Alignment this truncates, so it assumes 'bytes' is a multiple
    // of Page::Alignment (callers presumably pass rounded bucket sizes) —
    // confirm at call sites.
    inline unsigned int GetChunkSizeForBytes(DECLSPEC_GUARD_OVERFLOW size_t bytes)
    {
        return (bytes > Page::Alignment ? static_cast<unsigned int>(bytes) / Page::Alignment : 1);
    }

    // ceil(bytes / PageSize), computed via an overflow-checked add;
    // returns 0 when the checked add saturates (overflow).
    inline size_t GetNumPagesForSize(DECLSPEC_GUARD_OVERFLOW size_t bytes)
    {
        size_t allocSize = AllocSizeMath::Add(bytes, AutoSystemInfo::PageSize);
        if (allocSize == (size_t) -1)
        {
            return 0;
        }
        return ((allocSize - 1)/ AutoSystemInfo::PageSize);
    }

    // Index of the first free chunk run in 'page' large enough for 'bytes',
    // or BVInvalidIndex when the page has no such run.
    inline BVIndex GetFreeIndexForPage(Page* page, DECLSPEC_GUARD_OVERFLOW size_t bytes)
    {
        unsigned int length = GetChunkSizeForBytes(bytes);
        BVIndex index = page->freeBitVector.FirstStringOfOnes(length);
        return index;
    }

    /**
     * Large object methods
     */
    Allocation* AllocLargeObject(DECLSPEC_GUARD_OVERFLOW size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
    void FreeLargeObject(Allocation* header);
    void FreeLargeObjects();

    //Called during Free
    DWORD EnsurePageWriteable(Page* page);
    // this get called when freeing the whole page
    DWORD EnsureAllocationWriteable(Allocation* allocation);
    // this get called when only freeing a part in the page
    DWORD EnsureAllocationExecuteWriteable(Allocation* allocation);

    // Reprotects the page to 'readWriteFlags' and returns the protection to
    // restore afterwards (PAGE_EXECUTE_READ).
    template<DWORD readWriteFlags>
    DWORD EnsurePageReadWrite(Page* page)
    {
        Assert(!page->isDecommitted);
        this->codePageAllocators->ProtectPages(page->address, 1, page->segment, readWriteFlags, PAGE_EXECUTE_READ);
        return PAGE_EXECUTE_READ;
    }

    // Allocation-level variant: large allocations go through
    // ProtectAllocation; page-backed ones reuse EnsurePageReadWrite.
    template<DWORD readWriteFlags>
    DWORD EnsureAllocationReadWrite(Allocation* allocation)
    {
        if (allocation->IsLargeAllocation())
        {
            this->ProtectAllocation(allocation, readWriteFlags, PAGE_EXECUTE_READ);
            return PAGE_EXECUTE_READ;
        }
        else
        {
            return EnsurePageReadWrite<readWriteFlags>(allocation->page);
        }
    }

    /**
     * Freeing Methods
     */
    void FreeBuckets(bool freeOnlyEmptyPages);
    void FreeBucket(DListBase<Page>* bucket, bool freeOnlyEmptyPages);
    void FreePage(Page* page);
    bool FreeAllocation(Allocation* allocation);
    void FreeAllocationHelper(Allocation * allocation, BVIndex index, uint length);
#if PDATA_ENABLED
    void FreeXdata(XDataAllocation* xdata, void* segment);
#endif
    void FreeDecommittedBuckets();
    void FreeDecommittedLargeObjects();

    /**
     * Page methods
     */
    Page* AddPageToBucket(Page* page, BucketId bucket, bool wasFull = false);
    bool AllocInPage(Page* page, DECLSPEC_GUARD_OVERFLOW size_t bytes, ushort pdataCount, ushort xdataSize, Allocation ** allocation);
    Page* AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion);
    Page* FindPageToSplit(BucketId targetBucket, bool findPreReservedHeapPages = false);
    bool UpdateFullPages();
    Page * GetExistingPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment);
    BVIndex GetIndexInPage(__in Page* page, __in char* address);
    bool IsInHeap(DListBase<Page> const buckets[NumBuckets], __in void *address);
    bool IsInHeap(DListBase<Page> const& buckets, __in void *address);
    bool IsInHeap(DListBase<Allocation> const& allocations, __in void *address);

    /**
     * Stats
     */
#if DBG_DUMP
    size_t totalAllocationSize;
    size_t freeObjectSize;
    size_t allocationsSinceLastCompact;
    size_t freesSinceLastCompact;
#endif

    /**
     * Allocator stuff
     */
    CodePageAllocators<TAlloc, TPreReservedAlloc> * codePageAllocators;
    ArenaAllocator* auxiliaryAllocator;

    /*
     * Various tracking lists
     */
    DListBase<Page> buckets[NumBuckets];       // pages with free space, per size class
    DListBase<Page> fullPages[NumBuckets];     // pages parked by ShouldBeInFullList
    DListBase<Allocation> largeObjectAllocations;
    DListBase<Page> decommittedPages;
    DListBase<Allocation> decommittedLargeObjects;

    // Last value observed from CodePageAllocators::HasSecondaryAllocStateChanged.
    uint lastSecondaryAllocStateChangedCount;
    HANDLE processHandle;
#if DBG
    bool inDtor; // true while ~Heap runs
#endif
};
// In-process heap over regular virtual-alloc wrappers.
typedef Heap<VirtualAllocWrapper, PreReservedVirtualAllocWrapper> InProcHeap;
#if _WIN32
// Section-mapping heap (OOP prefix suggests out-of-process JIT).
typedef Heap<SectionAllocWrapper, PreReservedSectionAllocWrapper> OOPHeap;
#endif

// Helpers (defined elsewhere)
unsigned int log2(size_t number);
BucketId GetBucketForSize(DECLSPEC_GUARD_OVERFLOW size_t bytes);
// Fills 'buffer' — presumably with debug-break instruction bytes, per the
// name; confirm against the definition.
void FillDebugBreak(_Out_writes_bytes_all_(byteCount) BYTE* buffer, _In_ size_t byteCount);
  501. } // namespace CustomHeap
  502. } // namespace Memory