CustomHeap.cpp 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "CommonMemoryPch.h"
  6. #ifdef _M_X64
  7. #include "Memory\amd64\XDataAllocator.h"
  8. #elif defined(_M_ARM)
  9. #include "Memory\arm\XDataAllocator.h"
  10. #include <wchar.h>
  11. #elif defined(_M_ARM64)
  12. #include "Memory\arm64\XDataAllocator.h"
  13. #endif
  14. #include "CustomHeap.h"
  15. namespace Memory
  16. {
  17. namespace CustomHeap
  18. {
  19. #pragma region "Constructor and Destructor"
// Constructs the executable-code heap.
// policyManager - allocation policy shared by both page allocators.
// alloc         - arena allocator used for the heap's own bookkeeping nodes
//                 (Page / Allocation list entries).
// allocXdata    - true when secondary (pdata/xdata) allocations must be made
//                 alongside code allocations on this platform.
Heap::Heap(AllocationPolicyManager * policyManager, ArenaAllocator * alloc, bool allocXdata):
    auxilliaryAllocator(alloc),
    allocXdata(allocXdata),
#if DBG_DUMP
    freeObjectSize(0),
    totalAllocationSize(0),
    allocationsSinceLastCompact(0),
    freesSinceLastCompact(0),
#endif
#if DBG
    inDtor(false),
#endif
    pageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/),
    preReservedHeapPageAllocator(policyManager, allocXdata, true /*excludeGuardPages*/),
    cs(4000) // critical-section spin count before blocking
{
    // Start with every size bucket's page list empty.
    for (int i = 0; i < NumBuckets; i++)
    {
        this->buckets[i].Reset();
    }
}
// Tears down the heap, releasing all live and decommitted allocations.
Heap::~Heap()
{
#if DBG
    // The destructor-only free paths assert on this flag in lieu of taking
    // the critical section.
    inDtor = true;
#endif
    this->FreeAll();
}
  48. #pragma endregion
  49. #pragma region "Public routines"
// Releases everything the heap owns: bucketed pages, large objects, and
// both decommitted lists. Invoked during heap destruction.
void Heap::FreeAll()
{
    FreeBuckets(false);
    FreeLargeObjects();
    FreeDecommittedBuckets();
    FreeDecommittedLargeObjects();
}
// Frees a single allocation, dispatching on size: large (multi-page)
// objects go through the large-object list, everything else is returned to
// its page's free bit vector. Already-decommitted objects are left alone
// (their pages are reclaimed at heap destruction). Returns false only for
// a null object or when FreeLargeObject/FreeAllocation fails.
bool Heap::Free(__in Allocation* object)
{
    Assert(object != nullptr);
    if (object == nullptr)
    {
        return false;
    }
    BucketId bucket = (BucketId) GetBucketForSize(object->size);
    if (bucket == BucketId::LargeObjectList)
    {
#if PDATA_ENABLED
        // Release the pdata/xdata entry first so the unwind data does not
        // outlive the code pages.
        if(!object->xdata.IsFreed())
        {
            FreeXdata(&object->xdata, object->largeObjectAllocation.segment);
        }
#endif
        if (object->largeObjectAllocation.isDecommitted)
        {
            // Pages already decommitted; nothing further to release now.
            return true;
        }
        return FreeLargeObject<false>(object);
    }
#if PDATA_ENABLED
    if(!object->xdata.IsFreed())
    {
        FreeXdata(&object->xdata, object->page->segment);
    }
#endif
    if (object->page->isDecommitted)
    {
        return true;
    }
    return FreeAllocation(object);
}
// Decommits the pages backing an allocation (DecommitPages wraps
// VirtualFree) and moves the owning list node to the matching decommitted
// list. Returns true on success; false only for a null object.
bool Heap::Decommit(__in Allocation* object)
{
    // This function doesn't really touch the page allocator data structure.
    // DecommitPages is merely a wrapper for VirtualFree
    // So no need to take the critical section to synchronize
    Assert(object != nullptr);
    if (object == nullptr)
    {
        return false;
    }
    Assert(object->isAllocationUsed);
    BucketId bucket = (BucketId) GetBucketForSize(object->size);
    if (bucket == BucketId::LargeObjectList)
    {
        Assert(!object->largeObjectAllocation.isDecommitted);
        if (!object->largeObjectAllocation.isDecommitted)
        {
#if PDATA_ENABLED
            // Free the pdata/xdata entry before the code pages go away.
            if(!object->xdata.IsFreed())
            {
                FreeXdata(&object->xdata, object->largeObjectAllocation.segment);
            }
#endif
            this->DecommitPages(object->address, object->GetPageCount(), object->largeObjectAllocation.segment);
            this->largeObjectAllocations.MoveElementTo(object, &this->decommittedLargeObjects);
            object->largeObjectAllocation.isDecommitted = true;
            return true;
        }
        // NOTE(review): if a large object is already decommitted (the assert
        // above fires in DBG), control falls through to the small-object
        // path below and reads object->page - confirm this is unreachable.
    }
    // Skip asserting here- multiple objects could be on the same page
    // Review: should we really decommit here or decommit only when all objects
    // on the page have been decommitted?
    if (!object->page->isDecommitted)
    {
#if PDATA_ENABLED
        if(!object->xdata.IsFreed())
        {
            FreeXdata(&object->xdata, object->page->segment);
        }
#endif
        bucket = object->page->currentBucket;
        this->DecommitPages(object->page->address, 1, object->page->segment);
        // The page may live in either the full list or its bucket list;
        // move it from whichever one holds it.
        if (this->ShouldBeInFullList(object->page))
        {
            this->fullPages[bucket].MoveElementTo(object->page, &this->decommittedPages);
        }
        else
        {
            this->buckets[bucket].MoveElementTo(object->page, &this->decommittedPages);
        }
        object->page->isDecommitted = true;
    }
    return true;
}
  145. bool Heap::IsInRange(__in void* address)
  146. {
  147. AutoCriticalSection autocs(&this->cs);
  148. return (this->preReservedHeapPageAllocator.GetVirtualAllocator()->IsInRange(address) || this->pageAllocator.IsAddressFromAllocator(address));
  149. }
  150. /*
  151. * Algorithm:
  152. * - Find bucket
  153. * - Check bucket pages - if it has enough free space, allocate that chunk
  154. * - Check pages in bigger buckets - if that has enough space, split that page and allocate from that chunk
  155. * - Allocate new page
  156. */
  157. Allocation* Heap::Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
  158. {
  159. Assert(bytes > 0);
  160. Assert((allocXdata || pdataCount == 0) && (!allocXdata || pdataCount > 0));
  161. Assert(pdataCount > 0 || (pdataCount == 0 && xdataSize == 0));
  162. // Round up to power of two to allocate, and figure out which bucket to allocate in
  163. size_t bytesToAllocate = PowerOf2Policy::GetSize(bytes);
  164. BucketId bucket = (BucketId) GetBucketForSize(bytesToAllocate);
  165. Allocation* allocation;
  166. if (bucket == BucketId::LargeObjectList)
  167. {
  168. allocation = AllocLargeObject(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
  169. #if defined(DBG)
  170. MEMORY_BASIC_INFORMATION memBasicInfo;
  171. size_t resultBytes = VirtualQuery(allocation->address, &memBasicInfo, sizeof(memBasicInfo));
  172. Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
  173. #endif
  174. return allocation;
  175. }
  176. VerboseHeapTrace(L"Bucket is %d\n", bucket);
  177. VerboseHeapTrace(L"Requested: %d bytes. Allocated: %d bytes\n", bytes, bytesToAllocate);
  178. Page* page = nullptr;
  179. if(!this->buckets[bucket].Empty())
  180. {
  181. page = &this->buckets[bucket].Head();
  182. }
  183. else
  184. {
  185. page = FindPageToSplit(bucket, canAllocInPreReservedHeapPageSegment);
  186. }
  187. if(page == nullptr)
  188. {
  189. page = AllocNewPage(bucket, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
  190. }
  191. // Out of memory
  192. if (page == nullptr)
  193. {
  194. return nullptr;
  195. }
  196. #if defined(DBG)
  197. MEMORY_BASIC_INFORMATION memBasicInfo;
  198. size_t resultBytes = VirtualQuery(page->address, &memBasicInfo, sizeof(memBasicInfo));
  199. Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
  200. #endif
  201. allocation = AllocInPage(page, bytesToAllocate, pdataCount, xdataSize);
  202. return allocation;
  203. }
  204. BOOL Heap::ProtectAllocationWithExecuteReadWrite(Allocation *allocation, char* addressInPage)
  205. {
  206. DWORD protectFlags = 0;
  207. if (AutoSystemInfo::Data.IsCFGEnabled())
  208. {
  209. protectFlags = PAGE_EXECUTE_RW_TARGETS_NO_UPDATE;
  210. }
  211. else
  212. {
  213. protectFlags = PAGE_EXECUTE_READWRITE;
  214. }
  215. return this->ProtectAllocation(allocation, protectFlags, PAGE_EXECUTE, addressInPage);
  216. }
  217. BOOL Heap::ProtectAllocationWithExecuteReadOnly(Allocation *allocation, char* addressInPage)
  218. {
  219. DWORD protectFlags = 0;
  220. if (AutoSystemInfo::Data.IsCFGEnabled())
  221. {
  222. protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
  223. }
  224. else
  225. {
  226. protectFlags = PAGE_EXECUTE;
  227. }
  228. return this->ProtectAllocation(allocation, protectFlags, PAGE_EXECUTE_READWRITE, addressInPage);
  229. }
// Changes page protection for an allocation, always on whole, page-aligned
// ranges. If addressInPage is supplied, only the single page containing it
// is re-protected; otherwise the allocation's full range is.
// desiredOldProtectFlag is the protection the pages are expected to hold
// before the change.
BOOL Heap::ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag, __in_opt char* addressInPage)
{
    // Allocate at the page level so that our protections don't
    // transcend allocation page boundaries. Here, allocation->address is page
    // aligned if the object is a large object allocation. If it isn't, in the else
    // branch of the following if statement, we set it to the allocation's page's
    // address. This ensures that the address being protected is always page aligned
    Assert(allocation != nullptr);
    Assert(allocation->isAllocationUsed);
    Assert(addressInPage == nullptr || (addressInPage >= allocation->address && addressInPage < (allocation->address + allocation->size)));
    char* address = allocation->address;
    size_t pageCount;
    void * segment;
    if (allocation->IsLargeAllocation())
    {
#if DBG_DUMP || defined(RECYCLER_TRACE)
        if (Js::Configuration::Global.flags.IsEnabled(Js::TraceProtectPagesFlag))
        {
            Output::Print(L"Protecting large allocation\n");
        }
#endif
        segment = allocation->largeObjectAllocation.segment;
        if (addressInPage != nullptr)
        {
            // Narrow the range down to just the page holding addressInPage.
            if (addressInPage >= allocation->address + AutoSystemInfo::PageSize)
            {
                size_t page = (addressInPage - allocation->address) / AutoSystemInfo::PageSize;
                address = allocation->address + (page * AutoSystemInfo::PageSize);
            }
            pageCount = 1;
        }
        else
        {
            pageCount = allocation->GetPageCount();
        }
        VerboseHeapTrace(L"Protecting 0x%p with 0x%x\n", address, dwVirtualProtectFlags);
        return this->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
    }
    else
    {
#if DBG_DUMP || defined(RECYCLER_TRACE)
        if (Js::Configuration::Global.flags.IsEnabled(Js::TraceProtectPagesFlag))
        {
            Output::Print(L"Protecting small allocation\n");
        }
#endif
        // Small allocations share a page: re-protect the whole single page.
        segment = allocation->page->segment;
        address = allocation->page->address;
        pageCount = 1;
        VerboseHeapTrace(L"Protecting 0x%p with 0x%x\n", address, dwVirtualProtectFlags);
        return this->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
    }
}
  283. #pragma endregion
  284. #pragma region "Large object methods"
// Allocates a multi-page ("large") object straight from a page allocator,
// bypassing the bucket system: tries the pre-reserved segment first when
// allowed, fills the pages with debug breaks, sets execute protection, and
// allocates pdata/xdata when required. Returns nullptr on any failure, with
// all partial work rolled back.
Allocation* Heap::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
{
    size_t pages = GetNumPagesForSize(bytes);
    if (pages == 0)
    {
        return nullptr;
    }
    void * segment = nullptr;
    char* address = nullptr;
#if PDATA_ENABLED
    XDataAllocation xdata;
#endif
    {
        AutoCriticalSection autocs(&this->cs);
        if (canAllocInPreReservedHeapPageSegment)
        {
            address = this->preReservedHeapPageAllocator.Alloc(&pages, (SegmentBase<PreReservedVirtualAllocWrapper>**)(&segment));
        }
        if (address == nullptr)
        {
            if (isAnyJittedCode)
            {
                // Jitted code is landing outside the pre-reserved region.
                *isAllJITCodeInPreReservedRegion = false;
            }
            address = this->pageAllocator.Alloc(&pages, (Segment**)&segment);
        }
        // Out of memory
        if (address == nullptr)
        {
            return nullptr;
        }
        FillDebugBreak((BYTE*) address, pages*AutoSystemInfo::PageSize);
        DWORD protectFlags = 0;
        if (AutoSystemInfo::Data.IsCFGEnabled())
        {
            protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
        }
        else
        {
            protectFlags = PAGE_EXECUTE;
        }
        this->ProtectPages(address, pages, segment, protectFlags /*dwVirtualProtectFlags*/, PAGE_READWRITE /*desiredOldProtectFlags*/);
#if PDATA_ENABLED
        if(pdataCount > 0)
        {
            if (!this->AllocSecondary(segment, (ULONG_PTR) address, bytes, pdataCount, xdataSize, &xdata))
            {
                // NOTE(review): cs is already held by the enclosing scope;
                // this inner acquisition relies on the critical section
                // being re-entrant - confirm.
                AutoCriticalSection autocs(&this->cs);
                this->Release(address, pages, segment);
                return nullptr;
            }
        }
#endif
    }
    Allocation* allocation = this->largeObjectAllocations.PrependNode(this->auxilliaryAllocator);
    if (allocation == nullptr)
    {
        // Bookkeeping node failed - roll back the pages (and xdata).
        AutoCriticalSection autocs(&this->cs);
        this->Release(address, pages, segment);
#if PDATA_ENABLED
        if(pdataCount > 0)
        {
            this->ReleaseSecondary(xdata, segment);
        }
#endif
        return nullptr;
    }
    allocation->address = address;
    allocation->largeObjectAllocation.segment = segment;
    allocation->largeObjectAllocation.isDecommitted = false;
    allocation->size = pages * AutoSystemInfo::PageSize;
#if PDATA_ENABLED
    allocation->xdata = xdata;
    // If this segment's secondary (xdata) allocator is now exhausted, its
    // bucketed pages can no longer serve xdata-needing allocations - park
    // every page from this segment in the full list.
    if (((Segment*)segment)->GetSecondaryAllocator() != nullptr && !((Segment*)segment)->CanAllocSecondary())
    {
        TransferPages(
            [&](Page* currentPage) -> bool
            {
                bool transfer = currentPage->segment == segment;
                if(transfer)
                {
                    VerboseHeapTrace(L"Moving page from bucket %d to full list because no XDATA allocations can be made\n", currentPage->currentBucket);
                }
                return transfer;
            } , this->buckets, this->fullPages);
    }
#endif
    return allocation;
}
// Releases the pages of every already-decommitted large object and frees
// its bookkeeping node. Destructor-only.
void Heap::FreeDecommittedLargeObjects()
{
    // This is only call when the heap is being destroy, so don't need to sync with the background thread.
    Assert(inDtor);
    FOREACH_DLISTBASE_ENTRY_EDITING(Allocation, allocation, &this->decommittedLargeObjects, largeObjectIter)
    {
        VerboseHeapTrace(L"Decommitting large object at address 0x%p of size %u\n", allocation.address, allocation.size);
        this->ReleaseDecommited(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
        largeObjectIter.RemoveCurrent(this->auxilliaryAllocator);
    }
    NEXT_DLISTBASE_ENTRY_EDITING;
}
//Called during Free (while shutting down)
// Drops the whole page to plain read-write so it can be safely touched
// and released.
DWORD Heap::EnsurePageWriteable(Page* page)
{
    return EnsurePageReadWrite<PAGE_READWRITE>(page);
}
// this get called when freeing the whole page
// Drops the allocation's pages to plain read-write (no execute needed,
// since the whole page is going away).
DWORD Heap::EnsureAllocationWriteable(Allocation* allocation)
{
    return EnsureAllocationReadWrite<PAGE_READWRITE>(allocation);
}
  396. // this get called when only freeing a part in the page
  397. DWORD Heap::EnsureAllocationExecuteWriteable(Allocation* allocation)
  398. {
  399. if (AutoSystemInfo::Data.IsCFGEnabled())
  400. {
  401. return EnsureAllocationReadWrite<PAGE_EXECUTE_RW_TARGETS_NO_UPDATE>(allocation);
  402. }
  403. else
  404. {
  405. return EnsureAllocationReadWrite<PAGE_EXECUTE_READWRITE>(allocation);
  406. }
  407. }
// Releases large-object allocations under the heap lock.
// freeAll = true : release every entry in the large-object list (returns false).
// freeAll = false: release only the entry matching 'address' (returns true
//                  when found; falling off the loop means the caller passed
//                  something not in the list, which is asserted against).
template <bool freeAll>
bool Heap::FreeLargeObject(Allocation* address)
{
    AutoCriticalSection autocs(&this->cs);
    FOREACH_DLISTBASE_ENTRY_EDITING(Allocation, allocation, &this->largeObjectAllocations, largeObjectIter)
    {
        if (address == (&allocation) || freeAll)
        {
            // Pages must be writable before being handed back.
            EnsureAllocationWriteable(&allocation);
#if PDATA_ENABLED
            Assert(allocation.xdata.IsFreed());
#endif
            this->Release(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
            largeObjectIter.RemoveCurrent(this->auxilliaryAllocator);
            if (!freeAll) return true;
        }
    }
    NEXT_DLISTBASE_ENTRY_EDITING;
    // If we're not freeing everything, and we hit this point, that means that
    // something that wasn't in the large object list was asked to be free.
    // So, assert that we're freeing everything if we get to this point.
    Assert(freeAll);
    return false;
}
  432. #pragma endregion
  433. #pragma region "Page methods"
// Carves a power-of-two chunk out of an existing bucketed page: reserves
// the chunk's bits in the page's free bit vector, allocates pdata/xdata
// when required, and moves the page to the full list once exhausted.
// Returns nullptr if the xdata or the bookkeeping node cannot be allocated.
Allocation* Heap::AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushort xdataSize)
{
    Assert(Math::IsPow2((int32)bytes));
    uint length = GetChunkSizeForBytes(bytes);
    BVIndex index = GetFreeIndexForPage(page, bytes);
    Assert(index != BVInvalidIndex);
    char* address = page->address + Page::Alignment * index;
#if PDATA_ENABLED
    XDataAllocation xdata;
    if(pdataCount > 0)
    {
        AutoCriticalSection autocs(&this->cs);
        {
            // Safe to bail here: the free bits have not been claimed yet.
            if(!this->AllocSecondary(page->segment, (ULONG_PTR)address, bytes, pdataCount, xdataSize, &xdata))
            {
                return nullptr;
            }
        }
    }
#endif
    Allocation* allocation = AnewNoThrowStruct(this->auxilliaryAllocator, Allocation);
    if (allocation == nullptr)
    {
#if PDATA_ENABLED
        // Roll back the secondary allocation made above.
        if(pdataCount > 0)
        {
            AutoCriticalSection autocs(&this->cs);
            this->ReleaseSecondary(xdata, page->segment);
        }
#endif
        return nullptr;
    }
#if DBG
    allocation->isAllocationUsed = false;
    allocation->isNotExecutableBecauseOOM = false;
#endif
    allocation->page = page;
    allocation->size = bytes;
    allocation->address = address;
#if DBG_DUMP
    this->allocationsSinceLastCompact += bytes;
    this->freeObjectSize -= bytes;
#endif
    // Claim the chunk's bits - from here on the allocation is live.
    page->freeBitVector.ClearRange(index, length);
    VerboseHeapTrace(L"ChunkSize: %d, Index: %d, Free bit vector in page: ", length, index);
#if VERBOSE_HEAP
    page->freeBitVector.DumpWord();
#endif
    VerboseHeapTrace(L"\n");
    if (this->ShouldBeInFullList(page))
    {
        BucketId bucket = page->currentBucket;
        VerboseHeapTrace(L"Moving page from bucket %d to full list\n", bucket);
        this->buckets[bucket].MoveElementTo(page, &this->fullPages[bucket]);
    }
#if PDATA_ENABLED
    allocation->xdata = xdata;
    // If this segment's secondary (xdata) allocator just ran dry, none of
    // its pages can serve xdata-needing allocations anymore - park them all
    // in the full list.
    if(((Segment*)page->segment)->GetSecondaryAllocator() != nullptr && !((Segment*)page->segment)->CanAllocSecondary())
    {
        TransferPages(
            [&](Page* currentPage) -> bool
            {
                bool transfer = currentPage->segment == page->segment;
                if(transfer)
                {
                    VerboseHeapTrace(L"Moving page from bucket %d to full list because no XDATA allocations can be made\n", page->currentBucket);
                }
                return transfer;
            } , this->buckets, this->fullPages);
    }
#endif
    return allocation;
}
  507. char *
  508. Heap::EnsurePreReservedPageAllocation(PreReservedVirtualAllocWrapper * preReservedVirtualAllocator)
  509. {
  510. AutoCriticalSection autocs(&this->cs);
  511. Assert(preReservedVirtualAllocator != nullptr);
  512. Assert(preReservedHeapPageAllocator.GetVirtualAllocator() == preReservedVirtualAllocator);
  513. char * preReservedRegionStartAddress = (char*)preReservedVirtualAllocator->GetPreReservedStartAddress();
  514. if (preReservedRegionStartAddress == nullptr)
  515. {
  516. preReservedRegionStartAddress = preReservedHeapPageAllocator.InitPageSegment();
  517. }
  518. if (preReservedRegionStartAddress == nullptr)
  519. {
  520. VerboseHeapTrace(L"PRE-RESERVE: PreReserved Segment CANNOT be allocated \n");
  521. }
  522. return preReservedRegionStartAddress;
  523. }
// Allocates a fresh single page for a bucket: pre-reserved segment first
// when allowed, regular page allocator otherwise. The page is filled with
// debug breaks, set to execute protection, and prepended to the bucket's
// list. Returns nullptr on out-of-memory (pages rolled back if the list
// node fails).
Page* Heap::AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
{
    void* pageSegment = nullptr;
    char* address = nullptr;
    {
        AutoCriticalSection autocs(&this->cs);
        if (canAllocInPreReservedHeapPageSegment)
        {
            address = this->preReservedHeapPageAllocator.AllocPages(1, (PageSegmentBase<PreReservedVirtualAllocWrapper>**)&pageSegment);
            if (address == nullptr)
            {
                VerboseHeapTrace(L"PRE-RESERVE: PreReserved Segment CANNOT be allocated \n");
            }
        }
        if (address == nullptr) // if no space in Pre-reserved Page Segment, then allocate in regular ones.
        {
            if (isAnyJittedCode)
            {
                *isAllJITCodeInPreReservedRegion = false;
            }
            address = this->pageAllocator.AllocPages(1, (PageSegmentBase<VirtualAllocWrapper>**)&pageSegment);
        }
        else
        {
            VerboseHeapTrace(L"PRE-RESERVE: Allocing new page in PreReserved Segment \n");
        }
    }
    if (address == nullptr)
    {
        return nullptr;
    }
    FillDebugBreak((BYTE*) address, AutoSystemInfo::PageSize);
    DWORD protectFlags = 0;
    if (AutoSystemInfo::Data.IsCFGEnabled())
    {
        protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
    }
    else
    {
        protectFlags = PAGE_EXECUTE;
    }
    //Change the protection of the page to Read-Only Execute, before adding it to the bucket list.
    ProtectPages(address, 1, pageSegment, protectFlags, PAGE_READWRITE);
    // Switch to allocating on a list of pages so we can do leak tracking later
    VerboseHeapTrace(L"Allocing new page in bucket %d\n", bucket);
    Page* page = this->buckets[bucket].PrependNode(this->auxilliaryAllocator, address, pageSegment, bucket);
    if (page == nullptr)
    {
        // List node allocation failed - return the page to the allocator.
        AutoCriticalSection autocs(&this->cs);
        this->ReleasePages(address, 1, pageSegment);
        return nullptr;
    }
#if DBG_DUMP
    this->totalAllocationSize += AutoSystemInfo::PageSize;
    this->freeObjectSize += AutoSystemInfo::PageSize;
#endif
    return page;
}
// Moves a page into (a new) bucket, updating its currentBucket tag.
// wasFull tells which list currently holds the page: the full list or its
// old bucket's list. Returns the page for caller convenience.
Page* Heap::AddPageToBucket(Page* page, BucketId bucket, bool wasFull)
{
    Assert(bucket > BucketId::InvalidBucket && bucket < BucketId::NumBuckets);
    BucketId oldBucket = page->currentBucket;
    page->currentBucket = bucket;
    if (wasFull)
    {
#pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
        this->fullPages[oldBucket].MoveElementTo(page, &this->buckets[bucket]);
    }
    else
    {
#pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
        this->buckets[oldBucket].MoveElementTo(page, &this->buckets[bucket]);
    }
    return page;
}
  599. /*
  600. * This method goes through the buckets greater than the target bucket
  601. * and if the higher bucket has a page with enough free space to allocate
  602. * something in the smaller bucket, then we bring the page to the smaller
  603. * bucket.
  604. * Note that if we allocate something from a page in the given bucket,
  605. * and then that page is split into a lower bucket, freeing is still not
  606. * a problem since the larger allocation is a multiple of the smaller one.
  607. * This gets more complicated if we can coalesce buckets. In that case,
  608. * we need to make sure that if a page was coalesced, and an allocation
  609. * pre-coalescing was freed, the page would need to get split upon free
  610. * to ensure correctness. For now, we've skipped implementing coalescing.
  611. * findPreReservedHeapPages - true, if we need to find pages only belonging to PreReservedHeapSegment
  612. */
// See the algorithm comment above: scans buckets larger than targetBucket
// for a page that can still serve a targetBucket-sized chunk, and
// re-buckets the first such page down to targetBucket. Returns nullptr if
// no page qualifies.
Page* Heap::FindPageToSplit(BucketId targetBucket, bool findPreReservedHeapPages)
{
    for (BucketId b = (BucketId)(targetBucket + 1); b < BucketId::NumBuckets; b = (BucketId) (b + 1))
    {
#pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
        FOREACH_DLISTBASE_ENTRY_EDITING(Page, pageInBucket, &this->buckets[b], bucketIter)
        {
            if (findPreReservedHeapPages && !IsPreReservedSegment(pageInBucket.segment))
            {
                //Find only pages that are pre-reserved using preReservedHeapPageAllocator
                continue;
            }
            if (pageInBucket.CanAllocate(targetBucket))
            {
                Page* page = &pageInBucket;
                if (findPreReservedHeapPages)
                {
                    VerboseHeapTrace(L"PRE-RESERVE: Found page for splitting in Pre Reserved Segment\n");
                }
                VerboseHeapTrace(L"Found page to split. Moving from bucket %d to %d\n", b, targetBucket);
                // Re-bucket the page down to the target bucket and hand it out.
                return AddPageToBucket(page, targetBucket);
            }
        }
        NEXT_DLISTBASE_ENTRY_EDITING;
    }
    return nullptr;
}
// Unlinks a page from its bucket's full list and frees the list node.
// The page must be present in that list; asserts otherwise.
void Heap::RemovePageFromFullList(Page* pageToRemove)
{
    FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &this->fullPages[pageToRemove->currentBucket], pageIter)
    {
        if (&page == pageToRemove)
        {
            pageIter.RemoveCurrent(this->auxilliaryAllocator);
            return;
        }
    }
    NEXT_DLISTBASE_ENTRY_EDITING;
    // Page not found- why?
    Assert(false);
}
  654. BVIndex Heap::GetIndexInPage(__in Page* page, __in char* address)
  655. {
  656. Assert(page->address <= address && address < page->address + AutoSystemInfo::PageSize);
  657. return (BVIndex) ((address - page->address) / Page::Alignment);
  658. }
  659. #pragma endregion
  660. /**
  661. * Free List methods
  662. */
  663. #pragma region "Freeing methods"
// Returns a small (sub-page) allocation's chunks to its page's free bit
// vector. Page-sized objects on the full list are released outright; a page
// that becomes completely empty is released too; otherwise the page is
// restored to execute-only protection. Returns false only when an emptied
// page cannot be found in its bucket list.
bool Heap::FreeAllocation(Allocation* object)
{
    Page* page = object->page;
    void* segment = page->segment;
    size_t pageSize = AutoSystemInfo::PageSize;
    unsigned int length = GetChunkSizeForBytes(object->size);
    BVIndex index = GetIndexInPage(page, object->address);
#if DBG
    // Make sure that it's not already been freed
    for (BVIndex i = index; i < length; i++)
    {
        Assert(!page->freeBitVector.Test(i));
    }
#endif
    if (this->ShouldBeInFullList(page))
    {
        VerboseHeapTrace(L"Recycling page 0x%p because address 0x%p of size %d was freed\n", page->address, object->address, object->size);
        // If the object being freed is equal to the page size, we're
        // going to remove it anyway so don't add it to a bucket
        if (object->size != pageSize)
        {
            AddPageToBucket(page, page->currentBucket, true);
        }
        else
        {
            EnsureAllocationWriteable(object);
            // Fill the old buffer with debug breaks
            CustomHeap::FillDebugBreak((BYTE *)object->address, object->size);
            void* pageAddress = page->address;
            RemovePageFromFullList(page);
            // The page is not in any bucket- just update the stats, free the allocation
            // and dump the page- we don't need to update free object size since the object
            // size is equal to the page size so they cancel each other out
#if DBG_DUMP
            this->totalAllocationSize -= pageSize;
#endif
            this->auxilliaryAllocator->Free(object, sizeof(Allocation));
            {
                AutoCriticalSection autocs(&this->cs);
                this->ReleasePages(pageAddress, 1, segment);
            }
            VerboseHeapTrace(L"FastPath: freeing page-sized object directly\n");
            return true;
        }
    }
    // If the page is about to become empty then we should not need
    // to set it to executable and we don't expect to restore the
    // previous protection settings.
    if (page->freeBitVector.Count() == BVUnit::BitsPerWord - length)
    {
        EnsureAllocationWriteable(object);
    }
    else
    {
        EnsureAllocationExecuteWriteable(object);
    }
    // Fill the old buffer with debug breaks
    CustomHeap::FillDebugBreak((BYTE *)object->address, object->size);
    VerboseHeapTrace(L"Setting %d bits starting at bit %d, Free bit vector in page was ", length, index);
#if VERBOSE_HEAP
    page->freeBitVector.DumpWord();
#endif
    VerboseHeapTrace(L"\n");
    // Return the chunk's bits to the free pool.
    page->freeBitVector.SetRange(index, length);
    VerboseHeapTrace(L"Free bit vector in page: ", length, index);
#if VERBOSE_HEAP
    page->freeBitVector.DumpWord();
#endif
    VerboseHeapTrace(L"\n");
#if DBG_DUMP
    this->freeObjectSize += object->size;
    this->freesSinceLastCompact += object->size;
#endif
    this->auxilliaryAllocator->Free(object, sizeof(Allocation));
    if (page->IsEmpty())
    {
        // Find the page and remove it from the buckets- the page is going to be freed anyway
        FOREACH_DLISTBASE_ENTRY_EDITING(Page, pageInBucket, &this->buckets[page->currentBucket], pageIter)
        {
            // Templatize this to remove branches/make code more compact?
            if (&pageInBucket == page)
            {
                VerboseHeapTrace(L"Removing page in bucket %d\n", page->currentBucket);
                {
                    AutoCriticalSection autocs(&this->cs);
                    this->ReleasePages(page->address, 1, page->segment);
                }
                pageIter.RemoveCurrent(this->auxilliaryAllocator);
#if DBG_DUMP
                this->freeObjectSize -= pageSize;
                this->totalAllocationSize -= pageSize;
#endif
                return true;
            }
        }
        NEXT_DLISTBASE_ENTRY_EDITING;
        return false;
    }
    else // after freeing part of the page, the page should be in PAGE_EXECUTE_READWRITE protection, and turning to PAGE_EXECUTE (always with TARGETS_NO_UPDATE state)
    {
        DWORD protectFlags = 0;
        if (AutoSystemInfo::Data.IsCFGEnabled())
        {
            protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
        }
        else
        {
            protectFlags = PAGE_EXECUTE;
        }
        this->ProtectPages(page->address, 1, segment, protectFlags, PAGE_EXECUTE_READWRITE);
        return true;
    }
}
// Drains the decommitted-page list: hands each page to the allocator's
// decommit bookkeeping and frees the list entry.
void Heap::FreeDecommittedBuckets()
{
    // This is only called while the heap is being destroyed, so there is no
    // need to sync with the background thread.
    Assert(inDtor);

    FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &this->decommittedPages, iter)
    {
        // Pages on this list are presumed already decommitted (they come from
        // decommittedPages); track the range, then unlink and free the node.
        this->TrackDecommitedPages(page.address, 1, page.segment);
        iter.RemoveCurrent(this->auxilliaryAllocator);
    }
    NEXT_DLISTBASE_ENTRY_EDITING;
}
  788. void Heap::FreePage(Page* page)
  789. {
  790. // This is only call when the heap is being destroy, so don't need to sync with the background thread.
  791. Assert(inDtor);
  792. DWORD pageSize = AutoSystemInfo::PageSize;
  793. EnsurePageWriteable(page);
  794. size_t freeSpace = page->freeBitVector.Count() * Page::Alignment;
  795. VerboseHeapTrace(L"Removing page in bucket %d, freeSpace: %d\n", page->currentBucket, freeSpace);
  796. this->ReleasePages(page->address, 1, page->segment);
  797. #if DBG_DUMP
  798. this->freeObjectSize -= freeSpace;
  799. this->totalAllocationSize -= pageSize;
  800. #endif
  801. }
  802. void Heap::FreeBucket(DListBase<Page>* bucket, bool freeOnlyEmptyPages)
  803. {
  804. // This is only call when the heap is being destroy, so don't need to sync with the background thread.
  805. Assert(inDtor);
  806. FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, bucket, pageIter)
  807. {
  808. // Templatize this to remove branches/make code more compact?
  809. if (!freeOnlyEmptyPages || page.IsEmpty())
  810. {
  811. FreePage(&page);
  812. pageIter.RemoveCurrent(this->auxilliaryAllocator);
  813. }
  814. }
  815. NEXT_DLISTBASE_ENTRY_EDITING;
  816. }
  817. void Heap::FreeBuckets(bool freeOnlyEmptyPages)
  818. {
  819. // This is only call when the heap is being destroy, so don't need to sync with the background thread.
  820. Assert(inDtor);
  821. for (int i = 0; i < NumBuckets; i++)
  822. {
  823. FreeBucket(&this->buckets[i], freeOnlyEmptyPages);
  824. FreeBucket(&this->fullPages[i], freeOnlyEmptyPages);
  825. }
  826. #if DBG_DUMP
  827. this->allocationsSinceLastCompact = 0;
  828. this->freesSinceLastCompact = 0;
  829. #endif
  830. }
#if PDATA_ENABLED
// Frees an xdata (unwind-data) secondary allocation belonging to the given
// segment.
void Heap::FreeXdata(XDataAllocation* xdata, void* segment)
{
    Assert(!xdata->IsFreed());

    // If the segment can no longer hand out secondary allocations, freeing
    // this xdata may make its pages usable again: move the segment's pages
    // that still have space from the full lists back into the buckets.
    if(!((Segment*)segment)->CanAllocSecondary())
    {
        this->TransferPages([&](Page* currentPage) -> bool
        {
            bool transfer = currentPage->segment == segment && !currentPage->HasNoSpace();
            if(transfer)
            {
                VerboseHeapTrace(L"Recycling page 0x%p because XDATA was freed\n", currentPage->address);
            }
            return transfer;
        }, this->fullPages, this->buckets);
    }

    // Release the secondary allocation and mark the xdata record freed while
    // holding the heap lock.
    {
        AutoCriticalSection autocs(&this->cs);
        this->ReleaseSecondary(*xdata, segment);
        xdata->Free();
    }
}
#endif
#if DBG_DUMP
// Dumps heap-wide allocation statistics plus a per-bucket breakdown of how
// much space is used in each page. Debug-dump builds only.
void Heap::DumpStats()
{
    HeapTrace(L"Total allocation size: %d\n", totalAllocationSize);
    HeapTrace(L"Total free size: %d\n", freeObjectSize);
    HeapTrace(L"Total allocations since last compact: %d\n", allocationsSinceLastCompact);
    HeapTrace(L"Total frees since last compact: %d\n", freesSinceLastCompact);
    HeapTrace(L"Large object count: %d\n", this->largeObjectAllocations.Count());

    HeapTrace(L"Buckets: \n");
    for (int i = 0; i < BucketId::NumBuckets; i++)
    {
        // (1 << (i + 7)) is bucket i's size-class label (buckets are keyed by
        // log2(size) - 7; see GetBucketForSize).
        printf("\t%d => %u [", (1 << (i + 7)), buckets[i].Count());

        FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &this->buckets[i], bucketIter)
        {
            // The page tracks free space; invert the bit vector to count
            // the used space instead.
            BVUnit usedBitVector = page.freeBitVector;
            usedBitVector.ComplimentAll(); // Get the actual used bit vector
            printf(" %u ", usedBitVector.Count() * Page::Alignment); // Print out the space used in this page
        }
        NEXT_DLISTBASE_ENTRY_EDITING
        printf("] {{%u}}\n", this->fullPages[i].Count());
    }
}
#endif
  877. #pragma endregion
  878. /**
  879. * Helper methods
  880. */
  881. #pragma region "Helpers"
  882. inline unsigned int log2(size_t number)
  883. {
  884. const unsigned int b[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
  885. const unsigned int S[] = {1, 2, 4, 8, 16};
  886. unsigned int result = 0;
  887. for (int i = 4; i >= 0; i--)
  888. {
  889. if (number & b[i])
  890. {
  891. number >>= S[i];
  892. result |= S[i];
  893. }
  894. }
  895. return result;
  896. }
  897. inline BucketId GetBucketForSize(size_t bytes)
  898. {
  899. if (bytes > Page::MaxAllocationSize)
  900. {
  901. return BucketId::LargeObjectList;
  902. }
  903. BucketId bucket = (BucketId) (log2(bytes) - 7);
  904. // < 8 => 0
  905. // 8 => 1
  906. // 9 => 2 ...
  907. Assert(bucket < BucketId::LargeObjectList);
  908. if (bucket < BucketId::SmallObjectList)
  909. {
  910. bucket = BucketId::SmallObjectList;
  911. }
  912. return bucket;
  913. }
// Fills the specified buffer with "debug break" instruction encoding.
// If there is any space left after that due to alignment, fill it with 0.
// static
void FillDebugBreak(__out_bcount_full(byteCount) BYTE* buffer, __in size_t byteCount)
{
#if defined(_M_ARM)
    // On ARM there is breakpoint instruction (BKPT) which is 0xBEii, where ii (immediate 8) can be any value, 0xBE in particular.
    // While it could be easier to put 0xBE (same way as 0xCC on x86), BKPT is not recommended -- it may cause unexpected side effects.
    // So, use same sequence are C++ compiler uses (0xDEFE), this is recognized by debugger as __debugbreak.
    // This is 2 bytes, and in case there is a gap of 1 byte in the end, fill it with 0 (there is no 1 byte long THUMB instruction).
    CompileAssert(sizeof(wchar_t) == 2);
    wchar_t pattern = 0xDEFE;
    wmemset(reinterpret_cast<wchar_t*>(buffer), pattern, byteCount / 2);
    if (byteCount % 2)
    {
        // Note: this is valid scenario: in JIT mode, we may not be 2-byte-aligned in the end of unwind info.
        *(buffer + byteCount - 1) = 0; // Fill last remaining byte.
    }
#elif defined(_M_ARM64)
    CompileAssert(sizeof(DWORD) == 4);
    // ARM64 BRK #imm16: base opcode 0xd4200000 with the 16-bit immediate
    // (0xf000 here) encoded at bit 5.
    DWORD pattern = 0xd4200000 | (0xf000 << 5);
    // Write whole 4-byte instructions first...
    for (size_t i = 0; i < byteCount / 4; i++)
    {
        reinterpret_cast<DWORD*>(buffer)[i] = pattern;
    }
    // ...then zero any trailing bytes that can't hold a full instruction.
    for (size_t i = (byteCount / 4) * 4; i < byteCount; i++)
    {
        // Note: this is valid scenario: in JIT mode, we may not be 2-byte-aligned in the end of unwind info.
        buffer[i] = 0; // Fill last remaining bytes.
    }
#else
    // On Intel just use "INT 3" instruction which is 0xCC.
    memset(buffer, 0xCC, byteCount);
#endif
}
  949. #pragma endregion
  950. };
  951. }