CustomHeap.cpp 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "CommonMemoryPch.h"
  6. #ifdef _M_X64
  7. #include "Memory/amd64/XDataAllocator.h"
  8. #elif defined(_M_ARM)
  9. #include "Memory/arm/XDataAllocator.h"
  10. #include <wchar.h>
  11. #elif defined(_M_ARM64)
  12. #include "Memory/arm64/XDataAllocator.h"
  13. #endif
  14. #include "CustomHeap.h"
  15. namespace Memory
  16. {
  17. namespace CustomHeap
  18. {
  19. #pragma region "Constructor and Destructor"
// Constructs a code heap that carves executable allocations out of pages
// obtained from the shared CodePageAllocators. 'alloc' is the arena backing
// this heap's own bookkeeping structures (Page / Allocation list nodes).
Heap::Heap(ArenaAllocator * alloc, CodePageAllocators * codePageAllocators):
    auxiliaryAllocator(alloc),
    codePageAllocators(codePageAllocators),
    lastSecondaryAllocStateChangedCount(0)
#if DBG_DUMP
    , freeObjectSize(0)
    , totalAllocationSize(0)
    , allocationsSinceLastCompact(0)
    , freesSinceLastCompact(0)
#endif
#if DBG
    , inDtor(false)
#endif
{
    // Start with every size bucket's page list empty.
    for (int i = 0; i < NumBuckets; i++)
    {
        this->buckets[i].Reset();
    }
}
Heap::~Heap()
{
#if DBG
    // Several Free* helpers assert 'inDtor' to guarantee they only run
    // during heap teardown.
    inDtor = true;
#endif
    this->FreeAll();
}
  46. #pragma endregion
  47. #pragma region "Public routines"
// Releases every page and large allocation owned by this heap back to the
// code page allocators. Takes the allocator lock once up front; the helpers
// called below rely on it already being held (see their comments).
void Heap::FreeAll()
{
    CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
    FreeBuckets(false);
    FreeLargeObjects();
    FreeDecommittedBuckets();
    FreeDecommittedLargeObjects();
}
// Frees a single allocation. Any secondary (xdata/unwind) allocation is
// released first; memory that has already been decommitted is left on the
// decommitted lists to be reclaimed during heap teardown.
void Heap::Free(__in Allocation* object)
{
    Assert(object != nullptr);
    if (object == nullptr)
    {
        return;
    }
    // Recover the bucket from the stored (rounded-up) size; large objects
    // take the page-granular path, everything else frees a chunk in a page.
    BucketId bucket = (BucketId) GetBucketForSize(object->size);
    if (bucket == BucketId::LargeObjectList)
    {
#if PDATA_ENABLED
        if(!object->xdata.IsFreed())
        {
            FreeXdata(&object->xdata, object->largeObjectAllocation.segment);
        }
#endif
        if (!object->largeObjectAllocation.isDecommitted)
        {
            FreeLargeObject(object);
        }
        return;
    }
#if PDATA_ENABLED
    if(!object->xdata.IsFreed())
    {
        FreeXdata(&object->xdata, object->page->segment);
    }
#endif
    if (!object->page->isDecommitted)
    {
        FreeAllocation(object);
    }
}
// Decommits every region tracked by this heap - large objects, full pages
// and partially used bucket pages - and moves each onto the corresponding
// decommitted list, marking it so Free() skips it later.
void Heap::DecommitAll()
{
    // This function doesn't really touch the page allocator data structure.
    // DecommitPages is merely a wrapper for VirtualFree
    // So no need to take the critical section to synchronize
    DListBase<Allocation>::EditingIterator i(&this->largeObjectAllocations);
    while (i.Next())
    {
        Allocation& allocation = i.Data();
        Assert(!allocation.largeObjectAllocation.isDecommitted);
        this->codePageAllocators->DecommitPages(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
        // Move the node first; the reference stays valid because only list
        // linkage changes, not the node's storage.
        i.MoveCurrentTo(&this->decommittedLargeObjects);
        allocation.largeObjectAllocation.isDecommitted = true;
    }
    for (int bucket = 0; bucket < BucketId::NumBuckets; bucket++)
    {
        FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(this->fullPages[bucket]), bucketIter1)
        {
            Assert(page.inFullList);
            this->codePageAllocators->DecommitPages(page.address, 1 /* pageCount */, page.segment);
            bucketIter1.MoveCurrentTo(&(this->decommittedPages));
            page.isDecommitted = true;
        }
        NEXT_DLISTBASE_ENTRY_EDITING;
        FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(this->buckets[bucket]), bucketIter2)
        {
            Assert(!page.inFullList);
            this->codePageAllocators->DecommitPages(page.address, 1 /* pageCount */, page.segment);
            bucketIter2.MoveCurrentTo(&(this->decommittedPages));
            page.isDecommitted = true;
        }
        NEXT_DLISTBASE_ENTRY_EDITING;
    }
}
  123. bool Heap::IsInHeap(DListBase<Page> const& bucket, __in void * address)
  124. {
  125. DListBase<Page>::Iterator i(&bucket);
  126. while (i.Next())
  127. {
  128. Page& page = i.Data();
  129. if (page.address <= address && address < page.address + AutoSystemInfo::PageSize)
  130. {
  131. return true;
  132. }
  133. }
  134. return false;
  135. }
  136. bool Heap::IsInHeap(DListBase<Page> const buckets[NumBuckets], __in void * address)
  137. {
  138. for (uint i = 0; i < NumBuckets; i++)
  139. {
  140. if (this->IsInHeap(buckets[i], address))
  141. {
  142. return true;
  143. }
  144. }
  145. return false;
  146. }
  147. bool Heap::IsInHeap(DListBase<Allocation> const& allocations, __in void *address)
  148. {
  149. DListBase<Allocation>::Iterator i(&allocations);
  150. while (i.Next())
  151. {
  152. Allocation& allocation = i.Data();
  153. if (allocation.address <= address && address < allocation.address + allocation.size)
  154. {
  155. return true;
  156. }
  157. }
  158. return false;
  159. }
  160. bool Heap::IsInHeap(__in void* address)
  161. {
  162. return IsInHeap(buckets, address) || IsInHeap(fullPages, address) || IsInHeap(largeObjectAllocations, address);
  163. }
  164. Page * Heap::GetExistingPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment)
  165. {
  166. if (!this->buckets[bucket].Empty())
  167. {
  168. Assert(!this->buckets[bucket].Head().inFullList);
  169. return &this->buckets[bucket].Head();
  170. }
  171. return FindPageToSplit(bucket, canAllocInPreReservedHeapPageSegment);
  172. }
/*
 * Algorithm:
 *   - Find bucket
 *   - Check bucket pages - if it has enough free space, allocate that chunk
 *   - Check pages in bigger buckets - if that has enough space, split that page and allocate from that chunk
 *   - Allocate new page
 */
// Allocates 'bytes' of executable memory plus pdataCount/xdataSize worth of
// unwind data when PDATA_ENABLED. Returns nullptr on out-of-memory.
Allocation* Heap::Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
{
    Assert(bytes > 0);
    // pdataCount must be non-zero exactly when the allocators expect xdata.
    Assert((codePageAllocators->AllocXdata() || pdataCount == 0) && (!codePageAllocators->AllocXdata() || pdataCount > 0));
    Assert(pdataCount > 0 || (pdataCount == 0 && xdataSize == 0));
    // Round up to power of two to allocate, and figure out which bucket to allocate in
    size_t bytesToAllocate = PowerOf2Policy::GetSize(bytes);
    BucketId bucket = (BucketId) GetBucketForSize(bytesToAllocate);
    if (bucket == BucketId::LargeObjectList)
    {
        Allocation * allocation = AllocLargeObject(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
#if defined(DBG)
        if (allocation)
        {
            // Fresh code memory must come back execute-only.
            MEMORY_BASIC_INFORMATION memBasicInfo;
            size_t resultBytes = VirtualQuery(allocation->address, &memBasicInfo, sizeof(memBasicInfo));
            Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
        }
#endif
        return allocation;
    }
    VerboseHeapTrace(L"Bucket is %d\n", bucket);
    VerboseHeapTrace(L"Requested: %d bytes. Allocated: %d bytes\n", bytes, bytesToAllocate);
    do
    {
        Page* page = GetExistingPage(bucket, canAllocInPreReservedHeapPageSegment);
        if (page == nullptr && UpdateFullPages())
        {
            // Freed xdata may have made a previously "full" page usable
            // again - retry the lookup once.
            page = GetExistingPage(bucket, canAllocInPreReservedHeapPageSegment);
        }
        if (page == nullptr)
        {
            page = AllocNewPage(bucket, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
        }
        // Out of memory
        if (page == nullptr)
        {
            return nullptr;
        }
#if defined(DBG)
        MEMORY_BASIC_INFORMATION memBasicInfo;
        size_t resultBytes = VirtualQuery(page->address, &memBasicInfo, sizeof(memBasicInfo));
        Assert(resultBytes != 0 && memBasicInfo.Protect == PAGE_EXECUTE);
#endif
        Allocation* allocation = nullptr;
        // AllocInPage returns false when the page had to be moved to the
        // full list (out of xdata space); loop and try another page.
        if (AllocInPage(page, bytesToAllocate, pdataCount, xdataSize, &allocation))
        {
            return allocation;
        }
    } while (true);
}
  231. BOOL Heap::ProtectAllocationWithExecuteReadWrite(Allocation *allocation, char* addressInPage)
  232. {
  233. DWORD protectFlags = 0;
  234. if (AutoSystemInfo::Data.IsCFGEnabled())
  235. {
  236. protectFlags = PAGE_EXECUTE_RW_TARGETS_NO_UPDATE;
  237. }
  238. else
  239. {
  240. protectFlags = PAGE_EXECUTE_READWRITE;
  241. }
  242. return this->ProtectAllocation(allocation, protectFlags, PAGE_EXECUTE, addressInPage);
  243. }
  244. BOOL Heap::ProtectAllocationWithExecuteReadOnly(Allocation *allocation, char* addressInPage)
  245. {
  246. DWORD protectFlags = 0;
  247. if (AutoSystemInfo::Data.IsCFGEnabled())
  248. {
  249. protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
  250. }
  251. else
  252. {
  253. protectFlags = PAGE_EXECUTE;
  254. }
  255. return this->ProtectAllocation(allocation, protectFlags, PAGE_EXECUTE_READWRITE, addressInPage);
  256. }
// Changes the page protection covering 'allocation'. When addressInPage is
// non-null, only the single page containing that address is changed.
// dwVirtualProtectFlags is the new protection; desiredOldProtectFlag is the
// protection the pages are expected to currently carry.
BOOL Heap::ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag, __in_opt char* addressInPage)
{
    // Allocate at the page level so that our protections don't
    // transcend allocation page boundaries. Here, allocation->address is page
    // aligned if the object is a large object allocation. If it isn't, in the else
    // branch of the following if statement, we set it to the allocation's page's
    // address. This ensures that the address being protected is always page aligned
    Assert(allocation != nullptr);
    Assert(allocation->isAllocationUsed);
    Assert(addressInPage == nullptr || (addressInPage >= allocation->address && addressInPage < (allocation->address + allocation->size)));
    char* address = allocation->address;
    size_t pageCount;
    void * segment;
    if (allocation->IsLargeAllocation())
    {
#if DBG_DUMP || defined(RECYCLER_TRACE)
        if (Js::Configuration::Global.flags.IsEnabled(Js::TraceProtectPagesFlag))
        {
            Output::Print(L"Protecting large allocation\n");
        }
#endif
        segment = allocation->largeObjectAllocation.segment;
        if (addressInPage != nullptr)
        {
            // Narrow the change to the one page containing addressInPage.
            if (addressInPage >= allocation->address + AutoSystemInfo::PageSize)
            {
                size_t page = (addressInPage - allocation->address) / AutoSystemInfo::PageSize;
                address = allocation->address + (page * AutoSystemInfo::PageSize);
            }
            pageCount = 1;
        }
        else
        {
            pageCount = allocation->GetPageCount();
        }
        VerboseHeapTrace(L"Protecting 0x%p with 0x%x\n", address, dwVirtualProtectFlags);
        return this->codePageAllocators->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
    }
    else
    {
#if DBG_DUMP || defined(RECYCLER_TRACE)
        if (Js::Configuration::Global.flags.IsEnabled(Js::TraceProtectPagesFlag))
        {
            Output::Print(L"Protecting small allocation\n");
        }
#endif
        // Small allocations share a page, so protect the whole page.
        segment = allocation->page->segment;
        address = allocation->page->address;
        pageCount = 1;
        VerboseHeapTrace(L"Protecting 0x%p with 0x%x\n", address, dwVirtualProtectFlags);
        return this->codePageAllocators->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
    }
}
  310. #pragma endregion
  311. #pragma region "Large object methods"
// Allocates an object larger than the biggest bucket directly from the page
// allocator. The pages are filled with debug-break bytes and made
// execute-only before being published; xdata (when requested) comes from the
// segment's secondary allocator. Returns nullptr on OOM; partial failures
// roll back the pages and xdata already obtained.
Allocation* Heap::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
{
    size_t pages = GetNumPagesForSize(bytes);
    if (pages == 0)
    {
        return nullptr;
    }
    void * segment = nullptr;
    char* address = nullptr;
#if PDATA_ENABLED
    XDataAllocation xdata;
#endif
    {
        CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
        address = this->codePageAllocators->Alloc(&pages, &segment, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
        // Out of memory
        if (address == nullptr)
        {
            return nullptr;
        }
        // Poison the fresh pages so stray jumps fault predictably.
        FillDebugBreak((BYTE*) address, pages*AutoSystemInfo::PageSize);
        DWORD protectFlags = 0;
        if (AutoSystemInfo::Data.IsCFGEnabled())
        {
            protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
        }
        else
        {
            protectFlags = PAGE_EXECUTE;
        }
        this->codePageAllocators->ProtectPages(address, pages, segment, protectFlags /*dwVirtualProtectFlags*/, PAGE_READWRITE /*desiredOldProtectFlags*/);
#if PDATA_ENABLED
        if(pdataCount > 0)
        {
            if (!this->codePageAllocators->AllocSecondary(segment, (ULONG_PTR) address, bytes, pdataCount, xdataSize, &xdata))
            {
                // No unwind data available - give the code pages back.
                this->codePageAllocators->Release(address, pages, segment);
                return nullptr;
            }
        }
#endif
    }
    Allocation* allocation = this->largeObjectAllocations.PrependNode(this->auxiliaryAllocator);
    if (allocation == nullptr)
    {
        // Bookkeeping node allocation failed; roll everything back.
        CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
        this->codePageAllocators->Release(address, pages, segment);
#if PDATA_ENABLED
        if(pdataCount > 0)
        {
            this->codePageAllocators->ReleaseSecondary(xdata, segment);
        }
#endif
        return nullptr;
    }
    allocation->address = address;
    allocation->largeObjectAllocation.segment = segment;
    allocation->largeObjectAllocation.isDecommitted = false;
    allocation->size = pages * AutoSystemInfo::PageSize;
#if PDATA_ENABLED
    allocation->xdata = xdata;
#endif
    return allocation;
}
// Returns already-decommitted large-object regions to the page allocator and
// frees their bookkeeping nodes. Only runs during heap teardown.
void Heap::FreeDecommittedLargeObjects()
{
    // CodePageAllocators is locked in FreeAll
    Assert(inDtor);
    FOREACH_DLISTBASE_ENTRY_EDITING(Allocation, allocation, &this->decommittedLargeObjects, largeObjectIter)
    {
        VerboseHeapTrace(L"Decommitting large object at address 0x%p of size %u\n", allocation.address, allocation.size);
        this->codePageAllocators->ReleaseDecommitted(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
        largeObjectIter.RemoveCurrent(this->auxiliaryAllocator);
    }
    NEXT_DLISTBASE_ENTRY_EDITING;
}
//Called during Free (while shutting down)
// Makes the whole page plain read-write before it is handed back.
DWORD Heap::EnsurePageWriteable(Page* page)
{
    return EnsurePageReadWrite<PAGE_READWRITE>(page);
}
// this get called when freeing the whole page
// Makes the allocation's pages plain read-write (execute bit dropped).
DWORD Heap::EnsureAllocationWriteable(Allocation* allocation)
{
    return EnsureAllocationReadWrite<PAGE_READWRITE>(allocation);
}
  398. // this get called when only freeing a part in the page
  399. DWORD Heap::EnsureAllocationExecuteWriteable(Allocation* allocation)
  400. {
  401. if (AutoSystemInfo::Data.IsCFGEnabled())
  402. {
  403. return EnsureAllocationReadWrite<PAGE_EXECUTE_RW_TARGETS_NO_UPDATE>(allocation);
  404. }
  405. else
  406. {
  407. return EnsureAllocationReadWrite<PAGE_EXECUTE_READWRITE>(allocation);
  408. }
  409. }
// Releases every live large-object allocation back to the page allocator.
// NOTE(review): takes the CodePageAllocators lock even though FreeAll already
// holds it - presumably the lock is re-entrant; verify before changing.
void Heap::FreeLargeObjects()
{
    CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
    FOREACH_DLISTBASE_ENTRY_EDITING(Allocation, allocation, &this->largeObjectAllocations, largeObjectIter)
    {
        EnsureAllocationWriteable(&allocation);
#if PDATA_ENABLED
        Assert(allocation.xdata.IsFreed());
#endif
        this->codePageAllocators->Release(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
        largeObjectIter.RemoveCurrent(this->auxiliaryAllocator);
    }
    NEXT_DLISTBASE_ENTRY_EDITING;
}
// Frees one large-object allocation: makes its pages writable, releases them
// to the page allocator, then removes the bookkeeping node from the list.
// Caller must have already freed the allocation's xdata (asserted below).
void Heap::FreeLargeObject(Allocation* allocation)
{
    CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
    EnsureAllocationWriteable(allocation);
#if PDATA_ENABLED
    Assert(allocation->xdata.IsFreed());
#endif
    this->codePageAllocators->Release(allocation->address, allocation->GetPageCount(), allocation->largeObjectAllocation.segment);
    this->largeObjectAllocations.RemoveElement(this->auxiliaryAllocator, allocation);
}
  434. #pragma endregion
  435. #pragma region "Page methods"
// Carves a chunk of 'bytes' (a power of two) out of 'page'.
// Returns true when the caller should stop trying other pages: either
// *allocationOut was set to the new allocation, or a non-retryable OOM
// occurred (*allocationOut is then left untouched, i.e. still nullptr at the
// call site). Returns false when the page was moved to the full list because
// the segment ran out of xdata space - the caller should retry elsewhere.
bool Heap::AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushort xdataSize, Allocation ** allocationOut)
{
    Allocation * allocation = AnewNoThrowStruct(this->auxiliaryAllocator, Allocation);
    if (allocation == nullptr)
    {
        // OOM on the bookkeeping node - not retryable.
        return true;
    }
    Assert(Math::IsPow2((int32)bytes));
    uint length = GetChunkSizeForBytes(bytes);
    BVIndex index = GetFreeIndexForPage(page, bytes);
    Assert(index != BVInvalidIndex);
    char* address = page->address + Page::Alignment * index;
#if PDATA_ENABLED
    XDataAllocation xdata;
    if(pdataCount > 0)
    {
        CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
        if (this->ShouldBeInFullList(page))
        {
            Adelete(this->auxiliaryAllocator, allocation);
            // If we run out of XData space with the segment, move the page to the full page list, and return false to try the next page.
            BucketId bucket = page->currentBucket;
            VerboseHeapTrace(L"Moving page from bucket %d to full list\n", bucket);
            Assert(!page->inFullList);
            this->buckets[bucket].MoveElementTo(page, &this->fullPages[bucket]);
            page->inFullList = true;
            return false;
        }
        if (!this->codePageAllocators->AllocSecondary(page->segment, (ULONG_PTR)address, bytes, pdataCount, xdataSize, &xdata))
        {
            // Secondary allocation failed outright - not retryable.
            Adelete(this->auxiliaryAllocator, allocation);
            return true;
        }
    }
#endif
#if DBG
    allocation->isAllocationUsed = false;
    allocation->isNotExecutableBecauseOOM = false;
#endif
    allocation->page = page;
    allocation->size = bytes;
    allocation->address = address;
#if DBG_DUMP
    this->allocationsSinceLastCompact += bytes;
    this->freeObjectSize -= bytes;
#endif
    // Mark the chunk's bits as in use.
    page->freeBitVector.ClearRange(index, length);
    VerboseHeapTrace(L"ChunkSize: %d, Index: %d, Free bit vector in page: ", length, index);
#if VERBOSE_HEAP
    page->freeBitVector.DumpWord();
#endif
    VerboseHeapTrace(L"\n");
    if (this->ShouldBeInFullList(page))
    {
        // The page can't satisfy further requests; park it on the full list
        // until UpdateFullPages notices freed xdata.
        BucketId bucket = page->currentBucket;
        VerboseHeapTrace(L"Moving page from bucket %d to full list\n", bucket);
        Assert(!page->inFullList);
        this->buckets[bucket].MoveElementTo(page, &this->fullPages[bucket]);
        page->inFullList = true;
    }
#if PDATA_ENABLED
    allocation->xdata = xdata;
#endif
    *allocationOut = allocation;
    return true;
}
// Allocates a fresh page for 'bucket' from the page allocator, poisons it
// with debug breaks, makes it execute-only, and prepends it to the bucket's
// page list. Returns nullptr on OOM (page is released if the node fails).
Page* Heap::AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
{
    void* pageSegment = nullptr;
    char* address = nullptr;
    {
        CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
        address = this->codePageAllocators->AllocPages(1, &pageSegment, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
    }
    if (address == nullptr)
    {
        return nullptr;
    }
    // Poison the fresh page so stray jumps fault predictably.
    FillDebugBreak((BYTE*) address, AutoSystemInfo::PageSize);
    DWORD protectFlags = 0;
    if (AutoSystemInfo::Data.IsCFGEnabled())
    {
        protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
    }
    else
    {
        protectFlags = PAGE_EXECUTE;
    }
    //Change the protection of the page to Read-Only Execute, before adding it to the bucket list.
    this->codePageAllocators->ProtectPages(address, 1, pageSegment, protectFlags, PAGE_READWRITE);
    // Switch to allocating on a list of pages so we can do leak tracking later
    VerboseHeapTrace(L"Allocing new page in bucket %d\n", bucket);
    Page* page = this->buckets[bucket].PrependNode(this->auxiliaryAllocator, address, pageSegment, bucket);
    if (page == nullptr)
    {
        // Bookkeeping node allocation failed; return the page itself.
        CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
        this->codePageAllocators->ReleasePages(address, 1, pageSegment);
        return nullptr;
    }
#if DBG_DUMP
    this->totalAllocationSize += AutoSystemInfo::PageSize;
    this->freeObjectSize += AutoSystemInfo::PageSize;
#endif
    return page;
}
// Moves 'page' onto the list for 'bucket' - either off the full-page list
// (wasFull) or out of its current bucket's list - and records the new bucket.
// Returns the same page pointer for caller convenience.
Page* Heap::AddPageToBucket(Page* page, BucketId bucket, bool wasFull)
{
    Assert(bucket > BucketId::InvalidBucket && bucket < BucketId::NumBuckets);
    BucketId oldBucket = page->currentBucket;
    page->currentBucket = bucket;
    if (wasFull)
    {
#pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
        Assert(page->inFullList);
        this->fullPages[oldBucket].MoveElementTo(page, &this->buckets[bucket]);
        page->inFullList = false;
    }
    else
    {
        Assert(!page->inFullList);
#pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
        this->buckets[oldBucket].MoveElementTo(page, &this->buckets[bucket]);
    }
    return page;
}
/*
 * This method goes through the buckets greater than the target bucket
 * and if the higher bucket has a page with enough free space to allocate
 * something in the smaller bucket, then we bring the page to the smaller
 * bucket.
 * Note that if we allocate something from a page in the given bucket,
 * and then that page is split into a lower bucket, freeing is still not
 * a problem since the larger allocation is a multiple of the smaller one.
 * This gets more complicated if we can coalesce buckets. In that case,
 * we need to make sure that if a page was coalesced, and an allocation
 * pre-coalescing was freed, the page would need to get split upon free
 * to ensure correctness. For now, we've skipped implementing coalescing.
 * findPreReservedHeapPages - true, if we need to find pages only belonging to PreReservedHeapSegment
 */
Page* Heap::FindPageToSplit(BucketId targetBucket, bool findPreReservedHeapPages)
{
    // Scan strictly larger buckets, smallest first.
    for (BucketId b = (BucketId)(targetBucket + 1); b < BucketId::NumBuckets; b = (BucketId) (b + 1))
    {
#pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
        FOREACH_DLISTBASE_ENTRY_EDITING(Page, pageInBucket, &this->buckets[b], bucketIter)
        {
            Assert(!pageInBucket.inFullList);
            if (findPreReservedHeapPages && !this->codePageAllocators->IsPreReservedSegment(pageInBucket.segment))
            {
                //Find only pages that are pre-reserved using preReservedHeapPageAllocator
                continue;
            }
            if (pageInBucket.CanAllocate(targetBucket))
            {
                Page* page = &pageInBucket;
                if (findPreReservedHeapPages)
                {
                    VerboseHeapTrace(L"PRE-RESERVE: Found page for splitting in Pre Reserved Segment\n");
                }
                VerboseHeapTrace(L"Found page to split. Moving from bucket %d to %d\n", b, targetBucket);
                // Re-home the page in the smaller bucket and hand it back.
                return AddPageToBucket(page, targetBucket);
            }
        }
        NEXT_DLISTBASE_ENTRY_EDITING;
    }
    return nullptr;
}
  603. BVIndex Heap::GetIndexInPage(__in Page* page, __in char* address)
  604. {
  605. Assert(page->address <= address && address < page->address + AutoSystemInfo::PageSize);
  606. return (BVIndex) ((address - page->address) / Page::Alignment);
  607. }
  608. #pragma endregion
  609. /**
  610. * Free List methods
  611. */
  612. #pragma region "Freeing methods"
  613. bool Heap::FreeAllocation(Allocation* object)
  614. {
  615. Page* page = object->page;
  616. void* segment = page->segment;
  617. size_t pageSize = AutoSystemInfo::PageSize;
  618. unsigned int length = GetChunkSizeForBytes(object->size);
  619. BVIndex index = GetIndexInPage(page, object->address);
  620. #if DBG
  621. // Make sure that it's not already been freed
  622. for (BVIndex i = index; i < length; i++)
  623. {
  624. Assert(!page->freeBitVector.Test(i));
  625. }
  626. #endif
  627. if (page->inFullList)
  628. {
  629. VerboseHeapTrace(L"Recycling page 0x%p because address 0x%p of size %d was freed\n", page->address, object->address, object->size);
  630. // If the object being freed is equal to the page size, we're
  631. // going to remove it anyway so don't add it to a bucket
  632. if (object->size != pageSize)
  633. {
  634. AddPageToBucket(page, page->currentBucket, true);
  635. }
  636. else
  637. {
  638. EnsureAllocationWriteable(object);
  639. // Fill the old buffer with debug breaks
  640. CustomHeap::FillDebugBreak((BYTE *)object->address, object->size);
  641. void* pageAddress = page->address;
  642. this->fullPages[page->currentBucket].RemoveElement(this->auxiliaryAllocator, page);
  643. // The page is not in any bucket- just update the stats, free the allocation
  644. // and dump the page- we don't need to update free object size since the object
  645. // size is equal to the page size so they cancel each other out
  646. #if DBG_DUMP
  647. this->totalAllocationSize -= pageSize;
  648. #endif
  649. this->auxiliaryAllocator->Free(object, sizeof(Allocation));
  650. {
  651. CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
  652. this->codePageAllocators->ReleasePages(pageAddress, 1, segment);
  653. }
  654. VerboseHeapTrace(L"FastPath: freeing page-sized object directly\n");
  655. return true;
  656. }
  657. }
  658. // If the page is about to become empty then we should not need
  659. // to set it to executable and we don't expect to restore the
  660. // previous protection settings.
  661. if (page->freeBitVector.Count() == BVUnit::BitsPerWord - length)
  662. {
  663. EnsureAllocationWriteable(object);
  664. }
  665. else
  666. {
  667. EnsureAllocationExecuteWriteable(object);
  668. }
  669. // Fill the old buffer with debug breaks
  670. CustomHeap::FillDebugBreak((BYTE *)object->address, object->size);
  671. VerboseHeapTrace(L"Setting %d bits starting at bit %d, Free bit vector in page was ", length, index);
  672. #if VERBOSE_HEAP
  673. page->freeBitVector.DumpWord();
  674. #endif
  675. VerboseHeapTrace(L"\n");
  676. page->freeBitVector.SetRange(index, length);
  677. VerboseHeapTrace(L"Free bit vector in page: ", length, index);
  678. #if VERBOSE_HEAP
  679. page->freeBitVector.DumpWord();
  680. #endif
  681. VerboseHeapTrace(L"\n");
  682. #if DBG_DUMP
  683. this->freeObjectSize += object->size;
  684. this->freesSinceLastCompact += object->size;
  685. #endif
  686. this->auxiliaryAllocator->Free(object, sizeof(Allocation));
  687. if (page->IsEmpty())
  688. {
  689. this->buckets[page->currentBucket].RemoveElement(this->auxiliaryAllocator, page);
  690. return false;
  691. }
  692. else // after freeing part of the page, the page should be in PAGE_EXECUTE_READWRITE protection, and turning to PAGE_EXECUTE (always with TARGETS_NO_UPDATE state)
  693. {
  694. DWORD protectFlags = 0;
  695. if (AutoSystemInfo::Data.IsCFGEnabled())
  696. {
  697. protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
  698. }
  699. else
  700. {
  701. protectFlags = PAGE_EXECUTE;
  702. }
  703. this->codePageAllocators->ProtectPages(page->address, 1, segment, protectFlags, PAGE_EXECUTE_READWRITE);
  704. return true;
  705. }
  706. }
// Hands already-decommitted bucket pages back to the page allocator's
// decommit tracking and frees their bookkeeping nodes. Teardown only.
void Heap::FreeDecommittedBuckets()
{
    // CodePageAllocators is locked in FreeAll
    Assert(inDtor);
    FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &this->decommittedPages, iter)
    {
        this->codePageAllocators->TrackDecommittedPages(page.address, 1, page.segment);
        iter.RemoveCurrent(this->auxiliaryAllocator);
    }
    NEXT_DLISTBASE_ENTRY_EDITING;
}
// Releases a single committed page back to the page allocator during
// teardown. The caller removes the Page node from its list.
void Heap::FreePage(Page* page)
{
    // CodePageAllocators is locked in FreeAll
    Assert(inDtor);
    DWORD pageSize = AutoSystemInfo::PageSize;
    // The page may be execute-only; make it writable before release.
    EnsurePageWriteable(page);
    size_t freeSpace = page->freeBitVector.Count() * Page::Alignment;
    VerboseHeapTrace(L"Removing page in bucket %d, freeSpace: %d\n", page->currentBucket, freeSpace);
    this->codePageAllocators->ReleasePages(page->address, 1, page->segment);
#if DBG_DUMP
    this->freeObjectSize -= freeSpace;
    this->totalAllocationSize -= pageSize;
#endif
}
// Releases pages in the given bucket back to the page allocator.
// When freeOnlyEmptyPages is true, pages that still contain live
// allocations are skipped and stay in the list.
// Teardown-only: the CodePageAllocators lock is already held by FreeAll.
void Heap::FreeBucket(DListBase<Page>* bucket, bool freeOnlyEmptyPages)
{
// CodePageAllocators is locked in FreeAll
Assert(inDtor);
FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, bucket, pageIter)
{
// Templatize this to remove branches/make code more compact?
if (!freeOnlyEmptyPages || page.IsEmpty())
{
FreePage(&page);
// Remove the list node only after the page itself has been released.
pageIter.RemoveCurrent(this->auxiliaryAllocator);
}
}
NEXT_DLISTBASE_ENTRY_EDITING;
}
  747. void Heap::FreeBuckets(bool freeOnlyEmptyPages)
  748. {
  749. // CodePageAllocators is locked in FreeAll
  750. Assert(inDtor);
  751. for (int i = 0; i < NumBuckets; i++)
  752. {
  753. FreeBucket(&this->buckets[i], freeOnlyEmptyPages);
  754. FreeBucket(&this->fullPages[i], freeOnlyEmptyPages);
  755. }
  756. #if DBG_DUMP
  757. this->allocationsSinceLastCompact = 0;
  758. this->freesSinceLastCompact = 0;
  759. #endif
  760. }
// Re-scans the "full" page lists and moves back into the allocatable
// buckets any page that no longer qualifies as full (per the trace below,
// e.g. when its XDATA secondary allocation was freed).
// Returns true when at least one page was recycled.
bool Heap::UpdateFullPages()
{
bool updated = false;
// Cheap early-out: only rescan if the secondary allocators report a state
// change since the last time we looked.
if (this->codePageAllocators->HasSecondaryAllocStateChanged(&lastSecondaryAllocStateChangedCount))
{
CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
for (int bucket = 0; bucket < BucketId::NumBuckets; bucket++)
{
FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(this->fullPages[bucket]), bucketIter)
{
Assert(page.inFullList); // invariant: everything on fullPages is marked full
if (!this->ShouldBeInFullList(&page))
{
VerboseHeapTrace(L"Recycling page 0x%p because XDATA was freed\n", page.address);
// Splice the node into the allocatable bucket; the editing
// iterator stays valid across the move.
bucketIter.MoveCurrentTo(&(this->buckets[bucket]));
page.inFullList = false;
updated = true;
}
}
NEXT_DLISTBASE_ENTRY_EDITING;
}
}
return updated;
}
#if PDATA_ENABLED
// Releases the xdata (unwind data) secondary allocation belonging to the
// given segment and marks it freed. Must not be called on an allocation
// that was already freed (asserted below).
void Heap::FreeXdata(XDataAllocation* xdata, void* segment)
{
Assert(!xdata->IsFreed());
{
// Scoped lock: ReleaseSecondary and the Free() state flip happen under
// the code-page allocator lock, which is dropped at the end of this block.
CodePageAllocators::AutoLock autoLock(this->codePageAllocators);
this->codePageAllocators->ReleaseSecondary(*xdata, segment);
xdata->Free();
}
}
#endif
#if DBG_DUMP
// Debug-only: dumps aggregate heap statistics followed by a per-bucket
// breakdown — for each bucket, its minimum allocation size, page count,
// bytes used in every partially-used page, and the full-page count.
void Heap::DumpStats()
{
HeapTrace(L"Total allocation size: %d\n", totalAllocationSize);
HeapTrace(L"Total free size: %d\n", freeObjectSize);
HeapTrace(L"Total allocations since last compact: %d\n", allocationsSinceLastCompact);
HeapTrace(L"Total frees since last compact: %d\n", freesSinceLastCompact);
HeapTrace(L"Large object count: %d\n", this->largeObjectAllocations.Count());
HeapTrace(L"Buckets: \n");
for (int i = 0; i < BucketId::NumBuckets; i++)
{
// Bucket i serves allocations starting at 2^(i+7) bytes.
printf("\t%d => %u [", (1 << (i + 7)), buckets[i].Count());
FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &this->buckets[i], bucketIter)
{
// Complement the free bit vector to get the used bits.
BVUnit usedBitVector = page.freeBitVector;
usedBitVector.ComplimentAll(); // Get the actual used bit vector
printf(" %u ", usedBitVector.Count() * Page::Alignment); // Print out the space used in this page
}
NEXT_DLISTBASE_ENTRY_EDITING
printf("] {{%u}}\n", this->fullPages[i].Count());
}
}
#endif
  819. #pragma endregion
  820. /**
  821. * Helper methods
  822. */
  823. #pragma region "Helpers"
  824. inline unsigned int log2(size_t number)
  825. {
  826. const unsigned int b[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
  827. const unsigned int S[] = {1, 2, 4, 8, 16};
  828. unsigned int result = 0;
  829. for (int i = 4; i >= 0; i--)
  830. {
  831. if (number & b[i])
  832. {
  833. number >>= S[i];
  834. result |= S[i];
  835. }
  836. }
  837. return result;
  838. }
  839. inline BucketId GetBucketForSize(size_t bytes)
  840. {
  841. if (bytes > Page::MaxAllocationSize)
  842. {
  843. return BucketId::LargeObjectList;
  844. }
  845. BucketId bucket = (BucketId) (log2(bytes) - 7);
  846. // < 8 => 0
  847. // 8 => 1
  848. // 9 => 2 ...
  849. Assert(bucket < BucketId::LargeObjectList);
  850. if (bucket < BucketId::SmallObjectList)
  851. {
  852. bucket = BucketId::SmallObjectList;
  853. }
  854. return bucket;
  855. }
// Fills the specified buffer with "debug break" instruction encoding.
// If there is any space left after that due to alignment, fill it with 0.
// static
void FillDebugBreak(__out_bcount_full(byteCount) BYTE* buffer, __in size_t byteCount)
{
#if defined(_M_ARM)
// On ARM there is breakpoint instruction (BKPT) which is 0xBEii, where ii (immediate 8) can be any value, 0xBE in particular.
// While it could be easier to put 0xBE (same way as 0xCC on x86), BKPT is not recommended -- it may cause unexpected side effects.
// So, use the same sequence the C++ compiler uses (0xDEFE); this is recognized by the debugger as __debugbreak.
// This is 2 bytes, and in case there is a gap of 1 byte in the end, fill it with 0 (there is no 1 byte long THUMB instruction).
CompileAssert(sizeof(wchar_t) == 2);
wchar_t pattern = 0xDEFE;
wmemset(reinterpret_cast<wchar_t*>(buffer), pattern, byteCount / 2);
if (byteCount % 2)
{
// Note: this is valid scenario: in JIT mode, we may not be 2-byte-aligned in the end of unwind info.
*(buffer + byteCount - 1) = 0; // Fill last remaining byte.
}
#elif defined(_M_ARM64)
CompileAssert(sizeof(DWORD) == 4);
// ARM64 break: BRK #imm16 encoding (0xd4200000) with immediate 0xF000 placed at bit 5.
DWORD pattern = 0xd4200000 | (0xf000 << 5);
for (size_t i = 0; i < byteCount / 4; i++)
{
reinterpret_cast<DWORD*>(buffer)[i] = pattern;
}
for (size_t i = (byteCount / 4) * 4; i < byteCount; i++)
{
// Note: this is a valid scenario: in JIT mode the end of the unwind info
// may not be 4-byte-aligned, leaving up to 3 trailing bytes to zero-fill.
buffer[i] = 0; // Fill last remaining bytes.
}
#else
// On Intel just use "INT 3" instruction which is 0xCC.
memset(buffer, 0xCC, byteCount);
#endif
}
  891. #pragma endregion
  892. };
  893. }