// CustomHeap.cpp
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "CommonMemoryPch.h"
  6. #ifdef _M_X64
  7. #include "Memory/amd64/XDataAllocator.h"
  8. #elif defined(_M_ARM)
  9. #include "Memory/arm/XDataAllocator.h"
  10. #include <wchar.h>
  11. #elif defined(_M_ARM64)
  12. #include "Memory/arm64/XDataAllocator.h"
  13. #endif
  14. #include "CustomHeap.h"
  15. namespace Memory
  16. {
  17. namespace CustomHeap
  18. {
  19. #pragma region "Constructor and Destructor"
  20. template<typename TAlloc, typename TPreReservedAlloc>
  21. Heap<TAlloc, TPreReservedAlloc>::Heap(ArenaAllocator * alloc, CodePageAllocators<TAlloc, TPreReservedAlloc> * codePageAllocators, HANDLE processHandle):
  22. auxiliaryAllocator(alloc),
  23. codePageAllocators(codePageAllocators),
  24. lastSecondaryAllocStateChangedCount(0),
  25. processHandle(processHandle)
  26. #if DBG_DUMP
  27. , freeObjectSize(0)
  28. , totalAllocationSize(0)
  29. , allocationsSinceLastCompact(0)
  30. , freesSinceLastCompact(0)
  31. #endif
  32. #if DBG
  33. , inDtor(false)
  34. #endif
  35. {
  36. for (int i = 0; i < NumBuckets; i++)
  37. {
  38. this->buckets[i].Reset();
  39. }
  40. }
  41. template<typename TAlloc, typename TPreReservedAlloc>
  42. Heap<TAlloc, TPreReservedAlloc>::~Heap()
  43. {
  44. #if DBG
  45. inDtor = true;
  46. #endif
  47. this->FreeAll();
  48. }
  49. #pragma endregion
  50. #pragma region "Public routines"
  51. template<typename TAlloc, typename TPreReservedAlloc>
  52. void Heap<TAlloc, TPreReservedAlloc>::FreeAll()
  53. {
  54. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  55. FreeBuckets(false);
  56. FreeLargeObjects();
  57. FreeDecommittedBuckets();
  58. FreeDecommittedLargeObjects();
  59. }
  60. template<typename TAlloc, typename TPreReservedAlloc>
  61. void Heap<TAlloc, TPreReservedAlloc>::Free(__in Allocation* object)
  62. {
  63. Assert(object != nullptr);
  64. if (object == nullptr)
  65. {
  66. return;
  67. }
  68. BucketId bucket = (BucketId) GetBucketForSize(object->size);
  69. if (bucket == BucketId::LargeObjectList)
  70. {
  71. #if PDATA_ENABLED
  72. if(!object->xdata.IsFreed())
  73. {
  74. FreeXdata(&object->xdata, object->largeObjectAllocation.segment);
  75. }
  76. #endif
  77. if (!object->largeObjectAllocation.isDecommitted)
  78. {
  79. FreeLargeObject(object);
  80. }
  81. return;
  82. }
  83. #if PDATA_ENABLED
  84. if(!object->xdata.IsFreed())
  85. {
  86. FreeXdata(&object->xdata, object->page->segment);
  87. }
  88. #endif
  89. if (!object->page->isDecommitted)
  90. {
  91. FreeAllocation(object);
  92. }
  93. }
  94. template<typename TAlloc, typename TPreReservedAlloc>
  95. void Heap<TAlloc, TPreReservedAlloc>::DecommitAll()
  96. {
  97. // This function doesn't really touch the page allocator data structure.
  98. // DecommitPages is merely a wrapper for VirtualFree
  99. // So no need to take the critical section to synchronize
  100. DListBase<Allocation>::EditingIterator i(&this->largeObjectAllocations);
  101. while (i.Next())
  102. {
  103. Allocation& allocation = i.Data();
  104. Assert(!allocation.largeObjectAllocation.isDecommitted);
  105. this->codePageAllocators->DecommitPages(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
  106. i.MoveCurrentTo(&this->decommittedLargeObjects);
  107. allocation.largeObjectAllocation.isDecommitted = true;
  108. }
  109. for (int bucket = 0; bucket < BucketId::NumBuckets; bucket++)
  110. {
  111. FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(this->fullPages[bucket]), bucketIter1)
  112. {
  113. Assert(page.inFullList);
  114. this->codePageAllocators->DecommitPages(page.address, 1 /* pageCount */, page.segment);
  115. bucketIter1.MoveCurrentTo(&(this->decommittedPages));
  116. page.isDecommitted = true;
  117. }
  118. NEXT_DLISTBASE_ENTRY_EDITING;
  119. FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(this->buckets[bucket]), bucketIter2)
  120. {
  121. Assert(!page.inFullList);
  122. this->codePageAllocators->DecommitPages(page.address, 1 /* pageCount */, page.segment);
  123. bucketIter2.MoveCurrentTo(&(this->decommittedPages));
  124. page.isDecommitted = true;
  125. }
  126. NEXT_DLISTBASE_ENTRY_EDITING;
  127. }
  128. }
  129. template<typename TAlloc, typename TPreReservedAlloc>
  130. bool Heap<TAlloc, TPreReservedAlloc>::IsInHeap(DListBase<Page> const& bucket, __in void * address)
  131. {
  132. DListBase<Page>::Iterator i(&bucket);
  133. while (i.Next())
  134. {
  135. Page& page = i.Data();
  136. if (page.address <= address && address < page.address + AutoSystemInfo::PageSize)
  137. {
  138. return true;
  139. }
  140. }
  141. return false;
  142. }
  143. template<typename TAlloc, typename TPreReservedAlloc>
  144. bool Heap<TAlloc, TPreReservedAlloc>::IsInHeap(DListBase<Page> const buckets[NumBuckets], __in void * address)
  145. {
  146. for (uint i = 0; i < NumBuckets; i++)
  147. {
  148. if (this->IsInHeap(buckets[i], address))
  149. {
  150. return true;
  151. }
  152. }
  153. return false;
  154. }
  155. template<typename TAlloc, typename TPreReservedAlloc>
  156. bool Heap<TAlloc, TPreReservedAlloc>::IsInHeap(DListBase<Allocation> const& allocations, __in void *address)
  157. {
  158. DListBase<Allocation>::Iterator i(&allocations);
  159. while (i.Next())
  160. {
  161. Allocation& allocation = i.Data();
  162. if (allocation.address <= address && address < allocation.address + allocation.size)
  163. {
  164. return true;
  165. }
  166. }
  167. return false;
  168. }
  169. template<typename TAlloc, typename TPreReservedAlloc>
  170. bool Heap<TAlloc, TPreReservedAlloc>::IsInHeap(__in void* address)
  171. {
  172. return IsInHeap(buckets, address) || IsInHeap(fullPages, address) || IsInHeap(largeObjectAllocations, address);
  173. }
  174. template<typename TAlloc, typename TPreReservedAlloc>
  175. Page * Heap<TAlloc, TPreReservedAlloc>::GetExistingPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment)
  176. {
  177. // TODO: this can get a non-prereserved page even if you want one
  178. if (!this->buckets[bucket].Empty())
  179. {
  180. Assert(!this->buckets[bucket].Head().inFullList);
  181. return &this->buckets[bucket].Head();
  182. }
  183. return FindPageToSplit(bucket, canAllocInPreReservedHeapPageSegment);
  184. }
  185. /*
  186. * Algorithm:
  187. * - Find bucket
  188. * - Check bucket pages - if it has enough free space, allocate that chunk
  189. * - Check pages in bigger buckets - if that has enough space, split that page and allocate from that chunk
  190. * - Allocate new page
  191. */
  192. template<typename TAlloc, typename TPreReservedAlloc>
  193. Allocation* Heap<TAlloc, TPreReservedAlloc>::Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
  194. {
  195. Assert(bytes > 0);
  196. Assert((codePageAllocators->AllocXdata() || pdataCount == 0) && (!codePageAllocators->AllocXdata() || pdataCount > 0));
  197. Assert(pdataCount > 0 || (pdataCount == 0 && xdataSize == 0));
  198. // Round up to power of two to allocate, and figure out which bucket to allocate in
  199. int _;
  200. size_t bytesToAllocate = PowerOf2Policy::GetSize(bytes, &_ /* modFunctionIndex */);
  201. BucketId bucket = (BucketId) GetBucketForSize(bytesToAllocate);
  202. if (bucket == BucketId::LargeObjectList)
  203. {
  204. return AllocLargeObject(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
  205. }
  206. VerboseHeapTrace(_u("Bucket is %d\n"), bucket);
  207. VerboseHeapTrace(_u("Requested: %d bytes. Allocated: %d bytes\n"), bytes, bytesToAllocate);
  208. do
  209. {
  210. Page* page = GetExistingPage(bucket, canAllocInPreReservedHeapPageSegment);
  211. if (page == nullptr && UpdateFullPages())
  212. {
  213. page = GetExistingPage(bucket, canAllocInPreReservedHeapPageSegment);
  214. }
  215. if (page == nullptr)
  216. {
  217. page = AllocNewPage(bucket, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
  218. }
  219. else if (!canAllocInPreReservedHeapPageSegment && isAnyJittedCode)
  220. {
  221. *isAllJITCodeInPreReservedRegion = false;
  222. }
  223. // Out of memory
  224. if (page == nullptr)
  225. {
  226. return nullptr;
  227. }
  228. #if defined(DBG)
  229. MEMORY_BASIC_INFORMATION memBasicInfo;
  230. size_t resultBytes = VirtualQueryEx(this->processHandle, page->address, &memBasicInfo, sizeof(memBasicInfo));
  231. if (resultBytes == 0)
  232. {
  233. MemoryOperationLastError::RecordLastError();
  234. }
  235. else
  236. {
  237. Assert(memBasicInfo.Protect == PAGE_EXECUTE);
  238. }
  239. #endif
  240. Allocation* allocation = nullptr;
  241. if (AllocInPage(page, bytesToAllocate, pdataCount, xdataSize, &allocation))
  242. {
  243. return allocation;
  244. }
  245. } while (true);
  246. }
  247. template<typename TAlloc, typename TPreReservedAlloc>
  248. BOOL Heap<TAlloc, TPreReservedAlloc>::ProtectAllocationWithExecuteReadWrite(Allocation *allocation, __in_opt char* addressInPage)
  249. {
  250. DWORD protectFlags = 0;
  251. if (AutoSystemInfo::Data.IsCFGEnabled())
  252. {
  253. protectFlags = PAGE_EXECUTE_RW_TARGETS_NO_UPDATE;
  254. }
  255. else
  256. {
  257. protectFlags = PAGE_EXECUTE_READWRITE;
  258. }
  259. return this->ProtectAllocation(allocation, protectFlags, PAGE_EXECUTE, addressInPage);
  260. }
  261. template<typename TAlloc, typename TPreReservedAlloc>
  262. BOOL Heap<TAlloc, TPreReservedAlloc>::ProtectAllocationWithExecuteReadOnly(Allocation *allocation, __in_opt char* addressInPage)
  263. {
  264. DWORD protectFlags = 0;
  265. if (AutoSystemInfo::Data.IsCFGEnabled())
  266. {
  267. protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
  268. }
  269. else
  270. {
  271. protectFlags = PAGE_EXECUTE;
  272. }
  273. return this->ProtectAllocation(allocation, protectFlags, PAGE_EXECUTE_READWRITE, addressInPage);
  274. }
  275. template<typename TAlloc, typename TPreReservedAlloc>
  276. BOOL Heap<TAlloc, TPreReservedAlloc>::ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag, __in_opt char* addressInPage)
  277. {
  278. // Allocate at the page level so that our protections don't
  279. // transcend allocation page boundaries. Here, allocation->address is page
  280. // aligned if the object is a large object allocation. If it isn't, in the else
  281. // branch of the following if statement, we set it to the allocation's page's
  282. // address. This ensures that the address being protected is always page aligned
  283. Assert(allocation != nullptr);
  284. Assert(allocation->isAllocationUsed);
  285. Assert(addressInPage == nullptr || (addressInPage >= allocation->address && addressInPage < (allocation->address + allocation->size)));
  286. char* address = allocation->address;
  287. size_t pageCount;
  288. void * segment;
  289. if (allocation->IsLargeAllocation())
  290. {
  291. #if DBG_DUMP || defined(RECYCLER_TRACE)
  292. if (Js::Configuration::Global.flags.IsEnabled(Js::TraceProtectPagesFlag))
  293. {
  294. Output::Print(_u("Protecting large allocation\n"));
  295. }
  296. #endif
  297. segment = allocation->largeObjectAllocation.segment;
  298. if (addressInPage != nullptr)
  299. {
  300. if (addressInPage >= allocation->address + AutoSystemInfo::PageSize)
  301. {
  302. size_t page = (addressInPage - allocation->address) / AutoSystemInfo::PageSize;
  303. address = allocation->address + (page * AutoSystemInfo::PageSize);
  304. }
  305. pageCount = 1;
  306. }
  307. else
  308. {
  309. pageCount = allocation->GetPageCount();
  310. }
  311. VerboseHeapTrace(_u("Protecting 0x%p with 0x%x\n"), address, dwVirtualProtectFlags);
  312. return this->codePageAllocators->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
  313. }
  314. else
  315. {
  316. #if DBG_DUMP || defined(RECYCLER_TRACE)
  317. if (Js::Configuration::Global.flags.IsEnabled(Js::TraceProtectPagesFlag))
  318. {
  319. Output::Print(_u("Protecting small allocation\n"));
  320. }
  321. #endif
  322. segment = allocation->page->segment;
  323. address = allocation->page->address;
  324. pageCount = 1;
  325. VerboseHeapTrace(_u("Protecting 0x%p with 0x%x\n"), address, dwVirtualProtectFlags);
  326. return this->codePageAllocators->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
  327. }
  328. }
  329. #pragma endregion
  330. #pragma region "Large object methods"
  331. template<typename TAlloc, typename TPreReservedAlloc>
  332. Allocation* Heap<TAlloc, TPreReservedAlloc>::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
  333. {
  334. size_t pages = GetNumPagesForSize(bytes);
  335. if (pages == 0)
  336. {
  337. return nullptr;
  338. }
  339. void * segment = nullptr;
  340. char* address = nullptr;
  341. #if PDATA_ENABLED
  342. XDataAllocation xdata;
  343. #endif
  344. {
  345. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  346. address = this->codePageAllocators->Alloc(&pages, &segment, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
  347. // Out of memory
  348. if (address == nullptr)
  349. {
  350. return nullptr;
  351. }
  352. char* localAddr = this->codePageAllocators->AllocLocal(address, pages*AutoSystemInfo::PageSize, segment);
  353. if (!localAddr)
  354. {
  355. return nullptr;
  356. }
  357. FillDebugBreak((BYTE*)localAddr, pages*AutoSystemInfo::PageSize);
  358. this->codePageAllocators->FreeLocal(localAddr, segment);
  359. if (this->processHandle == GetCurrentProcess())
  360. {
  361. DWORD protectFlags = 0;
  362. if (AutoSystemInfo::Data.IsCFGEnabled())
  363. {
  364. protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
  365. }
  366. else
  367. {
  368. protectFlags = PAGE_EXECUTE;
  369. }
  370. this->codePageAllocators->ProtectPages(address, pages, segment, protectFlags /*dwVirtualProtectFlags*/, PAGE_READWRITE /*desiredOldProtectFlags*/);
  371. }
  372. #if PDATA_ENABLED
  373. if(pdataCount > 0)
  374. {
  375. if (!this->codePageAllocators->AllocSecondary(segment, (ULONG_PTR) address, bytes, pdataCount, xdataSize, &xdata))
  376. {
  377. this->codePageAllocators->Release(address, pages, segment);
  378. return nullptr;
  379. }
  380. }
  381. #endif
  382. }
  383. #if defined(DBG)
  384. MEMORY_BASIC_INFORMATION memBasicInfo;
  385. size_t resultBytes = VirtualQueryEx(this->processHandle, address, &memBasicInfo, sizeof(memBasicInfo));
  386. if (resultBytes == 0)
  387. {
  388. MemoryOperationLastError::RecordLastError();
  389. }
  390. else
  391. {
  392. Assert(memBasicInfo.Protect == PAGE_EXECUTE);
  393. }
  394. #endif
  395. Allocation* allocation = this->largeObjectAllocations.PrependNode(this->auxiliaryAllocator);
  396. if (allocation == nullptr)
  397. {
  398. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  399. this->codePageAllocators->Release(address, pages, segment);
  400. #if PDATA_ENABLED
  401. if(pdataCount > 0)
  402. {
  403. this->codePageAllocators->ReleaseSecondary(xdata, segment);
  404. }
  405. #endif
  406. return nullptr;
  407. }
  408. allocation->address = address;
  409. allocation->largeObjectAllocation.segment = segment;
  410. allocation->largeObjectAllocation.isDecommitted = false;
  411. allocation->size = pages * AutoSystemInfo::PageSize;
  412. #if PDATA_ENABLED
  413. allocation->xdata = xdata;
  414. #endif
  415. return allocation;
  416. }
  417. template<typename TAlloc, typename TPreReservedAlloc>
  418. void Heap<TAlloc, TPreReservedAlloc>::FreeDecommittedLargeObjects()
  419. {
  420. // CodePageAllocators is locked in FreeAll
  421. Assert(inDtor);
  422. FOREACH_DLISTBASE_ENTRY_EDITING(Allocation, allocation, &this->decommittedLargeObjects, largeObjectIter)
  423. {
  424. VerboseHeapTrace(_u("Decommitting large object at address 0x%p of size %u\n"), allocation.address, allocation.size);
  425. this->codePageAllocators->ReleaseDecommitted(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
  426. largeObjectIter.RemoveCurrent(this->auxiliaryAllocator);
  427. }
  428. NEXT_DLISTBASE_ENTRY_EDITING;
  429. }
  430. //Called during Free (while shutting down)
  431. template<typename TAlloc, typename TPreReservedAlloc>
  432. DWORD Heap<TAlloc, TPreReservedAlloc>::EnsurePageWriteable(Page* page)
  433. {
  434. return EnsurePageReadWrite<PAGE_READWRITE>(page);
  435. }
  436. // this get called when freeing the whole page
  437. template<typename TAlloc, typename TPreReservedAlloc>
  438. DWORD Heap<TAlloc, TPreReservedAlloc>::EnsureAllocationWriteable(Allocation* allocation)
  439. {
  440. return EnsureAllocationReadWrite<PAGE_READWRITE>(allocation);
  441. }
  442. // this get called when only freeing a part in the page
  443. template<typename TAlloc, typename TPreReservedAlloc>
  444. DWORD Heap<TAlloc, TPreReservedAlloc>::EnsureAllocationExecuteWriteable(Allocation* allocation)
  445. {
  446. if (AutoSystemInfo::Data.IsCFGEnabled())
  447. {
  448. return EnsureAllocationReadWrite<PAGE_EXECUTE_RW_TARGETS_NO_UPDATE>(allocation);
  449. }
  450. else
  451. {
  452. return EnsureAllocationReadWrite<PAGE_EXECUTE_READWRITE>(allocation);
  453. }
  454. }
  455. template<typename TAlloc, typename TPreReservedAlloc>
  456. void Heap<TAlloc, TPreReservedAlloc>::FreeLargeObjects()
  457. {
  458. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  459. FOREACH_DLISTBASE_ENTRY_EDITING(Allocation, allocation, &this->largeObjectAllocations, largeObjectIter)
  460. {
  461. EnsureAllocationWriteable(&allocation);
  462. #if PDATA_ENABLED
  463. Assert(allocation.xdata.IsFreed());
  464. #endif
  465. this->codePageAllocators->Release(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
  466. largeObjectIter.RemoveCurrent(this->auxiliaryAllocator);
  467. }
  468. NEXT_DLISTBASE_ENTRY_EDITING;
  469. }
  470. template<typename TAlloc, typename TPreReservedAlloc>
  471. void Heap<TAlloc, TPreReservedAlloc>::FreeLargeObject(Allocation* allocation)
  472. {
  473. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  474. EnsureAllocationWriteable(allocation);
  475. #if PDATA_ENABLED
  476. Assert(allocation->xdata.IsFreed());
  477. #endif
  478. this->codePageAllocators->Release(allocation->address, allocation->GetPageCount(), allocation->largeObjectAllocation.segment);
  479. this->largeObjectAllocations.RemoveElement(this->auxiliaryAllocator, allocation);
  480. }
  481. #pragma endregion
  482. #pragma region "Page methods"
  483. template<typename TAlloc, typename TPreReservedAlloc>
  484. bool Heap<TAlloc, TPreReservedAlloc>::AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushort xdataSize, Allocation ** allocationOut)
  485. {
  486. Allocation * allocation = AnewNoThrowStruct(this->auxiliaryAllocator, Allocation);
  487. if (allocation == nullptr)
  488. {
  489. return true;
  490. }
  491. Assert(Math::IsPow2((int32)bytes));
  492. uint length = GetChunkSizeForBytes(bytes);
  493. BVIndex index = GetFreeIndexForPage(page, bytes);
  494. if (index == BVInvalidIndex)
  495. {
  496. CustomHeap_BadPageState_fatal_error((ULONG_PTR)this);
  497. return false;
  498. }
  499. char* address = page->address + Page::Alignment * index;
  500. #if PDATA_ENABLED
  501. XDataAllocation xdata;
  502. if(pdataCount > 0)
  503. {
  504. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  505. if (this->ShouldBeInFullList(page))
  506. {
  507. Adelete(this->auxiliaryAllocator, allocation);
  508. // If we run out of XData space with the segment, move the page to the full page list, and return false to try the next page.
  509. BucketId bucket = page->currentBucket;
  510. VerboseHeapTrace(_u("Moving page from bucket %d to full list\n"), bucket);
  511. Assert(!page->inFullList);
  512. this->buckets[bucket].MoveElementTo(page, &this->fullPages[bucket]);
  513. page->inFullList = true;
  514. return false;
  515. }
  516. if (!this->codePageAllocators->AllocSecondary(page->segment, (ULONG_PTR)address, bytes, pdataCount, xdataSize, &xdata))
  517. {
  518. Adelete(this->auxiliaryAllocator, allocation);
  519. return true;
  520. }
  521. }
  522. #endif
  523. #if DBG
  524. allocation->isAllocationUsed = false;
  525. allocation->isNotExecutableBecauseOOM = false;
  526. #endif
  527. allocation->page = page;
  528. allocation->size = bytes;
  529. allocation->address = address;
  530. #if DBG_DUMP
  531. this->allocationsSinceLastCompact += bytes;
  532. this->freeObjectSize -= bytes;
  533. #endif
  534. //Section of the Page should already be freed.
  535. if (!page->freeBitVector.TestRange(index, length))
  536. {
  537. CustomHeap_BadPageState_fatal_error((ULONG_PTR)this);
  538. return false;
  539. }
  540. //Section of the Page should already be freed.
  541. if (!page->freeBitVector.TestRange(index, length))
  542. {
  543. CustomHeap_BadPageState_fatal_error((ULONG_PTR)this);
  544. return false;
  545. }
  546. page->freeBitVector.ClearRange(index, length);
  547. VerboseHeapTrace(_u("ChunkSize: %d, Index: %d, Free bit vector in page: "), length, index);
  548. #if VERBOSE_HEAP
  549. page->freeBitVector.DumpWord();
  550. #endif
  551. VerboseHeapTrace(_u("\n"));
  552. if (this->ShouldBeInFullList(page))
  553. {
  554. BucketId bucket = page->currentBucket;
  555. VerboseHeapTrace(_u("Moving page from bucket %d to full list\n"), bucket);
  556. Assert(!page->inFullList);
  557. this->buckets[bucket].MoveElementTo(page, &this->fullPages[bucket]);
  558. page->inFullList = true;
  559. }
  560. #if PDATA_ENABLED
  561. allocation->xdata = xdata;
  562. #endif
  563. *allocationOut = allocation;
  564. return true;
  565. }
  566. template<typename TAlloc, typename TPreReservedAlloc>
  567. Page* Heap<TAlloc, TPreReservedAlloc>::AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
  568. {
  569. void* pageSegment = nullptr;
  570. char* address = nullptr;
  571. {
  572. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  573. address = this->codePageAllocators->AllocPages(1, &pageSegment, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
  574. if (address == nullptr)
  575. {
  576. return nullptr;
  577. }
  578. }
  579. char* localAddr = this->codePageAllocators->AllocLocal(address, AutoSystemInfo::PageSize, pageSegment);
  580. if (!localAddr)
  581. {
  582. return nullptr;
  583. }
  584. FillDebugBreak((BYTE*)localAddr, AutoSystemInfo::PageSize);
  585. this->codePageAllocators->FreeLocal(localAddr, pageSegment);
  586. DWORD protectFlags = 0;
  587. if (AutoSystemInfo::Data.IsCFGEnabled())
  588. {
  589. protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
  590. }
  591. else
  592. {
  593. protectFlags = PAGE_EXECUTE;
  594. }
  595. //Change the protection of the page to Read-Only Execute, before adding it to the bucket list.
  596. this->codePageAllocators->ProtectPages(address, 1, pageSegment, protectFlags, PAGE_READWRITE);
  597. // Switch to allocating on a list of pages so we can do leak tracking later
  598. VerboseHeapTrace(_u("Allocing new page in bucket %d\n"), bucket);
  599. Page* page = this->buckets[bucket].PrependNode(this->auxiliaryAllocator, address, pageSegment, bucket);
  600. if (page == nullptr)
  601. {
  602. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  603. this->codePageAllocators->ReleasePages(address, 1, pageSegment);
  604. return nullptr;
  605. }
  606. #if DBG_DUMP
  607. this->totalAllocationSize += AutoSystemInfo::PageSize;
  608. this->freeObjectSize += AutoSystemInfo::PageSize;
  609. #endif
  610. return page;
  611. }
  612. template<typename TAlloc, typename TPreReservedAlloc>
  613. Page* Heap<TAlloc, TPreReservedAlloc>::AddPageToBucket(Page* page, BucketId bucket, bool wasFull)
  614. {
  615. Assert(bucket > BucketId::InvalidBucket && bucket < BucketId::NumBuckets);
  616. BucketId oldBucket = page->currentBucket;
  617. page->currentBucket = bucket;
  618. if (wasFull)
  619. {
  620. #pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
  621. Assert(page->inFullList);
  622. this->fullPages[oldBucket].MoveElementTo(page, &this->buckets[bucket]);
  623. page->inFullList = false;
  624. }
  625. else
  626. {
  627. Assert(!page->inFullList);
  628. #pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
  629. this->buckets[oldBucket].MoveElementTo(page, &this->buckets[bucket]);
  630. }
  631. return page;
  632. }
  633. /*
  634. * This method goes through the buckets greater than the target bucket
  635. * and if the higher bucket has a page with enough free space to allocate
  636. * something in the smaller bucket, then we bring the page to the smaller
  637. * bucket.
  638. * Note that if we allocate something from a page in the given bucket,
  639. * and then that page is split into a lower bucket, freeing is still not
  640. * a problem since the larger allocation is a multiple of the smaller one.
  641. * This gets more complicated if we can coalesce buckets. In that case,
  642. * we need to make sure that if a page was coalesced, and an allocation
  643. * pre-coalescing was freed, the page would need to get split upon free
  644. * to ensure correctness. For now, we've skipped implementing coalescing.
  645. * findPreReservedHeapPages - true, if we need to find pages only belonging to PreReservedHeapSegment
  646. */
  647. template<typename TAlloc, typename TPreReservedAlloc>
  648. Page* Heap<TAlloc, TPreReservedAlloc>::FindPageToSplit(BucketId targetBucket, bool findPreReservedHeapPages)
  649. {
  650. for (BucketId b = (BucketId)(targetBucket + 1); b < BucketId::NumBuckets; b = (BucketId) (b + 1))
  651. {
  652. #pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
  653. FOREACH_DLISTBASE_ENTRY_EDITING(Page, pageInBucket, &this->buckets[b], bucketIter)
  654. {
  655. Assert(!pageInBucket.inFullList);
  656. if (findPreReservedHeapPages && !this->codePageAllocators->IsPreReservedSegment(pageInBucket.segment))
  657. {
  658. //Find only pages that are pre-reserved using preReservedHeapPageAllocator
  659. continue;
  660. }
  661. if (pageInBucket.CanAllocate(targetBucket))
  662. {
  663. Page* page = &pageInBucket;
  664. if (findPreReservedHeapPages)
  665. {
  666. VerboseHeapTrace(_u("PRE-RESERVE: Found page for splitting in Pre Reserved Segment\n"));
  667. }
  668. VerboseHeapTrace(_u("Found page to split. Moving from bucket %d to %d\n"), b, targetBucket);
  669. return AddPageToBucket(page, targetBucket);
  670. }
  671. }
  672. NEXT_DLISTBASE_ENTRY_EDITING;
  673. }
  674. return nullptr;
  675. }
  676. template<typename TAlloc, typename TPreReservedAlloc>
  677. BVIndex Heap<TAlloc, TPreReservedAlloc>::GetIndexInPage(__in Page* page, __in char* address)
  678. {
  679. Assert(page->address <= address && address < page->address + AutoSystemInfo::PageSize);
  680. return (BVIndex) ((address - page->address) / Page::Alignment);
  681. }
  682. #pragma endregion
  683. /**
  684. * Free List methods
  685. */
  686. #pragma region "Freeing methods"
template<typename TAlloc, typename TPreReservedAlloc>
bool Heap<TAlloc, TPreReservedAlloc>::FreeAllocation(Allocation* object)
{
    // Return an allocation's chunks to its page's free bit vector, releasing
    // or re-bucketing the page as needed. Returns false when the page was
    // removed from its bucket (now empty) or on failure; true otherwise.
    Page* page = object->page;
    void* segment = page->segment;
    size_t pageSize = AutoSystemInfo::PageSize;
    unsigned int length = GetChunkSizeForBytes(object->size);
    BVIndex index = GetIndexInPage(page, object->address);
    // Snapshot of the free-bit count BEFORE this free, used below to detect
    // whether the page is about to become completely empty.
    uint freeBitsCount = page->freeBitVector.Count();
    // Make sure that the section under interest or the whole page has not already been freed
    if (page->IsEmpty() || page->freeBitVector.TestAnyInRange(index, length))
    {
        // Double free / corrupted bookkeeping: report a fatal bad-page state.
        CustomHeap_BadPageState_fatal_error((ULONG_PTR)this);
        return false;
    }
    if (page->inFullList)
    {
        VerboseHeapTrace(_u("Recycling page 0x%p because address 0x%p of size %d was freed\n"), page->address, object->address, object->size);
        // If the object being freed is equal to the page size, we're
        // going to remove it anyway so don't add it to a bucket
        if (object->size != pageSize)
        {
            AddPageToBucket(page, page->currentBucket, true);
        }
        else
        {
            EnsureAllocationWriteable(object);
            // Fill the old buffer with debug breaks
            char* localAddr = this->codePageAllocators->AllocLocal(object->address, object->size, page->segment);
            if (!localAddr)
            {
                MemoryOperationLastError::RecordError(JSERR_FatalMemoryExhaustion);
                return false;
            }
            FillDebugBreak((BYTE*)localAddr, object->size);
            this->codePageAllocators->FreeLocal(localAddr, page->segment);
            // Capture the address now: RemoveElement below destroys the Page entry.
            void* pageAddress = page->address;
            this->fullPages[page->currentBucket].RemoveElement(this->auxiliaryAllocator, page);
            // The page is not in any bucket- just update the stats, free the allocation
            // and dump the page- we don't need to update free object size since the object
            // size is equal to the page size so they cancel each other out
#if DBG_DUMP
            this->totalAllocationSize -= pageSize;
#endif
            this->auxiliaryAllocator->Free(object, sizeof(Allocation));
            {
                // ReleasePages requires the code page allocators' lock.
                AutoCriticalSection autoLock(&this->codePageAllocators->cs);
                this->codePageAllocators->ReleasePages(pageAddress, 1, segment);
            }
            VerboseHeapTrace(_u("FastPath: freeing page-sized object directly\n"));
            return true;
        }
    }
    // If the page is about to become empty then we should not need
    // to set it to executable and we don't expect to restore the
    // previous protection settings.
    if (freeBitsCount == BVUnit::BitsPerWord - length)
    {
        EnsureAllocationWriteable(object);
        FreeAllocationHelper(object, index, length);
        Assert(page->IsEmpty());
        // Page now holds no allocations: drop it from its bucket and signal
        // the caller with false.
        this->buckets[page->currentBucket].RemoveElement(this->auxiliaryAllocator, page);
        return false;
    }
    else
    {
        EnsureAllocationExecuteWriteable(object);
        FreeAllocationHelper(object, index, length);
        // after freeing part of the page, the page should be in PAGE_EXECUTE_READWRITE protection, and turning to PAGE_EXECUTE (always with TARGETS_NO_UPDATE state)
        DWORD protectFlags = 0;
        if (AutoSystemInfo::Data.IsCFGEnabled())
        {
            protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
        }
        else
        {
            protectFlags = PAGE_EXECUTE;
        }
        this->codePageAllocators->ProtectPages(page->address, 1, segment, protectFlags, PAGE_EXECUTE_READWRITE);
        return true;
    }
}
  769. template<typename TAlloc, typename TPreReservedAlloc>
  770. void Heap<TAlloc, TPreReservedAlloc>::FreeAllocationHelper(Allocation* object, BVIndex index, uint length)
  771. {
  772. Page* page = object->page;
  773. // Fill the old buffer with debug breaks
  774. char* localAddr = this->codePageAllocators->AllocLocal(object->address, object->size, page->segment);
  775. if (localAddr)
  776. {
  777. FillDebugBreak((BYTE*)localAddr, object->size);
  778. this->codePageAllocators->FreeLocal(localAddr, page->segment);
  779. }
  780. else
  781. {
  782. MemoryOperationLastError::RecordError(JSERR_FatalMemoryExhaustion);
  783. return;
  784. }
  785. VerboseHeapTrace(_u("Setting %d bits starting at bit %d, Free bit vector in page was "), length, index);
  786. #if VERBOSE_HEAP
  787. page->freeBitVector.DumpWord();
  788. #endif
  789. VerboseHeapTrace(_u("\n"));
  790. page->freeBitVector.SetRange(index, length);
  791. VerboseHeapTrace(_u("Free bit vector in page: "), length, index);
  792. #if VERBOSE_HEAP
  793. page->freeBitVector.DumpWord();
  794. #endif
  795. VerboseHeapTrace(_u("\n"));
  796. #if DBG_DUMP
  797. this->freeObjectSize += object->size;
  798. this->freesSinceLastCompact += object->size;
  799. #endif
  800. this->auxiliaryAllocator->Free(object, sizeof(Allocation));
  801. }
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::FreeDecommittedBuckets()
{
    // Hand every decommitted page back to the page allocator's decommit
    // tracking and drop our bookkeeping entry for it. Teardown-only.
    // CodePageAllocators is locked in FreeAll
    Assert(inDtor);
    FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &this->decommittedPages, iter)
    {
        this->codePageAllocators->TrackDecommittedPages(page.address, 1, page.segment);
        iter.RemoveCurrent(this->auxiliaryAllocator);
    }
    NEXT_DLISTBASE_ENTRY_EDITING;
}
  814. template<typename TAlloc, typename TPreReservedAlloc>
  815. void Heap<TAlloc, TPreReservedAlloc>::FreePage(Page* page)
  816. {
  817. // CodePageAllocators is locked in FreeAll
  818. Assert(inDtor);
  819. DWORD pageSize = AutoSystemInfo::PageSize;
  820. EnsurePageWriteable(page);
  821. size_t freeSpace = page->freeBitVector.Count() * Page::Alignment;
  822. VerboseHeapTrace(_u("Removing page in bucket %d, freeSpace: %d\n"), page->currentBucket, freeSpace);
  823. this->codePageAllocators->ReleasePages(page->address, 1, page->segment);
  824. #if DBG_DUMP
  825. this->freeObjectSize -= freeSpace;
  826. this->totalAllocationSize -= pageSize;
  827. #endif
  828. }
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::FreeBucket(DListBase<Page>* bucket, bool freeOnlyEmptyPages)
{
    // Release the pages of one bucket list. When freeOnlyEmptyPages is true,
    // pages that still carry live allocations are left in the list.
    // CodePageAllocators is locked in FreeAll
    Assert(inDtor);
    FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, bucket, pageIter)
    {
        // Templatize this to remove branches/make code more compact?
        if (!freeOnlyEmptyPages || page.IsEmpty())
        {
            FreePage(&page);
            pageIter.RemoveCurrent(this->auxiliaryAllocator);
        }
    }
    NEXT_DLISTBASE_ENTRY_EDITING;
}
  845. template<typename TAlloc, typename TPreReservedAlloc>
  846. void Heap<TAlloc, TPreReservedAlloc>::FreeBuckets(bool freeOnlyEmptyPages)
  847. {
  848. // CodePageAllocators is locked in FreeAll
  849. Assert(inDtor);
  850. for (int i = 0; i < NumBuckets; i++)
  851. {
  852. FreeBucket(&this->buckets[i], freeOnlyEmptyPages);
  853. FreeBucket(&this->fullPages[i], freeOnlyEmptyPages);
  854. }
  855. #if DBG_DUMP
  856. this->allocationsSinceLastCompact = 0;
  857. this->freesSinceLastCompact = 0;
  858. #endif
  859. }
template<typename TAlloc, typename TPreReservedAlloc>
bool Heap<TAlloc, TPreReservedAlloc>::UpdateFullPages()
{
    // Re-examine the full-page lists after the secondary-allocation state may
    // have changed; pages that no longer qualify as "full" move back to their
    // normal bucket so they can serve allocations again.
    // Returns true if at least one page was recycled.
    bool updated = false;
    if (this->codePageAllocators->HasSecondaryAllocStateChanged(&lastSecondaryAllocStateChangedCount))
    {
        AutoCriticalSection autoLock(&this->codePageAllocators->cs);
        for (int bucket = 0; bucket < BucketId::NumBuckets; bucket++)
        {
            FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(this->fullPages[bucket]), bucketIter)
            {
                Assert(page.inFullList);
                if (!this->ShouldBeInFullList(&page))
                {
                    VerboseHeapTrace(_u("Recycling page 0x%p because XDATA was freed\n"), page.address);
                    // Move the list node first, then clear the flag.
                    bucketIter.MoveCurrentTo(&(this->buckets[bucket]));
                    page.inFullList = false;
                    updated = true;
                }
            }
            NEXT_DLISTBASE_ENTRY_EDITING;
        }
    }
    return updated;
}
  885. #if PDATA_ENABLED
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::FreeXdata(XDataAllocation* xdata, void* segment)
{
    // Release a secondary (unwind-data) allocation back to its segment's
    // secondary allocator and mark it freed. Must not be called twice for the
    // same allocation.
    Assert(!xdata->IsFreed());
    {
        // ReleaseSecondary requires the code page allocators' lock.
        AutoCriticalSection autoLock(&this->codePageAllocators->cs);
        this->codePageAllocators->ReleaseSecondary(*xdata, segment);
        xdata->Free();
    }
}
  896. #endif
  897. #if DBG_DUMP
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::DumpStats()
{
    // Debug-only: print aggregate heap statistics, then a per-bucket line
    // showing the bucket's chunk size, its non-full page count, the used
    // bytes of each such page, and (in {{ }}) its full-page count.
    HeapTrace(_u("Total allocation size: %d\n"), totalAllocationSize);
    HeapTrace(_u("Total free size: %d\n"), freeObjectSize);
    HeapTrace(_u("Total allocations since last compact: %d\n"), allocationsSinceLastCompact);
    HeapTrace(_u("Total frees since last compact: %d\n"), freesSinceLastCompact);
    HeapTrace(_u("Large object count: %d\n"), this->largeObjectAllocations.Count());
    HeapTrace(_u("Buckets: \n"));
    for (int i = 0; i < BucketId::NumBuckets; i++)
    {
        // (1 << (i + 7)) is the bucket's allocation size: 128 bytes for
        // bucket 0, doubling per bucket (matches GetBucketForSize's log2 - 7).
        printf("\t%d => %u [", (1 << (i + 7)), buckets[i].Count());
        FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &this->buckets[i], bucketIter)
        {
            BVUnit usedBitVector = page.freeBitVector;
            usedBitVector.ComplimentAll(); // Get the actual used bit vector
            printf(" %u ", usedBitVector.Count() * Page::Alignment); // Print out the space used in this page
        }
        NEXT_DLISTBASE_ENTRY_EDITING
        printf("] {{%u}}\n", this->fullPages[i].Count());
    }
}
  920. #endif
  921. #pragma endregion
  922. /**
  923. * Helper methods
  924. */
  925. #pragma region "Helpers"
  926. inline unsigned int log2(size_t number)
  927. {
  928. const unsigned int b[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
  929. const unsigned int S[] = {1, 2, 4, 8, 16};
  930. unsigned int result = 0;
  931. for (int i = 4; i >= 0; i--)
  932. {
  933. if (number & b[i])
  934. {
  935. number >>= S[i];
  936. result |= S[i];
  937. }
  938. }
  939. return result;
  940. }
  941. inline BucketId GetBucketForSize(size_t bytes)
  942. {
  943. if (bytes > Page::MaxAllocationSize)
  944. {
  945. return BucketId::LargeObjectList;
  946. }
  947. BucketId bucket = (BucketId) (log2(bytes) - 7);
  948. // < 8 => 0
  949. // 8 => 1
  950. // 9 => 2 ...
  951. Assert(bucket < BucketId::LargeObjectList);
  952. if (bucket < BucketId::SmallObjectList)
  953. {
  954. bucket = BucketId::SmallObjectList;
  955. }
  956. return bucket;
  957. }
// Fills the specified buffer with "debug break" instruction encoding.
// If there is any space left after that due to alignment, fill it with 0.
// static
void FillDebugBreak(_Out_writes_bytes_all_(byteCount) BYTE* buffer, _In_ size_t byteCount)
{
#if defined(_M_ARM)
    // On ARM there is breakpoint instruction (BKPT) which is 0xBEii, where ii (immediate 8) can be any value, 0xBE in particular.
    // While it could be easier to put 0xBE (same way as 0xCC on x86), BKPT is not recommended -- it may cause unexpected side effects.
    // So, use same sequence are C++ compiler uses (0xDEFE), this is recognized by debugger as __debugbreak.
    // This is 2 bytes, and in case there is a gap of 1 byte in the end, fill it with 0 (there is no 1 byte long THUMB instruction).
    CompileAssert(sizeof(char16) == 2);
    char16 pattern = 0xDEFE;
    BYTE * writeBuffer = buffer;
    wmemset((char16 *)writeBuffer, pattern, byteCount / 2);
    if (byteCount % 2)
    {
        // Note: this is valid scenario: in JIT mode, we may not be 2-byte-aligned in the end of unwind info.
        *(writeBuffer + byteCount - 1) = 0; // Fill last remaining byte.
    }
#elif defined(_M_ARM64)
    CompileAssert(sizeof(DWORD) == 4);
    // A64 BRK instruction: base opcode 0xd4200000 with the 16-bit immediate
    // (0xF000 here) placed in bits [20:5].
    DWORD pattern = 0xd4200000 | (0xf000 << 5);
    for (size_t i = 0; i < byteCount / 4; i++)
    {
        reinterpret_cast<DWORD*>(buffer)[i] = pattern;
    }
    for (size_t i = (byteCount / 4) * 4; i < byteCount; i++)
    {
        // Note: this is valid scenario: in JIT mode, the buffer may not be 4-byte-aligned at the end of unwind info.
        buffer[i] = 0; // Fill last remaining bytes.
    }
#else
    // On Intel just use "INT 3" instruction which is 0xCC.
    memset(buffer, 0xCC, byteCount);
#endif
}
  994. template class Heap<VirtualAllocWrapper, PreReservedVirtualAllocWrapper>;
  995. #if ENABLE_OOP_NATIVE_CODEGEN
  996. template class Heap<SectionAllocWrapper, PreReservedSectionAllocWrapper>;
  997. #endif
  998. #pragma endregion
template<>
char *
CodePageAllocators<VirtualAllocWrapper, PreReservedVirtualAllocWrapper>::AllocLocal(char * remoteAddr, size_t size, void * segment)
{
    // In-proc specialization: the address is already valid in this process,
    // so no local mapping is needed — hand back the same pointer.
    return remoteAddr;
}
template<>
void
CodePageAllocators<VirtualAllocWrapper, PreReservedVirtualAllocWrapper>::FreeLocal(char * localAddr, void * segment)
{
    // do nothing in case we are in proc
    // (AllocLocal returned the original pointer, so there is nothing to unmap)
}
  1011. #if ENABLE_OOP_NATIVE_CODEGEN
  1012. template<>
  1013. char *
  1014. CodePageAllocators<SectionAllocWrapper, PreReservedSectionAllocWrapper>::AllocLocal(char * remoteAddr, size_t size, void * segment)
  1015. {
  1016. AutoCriticalSection autoLock(&this->cs);
  1017. Assert(segment);
  1018. LPVOID address = nullptr;
  1019. if (IsPreReservedSegment(segment))
  1020. {
  1021. address = ((SegmentBase<PreReservedSectionAllocWrapper>*)segment)->GetAllocator()->GetVirtualAllocator()->AllocLocal(remoteAddr, size);
  1022. }
  1023. else
  1024. {
  1025. address = ((SegmentBase<SectionAllocWrapper>*)segment)->GetAllocator()->GetVirtualAllocator()->AllocLocal(remoteAddr, size);
  1026. }
  1027. return (char*)address;
  1028. }
  1029. template<>
  1030. void
  1031. CodePageAllocators<SectionAllocWrapper, PreReservedSectionAllocWrapper>::FreeLocal(char * localAddr, void * segment)
  1032. {
  1033. AutoCriticalSection autoLock(&this->cs);
  1034. Assert(segment);
  1035. if (IsPreReservedSegment(segment))
  1036. {
  1037. ((SegmentBase<PreReservedSectionAllocWrapper>*)segment)->GetAllocator()->GetVirtualAllocator()->FreeLocal(localAddr);
  1038. }
  1039. else
  1040. {
  1041. ((SegmentBase<SectionAllocWrapper>*)segment)->GetAllocator()->GetVirtualAllocator()->FreeLocal(localAddr);
  1042. }
  1043. }
  1044. #endif
  1045. } // namespace CustomHeap
  1046. } // namespace Memory