CustomHeap.cpp 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "CommonMemoryPch.h"
  6. #if ENABLE_NATIVE_CODEGEN || DYNAMIC_INTERPRETER_THUNK
  7. #include "Memory/XDataAllocator.h"
  8. #if defined(_M_ARM)
  9. #include <wchar.h>
  10. #endif
  11. #include "CustomHeap.h"
  12. #if PDATA_ENABLED && defined(_WIN32)
  13. #include "Core/DelayLoadLibrary.h"
  14. #include <malloc.h>
  15. #endif
  16. namespace Memory
  17. {
  18. namespace CustomHeap
  19. {
  20. #pragma region "Constructor and Destructor"
  21. template<typename TAlloc, typename TPreReservedAlloc>
  22. Heap<TAlloc, TPreReservedAlloc>::Heap(ArenaAllocator * alloc, CodePageAllocators<TAlloc, TPreReservedAlloc> * codePageAllocators, HANDLE processHandle):
  23. auxiliaryAllocator(alloc),
  24. codePageAllocators(codePageAllocators),
  25. lastSecondaryAllocStateChangedCount(0),
  26. processHandle(processHandle)
  27. #if DBG_DUMP
  28. , freeObjectSize(0)
  29. , totalAllocationSize(0)
  30. , allocationsSinceLastCompact(0)
  31. , freesSinceLastCompact(0)
  32. #endif
  33. #if DBG
  34. , inDtor(false)
  35. #endif
  36. {
  37. for (int i = 0; i < NumBuckets; i++)
  38. {
  39. this->buckets[i].Reset();
  40. }
  41. }
template<typename TAlloc, typename TPreReservedAlloc>
Heap<TAlloc, TPreReservedAlloc>::~Heap()
{
// Destructor: releases everything this heap owns via FreeAll.
#if DBG
// Set before FreeAll so debug-only asserts (e.g. in
// FreeDecommittedLargeObjects) can verify they run only during teardown.
inDtor = true;
#endif
this->FreeAll();
}
  50. #pragma endregion
  51. #pragma region "Public routines"
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::FreeAll()
{
// Release everything the heap owns back to the code page allocators.
// A single lock acquisition covers all four phases below.
AutoCriticalSection autoLock(&this->codePageAllocators->cs);
// Committed pages/objects first, then the decommitted lists.
FreeBuckets(false);
FreeLargeObjects();
FreeDecommittedBuckets();
FreeDecommittedLargeObjects();
}
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::Free(__in Allocation* object)
{
// Free a single allocation. Large objects (whole pages) and small
// bucketed objects take different paths; memory that is already
// decommitted is left alone here (the decommitted lists are cleaned
// up separately).
Assert(object != nullptr);
if (object == nullptr)
{
return;
}
// Recompute the bucket from the allocation's size; LargeObjectList
// means the allocation owns whole pages rather than a chunk of a page.
BucketId bucket = (BucketId) GetBucketForSize(object->size);
if (bucket == BucketId::LargeObjectList)
{
#if PDATA_ENABLED
// Release the pdata/xdata secondary allocation first, if still live.
if(!object->xdata.IsFreed())
{
FreeXdata(&object->xdata, object->largeObjectAllocation.segment);
}
#endif
if (!object->largeObjectAllocation.isDecommitted)
{
FreeLargeObject(object);
}
return;
}
#if PDATA_ENABLED
// Small-object path: the xdata's segment comes from the owning page.
if(!object->xdata.IsFreed())
{
FreeXdata(&object->xdata, object->page->segment);
}
#endif
if (!object->page->isDecommitted)
{
FreeAllocation(object);
}
}
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::DecommitAll()
{
// This function doesn't really touch the page allocator data structure.
// DecommitPages is merely a wrapper for VirtualFree
// So no need to take the critical section to synchronize
// Decommit every large-object allocation and move it to the
// decommitted list; the address range stays reserved but unbacked.
DListBase<Allocation>::EditingIterator i(&this->largeObjectAllocations);
while (i.Next())
{
Allocation& allocation = i.Data();
Assert(!allocation.largeObjectAllocation.isDecommitted);
this->codePageAllocators->DecommitPages(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
i.MoveCurrentTo(&this->decommittedLargeObjects);
allocation.largeObjectAllocation.isDecommitted = true;
}
// Do the same for every bucketed page, both full and partially used.
for (int bucket = 0; bucket < BucketId::NumBuckets; bucket++)
{
FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(this->fullPages[bucket]), bucketIter1)
{
Assert(page.inFullList);
this->codePageAllocators->DecommitPages(page.address, 1 /* pageCount */, page.segment);
bucketIter1.MoveCurrentTo(&(this->decommittedPages));
page.isDecommitted = true;
}
NEXT_DLISTBASE_ENTRY_EDITING;
FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(this->buckets[bucket]), bucketIter2)
{
Assert(!page.inFullList);
this->codePageAllocators->DecommitPages(page.address, 1 /* pageCount */, page.segment);
bucketIter2.MoveCurrentTo(&(this->decommittedPages));
page.isDecommitted = true;
}
NEXT_DLISTBASE_ENTRY_EDITING;
}
}
  130. template<typename TAlloc, typename TPreReservedAlloc>
  131. bool Heap<TAlloc, TPreReservedAlloc>::IsInHeap(DListBase<Page> const& bucket, __in void * address)
  132. {
  133. DListBase<Page>::Iterator i(&bucket);
  134. while (i.Next())
  135. {
  136. Page& page = i.Data();
  137. if (page.address <= address && address < page.address + AutoSystemInfo::PageSize)
  138. {
  139. return true;
  140. }
  141. }
  142. return false;
  143. }
  144. template<typename TAlloc, typename TPreReservedAlloc>
  145. bool Heap<TAlloc, TPreReservedAlloc>::IsInHeap(DListBase<Page> const buckets[NumBuckets], __in void * address)
  146. {
  147. for (uint i = 0; i < NumBuckets; i++)
  148. {
  149. if (this->IsInHeap(buckets[i], address))
  150. {
  151. return true;
  152. }
  153. }
  154. return false;
  155. }
  156. template<typename TAlloc, typename TPreReservedAlloc>
  157. bool Heap<TAlloc, TPreReservedAlloc>::IsInHeap(DListBase<Allocation> const& allocations, __in void *address)
  158. {
  159. DListBase<Allocation>::Iterator i(&allocations);
  160. while (i.Next())
  161. {
  162. Allocation& allocation = i.Data();
  163. if (allocation.address <= address && address < allocation.address + allocation.size)
  164. {
  165. return true;
  166. }
  167. }
  168. return false;
  169. }
  170. template<typename TAlloc, typename TPreReservedAlloc>
  171. bool Heap<TAlloc, TPreReservedAlloc>::IsInHeap(__in void* address)
  172. {
  173. return IsInHeap(buckets, address) || IsInHeap(fullPages, address) || IsInHeap(largeObjectAllocations, address);
  174. }
  175. template<typename TAlloc, typename TPreReservedAlloc>
  176. Page * Heap<TAlloc, TPreReservedAlloc>::GetExistingPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment)
  177. {
  178. // TODO: this can get a non-prereserved page even if you want one
  179. if (!this->buckets[bucket].Empty())
  180. {
  181. Assert(!this->buckets[bucket].Head().inFullList);
  182. return &this->buckets[bucket].Head();
  183. }
  184. return FindPageToSplit(bucket, canAllocInPreReservedHeapPageSegment);
  185. }
  186. /*
  187. * Algorithm:
  188. * - Find bucket
  189. * - Check bucket pages - if it has enough free space, allocate that chunk
  190. * - Check pages in bigger buckets - if that has enough space, split that page and allocate from that chunk
  191. * - Allocate new page
  192. */
  193. template<typename TAlloc, typename TPreReservedAlloc>
  194. Allocation* Heap<TAlloc, TPreReservedAlloc>::Alloc(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
  195. {
  196. Assert(bytes > 0);
  197. Assert((codePageAllocators->AllocXdata() || pdataCount == 0) && (!codePageAllocators->AllocXdata() || pdataCount > 0));
  198. Assert(pdataCount > 0 || (pdataCount == 0 && xdataSize == 0));
  199. // Round up to power of two to allocate, and figure out which bucket to allocate in
  200. int _;
  201. size_t bytesToAllocate = PowerOf2Policy::GetSize(bytes, &_ /* modFunctionIndex */);
  202. BucketId bucket = (BucketId) GetBucketForSize(bytesToAllocate);
  203. if (bucket == BucketId::LargeObjectList)
  204. {
  205. return AllocLargeObject(bytes, pdataCount, xdataSize, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
  206. }
  207. VerboseHeapTrace(_u("Bucket is %d\n"), bucket);
  208. VerboseHeapTrace(_u("Requested: %d bytes. Allocated: %d bytes\n"), bytes, bytesToAllocate);
  209. do
  210. {
  211. Page* page = GetExistingPage(bucket, canAllocInPreReservedHeapPageSegment);
  212. if (page == nullptr && UpdateFullPages())
  213. {
  214. page = GetExistingPage(bucket, canAllocInPreReservedHeapPageSegment);
  215. }
  216. if (page == nullptr)
  217. {
  218. page = AllocNewPage(bucket, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
  219. }
  220. else if (!canAllocInPreReservedHeapPageSegment && isAnyJittedCode)
  221. {
  222. *isAllJITCodeInPreReservedRegion = false;
  223. }
  224. // Out of memory
  225. if (page == nullptr)
  226. {
  227. return nullptr;
  228. }
  229. #if defined(DBG)
  230. MEMORY_BASIC_INFORMATION memBasicInfo;
  231. size_t resultBytes = VirtualQueryEx(this->processHandle, page->address, &memBasicInfo, sizeof(memBasicInfo));
  232. if (resultBytes == 0)
  233. {
  234. MemoryOperationLastError::RecordLastError();
  235. }
  236. else
  237. {
  238. Assert(memBasicInfo.Protect == PAGE_EXECUTE_READ);
  239. }
  240. #endif
  241. Allocation* allocation = nullptr;
  242. if (AllocInPage(page, bytesToAllocate, pdataCount, xdataSize, &allocation))
  243. {
  244. return allocation;
  245. }
  246. } while (true);
  247. }
  248. template<typename TAlloc, typename TPreReservedAlloc>
  249. BOOL Heap<TAlloc, TPreReservedAlloc>::ProtectAllocationWithExecuteReadWrite(Allocation *allocation, __in_opt char* addressInPage)
  250. {
  251. DWORD protectFlags = 0;
  252. if (AutoSystemInfo::Data.IsCFGEnabled())
  253. {
  254. protectFlags = PAGE_EXECUTE_RW_TARGETS_NO_UPDATE;
  255. }
  256. else
  257. {
  258. protectFlags = PAGE_EXECUTE_READWRITE;
  259. }
  260. return this->ProtectAllocation(allocation, protectFlags, PAGE_EXECUTE_READ, addressInPage);
  261. }
  262. template<typename TAlloc, typename TPreReservedAlloc>
  263. BOOL Heap<TAlloc, TPreReservedAlloc>::ProtectAllocationWithExecuteReadOnly(__in Allocation *allocation, __in_opt char* addressInPage)
  264. {
  265. DWORD protectFlags = 0;
  266. if (AutoSystemInfo::Data.IsCFGEnabled())
  267. {
  268. protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
  269. }
  270. else
  271. {
  272. protectFlags = PAGE_EXECUTE_READ;
  273. }
  274. return this->ProtectAllocation(allocation, protectFlags, PAGE_EXECUTE_READWRITE, addressInPage);
  275. }
template<typename TAlloc, typename TPreReservedAlloc>
BOOL Heap<TAlloc, TPreReservedAlloc>::ProtectAllocation(__in Allocation* allocation, DWORD dwVirtualProtectFlags, DWORD desiredOldProtectFlag, __in_opt char* addressInPage)
{
// Allocate at the page level so that our protections don't
// transcend allocation page boundaries. Here, allocation->address is page
// aligned if the object is a large object allocation. If it isn't, in the else
// branch of the following if statement, we set it to the allocation's page's
// address. This ensures that the address being protected is always page aligned
Assert(allocation != nullptr);
Assert(allocation->isAllocationUsed);
Assert(addressInPage == nullptr || (addressInPage >= allocation->address && addressInPage < (allocation->address + allocation->size)));
char* address = allocation->address;
size_t pageCount;
void * segment;
if (allocation->IsLargeAllocation())
{
#if DBG_DUMP || defined(RECYCLER_TRACE)
if (Js::Configuration::Global.flags.IsEnabled(Js::TraceProtectPagesFlag))
{
Output::Print(_u("Protecting large allocation\n"));
}
#endif
segment = allocation->largeObjectAllocation.segment;
if (addressInPage != nullptr)
{
// Caller asked for a specific address: reprotect just the single
// page containing it (round the address down to its page start).
if (addressInPage >= allocation->address + AutoSystemInfo::PageSize)
{
size_t page = (addressInPage - allocation->address) / AutoSystemInfo::PageSize;
address = allocation->address + (page * AutoSystemInfo::PageSize);
}
pageCount = 1;
}
else
{
// No specific page requested: reprotect the whole allocation.
pageCount = allocation->GetPageCount();
}
VerboseHeapTrace(_u("Protecting 0x%p with 0x%x\n"), address, dwVirtualProtectFlags);
return this->codePageAllocators->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
}
else
{
#if DBG_DUMP || defined(RECYCLER_TRACE)
if (Js::Configuration::Global.flags.IsEnabled(Js::TraceProtectPagesFlag))
{
Output::Print(_u("Protecting small allocation\n"));
}
#endif
// Small (bucketed) allocations share a page; protect that whole page,
// starting from the page's (page-aligned) address.
segment = allocation->page->segment;
address = allocation->page->address;
pageCount = 1;
VerboseHeapTrace(_u("Protecting 0x%p with 0x%x\n"), address, dwVirtualProtectFlags);
return this->codePageAllocators->ProtectPages(address, pageCount, segment, dwVirtualProtectFlags, desiredOldProtectFlag);
}
}
  330. #pragma endregion
  331. #pragma region "Large object methods"
  332. template<typename TAlloc, typename TPreReservedAlloc>
  333. Allocation* Heap<TAlloc, TPreReservedAlloc>::AllocLargeObject(size_t bytes, ushort pdataCount, ushort xdataSize, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
  334. {
  335. size_t pages = GetNumPagesForSize(bytes);
  336. if (pages == 0)
  337. {
  338. return nullptr;
  339. }
  340. void * segment = nullptr;
  341. char* address = nullptr;
  342. #if PDATA_ENABLED
  343. XDataAllocation xdata;
  344. #endif
  345. {
  346. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  347. address = this->codePageAllocators->Alloc(&pages, &segment, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
  348. // Out of memory
  349. if (address == nullptr)
  350. {
  351. return nullptr;
  352. }
  353. char* localAddr = this->codePageAllocators->AllocLocal(address, pages*AutoSystemInfo::PageSize, segment);
  354. if (!localAddr)
  355. {
  356. return nullptr;
  357. }
  358. FillDebugBreak((BYTE*)localAddr, pages*AutoSystemInfo::PageSize);
  359. this->codePageAllocators->FreeLocal(localAddr, segment);
  360. if (this->processHandle == GetCurrentProcess())
  361. {
  362. DWORD protectFlags = 0;
  363. if (AutoSystemInfo::Data.IsCFGEnabled())
  364. {
  365. protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
  366. }
  367. else
  368. {
  369. protectFlags = PAGE_EXECUTE_READ;
  370. }
  371. this->codePageAllocators->ProtectPages(address, pages, segment, protectFlags /*dwVirtualProtectFlags*/, PAGE_READWRITE /*desiredOldProtectFlags*/);
  372. }
  373. #if PDATA_ENABLED
  374. if(pdataCount > 0)
  375. {
  376. if (!this->codePageAllocators->AllocSecondary(segment, (ULONG_PTR) address, bytes, pdataCount, xdataSize, &xdata))
  377. {
  378. this->codePageAllocators->Release(address, pages, segment);
  379. return nullptr;
  380. }
  381. }
  382. #endif
  383. }
  384. #if defined(DBG)
  385. MEMORY_BASIC_INFORMATION memBasicInfo;
  386. size_t resultBytes = VirtualQueryEx(this->processHandle, address, &memBasicInfo, sizeof(memBasicInfo));
  387. if (resultBytes == 0)
  388. {
  389. MemoryOperationLastError::RecordLastError();
  390. }
  391. else
  392. {
  393. Assert(memBasicInfo.Protect == PAGE_EXECUTE_READ);
  394. }
  395. #endif
  396. Allocation* allocation = this->largeObjectAllocations.PrependNode(this->auxiliaryAllocator);
  397. if (allocation == nullptr)
  398. {
  399. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  400. this->codePageAllocators->Release(address, pages, segment);
  401. #if PDATA_ENABLED
  402. if(pdataCount > 0)
  403. {
  404. this->codePageAllocators->ReleaseSecondary(xdata, segment);
  405. }
  406. #endif
  407. return nullptr;
  408. }
  409. allocation->address = address;
  410. allocation->largeObjectAllocation.segment = segment;
  411. allocation->largeObjectAllocation.isDecommitted = false;
  412. allocation->size = pages * AutoSystemInfo::PageSize;
  413. allocation->thunkAddress = 0;
  414. #if PDATA_ENABLED
  415. allocation->xdata = xdata;
  416. #endif
  417. return allocation;
  418. }
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::FreeDecommittedLargeObjects()
{
// CodePageAllocators is locked in FreeAll
// Only runs during heap teardown (asserted below); releases large objects
// whose pages were previously decommitted.
Assert(inDtor);
FOREACH_DLISTBASE_ENTRY_EDITING(Allocation, allocation, &this->decommittedLargeObjects, largeObjectIter)
{
VerboseHeapTrace(_u("Decommitting large object at address 0x%p of size %u\n"), allocation.address, allocation.size);
this->codePageAllocators->ReleaseDecommitted(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
largeObjectIter.RemoveCurrent(this->auxiliaryAllocator);
}
NEXT_DLISTBASE_ENTRY_EDITING;
}
// Called during Free (while shutting down).
template<typename TAlloc, typename TPreReservedAlloc>
DWORD Heap<TAlloc, TPreReservedAlloc>::EnsurePageWriteable(Page* page)
{
// Make the whole page plain read-write (execute is dropped).
return EnsurePageReadWrite<PAGE_READWRITE>(page);
}
// This gets called when freeing the whole page.
template<typename TAlloc, typename TPreReservedAlloc>
DWORD Heap<TAlloc, TPreReservedAlloc>::EnsureAllocationWriteable(Allocation* allocation)
{
// Whole-page free: plain read-write suffices, execute is not preserved.
return EnsureAllocationReadWrite<PAGE_READWRITE>(allocation);
}
  444. // this get called when only freeing a part in the page
  445. template<typename TAlloc, typename TPreReservedAlloc>
  446. DWORD Heap<TAlloc, TPreReservedAlloc>::EnsureAllocationExecuteWriteable(Allocation* allocation)
  447. {
  448. if (AutoSystemInfo::Data.IsCFGEnabled())
  449. {
  450. return EnsureAllocationReadWrite<PAGE_EXECUTE_RW_TARGETS_NO_UPDATE>(allocation);
  451. }
  452. else
  453. {
  454. return EnsureAllocationReadWrite<PAGE_EXECUTE_READWRITE>(allocation);
  455. }
  456. }
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::FreeLargeObjects()
{
// Release every live large-object allocation back to the page allocators
// and drop its tracking node.
AutoCriticalSection autoLock(&this->codePageAllocators->cs);
FOREACH_DLISTBASE_ENTRY_EDITING(Allocation, allocation, &this->largeObjectAllocations, largeObjectIter)
{
// Make the pages writable before handing them back.
EnsureAllocationWriteable(&allocation);
#if PDATA_ENABLED
// Secondary (xdata) allocations must already have been freed.
Assert(allocation.xdata.IsFreed());
#endif
this->codePageAllocators->Release(allocation.address, allocation.GetPageCount(), allocation.largeObjectAllocation.segment);
largeObjectIter.RemoveCurrent(this->auxiliaryAllocator);
}
NEXT_DLISTBASE_ENTRY_EDITING;
}
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::FreeLargeObject(Allocation* allocation)
{
// Release a single large-object allocation and remove its tracking node.
AutoCriticalSection autoLock(&this->codePageAllocators->cs);
// Make the pages writable before handing them back.
EnsureAllocationWriteable(allocation);
#if PDATA_ENABLED
// The secondary (xdata) allocation must already have been freed.
Assert(allocation->xdata.IsFreed());
#endif
this->codePageAllocators->Release(allocation->address, allocation->GetPageCount(), allocation->largeObjectAllocation.segment);
this->largeObjectAllocations.RemoveElement(this->auxiliaryAllocator, allocation);
}
  483. #pragma endregion
  484. #pragma region "Page methods"
  485. template<typename TAlloc, typename TPreReservedAlloc>
  486. bool Heap<TAlloc, TPreReservedAlloc>::AllocInPage(Page* page, size_t bytes, ushort pdataCount, ushort xdataSize, Allocation ** allocationOut)
  487. {
  488. Allocation * allocation = AnewNoThrowStruct(this->auxiliaryAllocator, Allocation);
  489. if (allocation == nullptr)
  490. {
  491. return true;
  492. }
  493. Assert(Math::IsPow2((int32)bytes));
  494. uint length = GetChunkSizeForBytes(bytes);
  495. BVIndex index = GetFreeIndexForPage(page, bytes);
  496. if (index == BVInvalidIndex)
  497. {
  498. CustomHeap_BadPageState_unrecoverable_error((ULONG_PTR)this);
  499. return false;
  500. }
  501. char* address = page->address + Page::Alignment * index;
  502. #if PDATA_ENABLED
  503. XDataAllocation xdata;
  504. if(pdataCount > 0)
  505. {
  506. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  507. if (this->ShouldBeInFullList(page))
  508. {
  509. Adelete(this->auxiliaryAllocator, allocation);
  510. // If we run out of XData space with the segment, move the page to the full page list, and return false to try the next page.
  511. BucketId bucket = page->currentBucket;
  512. VerboseHeapTrace(_u("Moving page from bucket %d to full list\n"), bucket);
  513. Assert(!page->inFullList);
  514. this->buckets[bucket].MoveElementTo(page, &this->fullPages[bucket]);
  515. page->inFullList = true;
  516. return false;
  517. }
  518. if (!this->codePageAllocators->AllocSecondary(page->segment, (ULONG_PTR)address, bytes, pdataCount, xdataSize, &xdata))
  519. {
  520. Adelete(this->auxiliaryAllocator, allocation);
  521. return true;
  522. }
  523. }
  524. #endif
  525. #if DBG
  526. allocation->isAllocationUsed = false;
  527. allocation->isNotExecutableBecauseOOM = false;
  528. #endif
  529. allocation->page = page;
  530. allocation->size = bytes;
  531. allocation->address = address;
  532. allocation->thunkAddress = 0;
  533. #if DBG_DUMP
  534. this->allocationsSinceLastCompact += bytes;
  535. this->freeObjectSize -= bytes;
  536. #endif
  537. //Section of the Page should already be freed.
  538. if (!page->freeBitVector.TestRange(index, length))
  539. {
  540. CustomHeap_BadPageState_unrecoverable_error((ULONG_PTR)this);
  541. return false;
  542. }
  543. //Section of the Page should already be freed.
  544. if (!page->freeBitVector.TestRange(index, length))
  545. {
  546. CustomHeap_BadPageState_unrecoverable_error((ULONG_PTR)this);
  547. return false;
  548. }
  549. page->freeBitVector.ClearRange(index, length);
  550. VerboseHeapTrace(_u("ChunkSize: %d, Index: %d, Free bit vector in page: "), length, index);
  551. #if VERBOSE_HEAP
  552. page->freeBitVector.DumpWord();
  553. #endif
  554. VerboseHeapTrace(_u("\n"));
  555. if (this->ShouldBeInFullList(page))
  556. {
  557. BucketId bucket = page->currentBucket;
  558. VerboseHeapTrace(_u("Moving page from bucket %d to full list\n"), bucket);
  559. Assert(!page->inFullList);
  560. this->buckets[bucket].MoveElementTo(page, &this->fullPages[bucket]);
  561. page->inFullList = true;
  562. }
  563. #if PDATA_ENABLED
  564. allocation->xdata = xdata;
  565. #endif
  566. *allocationOut = allocation;
  567. return true;
  568. }
  569. template<typename TAlloc, typename TPreReservedAlloc>
  570. Page* Heap<TAlloc, TPreReservedAlloc>::AllocNewPage(BucketId bucket, bool canAllocInPreReservedHeapPageSegment, bool isAnyJittedCode, _Inout_ bool* isAllJITCodeInPreReservedRegion)
  571. {
  572. void* pageSegment = nullptr;
  573. char* address = nullptr;
  574. {
  575. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  576. address = this->codePageAllocators->AllocPages(1, &pageSegment, canAllocInPreReservedHeapPageSegment, isAnyJittedCode, isAllJITCodeInPreReservedRegion);
  577. if (address == nullptr)
  578. {
  579. return nullptr;
  580. }
  581. }
  582. char* localAddr = this->codePageAllocators->AllocLocal(address, AutoSystemInfo::PageSize, pageSegment);
  583. if (!localAddr)
  584. {
  585. return nullptr;
  586. }
  587. FillDebugBreak((BYTE*)localAddr, AutoSystemInfo::PageSize);
  588. this->codePageAllocators->FreeLocal(localAddr, pageSegment);
  589. DWORD protectFlags = 0;
  590. if (AutoSystemInfo::Data.IsCFGEnabled())
  591. {
  592. protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
  593. }
  594. else
  595. {
  596. protectFlags = PAGE_EXECUTE_READ;
  597. }
  598. //Change the protection of the page to Read-Only Execute, before adding it to the bucket list.
  599. this->codePageAllocators->ProtectPages(address, 1, pageSegment, protectFlags, PAGE_READWRITE);
  600. // Switch to allocating on a list of pages so we can do leak tracking later
  601. VerboseHeapTrace(_u("Allocing new page in bucket %d\n"), bucket);
  602. Page* page = this->buckets[bucket].PrependNode(this->auxiliaryAllocator, address, pageSegment, bucket);
  603. if (page == nullptr)
  604. {
  605. AutoCriticalSection autoLock(&this->codePageAllocators->cs);
  606. this->codePageAllocators->ReleasePages(address, 1, pageSegment);
  607. return nullptr;
  608. }
  609. #if DBG_DUMP
  610. this->totalAllocationSize += AutoSystemInfo::PageSize;
  611. this->freeObjectSize += AutoSystemInfo::PageSize;
  612. #endif
  613. return page;
  614. }
template<typename TAlloc, typename TPreReservedAlloc>
Page* Heap<TAlloc, TPreReservedAlloc>::AddPageToBucket(Page* page, BucketId bucket, bool wasFull)
{
// Move 'page' into the free list of 'bucket', coming either from a full
// list (wasFull == true) or from another bucket's free list. Updates the
// page's currentBucket/inFullList bookkeeping and returns the page.
Assert(bucket > BucketId::InvalidBucket && bucket < BucketId::NumBuckets);
BucketId oldBucket = page->currentBucket;
page->currentBucket = bucket;
if (wasFull)
{
#pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
Assert(page->inFullList);
this->fullPages[oldBucket].MoveElementTo(page, &this->buckets[bucket]);
page->inFullList = false;
}
else
{
Assert(!page->inFullList);
#pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
this->buckets[oldBucket].MoveElementTo(page, &this->buckets[bucket]);
}
return page;
}
  636. /*
  637. * This method goes through the buckets greater than the target bucket
  638. * and if the higher bucket has a page with enough free space to allocate
  639. * something in the smaller bucket, then we bring the page to the smaller
  640. * bucket.
  641. * Note that if we allocate something from a page in the given bucket,
  642. * and then that page is split into a lower bucket, freeing is still not
  643. * a problem since the larger allocation is a multiple of the smaller one.
  644. * This gets more complicated if we can coalesce buckets. In that case,
  645. * we need to make sure that if a page was coalesced, and an allocation
  646. * pre-coalescing was freed, the page would need to get split upon free
  647. * to ensure correctness. For now, we've skipped implementing coalescing.
  648. * findPreReservedHeapPages - true, if we need to find pages only belonging to PreReservedHeapSegment
  649. */
  650. template<typename TAlloc, typename TPreReservedAlloc>
  651. Page* Heap<TAlloc, TPreReservedAlloc>::FindPageToSplit(BucketId targetBucket, bool findPreReservedHeapPages)
  652. {
  653. for (BucketId b = (BucketId)(targetBucket + 1); b < BucketId::NumBuckets; b = (BucketId) (b + 1))
  654. {
  655. #pragma prefast(suppress: __WARNING_UNCHECKED_LOWER_BOUND_FOR_ENUMINDEX, "targetBucket is always in range >= SmallObjectList, but an __in_range doesn't fix the warning.");
  656. FOREACH_DLISTBASE_ENTRY_EDITING(Page, pageInBucket, &this->buckets[b], bucketIter)
  657. {
  658. Assert(!pageInBucket.inFullList);
  659. if (findPreReservedHeapPages && !this->codePageAllocators->IsPreReservedSegment(pageInBucket.segment))
  660. {
  661. //Find only pages that are pre-reserved using preReservedHeapPageAllocator
  662. continue;
  663. }
  664. if (pageInBucket.CanAllocate(targetBucket))
  665. {
  666. Page* page = &pageInBucket;
  667. if (findPreReservedHeapPages)
  668. {
  669. VerboseHeapTrace(_u("PRE-RESERVE: Found page for splitting in Pre Reserved Segment\n"));
  670. }
  671. VerboseHeapTrace(_u("Found page to split. Moving from bucket %d to %d\n"), b, targetBucket);
  672. return AddPageToBucket(page, targetBucket);
  673. }
  674. }
  675. NEXT_DLISTBASE_ENTRY_EDITING;
  676. }
  677. return nullptr;
  678. }
  679. template<typename TAlloc, typename TPreReservedAlloc>
  680. BVIndex Heap<TAlloc, TPreReservedAlloc>::GetIndexInPage(__in Page* page, __in char* address)
  681. {
  682. Assert(page->address <= address && address < page->address + AutoSystemInfo::PageSize);
  683. return (BVIndex) ((address - page->address) / Page::Alignment);
  684. }
  685. #pragma endregion
  686. /**
  687. * Free List methods
  688. */
  689. #pragma region "Freeing methods"
// Frees one allocation back to its owning page. Detects double-frees, recycles
// pages parked in the full list, fast-paths page-sized objects by releasing
// the whole page, and restores execute-only protection when the page still
// holds other live allocations.
// NOTE(review): the return value appears to mean "page still exists/usable"
// rather than strict success — it is false both on error and when the page
// became empty and was removed. Confirm against callers.
template<typename TAlloc, typename TPreReservedAlloc>
bool Heap<TAlloc, TPreReservedAlloc>::FreeAllocation(Allocation* object)
{
    Page* page = object->page;
    void* segment = page->segment;
    size_t pageSize = AutoSystemInfo::PageSize;
    // Number of bit-vector chunks this allocation occupies, and its first bit.
    unsigned int length = GetChunkSizeForBytes(object->size);
    BVIndex index = GetIndexInPage(page, object->address);
    // Snapshot taken BEFORE the bits are set free; used below to detect the
    // "page about to become empty" case.
    uint freeBitsCount = page->freeBitVector.Count();
    // Make sure that the section under interest or the whole page has not already been freed
    if (page->IsEmpty() || page->freeBitVector.TestAnyInRange(index, length))
    {
        // Double-free / corrupted state: raise an unrecoverable failure.
        CustomHeap_BadPageState_unrecoverable_error((ULONG_PTR)this);
        return false;
    }
    if (page->inFullList)
    {
        VerboseHeapTrace(_u("Recycling page 0x%p because address 0x%p of size %d was freed\n"), page->address, object->address, object->size);
        // If the object being freed is equal to the page size, we're
        // going to remove it anyway so don't add it to a bucket
        if (object->size != pageSize)
        {
            // Move the page out of the full list back into its bucket.
            AddPageToBucket(page, page->currentBucket, true);
        }
        else
        {
            // Page-sized object: release the entire page directly.
            EnsureAllocationWriteable(object);
            // Fill the old buffer with debug breaks
            char* localAddr = this->codePageAllocators->AllocLocal(object->address, object->size, page->segment);
            if (!localAddr)
            {
                MemoryOperationLastError::RecordError(JSERR_FatalMemoryExhaustion);
                return false;
            }
            FillDebugBreak((BYTE*)localAddr, object->size);
            this->codePageAllocators->FreeLocal(localAddr, page->segment);
            // Capture the address before the Page bookkeeping is destroyed.
            void* pageAddress = page->address;
            this->fullPages[page->currentBucket].RemoveElement(this->auxiliaryAllocator, page);
            // The page is not in any bucket- just update the stats, free the allocation
            // and dump the page- we don't need to update free object size since the object
            // size is equal to the page size so they cancel each other out
#if DBG_DUMP
            this->totalAllocationSize -= pageSize;
#endif
            this->auxiliaryAllocator->Free(object, sizeof(Allocation));
            {
                // ReleasePages requires the code page allocator lock.
                AutoCriticalSection autoLock(&this->codePageAllocators->cs);
                this->codePageAllocators->ReleasePages(pageAddress, 1, segment);
            }
            VerboseHeapTrace(_u("FastPath: freeing page-sized object directly\n"));
            return true;
        }
    }
    // If the page is about to become empty then we should not need
    // to set it to executable and we don't expect to restore the
    // previous protection settings.
    if (freeBitsCount == BVUnit::BitsPerWord - length)
    {
        EnsureAllocationWriteable(object);
        FreeAllocationHelper(object, index, length);
        Assert(page->IsEmpty());
        // The now-empty page is removed from its bucket entirely.
        this->buckets[page->currentBucket].RemoveElement(this->auxiliaryAllocator, page);
        return false;
    }
    else
    {
        // Other allocations remain on the page: make it writable+executable,
        // free the range, then drop write access again.
        EnsureAllocationExecuteWriteable(object);
        FreeAllocationHelper(object, index, length);
        // after freeing part of the page, the page should be in PAGE_EXECUTE_READWRITE protection, and turning to PAGE_EXECUTE_READ (always with TARGETS_NO_UPDATE state)
        DWORD protectFlags = 0;
        if (AutoSystemInfo::Data.IsCFGEnabled())
        {
            protectFlags = PAGE_EXECUTE_RO_TARGETS_NO_UPDATE;
        }
        else
        {
            protectFlags = PAGE_EXECUTE_READ;
        }
        this->codePageAllocators->ProtectPages(page->address, 1, segment, protectFlags, PAGE_EXECUTE_READWRITE);
        return true;
    }
}
// Shared tail of the free path: scribbles debug-break bytes over the freed
// range, marks its bits free in the page's bit vector, updates DBG stats, and
// releases the Allocation bookkeeping structure.
// NOTE(review): when AllocLocal fails, this records OOM and returns WITHOUT
// freeing the bits or the Allocation — the page state is left unchanged.
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::FreeAllocationHelper(Allocation* object, BVIndex index, uint length)
{
    Page* page = object->page;
    // Fill the old buffer with debug breaks
    char* localAddr = this->codePageAllocators->AllocLocal(object->address, object->size, page->segment);
    if (localAddr)
    {
        FillDebugBreak((BYTE*)localAddr, object->size);
        this->codePageAllocators->FreeLocal(localAddr, page->segment);
    }
    else
    {
        MemoryOperationLastError::RecordError(JSERR_FatalMemoryExhaustion);
        return;
    }
    VerboseHeapTrace(_u("Setting %d bits starting at bit %d, Free bit vector in page was "), length, index);
#if VERBOSE_HEAP
    page->freeBitVector.DumpWord();
#endif
    VerboseHeapTrace(_u("\n"));
    // Mark the allocation's chunks as free again.
    page->freeBitVector.SetRange(index, length);
    VerboseHeapTrace(_u("Free bit vector in page: "), length, index);
#if VERBOSE_HEAP
    page->freeBitVector.DumpWord();
#endif
    VerboseHeapTrace(_u("\n"));
#if DBG_DUMP
    this->freeObjectSize += object->size;
    this->freesSinceLastCompact += object->size;
#endif
    this->auxiliaryAllocator->Free(object, sizeof(Allocation));
// Destructor-only cleanup: hands every page on the decommitted list back to
// the code page allocator's decommit tracking, then drops the list entries.
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::FreeDecommittedBuckets()
{
    // CodePageAllocators is locked in FreeAll
    Assert(inDtor);
    FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &this->decommittedPages, iter)
    {
        this->codePageAllocators->TrackDecommittedPages(page.address, 1, page.segment);
        // Remove the Page record itself; its backing memory stays decommitted.
        iter.RemoveCurrent(this->auxiliaryAllocator);
    }
    NEXT_DLISTBASE_ENTRY_EDITING;
}
  817. template<typename TAlloc, typename TPreReservedAlloc>
  818. void Heap<TAlloc, TPreReservedAlloc>::FreePage(Page* page)
  819. {
  820. // CodePageAllocators is locked in FreeAll
  821. Assert(inDtor);
  822. DWORD pageSize = AutoSystemInfo::PageSize;
  823. EnsurePageWriteable(page);
  824. size_t freeSpace = page->freeBitVector.Count() * Page::Alignment;
  825. VerboseHeapTrace(_u("Removing page in bucket %d, freeSpace: %d\n"), page->currentBucket, freeSpace);
  826. this->codePageAllocators->ReleasePages(page->address, 1, page->segment);
  827. #if DBG_DUMP
  828. this->freeObjectSize -= freeSpace;
  829. this->totalAllocationSize -= pageSize;
  830. #endif
  831. }
// Frees the pages of a single bucket list. When freeOnlyEmptyPages is set,
// pages that still contain live allocations are skipped; otherwise every page
// is released. Destructor-only (lock held by FreeAll).
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::FreeBucket(DListBase<Page>* bucket, bool freeOnlyEmptyPages)
{
    // CodePageAllocators is locked in FreeAll
    Assert(inDtor);
    FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, bucket, pageIter)
    {
        // Templatize this to remove branches/make code more compact?
        if (!freeOnlyEmptyPages || page.IsEmpty())
        {
            FreePage(&page);
            pageIter.RemoveCurrent(this->auxiliaryAllocator);
        }
    }
    NEXT_DLISTBASE_ENTRY_EDITING;
}
  848. template<typename TAlloc, typename TPreReservedAlloc>
  849. void Heap<TAlloc, TPreReservedAlloc>::FreeBuckets(bool freeOnlyEmptyPages)
  850. {
  851. // CodePageAllocators is locked in FreeAll
  852. Assert(inDtor);
  853. for (int i = 0; i < NumBuckets; i++)
  854. {
  855. FreeBucket(&this->buckets[i], freeOnlyEmptyPages);
  856. FreeBucket(&this->fullPages[i], freeOnlyEmptyPages);
  857. }
  858. #if DBG_DUMP
  859. this->allocationsSinceLastCompact = 0;
  860. this->freesSinceLastCompact = 0;
  861. #endif
  862. }
// Re-examines pages parked in the full lists after the secondary allocator's
// state changed (e.g. its space was released elsewhere): any page that no
// longer qualifies as full is moved back into its normal bucket so it can
// serve allocations again. Returns true when at least one page was moved.
template<typename TAlloc, typename TPreReservedAlloc>
bool Heap<TAlloc, TPreReservedAlloc>::UpdateFullPages()
{
    bool updated = false;
    // Cheap check first: only take the lock and rescan when the secondary
    // allocator's change counter differs from the one we last observed.
    if (this->codePageAllocators->HasSecondaryAllocStateChanged(&lastSecondaryAllocStateChangedCount))
    {
        AutoCriticalSection autoLock(&this->codePageAllocators->cs);
        for (int bucket = 0; bucket < BucketId::NumBuckets; bucket++)
        {
            FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &(this->fullPages[bucket]), bucketIter)
            {
                Assert(page.inFullList);
                if (!this->ShouldBeInFullList(&page))
                {
                    VerboseHeapTrace(_u("Recycling page 0x%p because XDATA was freed\n"), page.address);
                    // Move the node back to the regular bucket list and clear
                    // its full-list marker.
                    bucketIter.MoveCurrentTo(&(this->buckets[bucket]));
                    page.inFullList = false;
                    updated = true;
                }
            }
            NEXT_DLISTBASE_ENTRY_EDITING;
        }
    }
    return updated;
}
#if PDATA_ENABLED
// Releases an XDATA (unwind info) secondary allocation back to its segment's
// secondary allocator and marks the record freed. Must not be called twice
// for the same record (asserted).
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::FreeXdata(XDataAllocation* xdata, void* segment)
{
    Assert(!xdata->IsFreed());
    {
        // ReleaseSecondary and the Free() flag update happen under the
        // code page allocator lock.
        AutoCriticalSection autoLock(&this->codePageAllocators->cs);
        this->codePageAllocators->ReleaseSecondary(*xdata, segment);
        xdata->Free();
    }
}
#endif
#if DBG_DUMP
// Debug-build diagnostic: prints heap totals, then per-bucket page counts,
// the used bytes of each page in the bucket, and the bucket's full-page count.
template<typename TAlloc, typename TPreReservedAlloc>
void Heap<TAlloc, TPreReservedAlloc>::DumpStats()
{
    HeapTrace(_u("Total allocation size: %d\n"), totalAllocationSize);
    HeapTrace(_u("Total free size: %d\n"), freeObjectSize);
    HeapTrace(_u("Total allocations since last compact: %d\n"), allocationsSinceLastCompact);
    HeapTrace(_u("Total frees since last compact: %d\n"), freesSinceLastCompact);
    HeapTrace(_u("Large object count: %d\n"), this->largeObjectAllocations.Count());
    HeapTrace(_u("Buckets: \n"));
    for (int i = 0; i < BucketId::NumBuckets; i++)
    {
        // (1 << (i + 7)) == 128 << i — presumably the bucket's allocation
        // size in bytes; confirm against the bucket sizing scheme.
        Output::Print(_u("\t%d => %u ["), (1 << (i + 7)), buckets[i].Count());
        FOREACH_DLISTBASE_ENTRY_EDITING(Page, page, &this->buckets[i], bucketIter)
        {
            // Invert the free bits to get the used bits for this page.
            // (ComplimentAll is the project API's spelling.)
            BVUnit usedBitVector = page.freeBitVector;
            usedBitVector.ComplimentAll(); // Get the actual used bit vector
            Output::Print(_u(" %u "), usedBitVector.Count() * Page::Alignment); // Print out the space used in this page
        }
        NEXT_DLISTBASE_ENTRY_EDITING
        Output::Print(_u("] {{%u}}\n"), this->fullPages[i].Count());
    }
}
#endif
  924. #pragma endregion
  925. /**
  926. * Helper methods
  927. */
  928. #pragma region "Helpers"
  929. inline unsigned int log2(size_t number)
  930. {
  931. const unsigned int b[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
  932. const unsigned int S[] = {1, 2, 4, 8, 16};
  933. unsigned int result = 0;
  934. for (int i = 4; i >= 0; i--)
  935. {
  936. if (number & b[i])
  937. {
  938. number >>= S[i];
  939. result |= S[i];
  940. }
  941. }
  942. return result;
  943. }
  944. inline BucketId GetBucketForSize(size_t bytes)
  945. {
  946. if (bytes > Page::MaxAllocationSize)
  947. {
  948. return BucketId::LargeObjectList;
  949. }
  950. BucketId bucket = (BucketId) (log2(bytes / Page::sizePerBit));
  951. Assert(bucket < BucketId::LargeObjectList);
  952. Assert(bucket >= BucketId::SmallObjectList);
  953. return bucket;
  954. }
// Fills the specified buffer with "debug break" instruction encoding.
// If there is any space left after that due to alignment, fill it with 0.
// The instruction pattern is architecture-specific; any space the pattern
// width cannot cover at the tail is zero-filled.
// static
void FillDebugBreak(_Out_writes_bytes_all_(byteCount) BYTE* buffer, _In_ size_t byteCount)
{
#if defined(_M_ARM)
// On ARM there is breakpoint instruction (BKPT) which is 0xBEii, where ii (immediate 8) can be any value, 0xBE in particular.
// While it could be easier to put 0xBE (same way as 0xCC on x86), BKPT is not recommended -- it may cause unexpected side effects.
// So, use same sequence are C++ compiler uses (0xDEFE), this is recognized by debugger as __debugbreak.
// This is 2 bytes, and in case there is a gap of 1 byte in the end, fill it with 0 (there is no 1 byte long THUMB instruction).
    CompileAssert(sizeof(char16) == 2);
    char16 pattern = 0xDEFE;
    BYTE * writeBuffer = buffer;
    wmemset((char16 *)writeBuffer, pattern, byteCount / 2);
    if (byteCount % 2)
    {
        // Note: this is valid scenario: in JIT mode, we may not be 2-byte-aligned in the end of unwind info.
        *(writeBuffer + byteCount - 1) = 0; // Fill last remaining byte.
    }
#elif defined(_M_ARM64)
    CompileAssert(sizeof(DWORD) == 4);
    // BRK #0xF000 encoding: 0xD4200000 with the 16-bit immediate in bits 5-20.
    DWORD pattern = 0xd4200000 | (0xf000 << 5);
    for (size_t i = 0; i < byteCount / 4; i++)
    {
        reinterpret_cast<DWORD*>(buffer)[i] = pattern;
    }
    // Zero any tail bytes the 4-byte instruction pattern could not cover.
    for (size_t i = (byteCount / 4) * 4; i < byteCount; i++)
    {
        // Note: this is valid scenario: in JIT mode, we may not be 2-byte-aligned in the end of unwind info.
        buffer[i] = 0; // Fill last remaining bytes.
    }
#else
// On Intel just use "INT 3" instruction which is 0xCC.
    memset(buffer, 0xCC, byteCount);
#endif
}
// Explicit instantiations: the in-process heap is always built; the
// section-based (out-of-process JIT) heap only when OOP codegen is enabled.
template class Heap<VirtualAllocWrapper, PreReservedVirtualAllocWrapper>;
#if ENABLE_OOP_NATIVE_CODEGEN
template class Heap<SectionAllocWrapper, PreReservedSectionAllocWrapper>;
#endif
  995. #pragma endregion
// In-process specialization: code pages are already addressable in this
// process, so the "remote" address is directly usable — no mapping needed.
template<>
char *
CodePageAllocators<VirtualAllocWrapper, PreReservedVirtualAllocWrapper>::AllocLocal(char * remoteAddr, size_t size, void * segment)
{
    return remoteAddr;
}
// In-process specialization: AllocLocal returned the original pointer, so
// there is no mapping to tear down here.
template<>
void
CodePageAllocators<VirtualAllocWrapper, PreReservedVirtualAllocWrapper>::FreeLocal(char * localAddr, void * segment)
{
    // do nothing in case we are in proc
}
  1008. #if ENABLE_OOP_NATIVE_CODEGEN
  1009. template<>
  1010. char *
  1011. CodePageAllocators<SectionAllocWrapper, PreReservedSectionAllocWrapper>::AllocLocal(char * remoteAddr, size_t size, void * segment)
  1012. {
  1013. AutoCriticalSection autoLock(&this->cs);
  1014. Assert(segment);
  1015. LPVOID address = nullptr;
  1016. if (IsPreReservedSegment(segment))
  1017. {
  1018. address = ((SegmentBase<PreReservedSectionAllocWrapper>*)segment)->GetAllocator()->GetVirtualAllocator()->AllocLocal(remoteAddr, size);
  1019. }
  1020. else
  1021. {
  1022. address = ((SegmentBase<SectionAllocWrapper>*)segment)->GetAllocator()->GetVirtualAllocator()->AllocLocal(remoteAddr, size);
  1023. }
  1024. return (char*)address;
  1025. }
  1026. template<>
  1027. void
  1028. CodePageAllocators<SectionAllocWrapper, PreReservedSectionAllocWrapper>::FreeLocal(char * localAddr, void * segment)
  1029. {
  1030. AutoCriticalSection autoLock(&this->cs);
  1031. Assert(segment);
  1032. if (IsPreReservedSegment(segment))
  1033. {
  1034. ((SegmentBase<PreReservedSectionAllocWrapper>*)segment)->GetAllocator()->GetVirtualAllocator()->FreeLocal(localAddr);
  1035. }
  1036. else
  1037. {
  1038. ((SegmentBase<SectionAllocWrapper>*)segment)->GetAllocator()->GetVirtualAllocator()->FreeLocal(localAddr);
  1039. }
  1040. }
  1041. #endif
  1042. } // namespace CustomHeap
  1043. } // namespace Memory
  1044. #endif // ENABLE_NATIVE_CODEGEN || DYNAMIC_INTERPRETER_THUNK