SectionAllocWrapper.cpp 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "CommonMemoryPch.h"
  6. #if _WIN32
  7. #if ENABLE_OOP_NATIVE_CODEGEN
  8. #include "../Core/DelayLoadLibrary.h"
  9. #ifdef NTDDI_WIN10_RS2
  10. #if (NTDDI_VERSION >= NTDDI_WIN10_RS2)
  11. #define USEFILEMAP2 1
  12. #define USEVIRTUALUNLOCKEX 1
  13. #endif
  14. #endif
  15. namespace Memory
  16. {
// Unlocks pages previously locked in the target process's working set.
// RS2+ builds use the public VirtualUnlockEx; older builds go through the
// delay-loaded ntdll NtUnlockVirtualMemory with MAP_PROCESS semantics.
void UnlockMemory(HANDLE process, LPVOID address, SIZE_T size)
{
#if USEVIRTUALUNLOCKEX
    VirtualUnlockEx(process, address, size);
#else
    // ntdll API takes in/out pointers; address/size may be rounded by the kernel
    NtdllLibrary::Instance->UnlockVirtualMemory(process, &address, &size, NtdllLibrary::MAP_PROCESS);
#endif
}
// Closes a section handle created by CreateSection, using whichever API
// family (Win32 vs ntdll) created it on this build.
void CloseSectionHandle(HANDLE handle)
{
#if USEFILEMAP2
    CloseHandle(handle);
#else
    NtdllLibrary::Instance->Close(handle);
#endif
}
// Creates a pagefile-backed section of sectionSize bytes with
// PAGE_EXECUTE_READWRITE protection. 'commit' selects SEC_COMMIT (pages
// committed up front) vs SEC_RESERVE. Returns nullptr on failure.
HANDLE CreateSection(size_t sectionSize, bool commit)
{
    const ULONG allocAttributes = commit ? SEC_COMMIT : SEC_RESERVE;
#if USEFILEMAP2
#if TARGET_32
    DWORD sizeHigh = 0;
#elif TARGET_64
    // high 32 bits of the 64-bit section size
    DWORD sizeHigh = (DWORD)(sectionSize >> 32);
#endif
    HANDLE handle = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_EXECUTE_READWRITE | allocAttributes, sizeHigh, (DWORD)sectionSize, NULL);
    if (handle == nullptr)
    {
        return nullptr;
    }
#else
    // Pre-RS2 path: call NtCreateSection directly via the delay-loaded ntdll.
    NtdllLibrary::OBJECT_ATTRIBUTES attr;
    NtdllLibrary::Instance->InitializeObjectAttributes(&attr, NULL, NtdllLibrary::OBJ_KERNEL_HANDLE, NULL, NULL);
    LARGE_INTEGER size = { 0 };
#if TARGET_32
    size.LowPart = sectionSize;
#elif TARGET_64
    size.QuadPart = sectionSize;
#endif
    HANDLE handle = nullptr;
    // NTSTATUS convention: zero means success
    int status = NtdllLibrary::Instance->CreateSection(&handle, SECTION_MAP_READ | SECTION_MAP_WRITE | SECTION_QUERY | SECTION_MAP_EXECUTE, &attr, &size, PAGE_EXECUTE_READWRITE, allocAttributes, NULL);
    if (status != 0)
    {
        return nullptr;
    }
#endif
    return handle;
}
// Unmaps a view of a section from the given process, matching whichever
// mapping API this build used to create the view.
void UnmapView(HANDLE process, PVOID address)
{
#if USEFILEMAP2
    UnmapViewOfFile2(process, address, 0);
#else
    NtdllLibrary::Instance->UnmapViewOfSection(process, address);
#endif
}
  73. PVOID MapView(HANDLE process, HANDLE sectionHandle, size_t size, size_t offset, bool local)
  74. {
  75. PVOID address = nullptr;
  76. DWORD flags = 0;
  77. if (local)
  78. {
  79. if (process != GetCurrentProcess())
  80. {
  81. return nullptr;
  82. }
  83. flags = PAGE_READWRITE;
  84. }
  85. else
  86. {
  87. if (process == GetCurrentProcess())
  88. {
  89. return nullptr;
  90. }
  91. flags = AutoSystemInfo::Data.IsCFGEnabled() ? PAGE_EXECUTE_RO_TARGETS_INVALID : PAGE_EXECUTE_READ;
  92. }
  93. #if USEFILEMAP2
  94. address = MapViewOfFile2(sectionHandle, process, offset, nullptr, size, NULL, flags);
  95. if (local && address != nullptr)
  96. {
  97. address = VirtualAlloc(address, size, MEM_COMMIT, flags);
  98. }
  99. #else
  100. LARGE_INTEGER mapOffset = { 0 };
  101. #if TARGET_32
  102. mapOffset.LowPart = offset;
  103. #elif TARGET_64
  104. mapOffset.QuadPart = offset;
  105. #else
  106. CompileAssert(UNREACHED);
  107. #endif
  108. SIZE_T viewSize = size;
  109. int status = NtdllLibrary::Instance->MapViewOfSection(sectionHandle, process, &address, NULL, viewSize, &mapOffset, &viewSize, NtdllLibrary::ViewUnmap, NULL, flags);
  110. if (status != 0)
  111. {
  112. return nullptr;
  113. }
  114. #endif
  115. return address;
  116. }
#if defined(TARGET_64)
// On 64-bit, each SectionMap32 covers one region beginning at startAddress,
// which must be TotalSize-aligned (asserted below).
SectionMap32::SectionMap32(__in char * startAddress) :
    startAddress(startAddress),
#else
// On 32-bit, a single SectionMap32 covers the whole address space.
SectionMap32::SectionMap32() :
#endif
    count(0)
{
    // no L2 chunks allocated yet; they are created lazily by EnsureSection
    memset(map, 0, sizeof(map));
#if defined(TARGET_64)
    Assert(((size_t)startAddress) % TotalSize == 0);
#endif
}
  130. SectionMap32::~SectionMap32()
  131. {
  132. for (uint i = 0; i < _countof(map); i++)
  133. {
  134. L2MapChunk * chunk = map[i];
  135. if (chunk)
  136. {
  137. HeapDelete(chunk);
  138. }
  139. }
  140. }
  141. SectionInfo *
  142. SectionMap32::GetSection(void* address)
  143. {
  144. uint id1 = GetLevel1Id(address);
  145. L2MapChunk * l2map = map[id1];
  146. if (l2map == nullptr)
  147. {
  148. return nullptr;
  149. }
  150. return l2map->Get(address);
  151. }
  152. void
  153. SectionMap32::Cleanup()
  154. {
  155. for (uint id1 = 0; id1 < L1Count; id1++)
  156. {
  157. L2MapChunk * l2map = map[id1];
  158. if (l2map != nullptr && l2map->IsEmpty())
  159. {
  160. map[id1] = nullptr;
  161. HeapDelete(l2map);
  162. Assert(count > 0);
  163. count--;
  164. }
  165. }
  166. }
// Ensures a level-2 chunk exists for every page in [address, address+pageCount).
// On allocation failure, removes any chunks that are still empty and returns
// false; on success returns true without touching the page entries themselves.
bool
SectionMap32::EnsureSection(void * address, uint pageCount)
{
    uint id1 = GetLevel1Id(address);
    uint id2 = GetLevel2Id(address);
    // pages covered by the first (possibly partial) chunk
    uint currentPageCount = min(pageCount, L2Count - id2);
    while (true)
    {
        if (map[id1] == nullptr)
        {
            L2MapChunk * newChunk = HeapNewNoThrowZ(L2MapChunk);
            if (newChunk == nullptr)
            {
                // remove any previously allocated L2 maps
                Cleanup();
                return false;
            }
            map[id1] = newChunk;
            count++;
        }
        pageCount -= currentPageCount;
        if (pageCount == 0)
        {
            break;
        }
        // every subsequent chunk is entered at its first slot
        id2 = 0;
        id1++;
        currentPageCount = min(pageCount, L2Count);
    }
    return true;
}
// Clears the section entries for every page in [address, address+pageCount).
// All covered level-2 chunks must already exist (set up by EnsureSection).
void
SectionMap32::ClearSection(void * address, uint pageCount)
{
    uint id1 = GetLevel1Id(address);
    uint id2 = GetLevel2Id(address);
    // pages in the first (possibly partial) chunk
    uint currentPageCount = min(pageCount, L2Count - id2);
    while (true)
    {
        Assert(map[id1] != nullptr);
        map[id1]->Clear(id2, currentPageCount);
        pageCount -= currentPageCount;
        if (pageCount == 0)
        {
            return;
        }
        // advance to the start of the next chunk
        id2 = 0;
        id1++;
        currentPageCount = min(pageCount, L2Count);
    }
}
// Records 'section' for every page in [address, address+pageCount).
// "NoCheck" = assumes EnsureSection already created all covered chunks.
void
SectionMap32::SetSectionNoCheck(void * address, uint pageCount, SectionInfo * section)
{
    uint id1 = GetLevel1Id(address);
    uint id2 = GetLevel2Id(address);
    // pages in the first (possibly partial) chunk
    uint currentPageCount = min(pageCount, L2Count - id2);
    while (true)
    {
        Assert(map[id1] != nullptr);
        map[id1]->Set(id2, currentPageCount, section);
        pageCount -= currentPageCount;
        if (pageCount == 0)
        {
            return;
        }
        // advance to the start of the next chunk
        id2 = 0;
        id1++;
        currentPageCount = min(pageCount, L2Count);
    }
}
  238. bool
  239. SectionMap32::SetSection(void * address, uint pageCount, SectionInfo * section)
  240. {
  241. if (!EnsureSection(address, pageCount))
  242. {
  243. return false;
  244. }
  245. SetSectionNoCheck(address, pageCount, section);
  246. return true;
  247. }
  248. SectionMap32::L2MapChunk::~L2MapChunk()
  249. {
  250. for (uint i = 0; i < L2Count; ++i)
  251. {
  252. if (map[i] != nullptr)
  253. {
  254. // in case runtime process has abnormal termination, map may not be empty
  255. CloseSectionHandle(map[i]->handle);
  256. HeapDelete(map[i]);
  257. }
  258. }
  259. }
  260. bool
  261. SectionMap32::L2MapChunk::IsEmpty() const
  262. {
  263. for (uint i = 0; i < L2Count; i++)
  264. {
  265. if (this->map[i] != nullptr)
  266. {
  267. return false;
  268. }
  269. }
  270. return true;
  271. }
// Clears pageCount consecutive slots starting at id2; each slot must
// currently hold a section.
void
SectionMap32::L2MapChunk::Clear(uint id2, uint pageCount)
{
    uint id2End = id2 + pageCount;
    Assert(id2 < L2Count);
    Assert(id2End <= L2Count);
    for (uint i = id2; i < id2End; i++)
    {
        // keep static analysis aware of the bound proven by the asserts above
        __analysis_assume(i < L2Count);
        Assert(map[i] != nullptr);
        map[i] = nullptr;
    }
}
// Fills pageCount consecutive slots starting at id2 with 'section'; each
// slot must currently be empty (no double registration).
void
SectionMap32::L2MapChunk::Set(uint id2, uint pageCount, SectionInfo * section)
{
    uint id2End = id2 + pageCount;
    Assert(id2 < L2Count);
    Assert(id2End <= L2Count);
    for (uint i = id2; i < id2End; i++)
    {
        // keep static analysis aware of the bound proven by the asserts above
        __analysis_assume(i < L2Count);
        Assert(map[i] == nullptr);
        map[i] = section;
    }
}
  298. SectionInfo *
  299. SectionMap32::L2MapChunk::Get(void * address)
  300. {
  301. uint id2 = GetLevel2Id(address);
  302. Assert(id2 < L2Count);
  303. __analysis_assume(id2 < L2Count);
  304. return map[id2];
  305. }
  306. #if TARGET_64
// Starts with an empty node list; nodes are added lazily per address region.
SectionMap64::SectionMap64() : list(nullptr)
{
}
  310. SectionMap64::~SectionMap64()
  311. {
  312. Node * node = list;
  313. list = nullptr;
  314. while (node != nullptr)
  315. {
  316. Node * next = node->next;
  317. HeapDelete(node);
  318. node = next;
  319. }
  320. }
// Ensures map nodes (one per 4GB region) and their level-2 chunks exist for
// every page in [address, address+pageCount). Returns false on OOM.
bool
SectionMap64::EnsureSection(void * address, size_t pageCount)
{
    // low 32 bits of the address locate the starting page within its region
    uint lowerBitsAddress = ::Math::PointerCastToIntegralTruncate<uint>(address);
    size_t pageCountLeft = pageCount;
    // pages remaining in the first (possibly partial) 4GB region
    uint nodePages = PagesPer4GB - lowerBitsAddress / AutoSystemInfo::PageSize;
    if (pageCountLeft < nodePages)
    {
        nodePages = (uint)pageCountLeft;
    }
    do
    {
        // create the node for this region if it doesn't exist yet
        Node * node = FindOrInsertNode(address);
        if (node == nullptr || !node->map.EnsureSection(address, nodePages))
        {
            return false;
        }
        pageCountLeft -= nodePages;
        if (pageCountLeft == 0)
        {
            return true;
        }
        // advance to the start of the next region
        address = (void *)((size_t)address + (nodePages * AutoSystemInfo::PageSize));
        nodePages = PagesPer4GB;
        if (pageCountLeft < PagesPer4GB)
        {
            nodePages = (uint)pageCountLeft;
        }
    } while (true);
}
  351. void
  352. SectionMap64::SetSectionNoCheck(void * address, size_t pageCount, SectionInfo * section)
  353. {
  354. ForEachNodeInAddressRange(address, pageCount, [&](Node * node, void * address, uint nodePages)
  355. {
  356. Assert(node != nullptr);
  357. node->map.SetSectionNoCheck(address, nodePages, section);
  358. });
  359. }
  360. bool
  361. SectionMap64::SetSection(void * address, uint pageCount, SectionInfo * section)
  362. {
  363. if (!EnsureSection(address, pageCount))
  364. {
  365. return false;
  366. }
  367. SetSectionNoCheck(address, pageCount, section);
  368. return true;
  369. }
  370. SectionInfo *
  371. SectionMap64::GetSection(void * address)
  372. {
  373. Node * node = FindNode(address);
  374. if (node == nullptr)
  375. {
  376. return nullptr;
  377. }
  378. return node->map.GetSection(address);
  379. }
  380. void
  381. SectionMap64::ClearSection(void * address, uint pageCount)
  382. {
  383. ForEachNodeInAddressRange(address, pageCount, [&](Node* node, void* address, uint nodePages)
  384. {
  385. Assert(node != nullptr);
  386. node->map.ClearSection(address, nodePages);
  387. });
  388. }
// Invokes fn(node, regionAddress, nodePages) once per 4GB region overlapped
// by [address, address + pageCount). fn may receive a null node if the
// region was never populated; callers assert as appropriate.
template <class Fn>
void SectionMap64::ForEachNodeInAddressRange(void * address, size_t pageCount, Fn fn)
{
    // low 32 bits locate the starting page within the first region
    uint lowerBitsAddress = ::Math::PointerCastToIntegralTruncate<uint>(address);
    uint nodePages = SectionMap64::PagesPer4GB - lowerBitsAddress / AutoSystemInfo::PageSize;
    if (pageCount < nodePages)
    {
        nodePages = (uint)pageCount;
    }
    do
    {
        Node * node = FindNode(address);
        fn(node, address, nodePages);
        pageCount -= nodePages;
        if (pageCount == 0)
        {
            break;
        }
        // advance to the start of the next region
        address = (void *)((size_t)address + (nodePages * AutoSystemInfo::PageSize));
        nodePages = SectionMap64::PagesPer4GB;
        if (pageCount < SectionMap64::PagesPer4GB)
        {
            nodePages = (uint)pageCount;
        }
    } while (true);
}
  415. SectionMap64::Node *
  416. SectionMap64::FindOrInsertNode(void * address)
  417. {
  418. Node * node = FindNode(address);
  419. if (node == nullptr)
  420. {
  421. node = HeapNewNoThrowZ(Node, GetNodeStartAddress(address));
  422. if (node != nullptr)
  423. {
  424. node->nodeIndex = GetNodeIndex(address);
  425. node->next = list;
  426. list = node;
  427. }
  428. }
  429. return node;
  430. }
  431. SectionMap64::Node *
  432. SectionMap64::FindNode(void * address) const
  433. {
  434. uint index = GetNodeIndex(address);
  435. Node * node = list;
  436. while (node != nullptr)
  437. {
  438. if (node->nodeIndex == index)
  439. {
  440. return node;
  441. }
  442. node = node->next;
  443. }
  444. return nullptr;
  445. }
  446. #endif //TARGET_64
// Section views must be mapped at 64KB (allocation granularity) offsets.
static const uint SectionAlignment = 65536;
// Maps the runtime-process range [remoteRequestAddress, +requestSize) into the
// current process. The section offset is rounded down to SectionAlignment and
// the returned pointer is adjusted back up, so callers get the exact address
// they asked for. Pair with FreeLocalView, which undoes the same adjustment.
PVOID
AllocLocalView(HANDLE sectionHandle, LPVOID remoteBaseAddr, LPVOID remoteRequestAddress, size_t requestSize)
{
    const size_t offset = (uintptr_t)remoteRequestAddress - (uintptr_t)remoteBaseAddr;
    const size_t offsetAlignment = offset % SectionAlignment;
    const size_t alignedOffset = offset - offsetAlignment;
    // grow the view so it still covers requestSize bytes past the adjustment
    const size_t viewSize = requestSize + offsetAlignment;
    PVOID address = MapView(GetCurrentProcess(), sectionHandle, viewSize, alignedOffset, true);
    if (address == nullptr)
    {
        return nullptr;
    }
    return (PVOID)((uintptr_t)address + offsetAlignment);
}
  462. BOOL
  463. FreeLocalView(LPVOID lpAddress)
  464. {
  465. const size_t alignment = (uintptr_t)lpAddress % SectionAlignment;
  466. UnmapView(GetCurrentProcess(), (LPVOID)((uintptr_t)lpAddress - alignment));
  467. return TRUE;
  468. }
// 'process' is the out-of-process runtime the JIT allocates code memory for.
SectionAllocWrapper::SectionAllocWrapper(HANDLE process) :
    process(process)
{
    Assert(process != GetCurrentProcess()); // only use sections when OOP
}
// Allocates pageCount pages in the runtime process. A fresh allocation
// (requestAddress == nullptr) creates a new section, maps all of it into the
// runtime process, and registers it in 'sections'; otherwise requestAddress
// must lie inside an already-registered section. Returns the runtime-process
// address, or nullptr on failure.
LPVOID
SectionAllocWrapper::AllocPages(LPVOID requestAddress, size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
{
    if (pageCount > AutoSystemInfo::MaxPageCount)
    {
        return nullptr;
    }
    size_t dwSize = pageCount * AutoSystemInfo::PageSize;
    Assert(isCustomHeapAllocation);
    LPVOID address = nullptr;
#if defined(ENABLE_JIT_CLAMP)
    // REVIEW: is this needed?
    AutoEnableDynamicCodeGen enableCodeGen(true);
#endif
    HANDLE sectionHandle = nullptr;
    SectionInfo * section = nullptr;
    // for new allocations, create new section and fully map it (reserved) into runtime process
    if (requestAddress == nullptr)
    {
        sectionHandle = CreateSection(dwSize, ((allocationType & MEM_COMMIT) == MEM_COMMIT));
        if (sectionHandle == nullptr)
        {
            goto FailureCleanup;
        }
        // size 0 maps the whole section
        address = MapView(this->process, sectionHandle, 0, 0, false);
        if(address == nullptr)
        {
            goto FailureCleanup;
        }
        section = HeapNewNoThrowStruct(SectionInfo);
        if (section == nullptr)
        {
            goto FailureCleanup;
        }
        section->handle = sectionHandle;
        section->runtimeBaseAddress = address;
        // register so later AllocLocal / Free calls can find the section
        if (!sections.SetSection(address, (uint)(dwSize / AutoSystemInfo::PageSize), section))
        {
            goto FailureCleanup;
        }
    }
    else
    {
        // committing within an existing reservation: just validate the address
        if (!sections.GetSection(requestAddress))
        {
            return nullptr;
        }
        address = requestAddress;
    }
    return address;
FailureCleanup:
    // if section allocation failed, free whatever we started to allocate
    if (sectionHandle != nullptr)
    {
        CloseSectionHandle(sectionHandle);
    }
    if (address != nullptr)
    {
        UnmapView(this->process, address);
    }
    if (section != nullptr)
    {
        HeapDelete(section);
    }
    return nullptr;
}
  540. bool
  541. SectionAllocWrapper::GetFileInfo(LPVOID address, HANDLE* fileHandle, PVOID* baseAddress)
  542. {
  543. SectionInfo* sectionInfo = sections.GetSection(address);
  544. if (!sectionInfo)
  545. {
  546. return false;
  547. }
  548. *fileHandle = sectionInfo->handle;
  549. *baseAddress = sectionInfo->runtimeBaseAddress;
  550. return true;
  551. }
  552. LPVOID
  553. SectionAllocWrapper::AllocLocal(LPVOID requestAddress, size_t dwSize)
  554. {
  555. SectionInfo * section = sections.GetSection(requestAddress);
  556. Assert(section);
  557. return AllocLocalView(section->handle, section->runtimeBaseAddress, requestAddress, dwSize);
  558. }
// Unmaps a local view previously returned by AllocLocal.
BOOL SectionAllocWrapper::FreeLocal(LPVOID lpAddress)
{
    return FreeLocalView(lpAddress);
}
  563. BOOL SectionAllocWrapper::Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType)
  564. {
  565. Assert(dwSize % AutoSystemInfo::PageSize == 0);
  566. Assert(this->process != GetCurrentProcess()); // only use sections when OOP
  567. if ((dwFreeType & MEM_RELEASE) == MEM_RELEASE)
  568. {
  569. SectionInfo * section = sections.GetSection(lpAddress);
  570. Assert(section);
  571. Assert(section->runtimeBaseAddress == lpAddress);
  572. sections.ClearSection(lpAddress, (uint)(dwSize / AutoSystemInfo::PageSize));
  573. UnmapView(this->process, lpAddress);
  574. CloseSectionHandle(section->handle);
  575. }
  576. else
  577. {
  578. Assert((dwFreeType & MEM_DECOMMIT) == MEM_DECOMMIT);
  579. for (size_t i = 0; i < dwSize / AutoSystemInfo::PageSize; ++i)
  580. {
  581. LPVOID localAddr = AllocLocal((char*)lpAddress + i * AutoSystemInfo::PageSize, AutoSystemInfo::PageSize);
  582. if (localAddr == nullptr)
  583. {
  584. return FALSE;
  585. }
  586. ZeroMemory(localAddr, AutoSystemInfo::PageSize);
  587. FreeLocal(localAddr);
  588. }
  589. UnlockMemory(this->process, lpAddress, dwSize);
  590. }
  591. return TRUE;
  592. }
  593. /*
  594. * class PreReservedVirtualAllocWrapper
  595. */
  596. #if !TARGET_64 && _CONTROL_FLOW_GUARD
  597. // TODO: this should be on runtime process
  598. uint PreReservedSectionAllocWrapper::numPreReservedSegment = 0;
  599. #endif
// 'process' is the out-of-process runtime; the region itself is reserved
// lazily by EnsurePreReservedRegion. All segments start out free.
PreReservedSectionAllocWrapper::PreReservedSectionAllocWrapper(HANDLE process) :
    preReservedStartAddress(nullptr),
    process(process),
    cs(4000),  // critical section spin count
    section(nullptr)
{
    Assert(process != GetCurrentProcess()); // only use sections when OOP
    freeSegments.SetAll();
}
// Tears down the pre-reserved region: unmaps the runtime-process view,
// closes the backing section, and (32-bit CFG builds) releases this
// process-wide segment slot.
PreReservedSectionAllocWrapper::~PreReservedSectionAllocWrapper()
{
    if (IsPreReservedRegionPresent())
    {
        UnmapView(this->process, this->preReservedStartAddress);
        CloseSectionHandle(this->section);
        PreReservedHeapTrace(_u("MEM_RELEASE the PreReservedSegment. Start Address: 0x%p, Size: 0x%x * 0x%x bytes"), this->preReservedStartAddress, PreReservedAllocationSegmentCount,
            AutoSystemInfo::Data.GetAllocationGranularityPageSize());
#if !TARGET_64 && _CONTROL_FLOW_GUARD
        Assert(numPreReservedSegment > 0);
        InterlockedDecrement(&PreReservedSectionAllocWrapper::numPreReservedSegment);
#endif
    }
}
// True once EnsurePreReservedRegion has successfully reserved the region.
bool
PreReservedSectionAllocWrapper::IsPreReservedRegionPresent()
{
    return this->preReservedStartAddress != nullptr;
}
// True if 'address' lies inside this wrapper's pre-reserved region.
// In DBG builds, cross-checks the runtime process's view of the page.
bool
PreReservedSectionAllocWrapper::IsInRange(void * address)
{
    if (!this->IsPreReservedRegionPresent())
    {
        return false;
    }
    bool isInRange = IsInRange(GetPreReservedStartAddress(), address);
#if DBG
    if (isInRange)
    {
        MEMORY_BASIC_INFORMATION memBasicInfo;
        // bytes == 0 means VirtualQueryEx failed (e.g. process gone); tolerate that
        size_t bytes = VirtualQueryEx(this->process, address, &memBasicInfo, sizeof(memBasicInfo));
        Assert(bytes == 0 || (memBasicInfo.State == MEM_COMMIT && memBasicInfo.AllocationProtect == PAGE_EXECUTE_READ));
    }
#endif
    return isInRange;
}
  646. /* static */
  647. bool
  648. PreReservedSectionAllocWrapper::IsInRange(void * regionStart, void * address)
  649. {
  650. if (!regionStart)
  651. {
  652. return false;
  653. }
  654. if (address >= regionStart && address < GetPreReservedEndAddress(regionStart))
  655. {
  656. return true;
  657. }
  658. return false;
  659. }
// Runtime-process base of the pre-reserved region (nullptr if not reserved).
LPVOID
PreReservedSectionAllocWrapper::GetPreReservedStartAddress()
{
    return this->preReservedStartAddress;
}
// One-past-the-end address of this wrapper's pre-reserved region; only valid
// after the region has been reserved.
LPVOID
PreReservedSectionAllocWrapper::GetPreReservedEndAddress()
{
    Assert(IsPreReservedRegionPresent());
    return GetPreReservedEndAddress(this->preReservedStartAddress);
}
/* static */
// One-past-the-end address for a region starting at 'regionStart':
// segment count * pages-per-granularity * page size.
LPVOID
PreReservedSectionAllocWrapper::GetPreReservedEndAddress(void * regionStart)
{
    return (char*)regionStart + (PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageCount() * AutoSystemInfo::PageSize);
}
// Double-checked reservation: the unlocked fast path returns an existing
// region; otherwise the critical section serializes the actual reservation.
LPVOID PreReservedSectionAllocWrapper::EnsurePreReservedRegion()
{
    LPVOID startAddress = this->preReservedStartAddress;
    if (startAddress != nullptr)
    {
        return startAddress;
    }
    {
        AutoCriticalSection autocs(&this->cs);
        // re-checks under the lock before reserving
        return EnsurePreReservedRegionInternal();
    }
}
  689. LPVOID PreReservedSectionAllocWrapper::EnsurePreReservedRegionInternal()
  690. {
  691. LPVOID startAddress = this->preReservedStartAddress;
  692. if (startAddress != nullptr)
  693. {
  694. return startAddress;
  695. }
  696. size_t bytes = PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageSize();
  697. if (PHASE_FORCE1(Js::PreReservedHeapAllocPhase))
  698. {
  699. HANDLE sectionHandle = CreateSection(bytes, false);
  700. if (sectionHandle == nullptr)
  701. {
  702. return nullptr;
  703. }
  704. startAddress = MapView(this->process, sectionHandle, 0, 0, false);
  705. if (startAddress == nullptr)
  706. {
  707. CloseSectionHandle(sectionHandle);
  708. return nullptr;
  709. }
  710. PreReservedHeapTrace(_u("Reserving PreReservedSegment For the first time(CFG Non-Enabled). Address: 0x%p\n"), this->preReservedStartAddress);
  711. this->preReservedStartAddress = startAddress;
  712. this->section = sectionHandle;
  713. return startAddress;
  714. }
  715. #if defined(_CONTROL_FLOW_GUARD)
  716. bool supportPreReservedRegion = true;
  717. #if TARGET_32
  718. #if _M_IX86
  719. // We want to restrict the number of prereserved segment for 32-bit process so that we don't use up the address space
  720. // Note: numPreReservedSegment is for the whole process, and access and update to it is not protected by a global lock.
  721. // So we may allocate more than the maximum some of the time if multiple thread check it simutaniously and allocate pass the limit.
  722. // It doesn't affect functionally, and it should be OK if we exceed.
  723. if (PreReservedSectionAllocWrapper::numPreReservedSegment > PreReservedSectionAllocWrapper::MaxPreReserveSegment)
  724. {
  725. supportPreReservedRegion = false;
  726. }
  727. #else
  728. // TODO: fast check for prereserved segment is not implementated in ARM yet, so it is only enabled for x86
  729. supportPreReservedRegion = false;
  730. #endif // _M_IX86
  731. #endif
  732. if (AutoSystemInfo::Data.IsCFGEnabled() && supportPreReservedRegion)
  733. {
  734. HANDLE sectionHandle = CreateSection(bytes, false);
  735. if (sectionHandle == nullptr)
  736. {
  737. return nullptr;
  738. }
  739. startAddress = MapView(this->process, sectionHandle, 0, 0, false);
  740. if (startAddress == nullptr)
  741. {
  742. CloseSectionHandle(sectionHandle);
  743. return nullptr;
  744. }
  745. PreReservedHeapTrace(_u("Reserving PreReservedSegment For the first time(CFG Enabled). Address: 0x%p\n"), this->preReservedStartAddress);
  746. this->preReservedStartAddress = startAddress;
  747. this->section = sectionHandle;
  748. #if TARGET_32
  749. if (startAddress)
  750. {
  751. InterlockedIncrement(&PreReservedSectionAllocWrapper::numPreReservedSegment);
  752. }
  753. #endif
  754. }
  755. #endif
  756. return startAddress;
  757. }
  758. LPVOID PreReservedSectionAllocWrapper::AllocPages(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
  759. {
  760. if (pageCount > AutoSystemInfo::MaxPageCount)
  761. {
  762. return nullptr;
  763. }
  764. size_t dwSize = pageCount * AutoSystemInfo::PageSize;
  765. AssertMsg(isCustomHeapAllocation, "PreReservation used for allocations other than CustomHeap?");
  766. Assert(dwSize != 0);
  767. {
  768. AutoCriticalSection autocs(&this->cs);
  769. //Return nullptr, if no space to Reserve
  770. if (EnsurePreReservedRegionInternal() == nullptr)
  771. {
  772. PreReservedHeapTrace(_u("No space to pre-reserve memory with %d pages. Returning NULL\n"), PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageCount());
  773. return nullptr;
  774. }
  775. char * addressToReserve = nullptr;
  776. uint freeSegmentsBVIndex = BVInvalidIndex;
  777. size_t requestedNumOfSegments = dwSize / (AutoSystemInfo::Data.GetAllocationGranularityPageSize());
  778. Assert(requestedNumOfSegments <= MAXUINT32);
  779. if (lpAddress == nullptr)
  780. {
  781. Assert(requestedNumOfSegments != 0);
  782. AssertMsg(dwSize % AutoSystemInfo::Data.GetAllocationGranularityPageSize() == 0, "dwSize should be aligned with Allocation Granularity");
  783. do
  784. {
  785. freeSegmentsBVIndex = freeSegments.GetNextBit(freeSegmentsBVIndex + 1);
  786. //Return nullptr, if we don't have free/decommit pages to allocate
  787. if ((freeSegments.Length() - freeSegmentsBVIndex < requestedNumOfSegments) ||
  788. freeSegmentsBVIndex == BVInvalidIndex)
  789. {
  790. PreReservedHeapTrace(_u("No more space to commit in PreReserved Memory region.\n"));
  791. return nullptr;
  792. }
  793. } while (!freeSegments.TestRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments)));
  794. uint offset = freeSegmentsBVIndex * AutoSystemInfo::Data.GetAllocationGranularityPageSize();
  795. addressToReserve = (char*)this->preReservedStartAddress + offset;
  796. }
  797. else
  798. {
  799. //Check If the lpAddress is within the range of the preReserved Memory Region
  800. Assert(((char*)lpAddress) >= (char*)this->preReservedStartAddress || ((char*)lpAddress + dwSize) < GetPreReservedEndAddress());
  801. addressToReserve = (char*)lpAddress;
  802. freeSegmentsBVIndex = (uint)((addressToReserve - (char*)this->preReservedStartAddress) / AutoSystemInfo::Data.GetAllocationGranularityPageSize());
  803. #if DBG
  804. uint numOfSegments = (uint)ceil((double)dwSize / (double)AutoSystemInfo::Data.GetAllocationGranularityPageSize());
  805. Assert(numOfSegments != 0);
  806. Assert(freeSegmentsBVIndex + numOfSegments - 1 < freeSegments.Length());
  807. Assert(!freeSegments.TestRange(freeSegmentsBVIndex, numOfSegments));
  808. #endif
  809. }
  810. AssertMsg(freeSegmentsBVIndex < PreReservedAllocationSegmentCount, "Invalid BitVector index calculation?");
  811. AssertMsg(dwSize % AutoSystemInfo::PageSize == 0, "COMMIT is managed at AutoSystemInfo::PageSize granularity");
  812. // Keep track of the committed pages within the preReserved Memory Region
  813. if (lpAddress == nullptr)
  814. {
  815. Assert(requestedNumOfSegments != 0);
  816. freeSegments.ClearRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments));
  817. }
  818. PreReservedHeapTrace(_u("MEM_COMMIT: StartAddress: 0x%p of size: 0x%x * 0x%x bytes \n"), addressToReserve, requestedNumOfSegments, AutoSystemInfo::Data.GetAllocationGranularityPageSize());
  819. return addressToReserve;
  820. }
  821. }
// Decommits (and on MEM_RELEASE also returns to the free pool) pages inside
// the pre-reserved region. Pages are always scrubbed to zero through a local
// mapping before being unlocked in the runtime process.
BOOL
PreReservedSectionAllocWrapper::Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType)
{
    AutoCriticalSection autocs(&this->cs);
    if (dwSize == 0)
    {
        Assert(false);
        return FALSE;
    }
    if (this->preReservedStartAddress == nullptr)
    {
        Assert(false);
        return FALSE;
    }
    Assert(dwSize % AutoSystemInfo::PageSize == 0);
    // zero one page at a time to minimize working set impact while zeroing
    for (size_t i = 0; i < dwSize / AutoSystemInfo::PageSize; ++i)
    {
        LPVOID localAddr = AllocLocal((char*)lpAddress + i * AutoSystemInfo::PageSize, AutoSystemInfo::PageSize);
        if (localAddr == nullptr)
        {
            return FALSE;
        }
        ZeroMemory(localAddr, AutoSystemInfo::PageSize);
        FreeLocal(localAddr);
    }
    UnlockMemory(this->process, lpAddress, dwSize);
    size_t requestedNumOfSegments = dwSize / AutoSystemInfo::Data.GetAllocationGranularityPageSize();
    Assert(requestedNumOfSegments <= MAXUINT32);
    PreReservedHeapTrace(_u("MEM_DECOMMIT: Address: 0x%p of size: 0x%x bytes\n"), lpAddress, dwSize);
    if ((dwFreeType & MEM_RELEASE) == MEM_RELEASE)
    {
        Assert((uintptr_t)lpAddress >= (uintptr_t)this->preReservedStartAddress);
        AssertMsg(((uintptr_t)lpAddress & (AutoSystemInfo::Data.GetAllocationGranularityPageCount() - 1)) == 0, "Not aligned with Allocation Granularity?");
        AssertMsg(dwSize % AutoSystemInfo::Data.GetAllocationGranularityPageSize() == 0, "Release size should match the allocation granularity size");
        Assert(requestedNumOfSegments != 0);
        // mark the covered segments free again in the bit vector
        BVIndex freeSegmentsBVIndex = (BVIndex)(((uintptr_t)lpAddress - (uintptr_t)this->preReservedStartAddress) / AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        AssertMsg(freeSegmentsBVIndex < PreReservedAllocationSegmentCount, "Invalid Index ?");
        freeSegments.SetRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments));
        PreReservedHeapTrace(_u("MEM_RELEASE: Address: 0x%p of size: 0x%x * 0x%x bytes\n"), lpAddress, requestedNumOfSegments, AutoSystemInfo::Data.GetAllocationGranularityPageSize());
    }
    return TRUE;
}
  865. bool
  866. PreReservedSectionAllocWrapper::GetFileInfo(LPVOID address, HANDLE* fileHandle, PVOID* baseAddress)
  867. {
  868. if (!this->IsPreReservedRegionPresent())
  869. {
  870. return false;
  871. }
  872. *fileHandle = this->section;
  873. *baseAddress = this->preReservedStartAddress;
  874. return true;
  875. }
// Maps a range of the pre-reserved region into this (JIT) process for writing.
LPVOID
PreReservedSectionAllocWrapper::AllocLocal(LPVOID requestAddress, size_t dwSize)
{
    return AllocLocalView(this->section, this->preReservedStartAddress, requestAddress, dwSize);
}
// Unmaps a local view previously returned by AllocLocal.
BOOL
PreReservedSectionAllocWrapper::FreeLocal(LPVOID lpAddress)
{
    return FreeLocalView(lpAddress);
}
  886. } // namespace Memory
  887. #endif
  888. #endif