// SectionAllocWrapper.cpp
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "CommonMemoryPch.h"
  6. #if _WIN32
  7. #include "../Core/DelayLoadLibrary.h"
  8. #ifdef NTDDI_WIN10_RS2
  9. #if (NTDDI_VERSION >= NTDDI_WIN10_RS2)
  10. #define USEFILEMAP2 1
  11. #define USEVIRTUALUNLOCKEX 1
  12. #endif
  13. #endif
  14. namespace Memory
  15. {
// Unlocks pages of the given process's address space.
// RS2+ builds use the public VirtualUnlockEx API; older builds fall back to
// the ntdll NtUnlockVirtualMemory wrapper exposed by NtdllLibrary.
void UnlockMemory(HANDLE process, LPVOID address, SIZE_T size)
{
#if USEVIRTUALUNLOCKEX
    VirtualUnlockEx(process, address, size);
#else
    NtdllLibrary::Instance->UnlockVirtualMemory(process, &address, &size, NtdllLibrary::MAP_PROCESS);
#endif
}
// Closes a section handle returned by CreateSection, using the same API
// family (Win32 vs ntdll) that created it.
void CloseSectionHandle(HANDLE handle)
{
#if USEFILEMAP2
    CloseHandle(handle);
#else
    NtdllLibrary::Instance->Close(handle);
#endif
}
// Creates a pagefile-backed section of `sectionSize` bytes that permits
// PAGE_EXECUTE_READWRITE mappings.  `commit` selects SEC_COMMIT (backing
// charged up front) vs SEC_RESERVE.  Returns nullptr on failure.
HANDLE CreateSection(size_t sectionSize, bool commit)
{
    const ULONG allocAttributes = commit ? SEC_COMMIT : SEC_RESERVE;
#if USEFILEMAP2
#if TARGET_32
    DWORD sizeHigh = 0;
#elif TARGET_64
    // CreateFileMapping takes the 64-bit size as two 32-bit halves.
    DWORD sizeHigh = (DWORD)(sectionSize >> 32);
#endif
    HANDLE handle = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_EXECUTE_READWRITE | allocAttributes, sizeHigh, (DWORD)sectionSize, NULL);
    if (handle == nullptr)
    {
        return nullptr;
    }
#else
    NtdllLibrary::OBJECT_ATTRIBUTES attr;
    NtdllLibrary::Instance->InitializeObjectAttributes(&attr, NULL, NtdllLibrary::OBJ_KERNEL_HANDLE, NULL, NULL);
    LARGE_INTEGER size = { 0 };
#if TARGET_32
    size.LowPart = sectionSize;
#elif TARGET_64
    size.QuadPart = sectionSize;
#endif
    HANDLE handle = nullptr;
    // NTSTATUS: zero is success, anything else is treated as failure here.
    int status = NtdllLibrary::Instance->CreateSection(&handle, SECTION_MAP_READ | SECTION_MAP_WRITE | SECTION_QUERY | SECTION_MAP_EXECUTE, &attr, &size, PAGE_EXECUTE_READWRITE, allocAttributes, NULL);
    if (status != 0)
    {
        return nullptr;
    }
#endif
    return handle;
}
// Unmaps a view previously created with MapView from `process`.
void UnmapView(HANDLE process, PVOID address)
{
#if USEFILEMAP2
    UnmapViewOfFile2(process, address, 0);
#else
    NtdllLibrary::Instance->UnmapViewOfSection(process, address);
#endif
}
  72. PVOID MapView(HANDLE process, HANDLE sectionHandle, size_t size, size_t offset, bool local)
  73. {
  74. PVOID address = nullptr;
  75. DWORD flags = 0;
  76. if (local)
  77. {
  78. if (process != GetCurrentProcess())
  79. {
  80. return nullptr;
  81. }
  82. flags = PAGE_READWRITE;
  83. }
  84. else
  85. {
  86. if (process == GetCurrentProcess())
  87. {
  88. return nullptr;
  89. }
  90. flags = AutoSystemInfo::Data.IsCFGEnabled() ? PAGE_EXECUTE_RO_TARGETS_INVALID : PAGE_EXECUTE_READ;
  91. }
  92. #if USEFILEMAP2
  93. address = MapViewOfFile2(sectionHandle, process, offset, nullptr, size, NULL, flags);
  94. if (local && address != nullptr)
  95. {
  96. address = VirtualAlloc(address, size, MEM_COMMIT, flags);
  97. }
  98. #else
  99. LARGE_INTEGER mapOffset = { 0 };
  100. #if TARGET_32
  101. mapOffset.LowPart = offset;
  102. #elif TARGET_64
  103. mapOffset.QuadPart = offset;
  104. #else
  105. CompileAssert(UNREACHED);
  106. #endif
  107. SIZE_T viewSize = size;
  108. int status = NtdllLibrary::Instance->MapViewOfSection(sectionHandle, process, &address, NULL, viewSize, &mapOffset, &viewSize, NtdllLibrary::ViewUnmap, NULL, flags);
  109. if (status != 0)
  110. {
  111. return nullptr;
  112. }
  113. #endif
  114. return address;
  115. }
#if defined(TARGET_64)
// On 64-bit, one SectionMap32 covers a single TotalSize-aligned range whose
// base is `startAddress`.
SectionMap32::SectionMap32(__in char * startAddress) :
    startAddress(startAddress),
#else
SectionMap32::SectionMap32() :
#endif
    count(0)
{
    // Start with no level-2 chunks allocated.
    memset(map, 0, sizeof(map));
#if defined(TARGET_64)
    Assert(((size_t)startAddress) % TotalSize == 0);
#endif
}
  129. SectionMap32::~SectionMap32()
  130. {
  131. for (uint i = 0; i < _countof(map); i++)
  132. {
  133. L2MapChunk * chunk = map[i];
  134. if (chunk)
  135. {
  136. HeapDelete(chunk);
  137. }
  138. }
  139. }
  140. SectionInfo *
  141. SectionMap32::GetSection(void* address)
  142. {
  143. uint id1 = GetLevel1Id(address);
  144. L2MapChunk * l2map = map[id1];
  145. if (l2map == nullptr)
  146. {
  147. return nullptr;
  148. }
  149. return l2map->Get(address);
  150. }
  151. void
  152. SectionMap32::Cleanup()
  153. {
  154. for (uint id1 = 0; id1 < L1Count; id1++)
  155. {
  156. L2MapChunk * l2map = map[id1];
  157. if (l2map != nullptr && l2map->IsEmpty())
  158. {
  159. map[id1] = nullptr;
  160. HeapDelete(l2map);
  161. Assert(count > 0);
  162. count--;
  163. }
  164. }
  165. }
// Ensures level-2 chunks exist for `pageCount` pages starting at `address`.
// On allocation failure, removes any chunks that were created empty (via
// Cleanup) and returns false.
bool
SectionMap32::EnsureSection(void * address, uint pageCount)
{
    uint id1 = GetLevel1Id(address);
    uint id2 = GetLevel2Id(address);
    // Pages that fall within the first (possibly partial) level-2 chunk.
    uint currentPageCount = min(pageCount, L2Count - id2);
    while (true)
    {
        if (map[id1] == nullptr)
        {
            L2MapChunk * newChunk = HeapNewNoThrowZ(L2MapChunk);
            if (newChunk == nullptr)
            {
                // remove any previously allocated L2 maps
                Cleanup();
                return false;
            }
            map[id1] = newChunk;
            count++;
        }
        pageCount -= currentPageCount;
        if (pageCount == 0)
        {
            break;
        }
        // Each subsequent chunk is consumed from its start.
        id2 = 0;
        id1++;
        currentPageCount = min(pageCount, L2Count);
    }
    return true;
}
  197. void
  198. SectionMap32::ClearSection(void * address, uint pageCount)
  199. {
  200. uint id1 = GetLevel1Id(address);
  201. uint id2 = GetLevel2Id(address);
  202. uint currentPageCount = min(pageCount, L2Count - id2);
  203. while (true)
  204. {
  205. Assert(map[id1] != nullptr);
  206. map[id1]->Clear(id2, currentPageCount);
  207. pageCount -= currentPageCount;
  208. if (pageCount == 0)
  209. {
  210. return;
  211. }
  212. id2 = 0;
  213. id1++;
  214. currentPageCount = min(pageCount, L2Count);
  215. }
  216. }
  217. void
  218. SectionMap32::SetSectionNoCheck(void * address, uint pageCount, SectionInfo * section)
  219. {
  220. uint id1 = GetLevel1Id(address);
  221. uint id2 = GetLevel2Id(address);
  222. uint currentPageCount = min(pageCount, L2Count - id2);
  223. while (true)
  224. {
  225. Assert(map[id1] != nullptr);
  226. map[id1]->Set(id2, currentPageCount, section);
  227. pageCount -= currentPageCount;
  228. if (pageCount == 0)
  229. {
  230. return;
  231. }
  232. id2 = 0;
  233. id1++;
  234. currentPageCount = min(pageCount, L2Count);
  235. }
  236. }
  237. bool
  238. SectionMap32::SetSection(void * address, uint pageCount, SectionInfo * section)
  239. {
  240. if (!EnsureSection(address, pageCount))
  241. {
  242. return false;
  243. }
  244. SetSectionNoCheck(address, pageCount, section);
  245. return true;
  246. }
  247. SectionMap32::L2MapChunk::~L2MapChunk()
  248. {
  249. for (uint i = 0; i < L2Count; ++i)
  250. {
  251. if (map[i] != nullptr)
  252. {
  253. // in case runtime process has abnormal termination, map may not be empty
  254. CloseSectionHandle(map[i]->handle);
  255. HeapDelete(map[i]);
  256. }
  257. }
  258. }
  259. bool
  260. SectionMap32::L2MapChunk::IsEmpty() const
  261. {
  262. for (uint i = 0; i < L2Count; i++)
  263. {
  264. if (this->map[i] != nullptr)
  265. {
  266. return false;
  267. }
  268. }
  269. return true;
  270. }
  271. void
  272. SectionMap32::L2MapChunk::Clear(uint id2, uint pageCount)
  273. {
  274. uint id2End = id2 + pageCount;
  275. Assert(id2 < L2Count);
  276. Assert(id2End <= L2Count);
  277. for (uint i = id2; i < id2End; i++)
  278. {
  279. __analysis_assume(i < L2Count);
  280. Assert(map[i] != nullptr);
  281. map[i] = nullptr;
  282. }
  283. }
  284. void
  285. SectionMap32::L2MapChunk::Set(uint id2, uint pageCount, SectionInfo * section)
  286. {
  287. uint id2End = id2 + pageCount;
  288. Assert(id2 < L2Count);
  289. Assert(id2End <= L2Count);
  290. for (uint i = id2; i < id2End; i++)
  291. {
  292. __analysis_assume(i < L2Count);
  293. Assert(map[i] == nullptr);
  294. map[i] = section;
  295. }
  296. }
  297. SectionInfo *
  298. SectionMap32::L2MapChunk::Get(void * address)
  299. {
  300. uint id2 = GetLevel2Id(address);
  301. Assert(id2 < L2Count);
  302. __analysis_assume(id2 < L2Count);
  303. return map[id2];
  304. }
  305. #if TARGET_64
// 64-bit section map: a linked list of per-4GB nodes, created lazily.
SectionMap64::SectionMap64() : list(nullptr)
{
}
  309. SectionMap64::~SectionMap64()
  310. {
  311. Node * node = list;
  312. list = nullptr;
  313. while (node != nullptr)
  314. {
  315. Node * next = node->next;
  316. HeapDelete(node);
  317. node = next;
  318. }
  319. }
// Ensures map storage exists for `pageCount` pages starting at `address`,
// splitting the range at 4GB node boundaries.  Returns false if any node or
// its level-2 storage cannot be allocated.
bool
SectionMap64::EnsureSection(void * address, size_t pageCount)
{
    uint lowerBitsAddress = ::Math::PointerCastToIntegralTruncate<uint>(address);
    size_t pageCountLeft = pageCount;
    // Pages that fall within the first (possibly partial) 4GB node.
    uint nodePages = PagesPer4GB - lowerBitsAddress / AutoSystemInfo::PageSize;
    if (pageCountLeft < nodePages)
    {
        nodePages = (uint)pageCountLeft;
    }
    do
    {
        Node * node = FindOrInsertNode(address);
        if (node == nullptr || !node->map.EnsureSection(address, nodePages))
        {
            return false;
        }
        pageCountLeft -= nodePages;
        if (pageCountLeft == 0)
        {
            return true;
        }
        // Advance to the start of the next 4GB node.
        address = (void *)((size_t)address + (nodePages * AutoSystemInfo::PageSize));
        nodePages = PagesPer4GB;
        if (pageCountLeft < PagesPer4GB)
        {
            nodePages = (uint)pageCountLeft;
        }
    } while (true);
}
// Records `section` in every per-4GB node covering the range.  All nodes
// must already exist (see EnsureSection).
void
SectionMap64::SetSectionNoCheck(void * address, size_t pageCount, SectionInfo * section)
{
    ForEachNodeInAddressRange(address, pageCount, [&](Node * node, void * address, uint nodePages)
    {
        Assert(node != nullptr);
        node->map.SetSectionNoCheck(address, nodePages, section);
    });
}
  359. bool
  360. SectionMap64::SetSection(void * address, uint pageCount, SectionInfo * section)
  361. {
  362. if (!EnsureSection(address, pageCount))
  363. {
  364. return false;
  365. }
  366. SetSectionNoCheck(address, pageCount, section);
  367. return true;
  368. }
  369. SectionInfo *
  370. SectionMap64::GetSection(void * address)
  371. {
  372. Node * node = FindNode(address);
  373. if (node == nullptr)
  374. {
  375. return nullptr;
  376. }
  377. return node->map.GetSection(address);
  378. }
// Clears the entries for the range in every per-4GB node covering it.
// All nodes must already exist.
void
SectionMap64::ClearSection(void * address, uint pageCount)
{
    ForEachNodeInAddressRange(address, pageCount, [&](Node* node, void* address, uint nodePages)
    {
        Assert(node != nullptr);
        node->map.ClearSection(address, nodePages);
    });
}
// Invokes fn(node, rangeStart, pagesInNode) once per 4GB node overlapped by
// the range [address, address + pageCount pages).  `node` may be nullptr if
// it was never inserted; callers decide how to handle that.
template <class Fn>
void SectionMap64::ForEachNodeInAddressRange(void * address, size_t pageCount, Fn fn)
{
    uint lowerBitsAddress = ::Math::PointerCastToIntegralTruncate<uint>(address);
    // Pages that fall within the first (possibly partial) 4GB node.
    uint nodePages = SectionMap64::PagesPer4GB - lowerBitsAddress / AutoSystemInfo::PageSize;
    if (pageCount < nodePages)
    {
        nodePages = (uint)pageCount;
    }
    do
    {
        Node * node = FindNode(address);
        fn(node, address, nodePages);
        pageCount -= nodePages;
        if (pageCount == 0)
        {
            break;
        }
        // Advance to the start of the next 4GB node.
        address = (void *)((size_t)address + (nodePages * AutoSystemInfo::PageSize));
        nodePages = SectionMap64::PagesPer4GB;
        if (pageCount < SectionMap64::PagesPer4GB)
        {
            nodePages = (uint)pageCount;
        }
    } while (true);
}
  414. SectionMap64::Node *
  415. SectionMap64::FindOrInsertNode(void * address)
  416. {
  417. Node * node = FindNode(address);
  418. if (node == nullptr)
  419. {
  420. node = HeapNewNoThrowZ(Node, GetNodeStartAddress(address));
  421. if (node != nullptr)
  422. {
  423. node->nodeIndex = GetNodeIndex(address);
  424. node->next = list;
  425. list = node;
  426. }
  427. }
  428. return node;
  429. }
  430. SectionMap64::Node *
  431. SectionMap64::FindNode(void * address) const
  432. {
  433. uint index = GetNodeIndex(address);
  434. Node * node = list;
  435. while (node != nullptr)
  436. {
  437. if (node->nodeIndex == index)
  438. {
  439. return node;
  440. }
  441. node = node->next;
  442. }
  443. return nullptr;
  444. }
  445. #endif //TARGET_64
  446. static const uint SectionAlignment = 65536;
  447. PVOID
  448. AllocLocalView(HANDLE sectionHandle, LPVOID remoteBaseAddr, LPVOID remoteRequestAddress, size_t requestSize)
  449. {
  450. const size_t offset = (uintptr_t)remoteRequestAddress - (uintptr_t)remoteBaseAddr;
  451. const size_t offsetAlignment = offset % SectionAlignment;
  452. const size_t alignedOffset = offset - offsetAlignment;
  453. const size_t viewSize = requestSize + offsetAlignment;
  454. PVOID address = MapView(GetCurrentProcess(), sectionHandle, viewSize, alignedOffset, true);
  455. if (address == nullptr)
  456. {
  457. return nullptr;
  458. }
  459. return (PVOID)((uintptr_t)address + offsetAlignment);
  460. }
  461. BOOL
  462. FreeLocalView(LPVOID lpAddress)
  463. {
  464. const size_t alignment = (uintptr_t)lpAddress % SectionAlignment;
  465. UnmapView(GetCurrentProcess(), (LPVOID)((uintptr_t)lpAddress - alignment));
  466. return TRUE;
  467. }
// Wraps section-based allocation targeting another (runtime) process.
SectionAllocWrapper::SectionAllocWrapper(HANDLE process) :
    process(process)
{
    Assert(process != GetCurrentProcess()); // only use sections when OOP
}
// Allocates OOP JIT memory backed by a section.
// requestAddress == nullptr: creates a new section of pageCount pages, maps
// it into the target process, and records the SectionInfo in `sections`.
// requestAddress != nullptr: must lie inside a previously recorded section;
// the address is returned unchanged (the remote mapping already exists).
// Returns the address in the *target* process, or nullptr on failure.
LPVOID
SectionAllocWrapper::AllocPages(LPVOID requestAddress, size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
{
    if (pageCount > AutoSystemInfo::MaxPageCount)
    {
        return nullptr;
    }
    size_t dwSize = pageCount * AutoSystemInfo::PageSize;
    Assert(isCustomHeapAllocation);
    LPVOID address = nullptr;
#if defined(ENABLE_JIT_CLAMP)
    // REVIEW: is this needed?
    AutoEnableDynamicCodeGen enableCodeGen(true);
#endif
    HANDLE sectionHandle = nullptr;
    SectionInfo * section = nullptr;
    // for new allocations, create new section and fully map it (reserved) into runtime process
    if (requestAddress == nullptr)
    {
        sectionHandle = CreateSection(dwSize, ((allocationType & MEM_COMMIT) == MEM_COMMIT));
        if (sectionHandle == nullptr)
        {
            goto FailureCleanup;
        }
        // size 0 requests a view of the whole section (MapViewOfFile2 /
        // NtMapViewOfSection semantics).
        address = MapView(this->process, sectionHandle, 0, 0, false);
        if(address == nullptr)
        {
            goto FailureCleanup;
        }
        section = HeapNewNoThrowStruct(SectionInfo);
        if (section == nullptr)
        {
            goto FailureCleanup;
        }
        section->handle = sectionHandle;
        section->runtimeBaseAddress = address;
        if (!sections.SetSection(address, (uint)(dwSize / AutoSystemInfo::PageSize), section))
        {
            goto FailureCleanup;
        }
    }
    else
    {
        // The request must target memory this wrapper previously allocated.
        if (!sections.GetSection(requestAddress))
        {
            return nullptr;
        }
        address = requestAddress;
    }
    return address;
FailureCleanup:
    // if section allocation failed, free whatever we started to allocate
    if (sectionHandle != nullptr)
    {
        CloseSectionHandle(sectionHandle);
    }
    if (address != nullptr)
    {
        UnmapView(this->process, address);
    }
    if (section != nullptr)
    {
        HeapDelete(section);
    }
    return nullptr;
}
  539. LPVOID
  540. SectionAllocWrapper::AllocLocal(LPVOID requestAddress, size_t dwSize)
  541. {
  542. SectionInfo * section = sections.GetSection(requestAddress);
  543. Assert(section);
  544. return AllocLocalView(section->handle, section->runtimeBaseAddress, requestAddress, dwSize);
  545. }
// Unmaps a local view returned by AllocLocal.
BOOL SectionAllocWrapper::FreeLocal(LPVOID lpAddress)
{
    return FreeLocalView(lpAddress);
}
  550. BOOL SectionAllocWrapper::Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType)
  551. {
  552. Assert(dwSize % AutoSystemInfo::PageSize == 0);
  553. Assert(this->process != GetCurrentProcess()); // only use sections when OOP
  554. if ((dwFreeType & MEM_RELEASE) == MEM_RELEASE)
  555. {
  556. SectionInfo * section = sections.GetSection(lpAddress);
  557. Assert(section);
  558. Assert(section->runtimeBaseAddress == lpAddress);
  559. sections.ClearSection(lpAddress, (uint)(dwSize / AutoSystemInfo::PageSize));
  560. UnmapView(this->process, lpAddress);
  561. CloseSectionHandle(section->handle);
  562. }
  563. else
  564. {
  565. Assert((dwFreeType & MEM_DECOMMIT) == MEM_DECOMMIT);
  566. for (size_t i = 0; i < dwSize / AutoSystemInfo::PageSize; ++i)
  567. {
  568. LPVOID localAddr = AllocLocal((char*)lpAddress + i * AutoSystemInfo::PageSize, AutoSystemInfo::PageSize);
  569. if (localAddr == nullptr)
  570. {
  571. return FALSE;
  572. }
  573. ZeroMemory(localAddr, AutoSystemInfo::PageSize);
  574. FreeLocal(localAddr);
  575. }
  576. UnlockMemory(this->process, lpAddress, dwSize);
  577. }
  578. return TRUE;
  579. }
/*
 * class PreReservedSectionAllocWrapper
 */
#if !TARGET_64 && _CONTROL_FLOW_GUARD
// Process-wide count of live pre-reserved segments (32-bit only, used to
// cap address-space usage).  Updated with Interlocked* but read without
// synchronization — see the comment at the check site.
// TODO: this should be on runtime process
uint PreReservedSectionAllocWrapper::numPreReservedSegment = 0;
#endif
// Section-backed pre-reserved region allocator for an OOP runtime process.
// The region itself is created lazily (EnsurePreReservedRegion).
PreReservedSectionAllocWrapper::PreReservedSectionAllocWrapper(HANDLE process) :
    preReservedStartAddress(nullptr),
    process(process),
    cs(4000), // presumably the critical-section spin count — TODO confirm
    section(nullptr)
{
    Assert(process != GetCurrentProcess()); // only use sections when OOP
    // All allocation-granularity segments start out free.
    freeSegments.SetAll();
}
// Tears down the pre-reserved region: unmaps the remote view, closes the
// backing section, and (32-bit CFG builds) drops the process-wide segment
// count.
PreReservedSectionAllocWrapper::~PreReservedSectionAllocWrapper()
{
    if (IsPreReservedRegionPresent())
    {
        UnmapView(this->process, this->preReservedStartAddress);
        CloseSectionHandle(this->section);
        PreReservedHeapTrace(_u("MEM_RELEASE the PreReservedSegment. Start Address: 0x%p, Size: 0x%x * 0x%x bytes"), this->preReservedStartAddress, PreReservedAllocationSegmentCount,
            AutoSystemInfo::Data.GetAllocationGranularityPageSize());
#if !TARGET_64 && _CONTROL_FLOW_GUARD
        Assert(numPreReservedSegment > 0);
        InterlockedDecrement(&PreReservedSectionAllocWrapper::numPreReservedSegment);
#endif
    }
}
  610. bool
  611. PreReservedSectionAllocWrapper::IsPreReservedRegionPresent()
  612. {
  613. return this->preReservedStartAddress != nullptr;
  614. }
// Returns true when `address` lies inside this wrapper's pre-reserved
// region.  Debug builds additionally query the target process and check the
// page state when the query succeeds.
bool
PreReservedSectionAllocWrapper::IsInRange(void * address)
{
    if (!this->IsPreReservedRegionPresent())
    {
        return false;
    }
    bool isInRange = IsInRange(GetPreReservedStartAddress(), address);
#if DBG
    if (isInRange)
    {
        MEMORY_BASIC_INFORMATION memBasicInfo;
        // VirtualQueryEx may fail (bytes == 0); only validate on success.
        size_t bytes = VirtualQueryEx(this->process, address, &memBasicInfo, sizeof(memBasicInfo));
        Assert(bytes == 0 || (memBasicInfo.State == MEM_COMMIT && memBasicInfo.AllocationProtect == PAGE_EXECUTE_READ));
    }
#endif
    return isInRange;
}
  633. /* static */
  634. bool
  635. PreReservedSectionAllocWrapper::IsInRange(void * regionStart, void * address)
  636. {
  637. if (!regionStart)
  638. {
  639. return false;
  640. }
  641. if (address >= regionStart && address < GetPreReservedEndAddress(regionStart))
  642. {
  643. return true;
  644. }
  645. return false;
  646. }
// Base of the pre-reserved region (nullptr until EnsurePreReservedRegion
// succeeds).
LPVOID
PreReservedSectionAllocWrapper::GetPreReservedStartAddress()
{
    return this->preReservedStartAddress;
}
// One-past-the-end of this wrapper's pre-reserved region; region must exist.
LPVOID
PreReservedSectionAllocWrapper::GetPreReservedEndAddress()
{
    Assert(IsPreReservedRegionPresent());
    return GetPreReservedEndAddress(this->preReservedStartAddress);
}
  658. /* static */
  659. LPVOID
  660. PreReservedSectionAllocWrapper::GetPreReservedEndAddress(void * regionStart)
  661. {
  662. return (char*)regionStart + (PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageCount() * AutoSystemInfo::PageSize);
  663. }
// Returns the pre-reserved region base, creating it on first use.
// Fast path reads preReservedStartAddress without the lock; the slow path
// re-checks under the critical section inside
// EnsurePreReservedRegionInternal.
LPVOID PreReservedSectionAllocWrapper::EnsurePreReservedRegion()
{
    LPVOID startAddress = this->preReservedStartAddress;
    if (startAddress != nullptr)
    {
        return startAddress;
    }
    {
        AutoCriticalSection autocs(&this->cs);
        return EnsurePreReservedRegionInternal();
    }
}
  676. LPVOID PreReservedSectionAllocWrapper::EnsurePreReservedRegionInternal()
  677. {
  678. LPVOID startAddress = this->preReservedStartAddress;
  679. if (startAddress != nullptr)
  680. {
  681. return startAddress;
  682. }
  683. size_t bytes = PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageSize();
  684. if (PHASE_FORCE1(Js::PreReservedHeapAllocPhase))
  685. {
  686. HANDLE sectionHandle = CreateSection(bytes, false);
  687. if (sectionHandle == nullptr)
  688. {
  689. return nullptr;
  690. }
  691. startAddress = MapView(this->process, sectionHandle, 0, 0, false);
  692. if (startAddress == nullptr)
  693. {
  694. CloseSectionHandle(sectionHandle);
  695. return nullptr;
  696. }
  697. PreReservedHeapTrace(_u("Reserving PreReservedSegment For the first time(CFG Non-Enabled). Address: 0x%p\n"), this->preReservedStartAddress);
  698. this->preReservedStartAddress = startAddress;
  699. this->section = sectionHandle;
  700. return startAddress;
  701. }
  702. #if defined(_CONTROL_FLOW_GUARD)
  703. bool supportPreReservedRegion = true;
  704. #if TARGET_32
  705. #if _M_IX86
  706. // We want to restrict the number of prereserved segment for 32-bit process so that we don't use up the address space
  707. // Note: numPreReservedSegment is for the whole process, and access and update to it is not protected by a global lock.
  708. // So we may allocate more than the maximum some of the time if multiple thread check it simutaniously and allocate pass the limit.
  709. // It doesn't affect functionally, and it should be OK if we exceed.
  710. if (PreReservedSectionAllocWrapper::numPreReservedSegment > PreReservedSectionAllocWrapper::MaxPreReserveSegment)
  711. {
  712. supportPreReservedRegion = false;
  713. }
  714. #else
  715. // TODO: fast check for prereserved segment is not implementated in ARM yet, so it is only enabled for x86
  716. supportPreReservedRegion = false;
  717. #endif // _M_IX86
  718. #endif
  719. if (AutoSystemInfo::Data.IsCFGEnabled() && supportPreReservedRegion)
  720. {
  721. HANDLE sectionHandle = CreateSection(bytes, false);
  722. if (sectionHandle == nullptr)
  723. {
  724. return nullptr;
  725. }
  726. startAddress = MapView(this->process, sectionHandle, 0, 0, false);
  727. if (startAddress == nullptr)
  728. {
  729. CloseSectionHandle(sectionHandle);
  730. return nullptr;
  731. }
  732. PreReservedHeapTrace(_u("Reserving PreReservedSegment For the first time(CFG Enabled). Address: 0x%p\n"), this->preReservedStartAddress);
  733. this->preReservedStartAddress = startAddress;
  734. this->section = sectionHandle;
  735. #if TARGET_32
  736. if (startAddress)
  737. {
  738. InterlockedIncrement(&PreReservedSectionAllocWrapper::numPreReservedSegment);
  739. }
  740. #endif
  741. }
  742. #endif
  743. return startAddress;
  744. }
// Hands out allocation-granularity segments from the pre-reserved region.
// lpAddress == nullptr: find a free run of segments in the bitvector and
// return its address.  lpAddress != nullptr: the caller is (re)committing a
// range it already owns; the address is validated and returned.
// No memory operation is performed here — commit state is tracked purely
// via `freeSegments`.  Returns nullptr when the region cannot be created or
// has no free run large enough.
LPVOID PreReservedSectionAllocWrapper::AllocPages(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
{
    if (pageCount > AutoSystemInfo::MaxPageCount)
    {
        return nullptr;
    }
    size_t dwSize = pageCount * AutoSystemInfo::PageSize;
    AssertMsg(isCustomHeapAllocation, "PreReservation used for allocations other than CustomHeap?");
    Assert(dwSize != 0);
    {
        AutoCriticalSection autocs(&this->cs);
        //Return nullptr, if no space to Reserve
        if (EnsurePreReservedRegionInternal() == nullptr)
        {
            PreReservedHeapTrace(_u("No space to pre-reserve memory with %d pages. Returning NULL\n"), PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageCount());
            return nullptr;
        }
        char * addressToReserve = nullptr;
        uint freeSegmentsBVIndex = BVInvalidIndex;
        size_t requestedNumOfSegments = dwSize / (AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        Assert(requestedNumOfSegments <= MAXUINT32);
        if (lpAddress == nullptr)
        {
            Assert(requestedNumOfSegments != 0);
            AssertMsg(dwSize % AutoSystemInfo::Data.GetAllocationGranularityPageSize() == 0, "dwSize should be aligned with Allocation Granularity");
            // Scan for a run of requestedNumOfSegments consecutive free bits.
            do
            {
                freeSegmentsBVIndex = freeSegments.GetNextBit(freeSegmentsBVIndex + 1);
                //Return nullptr, if we don't have free/decommit pages to allocate
                if ((freeSegments.Length() - freeSegmentsBVIndex < requestedNumOfSegments) ||
                    freeSegmentsBVIndex == BVInvalidIndex)
                {
                    PreReservedHeapTrace(_u("No more space to commit in PreReserved Memory region.\n"));
                    return nullptr;
                }
            } while (!freeSegments.TestRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments)));
            uint offset = freeSegmentsBVIndex * AutoSystemInfo::Data.GetAllocationGranularityPageSize();
            addressToReserve = (char*)this->preReservedStartAddress + offset;
        }
        else
        {
            //Check If the lpAddress is within the range of the preReserved Memory Region
            // NOTE(review): this range check uses `||`; a both-ends check
            // would normally be `&&` — as written an address below the region
            // can still pass the assert.  Confirm intent before tightening.
            Assert(((char*)lpAddress) >= (char*)this->preReservedStartAddress || ((char*)lpAddress + dwSize) < GetPreReservedEndAddress());
            addressToReserve = (char*)lpAddress;
            freeSegmentsBVIndex = (uint)((addressToReserve - (char*)this->preReservedStartAddress) / AutoSystemInfo::Data.GetAllocationGranularityPageSize());
#if DBG
            uint numOfSegments = (uint)ceil((double)dwSize / (double)AutoSystemInfo::Data.GetAllocationGranularityPageSize());
            Assert(numOfSegments != 0);
            Assert(freeSegmentsBVIndex + numOfSegments - 1 < freeSegments.Length());
            Assert(!freeSegments.TestRange(freeSegmentsBVIndex, numOfSegments));
#endif
        }
        AssertMsg(freeSegmentsBVIndex < PreReservedAllocationSegmentCount, "Invalid BitVector index calculation?");
        AssertMsg(dwSize % AutoSystemInfo::PageSize == 0, "COMMIT is managed at AutoSystemInfo::PageSize granularity");
        // Keep track of the committed pages within the preReserved Memory Region
        if (lpAddress == nullptr)
        {
            Assert(requestedNumOfSegments != 0);
            freeSegments.ClearRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments));
        }
        PreReservedHeapTrace(_u("MEM_COMMIT: StartAddress: 0x%p of size: 0x%x * 0x%x bytes \n"), addressToReserve, requestedNumOfSegments, AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        return addressToReserve;
    }
}
// Decommits (and on MEM_RELEASE also returns to the free bitvector) a range
// of the pre-reserved region.  The pages are zeroed one at a time through a
// local view, then unlocked in the target process.
BOOL
PreReservedSectionAllocWrapper::Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType)
{
    AutoCriticalSection autocs(&this->cs);
    if (dwSize == 0)
    {
        Assert(false);
        return FALSE;
    }
    if (this->preReservedStartAddress == nullptr)
    {
        Assert(false);
        return FALSE;
    }
    Assert(dwSize % AutoSystemInfo::PageSize == 0);
    // zero one page at a time to minimize working set impact while zeroing
    for (size_t i = 0; i < dwSize / AutoSystemInfo::PageSize; ++i)
    {
        LPVOID localAddr = AllocLocal((char*)lpAddress + i * AutoSystemInfo::PageSize, AutoSystemInfo::PageSize);
        if (localAddr == nullptr)
        {
            return FALSE;
        }
        ZeroMemory(localAddr, AutoSystemInfo::PageSize);
        FreeLocal(localAddr);
    }
    UnlockMemory(this->process, lpAddress, dwSize);
    size_t requestedNumOfSegments = dwSize / AutoSystemInfo::Data.GetAllocationGranularityPageSize();
    Assert(requestedNumOfSegments <= MAXUINT32);
    PreReservedHeapTrace(_u("MEM_DECOMMIT: Address: 0x%p of size: 0x%x bytes\n"), lpAddress, dwSize);
    if ((dwFreeType & MEM_RELEASE) == MEM_RELEASE)
    {
        Assert((uintptr_t)lpAddress >= (uintptr_t)this->preReservedStartAddress);
        AssertMsg(((uintptr_t)lpAddress & (AutoSystemInfo::Data.GetAllocationGranularityPageCount() - 1)) == 0, "Not aligned with Allocation Granularity?");
        AssertMsg(dwSize % AutoSystemInfo::Data.GetAllocationGranularityPageSize() == 0, "Release size should match the allocation granularity size");
        Assert(requestedNumOfSegments != 0);
        // Mark the released segments free again for future AllocPages calls.
        BVIndex freeSegmentsBVIndex = (BVIndex)(((uintptr_t)lpAddress - (uintptr_t)this->preReservedStartAddress) / AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        AssertMsg(freeSegmentsBVIndex < PreReservedAllocationSegmentCount, "Invalid Index ?");
        freeSegments.SetRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments));
        PreReservedHeapTrace(_u("MEM_RELEASE: Address: 0x%p of size: 0x%x * 0x%x bytes\n"), lpAddress, requestedNumOfSegments, AutoSystemInfo::Data.GetAllocationGranularityPageSize());
    }
    return TRUE;
}
// Maps the given range of the pre-reserved region into this process.
LPVOID
PreReservedSectionAllocWrapper::AllocLocal(LPVOID requestAddress, size_t dwSize)
{
    return AllocLocalView(this->section, this->preReservedStartAddress, requestAddress, dwSize);
}
// Unmaps a local view returned by AllocLocal.
BOOL
PreReservedSectionAllocWrapper::FreeLocal(LPVOID lpAddress)
{
    return FreeLocalView(lpAddress);
}
  862. } // namespace Memory
  863. #endif