SectionAllocWrapper.cpp 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "CommonMemoryPch.h"
  6. #if _WIN32
  7. #if ENABLE_OOP_NATIVE_CODEGEN
  8. #include "Core/DelayLoadLibrary.h"
  9. #include "XDataAllocator.h"
  10. #include "CustomHeap.h"
  11. #ifdef NTDDI_WIN10_RS2
  12. #if (NTDDI_VERSION >= NTDDI_WIN10_RS2)
  13. #define USEFILEMAP2 1
  14. #define USEVIRTUALUNLOCKEX 1
  15. #endif
  16. #endif
  17. namespace Memory
  18. {
// Unlocks previously locked pages in 'process'. Used when releasing
// pre-reserved memory (see PreReservedSectionAllocWrapper::Free).
void UnlockMemory(HANDLE process, LPVOID address, SIZE_T size)
{
#if USEVIRTUALUNLOCKEX
    VirtualUnlockEx(process, address, size);
#else
    // Fallback for builds without VirtualUnlockEx: go through the
    // delay-loaded ntdll wrapper.
    NtdllLibrary::Instance->UnlockVirtualMemory(process, &address, &size, NtdllLibrary::MAP_PROCESS);
#endif
}
// Closes a section handle obtained from CreateSection, matching whichever
// API family (Win32 vs ntdll) created it.
void CloseSectionHandle(HANDLE handle)
{
#if USEFILEMAP2
    CloseHandle(handle);
#else
    NtdllLibrary::Instance->Close(handle);
#endif
}
// Creates a pagefile-backed section of 'sectionSize' bytes that can later be
// mapped with execute permission. 'commit' selects SEC_COMMIT (pages backed
// immediately) vs SEC_RESERVE (committed later). Returns nullptr on failure.
HANDLE CreateSection(size_t sectionSize, bool commit)
{
    const ULONG allocAttributes = commit ? SEC_COMMIT : SEC_RESERVE;
#if USEFILEMAP2
#if TARGET_32
    DWORD sizeHigh = 0;
#elif TARGET_64
    // high 32 bits of the 64-bit section size
    DWORD sizeHigh = (DWORD)(sectionSize >> 32);
#endif
    HANDLE handle = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_EXECUTE_READWRITE | allocAttributes, sizeHigh, (DWORD)sectionSize, NULL);
    if (handle == nullptr)
    {
        return nullptr;
    }
#else
    // Builds without USEFILEMAP2: use NtCreateSection via the delay-loaded
    // ntdll wrapper.
    NtdllLibrary::OBJECT_ATTRIBUTES attr;
    NtdllLibrary::Instance->InitializeObjectAttributes(&attr, NULL, NtdllLibrary::OBJ_KERNEL_HANDLE, NULL, NULL);
    LARGE_INTEGER size = { 0 };
#if TARGET_32
    size.LowPart = sectionSize;
#elif TARGET_64
    size.QuadPart = sectionSize;
#endif
    HANDLE handle = nullptr;
    int status = NtdllLibrary::Instance->CreateSection(&handle, SECTION_MAP_READ | SECTION_MAP_WRITE | SECTION_QUERY | SECTION_MAP_EXECUTE, &attr, &size, PAGE_EXECUTE_READWRITE, allocAttributes, NULL);
    if (status != 0)
    {
        return nullptr;
    }
#endif
    return handle;
}
// Unmaps a view previously mapped with MapView from 'process'.
void UnmapView(HANDLE process, PVOID address)
{
#if USEFILEMAP2
    UnmapViewOfFile2(process, address, 0);
#else
    NtdllLibrary::Instance->UnmapViewOfSection(process, address);
#endif
}
  75. PVOID MapView(HANDLE process, HANDLE sectionHandle, size_t size, size_t offset, bool local)
  76. {
  77. PVOID address = nullptr;
  78. DWORD flags = 0;
  79. if (local)
  80. {
  81. if (process != GetCurrentProcess())
  82. {
  83. return nullptr;
  84. }
  85. flags = PAGE_READWRITE;
  86. }
  87. else
  88. {
  89. if (process == GetCurrentProcess())
  90. {
  91. return nullptr;
  92. }
  93. flags = GlobalSecurityPolicy::IsCFGEnabled() ? PAGE_EXECUTE_RO_TARGETS_INVALID : PAGE_EXECUTE_READ;
  94. }
  95. #if USEFILEMAP2
  96. address = MapViewOfFile2(sectionHandle, process, offset, nullptr, size, NULL, flags);
  97. if (local && address != nullptr)
  98. {
  99. address = VirtualAlloc(address, size, MEM_COMMIT, flags);
  100. }
  101. #else
  102. LARGE_INTEGER mapOffset = { 0 };
  103. #if TARGET_32
  104. mapOffset.LowPart = offset;
  105. #elif TARGET_64
  106. mapOffset.QuadPart = offset;
  107. #else
  108. CompileAssert(UNREACHED);
  109. #endif
  110. SIZE_T viewSize = size;
  111. int status = NtdllLibrary::Instance->MapViewOfSection(sectionHandle, process, &address, NULL, viewSize, &mapOffset, &viewSize, NtdllLibrary::ViewUnmap, NULL, flags);
  112. if (status != 0)
  113. {
  114. return nullptr;
  115. }
  116. #endif
  117. return address;
  118. }
#if defined(TARGET_64)
// On 64-bit, each SectionMap32 covers one fixed region starting at
// startAddress (see SectionMap64's per-node usage).
SectionMap32::SectionMap32(__in char * startAddress) :
    startAddress(startAddress),
#else
SectionMap32::SectionMap32() :
#endif
    count(0)
{
    // start with no level-2 chunks allocated
    memset(map, 0, sizeof(map));
#if defined(TARGET_64)
    // the covered region must be aligned to the map's total span
    Assert(((size_t)startAddress) % TotalSize == 0);
#endif
}
  132. SectionMap32::~SectionMap32()
  133. {
  134. for (uint i = 0; i < _countof(map); i++)
  135. {
  136. L2MapChunk * chunk = map[i];
  137. if (chunk)
  138. {
  139. HeapDelete(chunk);
  140. }
  141. }
  142. }
  143. SectionInfo *
  144. SectionMap32::GetSection(void* address)
  145. {
  146. uint id1 = GetLevel1Id(address);
  147. L2MapChunk * l2map = map[id1];
  148. if (l2map == nullptr)
  149. {
  150. return nullptr;
  151. }
  152. return l2map->Get(address);
  153. }
  154. void
  155. SectionMap32::Cleanup()
  156. {
  157. for (uint id1 = 0; id1 < L1Count; id1++)
  158. {
  159. L2MapChunk * l2map = map[id1];
  160. if (l2map != nullptr && l2map->IsEmpty())
  161. {
  162. map[id1] = nullptr;
  163. HeapDelete(l2map);
  164. Assert(count > 0);
  165. count--;
  166. }
  167. }
  168. }
// Ensures a level-2 chunk exists for every page in the range
// [address, address + pageCount pages). On allocation failure, removes any
// empty chunks (including ones created by this call) and returns false.
bool
SectionMap32::EnsureSection(void * address, uint pageCount)
{
    uint id1 = GetLevel1Id(address);
    uint id2 = GetLevel2Id(address);
    // pages covered by the first (possibly partial) chunk
    uint currentPageCount = min(pageCount, L2Count - id2);
    while (true)
    {
        if (map[id1] == nullptr)
        {
            L2MapChunk * newChunk = HeapNewNoThrowZ(L2MapChunk);
            if (newChunk == nullptr)
            {
                // remove any previously allocated L2 maps
                Cleanup();
                return false;
            }
            map[id1] = newChunk;
            count++;
        }
        pageCount -= currentPageCount;
        if (pageCount == 0)
        {
            break;
        }
        // subsequent chunks are consumed from their first slot
        id2 = 0;
        id1++;
        currentPageCount = min(pageCount, L2Count);
    }
    return true;
}
  200. void
  201. SectionMap32::ClearSection(void * address, uint pageCount)
  202. {
  203. uint id1 = GetLevel1Id(address);
  204. uint id2 = GetLevel2Id(address);
  205. uint currentPageCount = min(pageCount, L2Count - id2);
  206. while (true)
  207. {
  208. Assert(map[id1] != nullptr);
  209. map[id1]->Clear(id2, currentPageCount);
  210. pageCount -= currentPageCount;
  211. if (pageCount == 0)
  212. {
  213. return;
  214. }
  215. id2 = 0;
  216. id1++;
  217. currentPageCount = min(pageCount, L2Count);
  218. }
  219. }
  220. void
  221. SectionMap32::SetSectionNoCheck(void * address, uint pageCount, SectionInfo * section)
  222. {
  223. uint id1 = GetLevel1Id(address);
  224. uint id2 = GetLevel2Id(address);
  225. uint currentPageCount = min(pageCount, L2Count - id2);
  226. while (true)
  227. {
  228. Assert(map[id1] != nullptr);
  229. map[id1]->Set(id2, currentPageCount, section);
  230. pageCount -= currentPageCount;
  231. if (pageCount == 0)
  232. {
  233. return;
  234. }
  235. id2 = 0;
  236. id1++;
  237. currentPageCount = min(pageCount, L2Count);
  238. }
  239. }
  240. bool
  241. SectionMap32::SetSection(void * address, uint pageCount, SectionInfo * section)
  242. {
  243. if (!EnsureSection(address, pageCount))
  244. {
  245. return false;
  246. }
  247. SetSectionNoCheck(address, pageCount, section);
  248. return true;
  249. }
  250. SectionMap32::L2MapChunk::~L2MapChunk()
  251. {
  252. for (uint i = 0; i < L2Count; ++i)
  253. {
  254. if (map[i] != nullptr)
  255. {
  256. // in case runtime process has abnormal termination, map may not be empty
  257. CloseSectionHandle(map[i]->handle);
  258. HeapDelete(map[i]);
  259. }
  260. }
  261. }
  262. bool
  263. SectionMap32::L2MapChunk::IsEmpty() const
  264. {
  265. for (uint i = 0; i < L2Count; i++)
  266. {
  267. if (this->map[i] != nullptr)
  268. {
  269. return false;
  270. }
  271. }
  272. return true;
  273. }
  274. void
  275. SectionMap32::L2MapChunk::Clear(uint id2, uint pageCount)
  276. {
  277. uint id2End = id2 + pageCount;
  278. Assert(id2 < L2Count);
  279. Assert(id2End <= L2Count);
  280. for (uint i = id2; i < id2End; i++)
  281. {
  282. __analysis_assume(i < L2Count);
  283. Assert(map[i] != nullptr);
  284. map[i] = nullptr;
  285. }
  286. }
  287. void
  288. SectionMap32::L2MapChunk::Set(uint id2, uint pageCount, SectionInfo * section)
  289. {
  290. uint id2End = id2 + pageCount;
  291. Assert(id2 < L2Count);
  292. Assert(id2End <= L2Count);
  293. for (uint i = id2; i < id2End; i++)
  294. {
  295. __analysis_assume(i < L2Count);
  296. Assert(map[i] == nullptr);
  297. map[i] = section;
  298. }
  299. }
  300. SectionInfo *
  301. SectionMap32::L2MapChunk::Get(void * address)
  302. {
  303. uint id2 = GetLevel2Id(address);
  304. Assert(id2 < L2Count);
  305. __analysis_assume(id2 < L2Count);
  306. return map[id2];
  307. }
  308. #if TARGET_64
// The 64-bit map is a linked list of nodes, one per referenced 4GB region;
// it starts empty.
SectionMap64::SectionMap64() : list(nullptr)
{
}
  312. SectionMap64::~SectionMap64()
  313. {
  314. Node * node = list;
  315. list = nullptr;
  316. while (node != nullptr)
  317. {
  318. Node * next = node->next;
  319. HeapDelete(node);
  320. node = next;
  321. }
  322. }
// Ensures map storage exists for every page in the (possibly multi-4GB)
// range. Returns false if any node or chunk allocation fails.
bool
SectionMap64::EnsureSection(void * address, size_t pageCount)
{
    // offset of 'address' within its 4GB region
    uint lowerBitsAddress = ::Math::PointerCastToIntegralTruncate<uint>(address);
    size_t pageCountLeft = pageCount;
    // pages covered by the first (possibly partial) 4GB node
    uint nodePages = PagesPer4GB - lowerBitsAddress / AutoSystemInfo::PageSize;
    if (pageCountLeft < nodePages)
    {
        nodePages = (uint)pageCountLeft;
    }
    do
    {
        Node * node = FindOrInsertNode(address);
        if (node == nullptr || !node->map.EnsureSection(address, nodePages))
        {
            return false;
        }
        pageCountLeft -= nodePages;
        if (pageCountLeft == 0)
        {
            return true;
        }
        // advance to the start of the next 4GB region
        address = (void *)((size_t)address + (nodePages * AutoSystemInfo::PageSize));
        nodePages = PagesPer4GB;
        if (pageCountLeft < PagesPer4GB)
        {
            nodePages = (uint)pageCountLeft;
        }
    } while (true);
}
  353. void
  354. SectionMap64::SetSectionNoCheck(void * address, size_t pageCount, SectionInfo * section)
  355. {
  356. ForEachNodeInAddressRange(address, pageCount, [&](Node * node, void * address, uint nodePages)
  357. {
  358. Assert(node != nullptr);
  359. node->map.SetSectionNoCheck(address, nodePages, section);
  360. });
  361. }
  362. bool
  363. SectionMap64::SetSection(void * address, uint pageCount, SectionInfo * section)
  364. {
  365. if (!EnsureSection(address, pageCount))
  366. {
  367. return false;
  368. }
  369. SetSectionNoCheck(address, pageCount, section);
  370. return true;
  371. }
  372. SectionInfo *
  373. SectionMap64::GetSection(void * address)
  374. {
  375. Node * node = FindNode(address);
  376. if (node == nullptr)
  377. {
  378. return nullptr;
  379. }
  380. return node->map.GetSection(address);
  381. }
  382. void
  383. SectionMap64::ClearSection(void * address, uint pageCount)
  384. {
  385. ForEachNodeInAddressRange(address, pageCount, [&](Node* node, void* address, uint nodePages)
  386. {
  387. Assert(node != nullptr);
  388. node->map.ClearSection(address, nodePages);
  389. });
  390. }
// Invokes fn(node, rangeStartInNode, pagesInNode) once per 4GB region the
// range spans. 'node' may be nullptr if FindNode has no entry for a region;
// callers decide how to handle that.
template <class Fn>
void SectionMap64::ForEachNodeInAddressRange(void * address, size_t pageCount, Fn fn)
{
    // offset of 'address' within its 4GB region
    uint lowerBitsAddress = ::Math::PointerCastToIntegralTruncate<uint>(address);
    // pages covered by the first (possibly partial) region
    uint nodePages = SectionMap64::PagesPer4GB - lowerBitsAddress / AutoSystemInfo::PageSize;
    if (pageCount < nodePages)
    {
        nodePages = (uint)pageCount;
    }
    do
    {
        Node * node = FindNode(address);
        fn(node, address, nodePages);
        pageCount -= nodePages;
        if (pageCount == 0)
        {
            break;
        }
        // advance to the start of the next 4GB region
        address = (void *)((size_t)address + (nodePages * AutoSystemInfo::PageSize));
        nodePages = SectionMap64::PagesPer4GB;
        if (pageCount < SectionMap64::PagesPer4GB)
        {
            nodePages = (uint)pageCount;
        }
    } while (true);
}
  417. SectionMap64::Node *
  418. SectionMap64::FindOrInsertNode(void * address)
  419. {
  420. Node * node = FindNode(address);
  421. if (node == nullptr)
  422. {
  423. node = HeapNewNoThrowZ(Node, GetNodeStartAddress(address));
  424. if (node != nullptr)
  425. {
  426. node->nodeIndex = GetNodeIndex(address);
  427. node->next = list;
  428. list = node;
  429. }
  430. }
  431. return node;
  432. }
  433. SectionMap64::Node *
  434. SectionMap64::FindNode(void * address) const
  435. {
  436. uint index = GetNodeIndex(address);
  437. Node * node = list;
  438. while (node != nullptr)
  439. {
  440. if (node->nodeIndex == index)
  441. {
  442. return node;
  443. }
  444. node = node->next;
  445. }
  446. return nullptr;
  447. }
  448. #endif //TARGET_64
  449. static const uint SectionAlignment = 65536;
  450. PVOID
  451. AllocLocalView(HANDLE sectionHandle, LPVOID remoteBaseAddr, LPVOID remoteRequestAddress, size_t requestSize)
  452. {
  453. const size_t offset = (uintptr_t)remoteRequestAddress - (uintptr_t)remoteBaseAddr;
  454. const size_t offsetAlignment = offset % SectionAlignment;
  455. const size_t alignedOffset = offset - offsetAlignment;
  456. const size_t viewSize = requestSize + offsetAlignment;
  457. PVOID address = MapView(GetCurrentProcess(), sectionHandle, viewSize, alignedOffset, true);
  458. if (address == nullptr)
  459. {
  460. return nullptr;
  461. }
  462. return (PVOID)((uintptr_t)address + offsetAlignment);
  463. }
  464. BOOL
  465. FreeLocalView(LPVOID lpAddress)
  466. {
  467. const size_t alignment = (uintptr_t)lpAddress % SectionAlignment;
  468. UnmapView(GetCurrentProcess(), (LPVOID)((uintptr_t)lpAddress - alignment));
  469. return TRUE;
  470. }
// 'process' is the remote runtime process that will execute the JIT'd code.
SectionAllocWrapper::SectionAllocWrapper(HANDLE process) :
    process(process)
{
    Assert(process != GetCurrentProcess()); // only use sections when OOP
}
// Allocates JIT code pages in the remote runtime process.
// requestAddress == nullptr: create a new section, map it (execute-only) into
//   the runtime process, and register it in 'sections'.
// requestAddress != nullptr: the address must belong to a known section; the
//   pages are re-zeroed via temporary local views (commit of an existing
//   reservation). Returns the runtime-process address, or nullptr on failure.
LPVOID
SectionAllocWrapper::AllocPages(LPVOID requestAddress, size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
{
    if (pageCount > AutoSystemInfo::MaxPageCount)
    {
        return nullptr;
    }
    size_t dwSize = pageCount * AutoSystemInfo::PageSize;
    Assert(isCustomHeapAllocation);
    LPVOID address = nullptr;
#if defined(ENABLE_JIT_CLAMP)
    // REVIEW: is this needed?
    AutoEnableDynamicCodeGen enableCodeGen(true);
#endif
    HANDLE sectionHandle = nullptr;
    SectionInfo * section = nullptr;
    // for new allocations, create new section and fully map it (reserved) into runtime process
    if (requestAddress == nullptr)
    {
        sectionHandle = CreateSection(dwSize, ((allocationType & MEM_COMMIT) == MEM_COMMIT));
        if (sectionHandle == nullptr)
        {
            goto FailureCleanup;
        }
        // size 0 maps the whole section
        address = MapView(this->process, sectionHandle, 0, 0, false);
        if(address == nullptr)
        {
            goto FailureCleanup;
        }
        section = HeapNewNoThrowStruct(SectionInfo);
        if (section == nullptr)
        {
            goto FailureCleanup;
        }
        section->handle = sectionHandle;
        section->runtimeBaseAddress = address;
        if (!sections.SetSection(address, (uint)(dwSize / AutoSystemInfo::PageSize), section))
        {
            goto FailureCleanup;
        }
    }
    else
    {
        // unknown address: not one of our sections
        if (!sections.GetSection(requestAddress))
        {
            return nullptr;
        }
        // pages could be filled with debugbreak
        // zero one page at a time to minimize working set impact while zeroing
        for (size_t i = 0; i < dwSize / AutoSystemInfo::PageSize; ++i)
        {
            LPVOID localAddr = AllocLocal((char*)requestAddress + i * AutoSystemInfo::PageSize, AutoSystemInfo::PageSize);
            if (localAddr == nullptr)
            {
                return nullptr;
            }
            ZeroMemory(localAddr, AutoSystemInfo::PageSize);
            FreeLocal(localAddr);
        }
        address = requestAddress;
    }
    return address;
FailureCleanup:
    // if section allocation failed, free whatever we started to allocate
    if (sectionHandle != nullptr)
    {
        CloseSectionHandle(sectionHandle);
    }
    if (address != nullptr)
    {
        UnmapView(this->process, address);
    }
    if (section != nullptr)
    {
        HeapDelete(section);
    }
    return nullptr;
}
  554. bool
  555. SectionAllocWrapper::GetFileInfo(LPVOID address, HANDLE* fileHandle, PVOID* baseAddress)
  556. {
  557. SectionInfo* sectionInfo = sections.GetSection(address);
  558. if (!sectionInfo)
  559. {
  560. return false;
  561. }
  562. *fileHandle = sectionInfo->handle;
  563. *baseAddress = sectionInfo->runtimeBaseAddress;
  564. return true;
  565. }
  566. LPVOID
  567. SectionAllocWrapper::AllocLocal(LPVOID requestAddress, size_t dwSize)
  568. {
  569. SectionInfo * section = sections.GetSection(requestAddress);
  570. Assert(section);
  571. return AllocLocalView(section->handle, section->runtimeBaseAddress, requestAddress, dwSize);
  572. }
// Releases a local view obtained from AllocLocal.
BOOL SectionAllocWrapper::FreeLocal(LPVOID lpAddress)
{
    return FreeLocalView(lpAddress);
}
  577. BOOL SectionAllocWrapper::Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType)
  578. {
  579. Assert(dwSize % AutoSystemInfo::PageSize == 0);
  580. Assert(this->process != GetCurrentProcess()); // only use sections when OOP
  581. if ((dwFreeType & MEM_RELEASE) == MEM_RELEASE)
  582. {
  583. SectionInfo * section = sections.GetSection(lpAddress);
  584. Assert(section);
  585. Assert(section->runtimeBaseAddress == lpAddress);
  586. sections.ClearSection(lpAddress, (uint)(dwSize / AutoSystemInfo::PageSize));
  587. UnmapView(this->process, lpAddress);
  588. CloseSectionHandle(section->handle);
  589. }
  590. else
  591. {
  592. Assert((dwFreeType & MEM_DECOMMIT) == MEM_DECOMMIT);
  593. for (size_t i = 0; i < dwSize / AutoSystemInfo::PageSize; ++i)
  594. {
  595. LPVOID localAddr = AllocLocal((char*)lpAddress + i * AutoSystemInfo::PageSize, AutoSystemInfo::PageSize);
  596. if (localAddr == nullptr)
  597. {
  598. return FALSE;
  599. }
  600. CustomHeap::FillDebugBreak((BYTE*)localAddr, AutoSystemInfo::PageSize);
  601. FreeLocal(localAddr);
  602. }
  603. }
  604. return TRUE;
  605. }
/*
* class PreReservedVirtualAllocWrapper
*/
#if !TARGET_64 && _CONTROL_FLOW_GUARD
// Process-wide count of live pre-reserved segments (32-bit CFG builds only);
// updated with Interlocked* but read without a lock (see the note at its use).
// TODO: this should be on runtime process
uint PreReservedSectionAllocWrapper::numPreReservedSegment = 0;
#endif
// 'process' is the remote runtime process; the region itself is reserved
// lazily by EnsurePreReservedRegion.
PreReservedSectionAllocWrapper::PreReservedSectionAllocWrapper(HANDLE process) :
    preReservedStartAddress(nullptr),
    process(process),
    cs(4000),   // critical-section spin count
    section(nullptr)
{
    Assert(process != GetCurrentProcess()); // only use sections when OOP
    // every segment starts out free
    freeSegments.SetAll();
}
// Unmaps the pre-reserved region from the runtime process and closes the
// backing section (if the region was ever created).
PreReservedSectionAllocWrapper::~PreReservedSectionAllocWrapper()
{
    if (IsPreReservedRegionPresent())
    {
        UnmapView(this->process, this->preReservedStartAddress);
        CloseSectionHandle(this->section);
        PreReservedHeapTrace(_u("MEM_RELEASE the PreReservedSegment. Start Address: 0x%p, Size: 0x%x * 0x%x bytes"), this->preReservedStartAddress, PreReservedAllocationSegmentCount,
            AutoSystemInfo::Data.GetAllocationGranularityPageSize());
#if !TARGET_64 && _CONTROL_FLOW_GUARD
        // keep the process-wide segment count in sync
        Assert(numPreReservedSegment > 0);
        InterlockedDecrement(&PreReservedSectionAllocWrapper::numPreReservedSegment);
#endif
    }
}
  636. bool
  637. PreReservedSectionAllocWrapper::IsPreReservedRegionPresent()
  638. {
  639. return this->preReservedStartAddress != nullptr;
  640. }
// True when 'address' falls inside the pre-reserved region (false if the
// region has not been created yet).
bool
PreReservedSectionAllocWrapper::IsInRange(void * address)
{
    if (!this->IsPreReservedRegionPresent())
    {
        return false;
    }
    bool isInRange = IsInRange(GetPreReservedStartAddress(), address);
#if DBG
    if (isInRange)
    {
        // Debug sanity check against the runtime process: an in-range address
        // should be committed execute-read (bytes == 0 means the query itself
        // failed, which we tolerate).
        MEMORY_BASIC_INFORMATION memBasicInfo;
        size_t bytes = VirtualQueryEx(this->process, address, &memBasicInfo, sizeof(memBasicInfo));
        Assert(bytes == 0 || (memBasicInfo.State == MEM_COMMIT && memBasicInfo.AllocationProtect == PAGE_EXECUTE_READ));
    }
#endif
    return isInRange;
}
  659. /* static */
  660. bool
  661. PreReservedSectionAllocWrapper::IsInRange(void * regionStart, void * address)
  662. {
  663. if (!regionStart)
  664. {
  665. return false;
  666. }
  667. if (address >= regionStart && address < GetPreReservedEndAddress(regionStart))
  668. {
  669. return true;
  670. }
  671. return false;
  672. }
// Start of the pre-reserved region in the runtime process (nullptr if the
// region has not been created).
LPVOID
PreReservedSectionAllocWrapper::GetPreReservedStartAddress()
{
    return this->preReservedStartAddress;
}
// One-past-the-end of the pre-reserved region; only valid once the region
// exists.
LPVOID
PreReservedSectionAllocWrapper::GetPreReservedEndAddress()
{
    Assert(IsPreReservedRegionPresent());
    return GetPreReservedEndAddress(this->preReservedStartAddress);
}
/* static */
// end = start + segments * pages-per-allocation-granularity * page size
LPVOID
PreReservedSectionAllocWrapper::GetPreReservedEndAddress(void * regionStart)
{
    return (char*)regionStart + (PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageCount() * AutoSystemInfo::PageSize);
}
// Returns the pre-reserved region's start address, reserving it on first use.
LPVOID PreReservedSectionAllocWrapper::EnsurePreReservedRegion()
{
    // fast path: unsynchronized read; the pointer is only ever assigned once,
    // under the critical section taken below
    LPVOID startAddress = this->preReservedStartAddress;
    if (startAddress != nullptr)
    {
        return startAddress;
    }
    {
        AutoCriticalSection autocs(&this->cs);
        // re-checks the pointer under the lock
        return EnsurePreReservedRegionInternal();
    }
}
  702. LPVOID PreReservedSectionAllocWrapper::EnsurePreReservedRegionInternal()
  703. {
  704. LPVOID startAddress = this->preReservedStartAddress;
  705. if (startAddress != nullptr)
  706. {
  707. return startAddress;
  708. }
  709. size_t bytes = PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageSize();
  710. if (PHASE_FORCE1(Js::PreReservedHeapAllocPhase))
  711. {
  712. HANDLE sectionHandle = CreateSection(bytes, false);
  713. if (sectionHandle == nullptr)
  714. {
  715. return nullptr;
  716. }
  717. startAddress = MapView(this->process, sectionHandle, 0, 0, false);
  718. if (startAddress == nullptr)
  719. {
  720. CloseSectionHandle(sectionHandle);
  721. return nullptr;
  722. }
  723. PreReservedHeapTrace(_u("Reserving PreReservedSegment For the first time(CFG Non-Enabled). Address: 0x%p\n"), this->preReservedStartAddress);
  724. this->preReservedStartAddress = startAddress;
  725. this->section = sectionHandle;
  726. return startAddress;
  727. }
  728. #if defined(_CONTROL_FLOW_GUARD)
  729. bool supportPreReservedRegion = true;
  730. #if TARGET_32
  731. #if _M_IX86
  732. // We want to restrict the number of prereserved segment for 32-bit process so that we don't use up the address space
  733. // Note: numPreReservedSegment is for the whole process, and access and update to it is not protected by a global lock.
  734. // So we may allocate more than the maximum some of the time if multiple thread check it simutaniously and allocate pass the limit.
  735. // It doesn't affect functionally, and it should be OK if we exceed.
  736. if (PreReservedSectionAllocWrapper::numPreReservedSegment > PreReservedSectionAllocWrapper::MaxPreReserveSegment)
  737. {
  738. supportPreReservedRegion = false;
  739. }
  740. #else
  741. // TODO: fast check for prereserved segment is not implementated in ARM yet, so it is only enabled for x86
  742. supportPreReservedRegion = false;
  743. #endif // _M_IX86
  744. #endif
  745. if (GlobalSecurityPolicy::IsCFGEnabled() && supportPreReservedRegion)
  746. {
  747. HANDLE sectionHandle = CreateSection(bytes, false);
  748. if (sectionHandle == nullptr)
  749. {
  750. return nullptr;
  751. }
  752. startAddress = MapView(this->process, sectionHandle, 0, 0, false);
  753. if (startAddress == nullptr)
  754. {
  755. CloseSectionHandle(sectionHandle);
  756. return nullptr;
  757. }
  758. PreReservedHeapTrace(_u("Reserving PreReservedSegment For the first time(CFG Enabled). Address: 0x%p\n"), this->preReservedStartAddress);
  759. this->preReservedStartAddress = startAddress;
  760. this->section = sectionHandle;
  761. #if TARGET_32
  762. if (startAddress)
  763. {
  764. InterlockedIncrement(&PreReservedSectionAllocWrapper::numPreReservedSegment);
  765. }
  766. #endif
  767. }
  768. #endif
  769. return startAddress;
  770. }
// Allocates pages out of the pre-reserved region.
// lpAddress == nullptr: find a run of free allocation-granularity segments in
//   the freeSegments bit vector and hand out its address.
// lpAddress != nullptr: re-commit an address previously handed out; the pages
//   are re-zeroed through temporary local views.
// Returns the runtime-process address, or nullptr on failure.
LPVOID PreReservedSectionAllocWrapper::AllocPages(LPVOID lpAddress, DECLSPEC_GUARD_OVERFLOW size_t pageCount, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
{
    if (pageCount > AutoSystemInfo::MaxPageCount)
    {
        return nullptr;
    }
    size_t dwSize = pageCount * AutoSystemInfo::PageSize;
    AssertMsg(isCustomHeapAllocation, "PreReservation used for allocations other than CustomHeap?");
    Assert(dwSize != 0);
    {
        AutoCriticalSection autocs(&this->cs);
        //Return nullptr, if no space to Reserve
        if (EnsurePreReservedRegionInternal() == nullptr)
        {
            PreReservedHeapTrace(_u("No space to pre-reserve memory with %d pages. Returning NULL\n"), PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageCount());
            return nullptr;
        }
        char * addressToReserve = nullptr;
        uint freeSegmentsBVIndex = BVInvalidIndex;
        size_t requestedNumOfSegments = dwSize / (AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        Assert(requestedNumOfSegments <= MAXUINT32);
        if (lpAddress == nullptr)
        {
            Assert(requestedNumOfSegments != 0);
            AssertMsg(dwSize % AutoSystemInfo::Data.GetAllocationGranularityPageSize() == 0, "dwSize should be aligned with Allocation Granularity");
            // scan for the first run of free segments big enough for the request
            do
            {
                freeSegmentsBVIndex = freeSegments.GetNextBit(freeSegmentsBVIndex + 1);
                //Return nullptr, if we don't have free/decommit pages to allocate
                if ((freeSegments.Length() - freeSegmentsBVIndex < requestedNumOfSegments) ||
                    freeSegmentsBVIndex == BVInvalidIndex)
                {
                    PreReservedHeapTrace(_u("No more space to commit in PreReserved Memory region.\n"));
                    return nullptr;
                }
            } while (!freeSegments.TestRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments)));
            uint offset = freeSegmentsBVIndex * AutoSystemInfo::Data.GetAllocationGranularityPageSize();
            addressToReserve = (char*)this->preReservedStartAddress + offset;
        }
        else
        {
            //Check If the lpAddress is within the range of the preReserved Memory Region
            // NOTE(review): '||' lets this assert pass when only one bound
            // holds; a strict range check would use '&&' -- confirm intent.
            Assert(((char*)lpAddress) >= (char*)this->preReservedStartAddress || ((char*)lpAddress + dwSize) < GetPreReservedEndAddress());
            addressToReserve = (char*)lpAddress;
            freeSegmentsBVIndex = (uint)((addressToReserve - (char*)this->preReservedStartAddress) / AutoSystemInfo::Data.GetAllocationGranularityPageSize());
            // pages could be filled with debugbreak
            // zero one page at a time to minimize working set impact while zeroing
            for (size_t i = 0; i < dwSize / AutoSystemInfo::PageSize; ++i)
            {
                LPVOID localAddr = AllocLocal((char*)lpAddress + i * AutoSystemInfo::PageSize, AutoSystemInfo::PageSize);
                if (localAddr == nullptr)
                {
                    return nullptr;
                }
                ZeroMemory(localAddr, AutoSystemInfo::PageSize);
                FreeLocal(localAddr);
            }
#if DBG
            // re-commits must target segments that are currently marked in-use
            uint numOfSegments = (uint)ceil((double)dwSize / (double)AutoSystemInfo::Data.GetAllocationGranularityPageSize());
            Assert(numOfSegments != 0);
            Assert(freeSegmentsBVIndex + numOfSegments - 1 < freeSegments.Length());
            Assert(!freeSegments.TestRange(freeSegmentsBVIndex, numOfSegments));
#endif
        }
        AssertMsg(freeSegmentsBVIndex < PreReservedAllocationSegmentCount, "Invalid BitVector index calculation?");
        AssertMsg(dwSize % AutoSystemInfo::PageSize == 0, "COMMIT is managed at AutoSystemInfo::PageSize granularity");
        // Keep track of the committed pages within the preReserved Memory Region
        if (lpAddress == nullptr)
        {
            Assert(requestedNumOfSegments != 0);
            freeSegments.ClearRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments));
        }
        PreReservedHeapTrace(_u("MEM_COMMIT: StartAddress: 0x%p of size: 0x%x * 0x%x bytes \n"), addressToReserve, requestedNumOfSegments, AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        return addressToReserve;
    }
}
// Frees pages back into the pre-reserved region. In both modes the pages are
// scrubbed through temporary local views (zeroed for MEM_RELEASE, filled with
// debug-break bytes for MEM_DECOMMIT); MEM_RELEASE additionally marks the
// segments free again and unlocks the memory in the runtime process.
BOOL
PreReservedSectionAllocWrapper::Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType)
{
    AutoCriticalSection autocs(&this->cs);
    if (dwSize == 0)
    {
        Assert(false);
        return FALSE;
    }
    if (this->preReservedStartAddress == nullptr)
    {
        Assert(false);
        return FALSE;
    }
    Assert(dwSize % AutoSystemInfo::PageSize == 0);
    // zero one page at a time to minimize working set impact while zeroing
    for (size_t i = 0; i < dwSize / AutoSystemInfo::PageSize; ++i)
    {
        LPVOID localAddr = AllocLocal((char*)lpAddress + i * AutoSystemInfo::PageSize, AutoSystemInfo::PageSize);
        if (localAddr == nullptr)
        {
            return FALSE;
        }
        if ((dwFreeType & MEM_RELEASE) == MEM_RELEASE)
        {
            ZeroMemory(localAddr, AutoSystemInfo::PageSize);
        }
        else
        {
            CustomHeap::FillDebugBreak((BYTE*)localAddr, AutoSystemInfo::PageSize);
        }
        FreeLocal(localAddr);
    }
    size_t requestedNumOfSegments = dwSize / AutoSystemInfo::Data.GetAllocationGranularityPageSize();
    Assert(requestedNumOfSegments <= MAXUINT32);
    // NOTE(review): this trace fires for both decommit and release requests
    // despite the MEM_DECOMMIT label
    PreReservedHeapTrace(_u("MEM_DECOMMIT: Address: 0x%p of size: 0x%x bytes\n"), lpAddress, dwSize);
    if ((dwFreeType & MEM_RELEASE) == MEM_RELEASE)
    {
        Assert((uintptr_t)lpAddress >= (uintptr_t)this->preReservedStartAddress);
        AssertMsg(((uintptr_t)lpAddress & (AutoSystemInfo::Data.GetAllocationGranularityPageCount() - 1)) == 0, "Not aligned with Allocation Granularity?");
        AssertMsg(dwSize % AutoSystemInfo::Data.GetAllocationGranularityPageSize() == 0, "Release size should match the allocation granularity size");
        Assert(requestedNumOfSegments != 0);
        // mark the segments free again in the tracking bit vector
        BVIndex freeSegmentsBVIndex = (BVIndex)(((uintptr_t)lpAddress - (uintptr_t)this->preReservedStartAddress) / AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        AssertMsg(freeSegmentsBVIndex < PreReservedAllocationSegmentCount, "Invalid Index ?");
        freeSegments.SetRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments));
        PreReservedHeapTrace(_u("MEM_RELEASE: Address: 0x%p of size: 0x%x * 0x%x bytes\n"), lpAddress, requestedNumOfSegments, AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        UnlockMemory(this->process, lpAddress, dwSize);
    }
    return TRUE;
}
// Reports the single section (and its base) backing the whole pre-reserved
// region. 'address' is not consulted beyond requiring the region to exist;
// callers presumably pass an address inside the region -- verify at call sites.
bool
PreReservedSectionAllocWrapper::GetFileInfo(LPVOID address, HANDLE* fileHandle, PVOID* baseAddress)
{
    if (!this->IsPreReservedRegionPresent())
    {
        return false;
    }
    *fileHandle = this->section;
    *baseAddress = this->preReservedStartAddress;
    return true;
}
// Maps a local writable view of the pre-reserved section bytes backing
// [requestAddress, +dwSize) in the runtime process.
LPVOID
PreReservedSectionAllocWrapper::AllocLocal(LPVOID requestAddress, size_t dwSize)
{
    return AllocLocalView(this->section, this->preReservedStartAddress, requestAddress, dwSize);
}
// Releases a local view obtained from AllocLocal.
BOOL
PreReservedSectionAllocWrapper::FreeLocal(LPVOID lpAddress)
{
    return FreeLocalView(lpAddress);
}
  918. } // namespace Memory
  919. #endif
  920. #endif