// RecyclerWriteBarrierManager.cpp
//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
#include "CommonMemoryPch.h"
// Initialization order
// AB AutoSystemInfo
// AD PerfCounter
// AE PerfCounterSet
// AM Output/Configuration
// AN MemProtectHeap
// AP DbgHelpSymbolManager
// AQ CFGLogger
// AR LeakReport
// AS JavascriptDispatch/RecyclerObjectDumper
// AT HeapAllocator/RecyclerHeuristic
// AU RecyclerWriteBarrierManager
#pragma warning(disable:4075) // initializers put in unrecognized initialization area on purpose
// Place this file's dynamic initializers in the "AU" slot of the ordering above,
// i.e. after AutoSystemInfo and HeapAllocator, which the initializers below use.
#pragma init_seg(".CRT$XCAU")
#ifdef RECYCLER_WRITE_BARRIER
#if ENABLE_DEBUG_CONFIG_OPTIONS
namespace Memory
{
// Hook published by RecyclerWriteBarrierManager::Initialize so debug code elsewhere
// can verify that an address range is NOT barrier-tracked.
FN_VerifyIsNotBarrierAddress* g_verifyIsNotBarrierAddress = nullptr;
}
#endif
#ifdef RECYCLER_WRITE_BARRIER_BYTE
#ifdef _M_X64_OR_ARM64
// On 64-bit targets the card table is too large for a static array, so it is
// reserved up front and committed on demand by the card table manager.
X64WriteBarrierCardTableManager RecyclerWriteBarrierManager::x64CardTableManager;
X64WriteBarrierCardTableManager::CommittedSectionBitVector X64WriteBarrierCardTableManager::committedSections(&HeapAllocator::Instance);
BYTE* RecyclerWriteBarrierManager::cardTable = RecyclerWriteBarrierManager::x64CardTableManager.Initialize();
#else
// Each byte in the card table covers 4096 bytes so the range covered by the table is 4GB
BYTE RecyclerWriteBarrierManager::cardTable[1 * 1024 * 1024];
#if ENABLE_DEBUG_CONFIG_OPTIONS
// Force Initialize() to run during static init (publishes g_verifyIsNotBarrierAddress).
bool dummy = RecyclerWriteBarrierManager::Initialize();
#endif
#endif
#else
// Each *bit* in the card table covers 128 bytes. So each DWORD covers 4096 bytes and therefore the cardTable covers 4GB
DWORD RecyclerWriteBarrierManager::cardTable[1 * 1024 * 1024];
#endif
#ifdef RECYCLER_WRITE_BARRIER_BYTE
#ifdef _M_X64_OR_ARM64
// Commits the card table pages that cover the current thread's stack reservation
// (and, in checked builds, marks that range barrier-enabled) so that writes to
// stack-allocated write-barrier-annotated variables can set card bytes without
// faulting. Returns false if the commit fails.
bool
X64WriteBarrierCardTableManager::OnThreadInit()
{
    // We page in the card table sections for the current threads stack reservation
    // So any writes to stack allocated vars can also have the write barrier set
    // xplat-todo: Replace this on Windows too with GetCurrentThreadStackBounds
#ifdef _WIN32
    // check StackProber.cpp for the stack pages layout information
    NT_TIB* teb = (NT_TIB*) ::NtCurrentTeb();
    char* stackBase = (char*) teb->StackBase;
    char* stackEnd = (char*)__readgsqword(0x1478); // 0x1478 is offset of DeallocationStack field on ntdll!_TEB on x64
    // this is undocumented, verifying with following code
#if DBG
    MEMORY_BASIC_INFORMATION memInfo;
    VirtualQuery((LPCVOID)teb->StackLimit, &memInfo, sizeof(memInfo));
    Assert((char*)memInfo.AllocationBase == stackEnd);
    Assert(memInfo.AllocationProtect == PAGE_READWRITE);
#endif
#else
    ULONG_PTR stackBase = 0;
    ULONG_PTR stackEnd = 0;
    ::GetCurrentThreadStackLimits(&stackEnd, &stackBase);
#endif
#ifdef X64_WB_DIAG
    // Diagnostic breadcrumbs for post-mortem debugging of commit failures.
    this->_stackbase = (char*)stackBase;
    this->_stacklimit = (char*)stackEnd;
#endif
    size_t numPages = (stackBase - stackEnd) / AutoSystemInfo::PageSize;
    // stackEnd is the lower boundary
    bool ret = OnSegmentAlloc((char*) stackEnd, numPages);
#if ENABLE_DEBUG_CONFIG_OPTIONS
    // Mark the stack range barrier-enabled so strict write-barrier checks accept it.
    RecyclerWriteBarrierManager::ToggleBarrier((char*)stackEnd, (stackBase - stackEnd), true);
#endif
    return ret;
}
// Ensures the card table pages covering [segmentAddress, segmentAddress + numPages * pageSize)
// are committed, so write barriers for addresses in that segment can be recorded.
// Already-committed "sections" (one section = one committed page of card table) are
// tracked in the committedSections sparse bit vector; if any section in the range is
// missing, the whole range is (re)committed. Returns false on commit or OOM failure;
// rolls back the bit vector and decommits on OOM. Thread-safe via _cardTableInitCriticalSection.
bool
X64WriteBarrierCardTableManager::OnSegmentAlloc(_In_ char* segmentAddress, size_t numPages)
{
    Assert(_cardTable);
    // SetCommitState records progress for post-mortem diagnosis of failures.
    SetCommitState(OnSegmentAlloc);
    if (segmentAddress >= AutoSystemInfo::Data.lpMaximumApplicationAddress)
    {
        Assert(false); // How did this happen?
        SetCommitState(FailedMaxAddressExceeded);
        Js::Throw::FatalInternalError();
    }
    AutoCriticalSection critSec(&_cardTableInitCriticalSection);
    size_t pageSize = AutoSystemInfo::PageSize;
    // First, check if the pages for this segment have already been committed
    // If they have, there is nothing for us to do here.
    void* segmentEndAddress = segmentAddress + (numPages * pageSize);
    void* segmentLastWritableAddress = (char*)segmentEndAddress - 1;
    BVIndex sectionStartIndex = GetSectionIndex(segmentAddress);
    BVIndex sectionLastIndex = GetSectionIndex(segmentLastWritableAddress);
#ifdef X64_WB_DIAG
    this->_lastSegmentAddress = segmentAddress;
    this->_lastSegmentNumPages = numPages;
    this->_lastSectionIndexStart = sectionStartIndex;
    this->_lastSectionIndexLast = sectionLastIndex;
#endif
    bool needCommit = false;
    for (BVIndex i = sectionStartIndex; i <= sectionLastIndex; i++)
    {
        if (!committedSections.Test(i))
        {
            needCommit = true;
            break;
        }
    }
    if (!needCommit)
    {
        // The pages for this segment have already been committed.
        // We don't need to do anything more, since write barriers can
        // already be set for writes to this segment
        return true;
    }
    SetCommitState(OnNeedCommit);
    // There are uncommitted pages in this range. We'll commit the full range
    // We might commit some pages that are already committed but that's okay
    const uintptr_t startIndex = RecyclerWriteBarrierManager::GetCardTableIndex(segmentAddress);
    const uintptr_t endIndex = RecyclerWriteBarrierManager::GetCardTableIndex(segmentEndAddress);
    Assert(startIndex <= endIndex);
    // Section Start is the card table's starting entry aligned *down* to the page boundary
    // Section End is the card table's ending entry aligned *up* to the page boundary
    BYTE* sectionStart = (BYTE*) (((uintptr_t) &_cardTable[startIndex]) & ~(pageSize - 1));
    BYTE* sectionEnd = (BYTE*) Math::Align<uintptr_t>((uintptr_t)&_cardTable[endIndex], pageSize);
    size_t commitSize = (sectionEnd - sectionStart);
#ifdef X64_WB_DIAG
    _lastSectionStart = sectionStart;
    _lastSectionEnd = sectionEnd;
#endif
    Assert(commitSize > 0);
    Assert(commitSize % pageSize == 0);
    Assert(commitSize / pageSize == sectionLastIndex - sectionStartIndex + 1);
    LPVOID ret = ::VirtualAlloc((LPVOID) sectionStart, commitSize, MEM_COMMIT, PAGE_READWRITE);
    if (!ret)
    {
        // If this is the error that occurred while trying to commit the page, this likely means
        // that the page we tried to commit is outside out reservation, which means that our reservation
        // was too small. This can happen if Windows increases the maximum process address space size
        // If this happens, X64WriteBarrierCardTableManager::Initialize will have to be updated
        Assert(::GetLastError() != ERROR_INVALID_ADDRESS);
        SetCommitState(FailedVirtualAlloc);
        return false;
    }
    SetCommitState(OnSectionCommitted);
    BVIndex sectionIndex = sectionStartIndex;
    try
    {
#ifdef EXCEPTION_CHECK
        AUTO_NESTED_HANDLED_EXCEPTION_TYPE(ExceptionType_DisableCheck);
#endif
        // Record each newly committed section; Set can allocate a sparse bit
        // vector node and therefore throw OOM (handled below).
        for (; sectionIndex <= sectionLastIndex; sectionIndex++)
        {
            committedSections.Set(sectionIndex);
        }
        SetCommitState(OnCommitBitSet);
    }
    catch (Js::OutOfMemoryException)
    {
        SetCommitState(FailedCommitBitSet);
        // We ran out of memory allocating a node for the sparse bit vector, so clean up
        // and return false
        // Since setting sectionIndex threw the exception, we don't clear it, we clear until the index before it
        for (BVIndex i = sectionStartIndex; i < sectionIndex; i++)
        {
            BOOLEAN wasSet = committedSections.TestAndClear(i);
            Assert(wasSet == TRUE);
        }
#pragma prefast(suppress:6250, "This method decommits memory")
        BOOL result = ::VirtualFree((LPVOID)sectionStart, commitSize, MEM_DECOMMIT);
        Assert(result != 0);
        return false;
    }
    return true;
}
// Notification that a segment was freed. Card table pages are deliberately never
// decommitted here: a section may be shared with neighboring segments, and leaving
// it committed is harmless (writes just dirty cards nobody scans). Always succeeds.
bool
X64WriteBarrierCardTableManager::OnSegmentFree(_In_ char* segmentAddress, size_t numPages)
{
    Assert(_cardTable);
    return true;
}
  187. X64WriteBarrierCardTableManager::~X64WriteBarrierCardTableManager()
  188. {
  189. if (_cardTable != nullptr)
  190. {
  191. BOOL fSuccess = ::VirtualFree(_cardTable, 0, MEM_RELEASE);
  192. Assert(fSuccess == TRUE);
  193. }
  194. }
  195. BVIndex
  196. X64WriteBarrierCardTableManager::GetSectionIndex(void* address)
  197. {
  198. size_t pageSize = AutoSystemInfo::PageSize;
  199. size_t sectionSize = (pageSize * pageSize);
  200. BVIndex sectionIndex = (BVIndex)(((uintptr_t)address) / sectionSize);
  201. return sectionIndex;
  202. }
// Reserves (but does not commit) the card table sized to cover the process's
// maximum usable virtual address range, then commits the portion covering the
// current thread's stack via OnThreadInit. Idempotent: subsequent calls reuse
// the existing reservation. Returns the card table base; aborts the process if
// the reservation itself fails. Thread-safe via _cardTableInitCriticalSection.
BYTE *
X64WriteBarrierCardTableManager::Initialize()
{
    AutoCriticalSection critSec(&_cardTableInitCriticalSection);
#if ENABLE_DEBUG_CONFIG_OPTIONS
    // Publishes the VerifyIsNotBarrierAddress hook before any barrier use.
    RecyclerWriteBarrierManager::Initialize();
#endif
    if (_cardTable == nullptr)
    {
        // We have two sizes for the card table on 64 bit builds
        // On Win8.1 and later, the process address space size is 128 TB, so we reserve 32 GB for the card table
        // On Win7, the max address space size is 192 GB, so we reserve 48 MB for the card table.
        // On Win8, reserving 32 GB is fine since reservations don't incur a cost. On Win7, the cost
        // of a reservation can be approximated as 2KB per MB of reserved size. In our case, we take
        // an overhead of 96KB for our card table.
#if defined(ENABLE_VALGRIND)
        // this will fail (cardTable) due to stack ptr > 32GB
#error "Not supported. Disable concurrent GC and try again"
#endif
        // xplat: GetRLimit AS / RSS for ``the maximum size of the process's virtual memory``
        size_t memoryLimit;
        if (!PlatformAgnostic::SystemInfo::GetMaxVirtualMemory(&memoryLimit))
        {
            memoryLimit = (size_t) AutoSystemInfo::Data.lpMaximumApplicationAddress; // try upper limit
        }
        else
        {
            // Safest option : Max RSS can be beyond what we can allocate, aim the smaller one
            memoryLimit = min(memoryLimit, (size_t) AutoSystemInfo::Data.lpMaximumApplicationAddress);
        }
        const unsigned __int64 maxUmProcessAddressSpace = (__int64) memoryLimit;
        // One card byte per page of coverable address space, rounded up to a
        // whole page of card table.
        _cardTableNumEntries = Math::Align<size_t>(maxUmProcessAddressSpace / AutoSystemInfo::PageSize,
            AutoSystemInfo::PageSize) /* s_writeBarrierPageSize */;
        LPVOID cardTableSpace = ::VirtualAlloc(NULL, _cardTableNumEntries, MEM_RESERVE, PAGE_READWRITE);
        if (!cardTableSpace) // Crash Early with a meaningful message. Otherwise the behavior is undefined.
        {
            fprintf(stderr, "Out of Memory\n"); fflush(stderr);
            abort();
        }
        _cardTable = (BYTE*) cardTableSpace;
    }
    OnThreadInit();
    return _cardTable;
}
// Forwards thread-start notification to the x64 card table manager, which
// commits the card table range covering the new thread's stack.
bool
RecyclerWriteBarrierManager::OnThreadInit()
{
    return x64CardTableManager.OnThreadInit();
}
// Forwards segment-allocation notification to the x64 card table manager so the
// card table range covering the segment gets committed.
bool
RecyclerWriteBarrierManager::OnSegmentAlloc(_In_ char* segmentAddress, size_t numPages)
{
    return x64CardTableManager.OnSegmentAlloc(segmentAddress, numPages);
}
// Forwards segment-free notification to the x64 card table manager (a no-op today).
bool
RecyclerWriteBarrierManager::OnSegmentFree(_In_ char* segmentAddress, size_t numPages)
{
    return x64CardTableManager.OnSegmentFree(segmentAddress, numPages);
}
#endif
#else
#error Not implemented for bit-array card table
#endif
// Records a write to 'address' by setting the dirty bit on the card covering it,
// so the concurrent GC rescans that page.
void
RecyclerWriteBarrierManager::WriteBarrier(void * address)
{
#ifdef RECYCLER_WRITE_BARRIER_BYTE
#if ENABLE_DEBUG_CONFIG_OPTIONS
    VerifyIsBarrierAddress(address);
#endif
    const uintptr_t index = GetCardTableIndex(address);
    cardTable[index] |= DIRTYBIT;
#else
    // NOTE(review): this bit-array path is unreachable today (the non-byte
    // configuration hits the #error earlier in this file). If it is ever
    // enabled: it truncates 'address' to uint, and the DBG_DUMP trace below
    // uses 'index', which is only declared in the byte-table branch -- confirm
    // and fix before enabling.
    uint bitShift = (((uint)address) >> s_BitArrayCardTableShift);
    uint bitMask = 1 << bitShift;
    const uint cardIndex = ((uint) address) / (s_BytesPerCard);
    cardTable[cardIndex] |= bitMask;
#endif
#if DBG_DUMP
    // Global to process, use global configuration here
    if (PHASE_VERBOSE_TRACE1(Js::SWBPhase))
    {
        Output::Print(_u("Writing to 0x%p (CIndex: %u)\n"), address, index);
    }
#endif
}
  289. void
  290. RecyclerWriteBarrierManager::WriteBarrier(void * address, size_t bytes)
  291. {
  292. #if ENABLE_DEBUG_CONFIG_OPTIONS
  293. VerifyIsBarrierAddress(address, bytes);
  294. #endif
  295. #ifdef RECYCLER_WRITE_BARRIER_BYTE
  296. uintptr_t startIndex = GetCardTableIndex(address);
  297. char * endAddress = (char *)Math::Align<INT_PTR>((INT_PTR)((char *)address + bytes), s_WriteBarrierPageSize);
  298. uintptr_t endIndex = GetCardTableIndex(endAddress);
  299. Assert(startIndex <= endIndex);
  300. memset(cardTable + startIndex, WRITE_BARRIER_PAGE_BIT | DIRTYBIT, endIndex - startIndex);
  301. GlobalSwbVerboseTrace(_u("Writing to 0x%p (CIndex: %u-%u)\n"), address, startIndex, endIndex);
  302. #else
  303. uint bitShift = (((uint)address) >> s_BitArrayCardTableShift);
  304. uint bitMask = 0xFFFFFFFF << bitShift;
  305. uint cardIndex = ((uint)address) / s_BytesPerCard);
  306. char * endAddress = (char *)Math::Align((INT_PTR)((char *)address + bytes), s_BytesPerCardBit);
  307. char * alignedAddress = (char *)Math::Align((INT_PTR)address, s_WriteBarrierPageSize);
  308. if (alignedAddress > endAddress)
  309. {
  310. uint endAddressShift = (((uint)endAddress) >> s_BitArrayCardTableShift);
  311. uint endAddressBitMask = 0xFFFFFFFF << endAddressShift;
  312. bitMask &= ~endAddressBitMask;
  313. cardTable[cardIndex] |= bitMask;
  314. return;
  315. }
  316. cardTable[cardIndex] |= bitMask;
  317. size_t remainingBytes = endAddress - alignedAddress;
  318. size_t fullMaskCount = remainingBytes / g_WriteBarrierPageSize;
  319. memset(&cardTable[cardIndex + 1], 0xFFFFFFFF, fullMaskCount * sizeof(DWORD));
  320. uint endAddressShift = (((uint)endAddress) >> s_BitArrayCardTableShift);
  321. uint endAddressBitMask = 0xFFFFFFFF << endAddressShift;
  322. cardTable[cardIndex + 1 + fullMaskCount] |= ~endAddressBitMask;
  323. #endif
  324. }
  325. #if ENABLE_DEBUG_CONFIG_OPTIONS
  326. void
  327. RecyclerWriteBarrierManager::ToggleBarrier(void * address, size_t bytes, bool enable)
  328. {
  329. if (CONFIG_FLAG(StrictWriteBarrierCheck))
  330. {
  331. uintptr_t startIndex = GetCardTableIndex(address);
  332. char * endAddress = (char *)Math::Align<INT_PTR>((INT_PTR)((char *)address + bytes), s_WriteBarrierPageSize);
  333. uintptr_t endIndex = GetCardTableIndex(endAddress);
  334. if (enable)
  335. {
  336. for (uintptr_t i = startIndex; i < endIndex; i++)
  337. {
  338. cardTable[i] |= WRITE_BARRIER_PAGE_BIT;
  339. }
  340. }
  341. else
  342. {
  343. for (uintptr_t i = startIndex; i < endIndex; i++)
  344. {
  345. cardTable[i] &= ~WRITE_BARRIER_PAGE_BIT;
  346. }
  347. }
  348. GlobalSwbVerboseTrace(_u("Enableing 0x%p (CIndex: %u-%u)\n"), address, startIndex, endIndex);
  349. }
  350. }
// Returns true if the card covering 'address' is marked as a write-barrier page.
bool
RecyclerWriteBarrierManager::IsBarrierAddress(void * address)
{
    return IsBarrierAddress(GetCardTableIndex(address));
}
// Returns true if the card table entry at 'index' has the write-barrier page
// marker set (i.e. writes through that page are expected to use the barrier).
bool
RecyclerWriteBarrierManager::IsBarrierAddress(uintptr_t index)
{
    return (cardTable[index] & WRITE_BARRIER_PAGE_BIT) == WRITE_BARRIER_PAGE_BIT;
}
// TODO: SWB, looks we didn't initialize card table for heap allocation.
// we didn't hit such issue because we are not allocating write barrier
// annotated struct with heap today.
// after SWB is widely enabled and if an annotated structure can be allocated
// with both Heap and Recycler/Arena we'll capture the issue
//
// Fatal-errors (under -StrictWriteBarrierCheck) if 'address' is not on a page
// that was barrier-enabled via ToggleBarrier.
void
RecyclerWriteBarrierManager::VerifyIsBarrierAddress(void * address)
{
    if (CONFIG_FLAG(StrictWriteBarrierCheck))
    {
        if (!IsBarrierAddress(GetCardTableIndex(address)))
        {
            Js::Throw::FatalInternalError();
        }
    }
}
// Fatal-errors (under -StrictWriteBarrierCheck) if any page in
// [address, address + bytes) is not barrier-enabled.
// NOTE(review): the do/while with post-increment checks indices
// startIndex..endIndex *inclusive*, whereas VerifyIsNotBarrierAddress below
// stops at endIndex - 1 -- confirm whether the extra trailing entry is intended.
void
RecyclerWriteBarrierManager::VerifyIsBarrierAddress(void * address, size_t bytes)
{
    if (CONFIG_FLAG(StrictWriteBarrierCheck))
    {
        uintptr_t startIndex = GetCardTableIndex(address);
        char * endAddress = (char *)Math::Align<INT_PTR>((INT_PTR)((char *)address + bytes), s_WriteBarrierPageSize);
        uintptr_t endIndex = GetCardTableIndex(endAddress);
        do
        {
            // no need to check if cardTable is commited or not, if it's not commited it'll AV instead of assertion
            if (!IsBarrierAddress(startIndex))
            {
                Js::Throw::FatalInternalError();
            }
        } while (startIndex++ < endIndex);
    }
}
// Fatal-errors (under -StrictWriteBarrierCheck) if any page in
// [address, address + bytes) *is* barrier-enabled -- used to assert that
// non-barrier allocations (e.g. plain heap memory) are not tracked.
// Unlike VerifyIsBarrierAddress, uncommitted card table entries are skipped
// rather than dereferenced, since unreached memory legitimately has no cards.
void
RecyclerWriteBarrierManager::VerifyIsNotBarrierAddress(void * address, size_t bytes)
{
    if (CONFIG_FLAG(StrictWriteBarrierCheck))
    {
        uintptr_t startIndex = GetCardTableIndex(address);
        char * endAddress = (char *)Math::Align<INT_PTR>((INT_PTR)((char *)address + bytes), s_WriteBarrierPageSize);
        uintptr_t endIndex = GetCardTableIndex(endAddress);
        do
        {
            if(IsCardTableCommited(startIndex))
            {
                if (IsBarrierAddress(startIndex))
                {
                    Js::Throw::FatalInternalError();
                }
            }
        } while (++startIndex < endIndex);
    }
}
// Publishes the VerifyIsNotBarrierAddress hook for debug checks elsewhere in the
// engine. Always returns true so it can be invoked from a static initializer
// (see the 'dummy' global above).
bool
RecyclerWriteBarrierManager::Initialize()
{
    g_verifyIsNotBarrierAddress = RecyclerWriteBarrierManager::VerifyIsNotBarrierAddress;
    return true;
}
#endif
// Maps an address to its card table index (one card per s_BytesPerCard bytes).
uintptr_t
RecyclerWriteBarrierManager::GetCardTableIndex(void *address)
{
    return ((uintptr_t)address) / s_BytesPerCard;
}
  427. void
  428. RecyclerWriteBarrierManager::ResetWriteBarrier(void * address, size_t pageCount)
  429. {
  430. uintptr_t cardIndex = GetCardTableIndex(address);
  431. #if DBG
  432. for (size_t i = 0; i < pageCount; i++)
  433. {
  434. if (cardTable[cardIndex + i] & DIRTYBIT)
  435. {
  436. cardTable[cardIndex + i] = WRITE_BARRIER_CLEAR_MARK | (cardTable[cardIndex + i] & ~DIRTYBIT);
  437. }
  438. }
  439. #else
  440. if (pageCount == 1)
  441. {
  442. cardTable[cardIndex] = WRITE_BARRIER_PAGE_BIT;
  443. }
  444. else
  445. {
  446. #ifdef RECYCLER_WRITE_BARRIER_BYTE
  447. memset(&cardTable[cardIndex], WRITE_BARRIER_PAGE_BIT, pageCount);
  448. #else
  449. memset(&cardTable[cardIndex], 0, sizeof(DWORD) * pageCount);
  450. #endif
  451. }
  452. #endif
  453. #if DBG_DUMP
  454. // Global to process, use global configuration here
  455. if (PHASE_VERBOSE_TRACE1(Js::SWBPhase))
  456. {
  457. Output::Print(_u("Resetting %u pages at CIndex: %u\n"), address, pageCount, cardIndex);
  458. }
  459. #endif
  460. }
// Returns the raw card table entry covering 'address' (a BYTE in the byte-table
// configuration, a DWORD bitmask otherwise), letting the GC inspect the dirty
// and barrier-page state for that page.
#ifdef RECYCLER_WRITE_BARRIER_BYTE
BYTE
#else
DWORD
#endif
RecyclerWriteBarrierManager::GetWriteBarrier(void * address)
{
    // TODO: SWB remove after all write barrier annotation, this is in order to test the recycler change
    if (CONFIG_FLAG(WriteBarrierTest))
    {
        // Pretend every page is a dirty barrier page so the rescan path is exercised.
        return WRITE_BARRIER_PAGE_BIT | DIRTYBIT;
    }
    else
    {
        return cardTable[GetCardTableIndex(address)];
    }
}
#endif