MarkStack.h
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
// Diagnostics are always enabled in the mark-stack unit-test build.
#ifdef MARKSTACK_TEST
#define MARKSTACK_DIAG 1
#endif
#define DEFAULT_CHUNK_SIZE 2 MEGABYTES // Reserve in 2 MB chunks (NOTE(review): previous comment said 8 MB — stale relative to the value)
#define DEFAULT_PAGE_SIZE 4 KILOBYTES
#define DEFAULT_COMMIT_SIZE ((256 KILOBYTES) - DEFAULT_PAGE_SIZE) // Commit in 256 KB slices, adjusted down by one page for the guard page
  11. namespace Helpers
  12. {
  13. void* CommitMemory(void* address, DWORD size, DWORD flags) throw();
  14. BOOL DecommitMemory(void* address, DWORD size) throw();
  15. void* ReserveMemory(DWORD size, DWORD flags) throw();
  16. BOOL ReleaseMemory(void* address, DWORD size) throw();
  17. };
  18. //
  19. // Definition of the MarkStack class
  20. // This is a guard-page based stack class that's used for the purposes of marking objects in the Recycler
// By default, it reserves a 2 MB chunk of memory, and commits in 256 KB slices (see DEFAULT_CHUNK_SIZE / DEFAULT_COMMIT_SIZE).
// At the end of each committed slice is a guard page. When we've added a slice's worth of entries into the mark stack,
  23. // the stack is moved to the guard page. When we attempt to write to this entry, a trap is triggered.
  24. // At this point, we grow the stack. We then resume execution. Note that an entry is written to the start
  25. // of the guard page when the trap is initially triggered but the rest of the page is left blank.
  26. // When we pop the stack, we *do* do a bounds check. If the stack is not at the start, we simply move the stack back
  27. // and return the current value. If the stack is at the start, we then "pop" the chunk. When the chunk is popped,
  28. // the stack points to the start of the guard page.
  29. //
// The stack consists of "Chunks", which are regions of memory that are initially reserved
  31. // When we need more of the chunk, we commit pages within a chunk. If there are no more pages to commit, we allocate a new chunk.
  32. // Chunks are linked through a doubly linked list whose pointers live on the start of the chunk
  33. //
// By default, Chunks are 2 MB, and commits are in 256 KB ranges.
// There is a preallocated chunk - this means that when the recycler is created, we reserve 2 MB for the purpose of the mark stack
// We commit the first 256 KB slice of this memory up front (so it becomes part of the fixed cost of a Recycler instance)
  37. // The preallocated chunk memory remains reserved for the lifetime of the Recycler
  38. namespace MarkStack
  39. {
// Header of one reserved region backing the mark stack. Chunks form a doubly
// linked list; these links live at the very start of the chunk's memory,
// in front of the stack entries.
struct Chunk
{
    Chunk* next;     // next (more recently pushed) chunk; null for the tail chunk
    Chunk* previous; // previously pushed chunk
};
// A single entry on the mark stack: one object queued for marking.
struct MarkCandidate
{
    void ** obj;      // the candidate object pointer, as passed to Push
    size_t byteCount; // byte count associated with the candidate, as passed to Push
};
  50. template <uint ChunkSize, uint PageSize, uint CommitSize>
  51. class MarkStack
  52. {
  53. public:
  54. __forceinline MarkCandidate* Push(void** obj, size_t byteCount) throw()
  55. {
  56. // We first cache the current stack locally
  57. // We then increment the stack pointer- at this point it could be pointing to an address that is past the guard page
  58. // We then try to write the item to the cached location. If that's within the guard page, the official stack gets adjusted
  59. MarkCandidate* item = (MarkCandidate*)stack;
  60. stack = stack + sizeof(MarkCandidate);
  61. item->obj = obj;
  62. item->byteCount = byteCount;
  63. #if MARKSTACK_DIAG
  64. count++;
  65. #endif
  66. #if DBG
  67. if (this->chunkCount > 0)
  68. {
  69. for (Chunk* chunk = this->preAllocatedChunk; chunk->next != null; chunk = chunk->next)
  70. {
  71. if (chunk->next == null)
  72. {
  73. char* page = (((char*)chunk) + ChunkSize - PageSize);
  74. // Don't write to the old page when a new chunk is pushed.
  75. ::VirtualProtect((LPVOID)page, PageSize, PAGE_NOACCESS, NULL);
  76. }
  77. }
  78. }
  79. #endif
  80. return item;
  81. }
  82. MarkStack() throw();
  83. ~MarkStack() throw();
  84. void Clear() throw();
  85. bool Empty() throw()
  86. {
  87. return (stack == start && chunkTail == preAllocatedChunk);
  88. }
  89. __forceinline MarkCandidate* Pop() throw()
  90. {
  91. // TODO: Can we make this faster?
  92. // One option (at the expense of wasting another 4K/Chunk) is to use a guard page at the start of the chunk too
  93. // Then we can eliminate this bound-check
  94. // Currently, the fast case takes 5 instructions:
  95. // 2 to load stack and start
  96. // 1 to check if they're not equal
  97. // 1 to subtract the stack
  98. // 1 to reload it back into the register
  99. if (stack != start)
  100. {
  101. stack = stack - sizeof(MarkCandidate);
  102. #if MARKSTACK_DIAG
  103. count--;
  104. #endif
  105. return (MarkCandidate*)stack;
  106. }
  107. return SlowPop();
  108. }
  109. #if MARKSTACK_DIAG
  110. size_t Count() { return count; }
  111. #endif
  112. static int HandleException(LPEXCEPTION_POINTERS pEP, Recycler* recycler) throw();
  113. #ifndef MARKSTACK_TEST
  114. private:
  115. #endif
  116. static int HandleExceptionInternal(LPEXCEPTION_POINTERS pEP, MarkStack<ChunkSize, PageSize, CommitSize>* markStack) throw();
  117. char* start;
  118. char* stack;
  119. char* end;
  120. Chunk* preAllocatedChunk;
  121. Chunk* chunkTail;
  122. // We copy the stack contents here if we pop the chunk
  123. // and have to reset the guard page
  124. MarkCandidate cachedEnd;
  125. #if MARKSTACK_DIAG || defined(F_JSETW)
  126. unsigned int chunkCount;
  127. #endif
  128. private:
  129. bool GrowStack() throw();
  130. void FreeNonPreAllocatedChunks() throw();
  131. void CreatePreallocatedChunk() throw();
  132. void ResetPreAllocatedChunk() throw();
  133. __forceinline MarkCandidate* SlowPop() throw();
  134. // Chunk manipulation
  135. Chunk* ReserveMemoryForNewChunk() throw();
  136. void FreeChunk(Chunk* chunk) throw();
  137. char* CommitAndPushChunk(Chunk* chunk) throw();
  138. __forceinline Chunk* PopChunk() throw();
  139. char* CommitNextPage() throw();
  140. // Memory wrappers
  141. void* CommitMemory(void* address, DWORD size, DWORD flags) throw();
  142. BOOL DecommitMemory(void* address, DWORD size) throw();
  143. void* ReserveMemory(DWORD size, DWORD flags) throw();
  144. BOOL ReleaseMemory(void* address, DWORD size) throw();
  145. #if MARKSTACK_DIAG
  146. //
  147. // Debug structure to keep track of every call to VirtualAlloc with MEM_RESERVE
  148. //
  149. struct VirtualAllocationEntry
  150. {
  151. void* address;
  152. DWORD size;
  153. };
  154. typedef SList<VirtualAllocationEntry, HeapAllocator> VAEList;
  155. //
  156. // Find an allocation entry corresponding to the given address
  157. // This is fairly naive- we walk through the list of all entries and find one within
  158. // whose range this address lives
  159. //
  160. VAEList _allocations;
  161. VirtualAllocationEntry* CheckAllocationEntryExists(void* address)
  162. {
  163. VAEList::Iterator iterator(&this->_allocations);
  164. while (iterator.Next())
  165. {
  166. VirtualAllocationEntry& entry = iterator.Data();
  167. // Make sure we don't already have the new address isn't in range of existing allocations
  168. if (entry.address <= address && address < ((char*)entry.address) + entry.size)
  169. {
  170. return (&entry);
  171. }
  172. }
  173. return nullptr;
  174. }
  175. size_t count;
  176. size_t committedBytes;
  177. size_t reservedBytes;
  178. #endif
  179. void* lastGuardPage;
  180. Chunk staticChunk;
  181. };
  182. };
  183. typedef MarkStack::MarkStack<DEFAULT_CHUNK_SIZE, DEFAULT_PAGE_SIZE, DEFAULT_COMMIT_SIZE> RecyclerMarkStack;