// Encoder.cpp
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "BackEnd.h"
  6. ///----------------------------------------------------------------------------
  7. ///
  8. /// Encoder::Encode
  9. ///
  10. /// Main entrypoint of encoder. Encode each IR instruction into the
  11. /// appropriate machine encoding.
  12. ///
  13. ///----------------------------------------------------------------------------
void
Encoder::Encode()
{
    // Scratch arena for everything the encoder builds; torn down wholesale when
    // this function returns. The allocator throws OOM instead of returning null.
    NoRecoverMemoryArenaAllocator localAlloc(L"BE-Encoder", m_func->m_alloc->GetPageAllocator(), Js::Throw::OutOfMemory);
    m_tempAlloc = &localAlloc;

    uint32 instrCount = m_func->GetInstrCount();
    size_t totalJmpTableSizeInBytes = 0;
    JmpTableList * jumpTableListForSwitchStatement = nullptr;

    m_encoderMD.Init(this);

    // Worst-case sized buffer: every IR instruction at the machine's maximum
    // encoding size (overflow-checked multiply), plus room reserved at the tail
    // for any switch-statement jump tables.
    m_encodeBufferSize = UInt32Math::Mul(instrCount, MachMaxInstrSize);
    m_encodeBufferSize += m_func->m_totalJumpTableSizeInBytesForSwitchStatements;
    m_encodeBuffer = AnewArray(m_tempAlloc, BYTE, m_encodeBufferSize);
#if DBG_DUMP
    // Per-instruction emit offsets, used by the IR dump at the end of this function.
    m_instrNumber = 0;
    m_offsetBuffer = AnewArray(m_tempAlloc, uint, instrCount);
#endif
    m_pragmaInstrToRecordMap = Anew(m_tempAlloc, PragmaInstrList, m_tempAlloc);
    if (DoTrackAllStatementBoundary())
    {
        // Create a new list, if we are tracking all statement boundaries.
        m_pragmaInstrToRecordOffset = Anew(m_tempAlloc, PragmaInstrList, m_tempAlloc);
    }
    else
    {
        // Set the list to the same as the throw map list, so that processing of the list
        // of pragma are done on those only.
        m_pragmaInstrToRecordOffset = m_pragmaInstrToRecordMap;
    }

#if defined(_M_IX86) || defined(_M_X64)
    // for BR shortening
    m_inlineeFrameRecords = Anew(m_tempAlloc, InlineeFrameRecords, m_tempAlloc);
#endif

    // m_pc is the write cursor into the encode buffer.
    m_pc = m_encodeBuffer;
    m_inlineeFrameMap = Anew(m_tempAlloc, InlineeFrameMap, m_tempAlloc);
    m_bailoutRecordMap = Anew(m_tempAlloc, BailoutRecordMap, m_tempAlloc);

    CodeGenWorkItem* workItem = m_func->m_workItem;
    uint loopNum = Js::LoopHeader::NoLoop;
    if (workItem->Type() == JsLoopBodyWorkItemType)
    {
        loopNum = ((JsLoopBodyCodeGen*)workItem)->GetLoopNumber();
    }
    // NOTE(review): loopNum is not referenced again in this function as visible
    // here — presumably consumed by native-map recording in some builds; confirm.

    Js::SmallSpanSequenceIter iter;
    IR::PragmaInstr* pragmaInstr = nullptr;
    uint32 pragmaOffsetInBuffer = 0;
#ifdef _M_X64
    // True while emitting between PrologStart/PrologEnd pragma markers; those
    // instructions are fed to the prolog encoder for unwind-info generation.
    bool inProlog = false;
#endif
    bool isCallInstr = false;

    // Pass 1: walk the lowered IR and emit machine code for each instruction.
    FOREACH_INSTR_IN_FUNC(instr, m_func)
    {
        Assert(Lowerer::ValidOpcodeAfterLower(instr, m_func));

        // Only emit if the worst-case encoding of this instruction still fits.
        if (GetCurrentOffset() + MachMaxInstrSize < m_encodeBufferSize)
        {
            ptrdiff_t count;
#if DBG_DUMP
            AssertMsg(m_instrNumber < instrCount, "Bad instr count?");
            __analysis_assume(m_instrNumber < instrCount);
            m_offsetBuffer[m_instrNumber++] = GetCurrentOffset();
#endif
            if (instr->IsPragmaInstr())
            {
                // Pragma instructions emit no machine code; they annotate the stream.
                switch(instr->m_opcode)
                {
#ifdef _M_X64
                case Js::OpCode::PrologStart:
                    inProlog = true;
                    continue;
                case Js::OpCode::PrologEnd:
                    inProlog = false;
                    continue;
#endif
                case Js::OpCode::StatementBoundary:
                    // Remember the buffer offset of this source-statement boundary.
                    pragmaOffsetInBuffer = GetCurrentOffset();
                    pragmaInstr = instr->AsPragmaInstr();
                    pragmaInstr->m_offsetInBuffer = pragmaOffsetInBuffer;
                    // will record after BR shortening with adjusted offsets
                    if (DoTrackAllStatementBoundary())
                    {
                        m_pragmaInstrToRecordOffset->Add(pragmaInstr);
                    }
                    break;
                default:
                    continue;
                }
            }
            else if (instr->IsBranchInstr() && instr->AsBranchInstr()->IsMultiBranch())
            {
                Assert(instr->GetSrc1() && instr->GetSrc1()->IsRegOpnd());
                IR::MultiBranchInstr * multiBranchInstr = instr->AsBranchInstr()->AsMultiBrInstr();

                if (multiBranchInstr->m_isSwitchBr &&
                    (multiBranchInstr->m_kind == IR::MultiBranchInstr::IntJumpTable || multiBranchInstr->m_kind == IR::MultiBranchInstr::SingleCharStrJumpTable))
                {
                    // Jump-table switch: defer the table itself; it is copied to the
                    // tail of the code buffer after encoding (see
                    // TryCopyAndAddRelocRecordsForSwitchJumpTableEntries).
                    BranchJumpTableWrapper * branchJumpTableWrapper = multiBranchInstr->GetBranchJumpTable();
                    if (jumpTableListForSwitchStatement == nullptr)
                    {
                        jumpTableListForSwitchStatement = Anew(m_tempAlloc, JmpTableList, m_tempAlloc);
                    }
                    jumpTableListForSwitchStatement->Add(branchJumpTableWrapper);
                    totalJmpTableSizeInBytes += (branchJumpTableWrapper->tableSize * sizeof(void*));
                }
                else
                {
                    //Reloc Records
                    // Non-table multi-branch: each target slot needs a label reloc.
                    EncoderMD * encoderMD = &(this->m_encoderMD);
                    multiBranchInstr->MapMultiBrTargetByAddress([=](void ** offset) -> void
                    {
#if defined(_M_ARM32_OR_ARM64)
                        encoderMD->AddLabelReloc((byte*) offset);
#else
                        encoderMD->AppendRelocEntry(RelocTypeLabelUse, (void*) (offset));
#endif
                    });
                }
            }
            else
            {
                isCallInstr = LowererMD::IsCall(instr);
                if (pragmaInstr && (instr->isInlineeEntryInstr || isCallInstr))
                {
                    // will record throw map after BR shortening with adjusted offsets
                    m_pragmaInstrToRecordMap->Add(pragmaInstr);
                    pragmaInstr = nullptr; // Only once per pragma instr -- do we need to make this record?
                }

                if (instr->HasBailOutInfo())
                {
                    Assert(this->m_func->hasBailout);
                    Assert(LowererMD::IsCall(instr));
                    instr->GetBailOutInfo()->FinalizeBailOutRecord(this->m_func);
                }

                if (instr->isInlineeEntryInstr)
                {
                    // Patch the inlinee call-info constant with the current offset.
                    m_encoderMD.EncodeInlineeCallInfo(instr, GetCurrentOffset());
                }

                if (instr->m_opcode == Js::OpCode::InlineeStart)
                {
                    Func* inlinee = instr->m_func;
                    if (inlinee->frameInfo && inlinee->frameInfo->record)
                    {
                        inlinee->frameInfo->record->Finalize(inlinee, GetCurrentOffset());
#if defined(_M_IX86) || defined(_M_X64)
                        // Store all records to be adjusted for BR shortening
                        m_inlineeFrameRecords->Add(inlinee->frameInfo->record);
#endif
                    }
                    // InlineeStart emits no machine code.
                    continue;
                }
            }

            // Emit the machine encoding; count is the number of bytes written at m_pc.
            count = m_encoderMD.Encode(instr, m_pc, m_encodeBuffer);
#if DBG_DUMP
            if (PHASE_TRACE(Js::EncoderPhase, this->m_func))
            {
                // Trace the IR alongside its emitted byte sequence.
                instr->Dump((IRDumpFlags)(IRDumpFlags_SimpleForm | IRDumpFlags_SkipEndLine | IRDumpFlags_SkipByteCodeOffset));
                Output::SkipToColumn(80);
                for (BYTE * current = m_pc; current < m_pc + count; current++)
                {
                    Output::Print(L"%02X ", *current);
                }
                Output::Print(L"\n");
                Output::Flush();
            }
#endif
#ifdef _M_X64
            if (inProlog)
                m_func->m_prologEncoder.EncodeInstr(instr, count & 0xFF);
#endif
            m_pc += count;

#if defined(_M_IX86) || defined(_M_X64)
            // for BR shortening.
            if (instr->isInlineeEntryInstr)
                // the inlinee-entry constant occupies the last MachPtr bytes just emitted
                m_encoderMD.AppendRelocEntry(RelocType::RelocTypeInlineeEntryOffset, (void*) (m_pc - MachPtr));
#endif
            if (isCallInstr)
            {
                isCallInstr = false;
                // Record the inlinee frame active at the return address of this call.
                this->RecordInlineeFrame(instr->m_func, GetCurrentOffset());
            }
            if (instr->HasBailOutInfo() && Lowerer::DoLazyBailout(this->m_func))
            {
                this->RecordBailout(instr, (uint32)(m_pc - m_encodeBuffer));
            }
        }
        else
        {
            // Worst-case sizing above should make this unreachable.
            Fatal();
        }
    } NEXT_INSTR_IN_FUNC;

    // Total code size includes the jump tables still to be appended at the tail.
    ptrdiff_t codeSize = m_pc - m_encodeBuffer + totalJmpTableSizeInBytes;

#if defined(_M_IX86) || defined(_M_X64)
    BOOL isSuccessBrShortAndLoopAlign = false;
    // NOTE(review): isSuccessBrShortAndLoopAlign is not read later in this
    // function as visible here — confirm whether it is used under other ifdefs.

    // Shorten branches. ON by default
    if (!PHASE_OFF(Js::BrShortenPhase, m_func))
    {
        isSuccessBrShortAndLoopAlign = ShortenBranchesAndLabelAlign(&m_encodeBuffer, &codeSize);
    }
#endif
#if DBG_DUMP | defined(VTUNE_PROFILING)
    if (this->m_func->DoRecordNativeMap())
    {
        // Record PragmaInstr offsets and throw maps
        for (int32 i = 0; i < m_pragmaInstrToRecordOffset->Count(); i++)
        {
            IR::PragmaInstr *inst = m_pragmaInstrToRecordOffset->Item(i);
            inst->Record(inst->m_offsetInBuffer);
        }
    }
#endif

    // Record the throw map (statement boundaries that cover throwing instructions).
    for (int32 i = 0; i < m_pragmaInstrToRecordMap->Count(); i ++)
    {
        IR::PragmaInstr *inst = m_pragmaInstrToRecordMap->Item(i);
        inst->RecordThrowMap(iter, inst->m_offsetInBuffer);
    }

    BEGIN_CODEGEN_PHASE(m_func, Js::EmitterPhase);

    // Copy to permanent buffer.
    Assert(Math::FitsInDWord(codeSize));

    // Unwind metadata sizing is per-architecture.
    ushort xdataSize;
    ushort pdataCount;
#ifdef _M_X64
    pdataCount = 1;
    xdataSize = (ushort)m_func->m_prologEncoder.SizeOfUnwindInfo();
#elif _M_ARM
    pdataCount = (ushort)m_func->m_unwindInfo.GetPDataCount(codeSize);
    xdataSize = (UnwindInfoManager::MaxXdataBytes + 3) * pdataCount;
#else
    xdataSize = 0;
    pdataCount = 0;
#endif
    OUTPUT_VERBOSE_TRACE(Js::EmitterPhase, L"PDATA count:%u\n", pdataCount);
    OUTPUT_VERBOSE_TRACE(Js::EmitterPhase, L"Size of XDATA:%u\n", xdataSize);
    OUTPUT_VERBOSE_TRACE(Js::EmitterPhase, L"Size of code:%u\n", codeSize);

    // Append deferred switch jump tables at the buffer tail and add their relocs.
    TryCopyAndAddRelocRecordsForSwitchJumpTableEntries(m_encodeBuffer, codeSize, jumpTableListForSwitchStatement, totalJmpTableSizeInBytes);

    workItem->RecordNativeCodeSize(m_func, (DWORD)codeSize, pdataCount, xdataSize);

    this->m_bailoutRecordMap->MapAddress([=](int index, LazyBailOutRecord* record)
    {
        this->m_encoderMD.AddLabelReloc((BYTE*)&record->instructionPointer);
    });

    // Relocs
    // Fix up all recorded relocations against the final (permanent) code address.
    m_encoderMD.ApplyRelocs((size_t) workItem->GetCodeAddress());

    workItem->RecordNativeCode(m_func, m_encodeBuffer);

    m_func->GetScriptContext()->GetThreadContext()->SetValidCallTargetForCFG((PVOID) workItem->GetCodeAddress());

#ifdef _M_X64
    m_func->m_prologEncoder.FinalizeUnwindInfo();
    workItem->RecordUnwindInfo(0, m_func->m_prologEncoder.GetUnwindInfo(), m_func->m_prologEncoder.SizeOfUnwindInfo());
#elif _M_ARM
    m_func->m_unwindInfo.EmitUnwindInfo(workItem);
    workItem->SetCodeAddress(workItem->GetCodeAddress() | 0x1); // Set thumb mode
#endif

    Js::EntryPointInfo* entryPointInfo = this->m_func->m_workItem->GetEntryPoint();
    const bool isSimpleJit = m_func->IsSimpleJit();
    Assert(
        isSimpleJit ||
        entryPointInfo->GetJitTransferData() != nullptr && !entryPointInfo->GetJitTransferData()->GetIsReady());

    // Publish the inlinee frame map unless it is a single trivial (null-record) entry.
    if (this->m_inlineeFrameMap->Count() > 0 &&
        !(this->m_inlineeFrameMap->Count() == 1 && this->m_inlineeFrameMap->Item(0).record == nullptr))
    {
        entryPointInfo->RecordInlineeFrameMap(m_inlineeFrameMap);
    }

    if (this->m_bailoutRecordMap->Count() > 0)
    {
        entryPointInfo->RecordBailOutMap(m_bailoutRecordMap);
    }

    // Transfer pinned type refs to the entry point so the recycler keeps them alive.
    if (this->m_func->pinnedTypeRefs != nullptr)
    {
        Assert(!isSimpleJit);

        // Compact the set into a plain heap array for the JIT transfer data.
        Func::TypeRefSet* pinnedTypeRefs = this->m_func->pinnedTypeRefs;
        int pinnedTypeRefCount = pinnedTypeRefs->Count();
        void** compactPinnedTypeRefs = HeapNewArrayZ(void*, pinnedTypeRefCount);
        int index = 0;
        pinnedTypeRefs->Map([compactPinnedTypeRefs, &index](void* typeRef) -> void
        {
            compactPinnedTypeRefs[index++] = typeRef;
        });

        if (PHASE_TRACE(Js::TracePinnedTypesPhase, this->m_func))
        {
            wchar_t debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
            Output::Print(L"PinnedTypes: function %s(%s) pinned %d types.\n",
                this->m_func->GetJnFunction()->GetDisplayName(), this->m_func->GetJnFunction()->GetDebugNumberSet(debugStringBuffer), pinnedTypeRefCount);
            Output::Flush();
        }

        entryPointInfo->GetJitTransferData()->SetRuntimeTypeRefs(compactPinnedTypeRefs, pinnedTypeRefCount);
    }

    // Save all equivalent type guards in a fixed size array on the JIT transfer data
    if (this->m_func->equivalentTypeGuards != nullptr)
    {
        AssertMsg(!PHASE_OFF(Js::EquivObjTypeSpecPhase, this->m_func), "Why do we have equivalent type guards if we don't do equivalent object type spec?");

        int count = this->m_func->equivalentTypeGuards->Count();
        Js::JitEquivalentTypeGuard** guards = HeapNewArrayZ(Js::JitEquivalentTypeGuard*, count);
        Js::JitEquivalentTypeGuard** dstGuard = guards;
        this->m_func->equivalentTypeGuards->Map([&dstGuard](Js::JitEquivalentTypeGuard* srcGuard) -> void
        {
            *dstGuard++ = srcGuard;
        });
        entryPointInfo->GetJitTransferData()->SetEquivalentTypeGuards(guards, count);
    }

    // Properties that may trigger a lazy bailout when invalidated.
    if (this->m_func->lazyBailoutProperties.Count() > 0)
    {
        int count = this->m_func->lazyBailoutProperties.Count();
        Js::PropertyId* lazyBailoutProperties = HeapNewArrayZ(Js::PropertyId, count);
        Js::PropertyId* dstProperties = lazyBailoutProperties;
        this->m_func->lazyBailoutProperties.Map([&](Js::PropertyId propertyId)
        {
            *dstProperties++ = propertyId;
        });
        entryPointInfo->GetJitTransferData()->SetLazyBailoutProperties(lazyBailoutProperties, count);
    }

    // Save all property guards on the JIT transfer data in a map keyed by property ID. We will use this map when installing the entry
    // point to register each guard for invalidation.
    if (this->m_func->propertyGuardsByPropertyId != nullptr)
    {
        Assert(!isSimpleJit);
        AssertMsg(!(PHASE_OFF(Js::ObjTypeSpecPhase, this->m_func) && PHASE_OFF(Js::FixedMethodsPhase, this->m_func)),
            "Why do we have type guards if we don't do object type spec or fixed methods?");

        int propertyCount = this->m_func->propertyGuardsByPropertyId->Count();
        Assert(propertyCount > 0);

#if DBG
        int totalGuardCount = (this->m_func->singleTypeGuards != nullptr ? this->m_func->singleTypeGuards->Count() : 0)
            + (this->m_func->equivalentTypeGuards != nullptr ? this->m_func->equivalentTypeGuards->Count() : 0);
        Assert(totalGuardCount > 0);
        Assert(totalGuardCount == this->m_func->indexedPropertyGuardCount);
#endif

        // Count total guard slots across all property IDs to size the flat record.
        int guardSlotCount = 0;
        this->m_func->propertyGuardsByPropertyId->Map([&guardSlotCount](Js::PropertyId propertyId, Func::IndexedPropertyGuardSet* set) -> void
        {
            guardSlotCount += set->Count();
        });

        size_t typeGuardTransferSize =                              // Reserve enough room for:
            propertyCount * sizeof(Js::TypeGuardTransferEntry) +    // each propertyId,
            propertyCount * sizeof(Js::JitIndexedPropertyGuard*) +  // terminating nullptr guard for each propertyId,
            guardSlotCount * sizeof(Js::JitIndexedPropertyGuard*);  // a pointer for each guard we counted above.

        // The extra room for sizeof(Js::TypePropertyGuardEntry) allocated by HeapNewPlus will be used for the terminating invalid propertyId.
        // Review (jedmiad): Skip zeroing? This is heap allocated so there shouldn't be any false recycler references.
        Js::TypeGuardTransferEntry* typeGuardTransferRecord = HeapNewPlusZ(typeGuardTransferSize, Js::TypeGuardTransferEntry);

        Func* func = this->m_func;

        // Serialize as variable-length entries: propertyId, guard pointers, null terminator.
        Js::TypeGuardTransferEntry* dstEntry = typeGuardTransferRecord;
        this->m_func->propertyGuardsByPropertyId->Map([func, &dstEntry](Js::PropertyId propertyId, Func::IndexedPropertyGuardSet* srcSet) -> void
        {
            dstEntry->propertyId = propertyId;

            int guardIndex = 0;

            srcSet->Map([dstEntry, &guardIndex](Js::JitIndexedPropertyGuard* guard) -> void
            {
                dstEntry->guards[guardIndex++] = guard;
            });

            dstEntry->guards[guardIndex++] = nullptr;
            dstEntry = reinterpret_cast<Js::TypeGuardTransferEntry*>(&dstEntry->guards[guardIndex]);
        });
        dstEntry->propertyId = Js::Constants::NoProperty;
        dstEntry++;

        Assert(reinterpret_cast<char*>(dstEntry) <= reinterpret_cast<char*>(typeGuardTransferRecord) + typeGuardTransferSize + sizeof(Js::TypeGuardTransferEntry));

        entryPointInfo->RecordTypeGuards(this->m_func->indexedPropertyGuardCount, typeGuardTransferRecord, typeGuardTransferSize);
    }

    // Save all constructor caches on the JIT transfer data in a map keyed by property ID. We will use this map when installing the entry
    // point to register each cache for invalidation.
    if (this->m_func->ctorCachesByPropertyId != nullptr)
    {
        Assert(!isSimpleJit);
        AssertMsg(!(PHASE_OFF(Js::ObjTypeSpecPhase, this->m_func) && PHASE_OFF(Js::FixedMethodsPhase, this->m_func)),
            "Why do we have constructor cache guards if we don't do object type spec or fixed methods?");

        int propertyCount = this->m_func->ctorCachesByPropertyId->Count();
        Assert(propertyCount > 0);

#if DBG
        int cacheCount = entryPointInfo->GetConstructorCacheCount();
        Assert(cacheCount > 0);
#endif

        // Count total cache slots across all property IDs to size the flat record.
        int cacheSlotCount = 0;
        this->m_func->ctorCachesByPropertyId->Map([&cacheSlotCount](Js::PropertyId propertyId, Func::CtorCacheSet* cacheSet) -> void
        {
            cacheSlotCount += cacheSet->Count();
        });

        size_t ctorCachesTransferSize =                                // Reserve enough room for:
            propertyCount * sizeof(Js::CtorCacheGuardTransferEntry) +  // each propertyId,
            propertyCount * sizeof(Js::ConstructorCache*) +            // terminating null cache for each propertyId,
            cacheSlotCount * sizeof(Js::JitIndexedPropertyGuard*);     // a pointer for each cache we counted above.

        // The extra room for sizeof(Js::CtorCacheGuardTransferEntry) allocated by HeapNewPlus will be used for the terminating invalid propertyId.
        // Review (jedmiad): Skip zeroing? This is heap allocated so there shouldn't be any false recycler references.
        Js::CtorCacheGuardTransferEntry* ctorCachesTransferRecord = HeapNewPlusZ(ctorCachesTransferSize, Js::CtorCacheGuardTransferEntry);

        Func* func = this->m_func;

        // Serialize as variable-length entries: propertyId, cache pointers, null terminator.
        Js::CtorCacheGuardTransferEntry* dstEntry = ctorCachesTransferRecord;
        this->m_func->ctorCachesByPropertyId->Map([func, &dstEntry](Js::PropertyId propertyId, Func::CtorCacheSet* srcCacheSet) -> void
        {
            dstEntry->propertyId = propertyId;

            int cacheIndex = 0;

            srcCacheSet->Map([dstEntry, &cacheIndex](Js::ConstructorCache* cache) -> void
            {
                dstEntry->caches[cacheIndex++] = cache;
            });

            dstEntry->caches[cacheIndex++] = nullptr;
            dstEntry = reinterpret_cast<Js::CtorCacheGuardTransferEntry*>(&dstEntry->caches[cacheIndex]);
        });
        dstEntry->propertyId = Js::Constants::NoProperty;
        dstEntry++;

        Assert(reinterpret_cast<char*>(dstEntry) <= reinterpret_cast<char*>(ctorCachesTransferRecord) + ctorCachesTransferSize + sizeof(Js::CtorCacheGuardTransferEntry));

        entryPointInfo->RecordCtorCacheGuards(ctorCachesTransferRecord, ctorCachesTransferSize);
    }

    if(!isSimpleJit)
    {
        entryPointInfo->GetJitTransferData()->SetIsReady();
    }

    workItem->FinalizeNativeCode(m_func);

    END_CODEGEN_PHASE(m_func, Js::EmitterPhase);

#if DBG_DUMP
    m_func->m_codeSize = codeSize;
    if (PHASE_DUMP(Js::EncoderPhase, m_func) || PHASE_DUMP(Js::BackEndPhase, m_func))
    {
        // Dump each IR instruction prefixed with its final native address,
        // using the per-instruction offsets recorded during pass 1.
        bool dumpIRAddressesValue = Js::Configuration::Global.flags.DumpIRAddresses;
        Js::Configuration::Global.flags.DumpIRAddresses = true;

        this->m_func->DumpHeader();

        m_instrNumber = 0;
        FOREACH_INSTR_IN_FUNC(instr, m_func)
        {
            __analysis_assume(m_instrNumber < instrCount);
            instr->DumpGlobOptInstrString();
#ifdef _WIN64
            Output::Print(L"%12IX ", m_offsetBuffer[m_instrNumber++] + (BYTE *)workItem->GetCodeAddress());
#else
            Output::Print(L"%8IX ", m_offsetBuffer[m_instrNumber++] + (BYTE *)workItem->GetCodeAddress());
#endif
            instr->Dump();
        } NEXT_INSTR_IN_FUNC;
        Output::Flush();

        Js::Configuration::Global.flags.DumpIRAddresses = dumpIRAddressesValue;
    }

    if (PHASE_DUMP(Js::EncoderPhase, m_func) && Js::Configuration::Global.flags.Verbose)
    {
        workItem->DumpNativeOffsetMaps();
        workItem->DumpNativeThrowSpanSequence();
        this->DumpInlineeFrameMap(workItem->GetCodeAddress());
        Output::Flush();
    }
#endif
}
  443. bool Encoder::DoTrackAllStatementBoundary() const
  444. {
  445. #if DBG_DUMP | defined(VTUNE_PROFILING)
  446. return this->m_func->DoRecordNativeMap();
  447. #else
  448. return false;
  449. #endif
  450. }
  451. void Encoder::TryCopyAndAddRelocRecordsForSwitchJumpTableEntries(BYTE *codeStart, size_t codeSize, JmpTableList * jumpTableListForSwitchStatement, size_t totalJmpTableSizeInBytes)
  452. {
  453. if (jumpTableListForSwitchStatement == nullptr)
  454. {
  455. return;
  456. }
  457. BYTE * jmpTableStartAddress = codeStart + codeSize - totalJmpTableSizeInBytes;
  458. JitArenaAllocator * allocator = this->m_func->m_alloc;
  459. EncoderMD * encoderMD = &m_encoderMD;
  460. jumpTableListForSwitchStatement->Map([&](uint index, BranchJumpTableWrapper * branchJumpTableWrapper) -> void
  461. {
  462. Assert(branchJumpTableWrapper != nullptr);
  463. void ** srcJmpTable = branchJumpTableWrapper->jmpTable;
  464. size_t jmpTableSizeInBytes = branchJumpTableWrapper->tableSize * sizeof(void*);
  465. AssertMsg(branchJumpTableWrapper->labelInstr != nullptr, "Label not yet created?");
  466. Assert(branchJumpTableWrapper->labelInstr->GetPC() == nullptr);
  467. branchJumpTableWrapper->labelInstr->SetPC(jmpTableStartAddress);
  468. memcpy(jmpTableStartAddress, srcJmpTable, jmpTableSizeInBytes);
  469. for (int i = 0; i < branchJumpTableWrapper->tableSize; i++)
  470. {
  471. void * addressOfJmpTableEntry = jmpTableStartAddress + (i * sizeof(void*));
  472. Assert((ptrdiff_t) addressOfJmpTableEntry - (ptrdiff_t) jmpTableStartAddress < (ptrdiff_t) jmpTableSizeInBytes);
  473. #if defined(_M_ARM32_OR_ARM64)
  474. encoderMD->AddLabelReloc((byte*) addressOfJmpTableEntry);
  475. #else
  476. encoderMD->AppendRelocEntry(RelocTypeLabelUse, addressOfJmpTableEntry);
  477. #endif
  478. }
  479. jmpTableStartAddress += (jmpTableSizeInBytes);
  480. BranchJumpTableWrapper::Delete(allocator, branchJumpTableWrapper);
  481. });
  482. Assert(jmpTableStartAddress == codeStart + codeSize);
  483. }
  484. uint32 Encoder::GetCurrentOffset() const
  485. {
  486. Assert(m_pc - m_encodeBuffer <= UINT_MAX); // encode buffer size is uint32
  487. return static_cast<uint32>(m_pc - m_encodeBuffer);
  488. }
  489. void Encoder::RecordInlineeFrame(Func* inlinee, uint32 currentOffset)
  490. {
  491. // The only restriction for not supporting loop bodies is that inlinee frame map is created on FunctionEntryPointInfo & not
  492. // the base class EntryPointInfo.
  493. if (!this->m_func->IsLoopBody() && !this->m_func->IsSimpleJit())
  494. {
  495. InlineeFrameRecord* record = nullptr;
  496. if (inlinee->frameInfo && inlinee->m_hasInlineArgsOpt)
  497. {
  498. record = inlinee->frameInfo->record;
  499. Assert(record != nullptr);
  500. }
  501. if (m_inlineeFrameMap->Count() > 0)
  502. {
  503. // update existing record if the entry is the same.
  504. NativeOffsetInlineeFramePair& lastPair = m_inlineeFrameMap->Item(m_inlineeFrameMap->Count() - 1);
  505. if (lastPair.record == record)
  506. {
  507. lastPair.offset = currentOffset;
  508. return;
  509. }
  510. }
  511. NativeOffsetInlineeFramePair pair = { currentOffset, record };
  512. m_inlineeFrameMap->Add(pair);
  513. }
  514. }
  515. #if defined(_M_IX86) || defined(_M_X64)
  516. ///----------------------------------------------------------------------------
  517. ///
  518. /// EncoderMD::ShortenBranchesAndLabelAlign
  519. /// We try to shorten branches if the label instr is within 8-bits target range (-128 to 127)
  520. /// and fix the relocList accordingly.
  521. /// Also align LoopTop Label and TryCatchLabel
  522. ///----------------------------------------------------------------------------
  523. BOOL
  524. Encoder::ShortenBranchesAndLabelAlign(BYTE **codeStart, ptrdiff_t *codeSize)
  525. {
  526. #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
  527. static uint32 globalTotalBytesSaved = 0, globalTotalBytesWithoutShortening = 0;
  528. static uint32 globalTotalBytesInserted = 0; // loop alignment nops
  529. #endif
  530. uint32 brShortenedCount = 0;
  531. bool codeChange = false; // any overall BR shortened or label aligned ?
  532. BYTE* buffStart = *codeStart;
  533. BYTE* buffEnd = buffStart + *codeSize;
  534. ptrdiff_t newCodeSize = *codeSize;
  535. #if DBG
  536. // Sanity check
  537. m_encoderMD.VerifyRelocList(buffStart, buffEnd);
  538. #endif
  539. // Copy of original maps. Used to revert from BR shortening.
  540. OffsetList *m_origInlineeFrameRecords = nullptr,
  541. *m_origInlineeFrameMap = nullptr,
  542. *m_origPragmaInstrToRecordOffset = nullptr;
  543. OffsetList *m_origOffsetBuffer = nullptr;
  544. // we record the original maps, in case we have to revert.
  545. CopyMaps<false>(&m_origInlineeFrameRecords
  546. , &m_origInlineeFrameMap
  547. , &m_origPragmaInstrToRecordOffset
  548. , &m_origOffsetBuffer );
  549. RelocList* relocList = m_encoderMD.GetRelocList();
  550. Assert(relocList != nullptr);
  551. // Here we mark BRs to be shortened and adjust Labels and relocList entries offsets.
  552. uint32 offsetBuffIndex = 0, pragmaInstToRecordOffsetIndex = 0, inlineeFrameRecordsIndex = 0, inlineeFrameMapIndex = 0;
  553. int32 totalBytesSaved = 0;
  554. // loop over all BRs, find the ones we can convert to short form
  555. for (int32 j = 0; j < relocList->Count(); j++)
  556. {
  557. IR::LabelInstr *targetLabel;
  558. int32 relOffset;
  559. uint32 bytesSaved = 0;
  560. BYTE* labelPc, *opcodeByte;
  561. BYTE* shortBrPtr, *fixedBrPtr; // without shortening
  562. EncodeRelocAndLabels &reloc = relocList->Item(j);
  563. // If not a long branch, just fix the reloc entry and skip.
  564. if (!reloc.isLongBr())
  565. {
  566. // if loop alignment is required, total bytes saved can change
  567. int32 newTotalBytesSaved = m_encoderMD.FixRelocListEntry(j, totalBytesSaved, buffStart, buffEnd);
  568. if (newTotalBytesSaved != totalBytesSaved)
  569. {
  570. AssertMsg(reloc.isAlignedLabel(), "Expecting aligned label.");
  571. // we aligned a loop, fix maps
  572. m_encoderMD.FixMaps((uint32)(reloc.getLabelOrigPC() - buffStart), totalBytesSaved, &inlineeFrameRecordsIndex, &inlineeFrameMapIndex, &pragmaInstToRecordOffsetIndex, &offsetBuffIndex);
  573. codeChange = true;
  574. }
  575. totalBytesSaved = newTotalBytesSaved;
  576. continue;
  577. }
  578. AssertMsg(reloc.isLongBr(), "Cannot shorten already shortened branch.");
  579. // long branch
  580. opcodeByte = reloc.getBrOpCodeByte();
  581. targetLabel = reloc.getBrTargetLabel();
  582. AssertMsg(targetLabel != nullptr, "Branch to non-existing label");
  583. labelPc = targetLabel->GetPC();
  584. // compute the new offset of that Br because of previous shortening/alignment
  585. shortBrPtr = fixedBrPtr = (BYTE*)reloc.m_ptr - totalBytesSaved;
  586. if (*opcodeByte == 0xe9 /* JMP rel32 */)
  587. {
  588. bytesSaved = 3;
  589. }
  590. else if (*opcodeByte >= 0x80 && *opcodeByte < 0x90 /* Jcc rel32 */)
  591. {
  592. Assert(*(opcodeByte - 1) == 0x0f);
  593. bytesSaved = 4;
  594. // Jcc rel8 is one byte shorter in opcode, fix Br ptr to point to start of rel8
  595. shortBrPtr--;
  596. }
  597. else
  598. {
  599. Assert(false);
  600. }
  601. // compute current distance to label
  602. if (labelPc >= (BYTE*) reloc.m_ptr)
  603. {
  604. // forward Br. We compare using the unfixed m_ptr, because the label is ahead and its Pc is not fixed it.
  605. relOffset = (int32)(labelPc - ((BYTE*)reloc.m_ptr + 4));
  606. }
  607. else
  608. {
  609. // backward Br. We compute relOffset after fixing the Br, since the label is already fixed.
  610. // We also include the 3-4 bytes saved after shortening the Br since the Br itself is included in the relative offset.
  611. relOffset = (int32)(labelPc - (shortBrPtr + 1));
  612. }
  613. // update Br offset (overwritten later if Br is shortened)
  614. reloc.m_ptr = fixedBrPtr;
  615. // can we shorten ?
  616. if (relOffset >= -128 && relOffset <= 127)
  617. {
  618. uint32 brOffset;
  619. brShortenedCount++;
  620. // update with shortened br offset
  621. reloc.m_ptr = shortBrPtr;
  622. // fix all maps entries from last shortened br to this one, before updating total bytes saved.
  623. brOffset = (uint32) ((BYTE*)reloc.m_origPtr - buffStart);
  624. m_encoderMD.FixMaps(brOffset, totalBytesSaved, &inlineeFrameRecordsIndex, &inlineeFrameMapIndex, &pragmaInstToRecordOffsetIndex, &offsetBuffIndex);
  625. codeChange = true;
  626. totalBytesSaved += bytesSaved;
  627. // mark br reloc entry as shortened
  628. #ifdef _M_IX86
  629. reloc.setAsShortBr(targetLabel);
  630. #else
  631. reloc.setAsShortBr();
  632. #endif
  633. }
  634. }
  635. // Fix the rest of the maps, if needed.
  636. if (totalBytesSaved != 0)
  637. {
  638. m_encoderMD.FixMaps((uint32) -1, totalBytesSaved, &inlineeFrameRecordsIndex, &inlineeFrameMapIndex, &pragmaInstToRecordOffsetIndex, &offsetBuffIndex);
  639. codeChange = true;
  640. newCodeSize -= totalBytesSaved;
  641. }
  642. // no BR shortening or Label alignment happened, no need to copy code
  643. if (!codeChange)
  644. return codeChange;
  645. #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
  646. globalTotalBytesWithoutShortening += (uint32)(*codeSize);
  647. globalTotalBytesSaved += (uint32)(*codeSize - newCodeSize);
  648. if (PHASE_TRACE(Js::BrShortenPhase, this->m_func))
  649. {
  650. OUTPUT_VERBOSE_TRACE(Js::BrShortenPhase, L"func: %s, bytes saved: %d, bytes saved %%:%.2f, total bytes saved: %d, total bytes saved%%: %.2f, BR shortened: %d\n",
  651. this->m_func->GetJnFunction()->GetDisplayName(), (*codeSize - newCodeSize), ((float)*codeSize - newCodeSize) / *codeSize * 100,
  652. globalTotalBytesSaved, ((float)globalTotalBytesSaved) / globalTotalBytesWithoutShortening * 100 , brShortenedCount);
  653. Output::Flush();
  654. }
  655. #endif
  656. // At this point BRs are marked to be shortened, and relocList offsets are adjusted to new instruction length.
  657. // Next, we re-write the code to shorten the BRs and adjust relocList offsets to point to new buffer.
  658. // We also write NOPs for aligned loops.
  659. BYTE* tmpBuffer = AnewArray(m_tempAlloc, BYTE, newCodeSize);
  660. // start copying to new buffer
  661. // this can possibly be done during fixing, but there is no evidence it is an overhead to justify the complexity.
  662. BYTE *from = buffStart, *to = nullptr;
  663. BYTE *dst_p = (BYTE*)tmpBuffer;
  664. size_t dst_size = newCodeSize;
  665. size_t src_size;
  666. for (int32 i = 0; i < relocList->Count(); i++)
  667. {
  668. EncodeRelocAndLabels &reloc = relocList->Item(i);
  669. // shorten BR and copy
  670. if (reloc.isShortBr())
  671. {
  672. // validate that short BR offset is within 1 byte offset range.
  673. // This handles the rare case with loop alignment breaks br shortening.
  674. // Consider:
  675. // BR $L1 // shortened
  676. // ...
  677. // L2: // aligned, and makes the BR $L1 non-shortable anymore
  678. // ...
  679. // BR $L2
  680. // ...
  681. // L1:
  682. // In this case, we simply give up and revert the relocList.
  683. if(!reloc.validateShortBrTarget())
  684. {
  685. revertRelocList();
  686. // restore maps
  687. CopyMaps<true>(&m_origInlineeFrameRecords
  688. , &m_origInlineeFrameMap
  689. , &m_origPragmaInstrToRecordOffset
  690. , &m_origOffsetBuffer
  691. );
  692. return false;
  693. }
  694. // m_origPtr points to imm32 field in the original buffer
  695. BYTE *opcodeByte = (BYTE*)reloc.m_origPtr - 1;
  696. if (*opcodeByte == 0xe9 /* JMP rel32 */)
  697. {
  698. to = opcodeByte - 1;
  699. }
  700. else if (*opcodeByte >= 0x80 && *opcodeByte < 0x90 /* Jcc rel32 */)
  701. {
  702. Assert(*(opcodeByte - 1) == 0x0f);
  703. to = opcodeByte - 2;
  704. }
  705. else
  706. {
  707. Assert(false);
  708. }
  709. src_size = to - from + 1;
  710. Assert(dst_size >= src_size);
  711. memcpy_s(dst_p, dst_size, from, src_size);
  712. dst_p += src_size;
  713. dst_size -= src_size;
  714. // fix the BR
  715. // write new opcode
  716. *dst_p = (*opcodeByte == 0xe9) ? (BYTE)0xeb : (BYTE)(*opcodeByte - 0x10);
  717. dst_p += 2; // 1 byte for opcode + 1 byte for imm8
  718. dst_size -= 2;
  719. from = (BYTE*)reloc.m_origPtr + 4;
  720. }
  721. // insert NOPs for aligned labels
  722. else if ((!PHASE_OFF(Js::LoopAlignPhase, m_func) && reloc.isAlignedLabel()) && reloc.getLabelNopCount() > 0)
  723. {
  724. IR::LabelInstr *label = reloc.getLabel();
  725. BYTE nop_count = reloc.getLabelNopCount();
  726. AssertMsg((BYTE*)label < buffStart || (BYTE*)label >= buffEnd, "Invalid label pointer.");
  727. AssertMsg((((uint32)(label->GetPC() - buffStart)) & 0xf) == 0, "Misaligned Label");
  728. to = reloc.getLabelOrigPC() - 1;
  729. CopyPartialBuffer(&dst_p, dst_size, from, to);
  730. #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
  731. if (PHASE_TRACE(Js::LoopAlignPhase, this->m_func))
  732. {
  733. globalTotalBytesInserted += nop_count;
  734. OUTPUT_VERBOSE_TRACE(Js::LoopAlignPhase, L"func: %s, bytes inserted: %d, bytes inserted %%:%.4f, total bytes inserted:%d, total bytes inserted %%:%.4f\n",
  735. this->m_func->GetJnFunction()->GetDisplayName(), nop_count, (float)nop_count / newCodeSize * 100, globalTotalBytesInserted, (float)globalTotalBytesInserted / (globalTotalBytesWithoutShortening - globalTotalBytesSaved) * 100);
  736. Output::Flush();
  737. }
  738. #endif
  739. InsertNopsForLabelAlignment(nop_count, &dst_p);
  740. dst_size -= nop_count;
  741. from = to + 1;
  742. }
  743. }
  744. // copy last chunk
  745. CopyPartialBuffer(&dst_p, dst_size, from, buffStart + *codeSize - 1);
  746. m_encoderMD.UpdateRelocListWithNewBuffer(relocList, tmpBuffer, buffStart, buffEnd);
  747. // switch buffers
  748. *codeStart = tmpBuffer;
  749. *codeSize = newCodeSize;
  750. return true;
  751. }
  752. BYTE Encoder::FindNopCountFor16byteAlignment(size_t address)
  753. {
  754. return (16 - (BYTE) (address & 0xf)) % 16;
  755. }
  756. void Encoder::CopyPartialBuffer(BYTE ** ptrDstBuffer, size_t &dstSize, BYTE * srcStart, BYTE * srcEnd)
  757. {
  758. BYTE * destBuffer = *ptrDstBuffer;
  759. size_t srcSize = srcEnd - srcStart + 1;
  760. Assert(dstSize >= srcSize);
  761. memcpy_s(destBuffer, dstSize, srcStart, srcSize);
  762. *ptrDstBuffer += srcSize;
  763. dstSize -= srcSize;
  764. }
  765. void Encoder::InsertNopsForLabelAlignment(int nopCount, BYTE ** ptrDstBuffer)
  766. {
  767. // write NOPs
  768. for (int32 i = 0; i < nopCount; i++, (*ptrDstBuffer)++)
  769. {
  770. **ptrDstBuffer = 0x90;
  771. }
  772. }
  773. void Encoder::revertRelocList()
  774. {
  775. RelocList* relocList = m_encoderMD.GetRelocList();
  776. for (int32 i = 0; i < relocList->Count(); i++)
  777. {
  778. relocList->Item(i).revert();
  779. }
  780. }
  781. template <bool restore>
  782. void Encoder::CopyMaps(OffsetList **m_origInlineeFrameRecords
  783. , OffsetList **m_origInlineeFrameMap
  784. , OffsetList **m_origPragmaInstrToRecordOffset
  785. , OffsetList **m_origOffsetBuffer
  786. )
  787. {
  788. InlineeFrameRecords *recList = m_inlineeFrameRecords;
  789. InlineeFrameMap *mapList = m_inlineeFrameMap;
  790. PragmaInstrList *pInstrList = m_pragmaInstrToRecordOffset;
  791. OffsetList *origRecList, *origMapList, *origPInstrList;
  792. if (!restore)
  793. {
  794. Assert(*m_origInlineeFrameRecords == nullptr);
  795. Assert(*m_origInlineeFrameMap == nullptr);
  796. Assert(*m_origPragmaInstrToRecordOffset == nullptr);
  797. *m_origInlineeFrameRecords = origRecList = Anew(m_tempAlloc, OffsetList, m_tempAlloc);
  798. *m_origInlineeFrameMap = origMapList = Anew(m_tempAlloc, OffsetList, m_tempAlloc);
  799. *m_origPragmaInstrToRecordOffset = origPInstrList = Anew(m_tempAlloc, OffsetList, m_tempAlloc);
  800. #if DBG_DUMP
  801. Assert((*m_origOffsetBuffer) == nullptr);
  802. *m_origOffsetBuffer = Anew(m_tempAlloc, OffsetList, m_tempAlloc);
  803. #endif
  804. }
  805. else
  806. {
  807. Assert((*m_origInlineeFrameRecords) && (*m_origInlineeFrameMap) && (*m_origPragmaInstrToRecordOffset));
  808. origRecList = *m_origInlineeFrameRecords;
  809. origMapList = *m_origInlineeFrameMap;
  810. origPInstrList = *m_origPragmaInstrToRecordOffset;
  811. Assert(origRecList->Count() == recList->Count());
  812. Assert(origMapList->Count() == mapList->Count());
  813. Assert(origPInstrList->Count() == pInstrList->Count());
  814. #if DBG_DUMP
  815. Assert(m_origOffsetBuffer)
  816. Assert((uint32)(*m_origOffsetBuffer)->Count() == m_instrNumber);
  817. #endif
  818. }
  819. for (int i = 0; i < recList->Count(); i++)
  820. {
  821. if (!restore)
  822. {
  823. origRecList->Add(recList->Item(i)->inlineeStartOffset);
  824. }
  825. else
  826. {
  827. recList->Item(i)->inlineeStartOffset = origRecList->Item(i);
  828. }
  829. }
  830. for (int i = 0; i < mapList->Count(); i++)
  831. {
  832. if (!restore)
  833. {
  834. origMapList->Add(mapList->Item(i).offset);
  835. }
  836. else
  837. {
  838. mapList->Item(i).offset = origMapList->Item(i);
  839. }
  840. }
  841. for (int i = 0; i < pInstrList->Count(); i++)
  842. {
  843. if (!restore)
  844. {
  845. origPInstrList->Add(pInstrList->Item(i)->m_offsetInBuffer);
  846. }
  847. else
  848. {
  849. pInstrList->Item(i)->m_offsetInBuffer = origPInstrList->Item(i);
  850. }
  851. }
  852. if (restore)
  853. {
  854. (*m_origInlineeFrameRecords)->Delete();
  855. (*m_origInlineeFrameMap)->Delete();
  856. (*m_origPragmaInstrToRecordOffset)->Delete();
  857. (*m_origInlineeFrameRecords) = nullptr;
  858. (*m_origInlineeFrameMap) = nullptr;
  859. (*m_origPragmaInstrToRecordOffset) = nullptr;
  860. }
  861. #if DBG_DUMP
  862. for (uint i = 0; i < m_instrNumber; i++)
  863. {
  864. if (!restore)
  865. {
  866. (*m_origOffsetBuffer)->Add(m_offsetBuffer[i]);
  867. }
  868. else
  869. {
  870. m_offsetBuffer[i] = (*m_origOffsetBuffer)->Item(i);
  871. }
  872. }
  873. if (restore)
  874. {
  875. (*m_origOffsetBuffer)->Delete();
  876. (*m_origOffsetBuffer) = nullptr;
  877. }
  878. #endif
  879. }
  880. #endif
  881. void Encoder::RecordBailout(IR::Instr* instr, uint32 currentOffset)
  882. {
  883. BailOutInfo* bailoutInfo = instr->GetBailOutInfo();
  884. if (bailoutInfo->bailOutRecord == nullptr)
  885. {
  886. return;
  887. }
  888. #if DBG_DUMP
  889. if (PHASE_DUMP(Js::LazyBailoutPhase, m_func))
  890. {
  891. Output::Print(L"Offset: %u Instr: ", currentOffset);
  892. instr->Dump();
  893. Output::Print(L"Bailout label: ");
  894. bailoutInfo->bailOutInstr->Dump();
  895. }
  896. #endif
  897. Assert(bailoutInfo->bailOutInstr->IsLabelInstr());
  898. LazyBailOutRecord record(currentOffset, (BYTE*)bailoutInfo->bailOutInstr, bailoutInfo->bailOutRecord);
  899. m_bailoutRecordMap->Add(record);
  900. }
  901. #if DBG_DUMP
  902. void Encoder::DumpInlineeFrameMap(size_t baseAddress)
  903. {
  904. Output::Print(L"Inlinee frame info mapping\n");
  905. Output::Print(L"---------------------------------------\n");
  906. m_inlineeFrameMap->Map([=](uint index, NativeOffsetInlineeFramePair& pair) {
  907. Output::Print(L"%Ix", baseAddress + pair.offset);
  908. Output::SkipToColumn(20);
  909. if (pair.record)
  910. {
  911. pair.record->Dump();
  912. }
  913. else
  914. {
  915. Output::Print(L"<NULL>");
  916. }
  917. Output::Print(L"\n");
  918. });
  919. }
  920. #endif