// Encoder.cpp
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "Backend.h"
  6. #include "CRC.h"
  7. ///----------------------------------------------------------------------------
  8. ///
  9. /// Encoder::Encode
  10. ///
  11. /// Main entrypoint of encoder. Encode each IR instruction into the
  12. /// appropriate machine encoding.
  13. ///
  14. ///----------------------------------------------------------------------------
  15. void
  16. Encoder::Encode()
  17. {
  18. NoRecoverMemoryArenaAllocator localAlloc(_u("BE-Encoder"), m_func->m_alloc->GetPageAllocator(), Js::Throw::OutOfMemory);
  19. m_tempAlloc = &localAlloc;
  20. uint32 instrCount = m_func->GetInstrCount();
  21. size_t totalJmpTableSizeInBytes = 0;
  22. JmpTableList * jumpTableListForSwitchStatement = nullptr;
  23. m_encoderMD.Init(this);
  24. m_encodeBufferSize = UInt32Math::Mul(instrCount, MachMaxInstrSize);
  25. m_encodeBufferSize += m_func->m_totalJumpTableSizeInBytesForSwitchStatements;
  26. m_encodeBuffer = AnewArray(m_tempAlloc, BYTE, m_encodeBufferSize);
  27. #if DBG_DUMP
  28. m_instrNumber = 0;
  29. m_offsetBuffer = AnewArray(m_tempAlloc, uint, instrCount);
  30. #endif
  31. m_pragmaInstrToRecordMap = Anew(m_tempAlloc, PragmaInstrList, m_tempAlloc);
  32. if (DoTrackAllStatementBoundary())
  33. {
  34. // Create a new list, if we are tracking all statement boundaries.
  35. m_pragmaInstrToRecordOffset = Anew(m_tempAlloc, PragmaInstrList, m_tempAlloc);
  36. }
  37. else
  38. {
  39. // Set the list to the same as the throw map list, so that processing of the list
  40. // of pragma are done on those only.
  41. m_pragmaInstrToRecordOffset = m_pragmaInstrToRecordMap;
  42. }
  43. #if defined(_M_IX86) || defined(_M_X64)
  44. // for BR shortening
  45. m_inlineeFrameRecords = Anew(m_tempAlloc, InlineeFrameRecords, m_tempAlloc);
  46. #endif
  47. m_pc = m_encodeBuffer;
  48. m_inlineeFrameMap = Anew(m_tempAlloc, InlineeFrameMap, m_tempAlloc);
  49. m_bailoutRecordMap = Anew(m_tempAlloc, BailoutRecordMap, m_tempAlloc);
  50. IR::PragmaInstr* pragmaInstr = nullptr;
  51. uint32 pragmaOffsetInBuffer = 0;
  52. #ifdef _M_X64
  53. bool inProlog = false;
  54. #endif
  55. bool isCallInstr = false;
  56. // CRC Check to ensure the integrity of the encoded bytes.
  57. uint initialCRCSeed = 0;
  58. errno_t err = rand_s(&initialCRCSeed);
  59. if (err != 0)
  60. {
  61. Fatal();
  62. }
  63. uint bufferCRC = initialCRCSeed;
  64. FOREACH_INSTR_IN_FUNC(instr, m_func)
  65. {
  66. Assert(Lowerer::ValidOpcodeAfterLower(instr, m_func));
  67. if (GetCurrentOffset() + MachMaxInstrSize < m_encodeBufferSize)
  68. {
  69. ptrdiff_t count;
  70. #if DBG_DUMP
  71. AssertMsg(m_instrNumber < instrCount, "Bad instr count?");
  72. __analysis_assume(m_instrNumber < instrCount);
  73. m_offsetBuffer[m_instrNumber++] = GetCurrentOffset();
  74. #endif
  75. if (instr->IsPragmaInstr())
  76. {
  77. switch (instr->m_opcode)
  78. {
  79. #ifdef _M_X64
  80. case Js::OpCode::PrologStart:
  81. m_func->m_prologEncoder.Begin(m_pc - m_encodeBuffer);
  82. inProlog = true;
  83. continue;
  84. case Js::OpCode::PrologEnd:
  85. m_func->m_prologEncoder.End();
  86. inProlog = false;
  87. continue;
  88. #endif
  89. case Js::OpCode::StatementBoundary:
  90. pragmaOffsetInBuffer = GetCurrentOffset();
  91. pragmaInstr = instr->AsPragmaInstr();
  92. pragmaInstr->m_offsetInBuffer = pragmaOffsetInBuffer;
  93. // will record after BR shortening with adjusted offsets
  94. if (DoTrackAllStatementBoundary())
  95. {
  96. m_pragmaInstrToRecordOffset->Add(pragmaInstr);
  97. }
  98. break;
  99. default:
  100. continue;
  101. }
  102. }
  103. else if (instr->IsBranchInstr() && instr->AsBranchInstr()->IsMultiBranch())
  104. {
  105. Assert(instr->GetSrc1() && instr->GetSrc1()->IsRegOpnd());
  106. IR::MultiBranchInstr * multiBranchInstr = instr->AsBranchInstr()->AsMultiBrInstr();
  107. if (multiBranchInstr->m_isSwitchBr &&
  108. (multiBranchInstr->m_kind == IR::MultiBranchInstr::IntJumpTable || multiBranchInstr->m_kind == IR::MultiBranchInstr::SingleCharStrJumpTable))
  109. {
  110. BranchJumpTableWrapper * branchJumpTableWrapper = multiBranchInstr->GetBranchJumpTable();
  111. if (jumpTableListForSwitchStatement == nullptr)
  112. {
  113. jumpTableListForSwitchStatement = Anew(m_tempAlloc, JmpTableList, m_tempAlloc);
  114. }
  115. jumpTableListForSwitchStatement->Add(branchJumpTableWrapper);
  116. totalJmpTableSizeInBytes += (branchJumpTableWrapper->tableSize * sizeof(void*));
  117. }
  118. else
  119. {
  120. //Reloc Records
  121. EncoderMD * encoderMD = &(this->m_encoderMD);
  122. multiBranchInstr->MapMultiBrTargetByAddress([=](void ** offset) -> void
  123. {
  124. #if defined(_M_ARM32_OR_ARM64)
  125. encoderMD->AddLabelReloc((byte*)offset);
  126. #else
  127. encoderMD->AppendRelocEntry(RelocTypeLabelUse, (void*)(offset), *(IR::LabelInstr**)(offset));
  128. *((size_t*)offset) = 0;
  129. #endif
  130. });
  131. }
  132. }
  133. else
  134. {
  135. isCallInstr = LowererMD::IsCall(instr);
  136. if (pragmaInstr && (instr->isInlineeEntryInstr || isCallInstr))
  137. {
  138. // will record throw map after BR shortening with adjusted offsets
  139. m_pragmaInstrToRecordMap->Add(pragmaInstr);
  140. pragmaInstr = nullptr; // Only once per pragma instr -- do we need to make this record?
  141. }
  142. if (instr->HasBailOutInfo())
  143. {
  144. Assert(this->m_func->hasBailout);
  145. Assert(LowererMD::IsCall(instr));
  146. instr->GetBailOutInfo()->FinalizeBailOutRecord(this->m_func);
  147. }
  148. if (instr->isInlineeEntryInstr)
  149. {
  150. m_encoderMD.EncodeInlineeCallInfo(instr, GetCurrentOffset());
  151. }
  152. if (instr->m_opcode == Js::OpCode::InlineeStart)
  153. {
  154. Assert(!instr->isInlineeEntryInstr);
  155. if (pragmaInstr)
  156. {
  157. m_pragmaInstrToRecordMap->Add(pragmaInstr);
  158. pragmaInstr = nullptr;
  159. }
  160. Func* inlinee = instr->m_func;
  161. if (inlinee->frameInfo && inlinee->frameInfo->record)
  162. {
  163. inlinee->frameInfo->record->Finalize(inlinee, GetCurrentOffset());
  164. #if defined(_M_IX86) || defined(_M_X64)
  165. // Store all records to be adjusted for BR shortening
  166. m_inlineeFrameRecords->Add(inlinee->frameInfo->record);
  167. #endif
  168. }
  169. continue;
  170. }
  171. }
  172. count = m_encoderMD.Encode(instr, m_pc, m_encodeBuffer);
  173. #if defined(_M_IX86) || defined(_M_X64)
  174. bufferCRC = CalculateCRC(bufferCRC, count, m_pc);
  175. #endif
  176. #if DBG_DUMP
  177. if (PHASE_TRACE(Js::EncoderPhase, this->m_func))
  178. {
  179. instr->Dump((IRDumpFlags)(IRDumpFlags_SimpleForm | IRDumpFlags_SkipEndLine | IRDumpFlags_SkipByteCodeOffset));
  180. Output::SkipToColumn(80);
  181. for (BYTE * current = m_pc; current < m_pc + count; current++)
  182. {
  183. Output::Print(_u("%02X "), *current);
  184. }
  185. Output::Print(_u("\n"));
  186. Output::Flush();
  187. }
  188. #endif
  189. #ifdef _M_X64
  190. if (inProlog)
  191. m_func->m_prologEncoder.EncodeInstr(instr, count & 0xFF);
  192. #endif
  193. m_pc += count;
  194. #if defined(_M_IX86) || defined(_M_X64)
  195. // for BR shortening.
  196. if (instr->isInlineeEntryInstr)
  197. m_encoderMD.AppendRelocEntry(RelocType::RelocTypeInlineeEntryOffset, (void*)(m_pc - MachPtr));
  198. #endif
  199. if (isCallInstr)
  200. {
  201. isCallInstr = false;
  202. this->RecordInlineeFrame(instr->m_func, GetCurrentOffset());
  203. }
  204. if (instr->HasBailOutInfo() && Lowerer::DoLazyBailout(this->m_func))
  205. {
  206. this->RecordBailout(instr, (uint32)(m_pc - m_encodeBuffer));
  207. }
  208. }
  209. else
  210. {
  211. Fatal();
  212. }
  213. } NEXT_INSTR_IN_FUNC;
  214. ptrdiff_t codeSize = m_pc - m_encodeBuffer + totalJmpTableSizeInBytes;
  215. BOOL isSuccessBrShortAndLoopAlign = false;
  216. #if defined(_M_IX86) || defined(_M_X64)
  217. // Shorten branches. ON by default
  218. if (!PHASE_OFF(Js::BrShortenPhase, m_func))
  219. {
  220. uint brShortenedbufferCRC = initialCRCSeed;
  221. isSuccessBrShortAndLoopAlign = ShortenBranchesAndLabelAlign(&m_encodeBuffer, &codeSize, &brShortenedbufferCRC, bufferCRC, totalJmpTableSizeInBytes);
  222. if (isSuccessBrShortAndLoopAlign)
  223. {
  224. bufferCRC = brShortenedbufferCRC;
  225. }
  226. }
  227. #endif
  228. #if DBG_DUMP | defined(VTUNE_PROFILING)
  229. if (this->m_func->DoRecordNativeMap())
  230. {
  231. // Record PragmaInstr offsets and throw maps
  232. for (int32 i = 0; i < m_pragmaInstrToRecordOffset->Count(); i++)
  233. {
  234. IR::PragmaInstr *inst = m_pragmaInstrToRecordOffset->Item(i);
  235. inst->Record(inst->m_offsetInBuffer);
  236. }
  237. }
  238. #endif
  239. if (m_pragmaInstrToRecordMap->Count() > 0)
  240. {
  241. if (m_func->IsOOPJIT())
  242. {
  243. Js::ThrowMapEntry * throwMap = NativeCodeDataNewArrayNoFixup(m_func->GetNativeCodeDataAllocator(), Js::ThrowMapEntry, m_pragmaInstrToRecordMap->Count());
  244. for (int32 i = 0; i < m_pragmaInstrToRecordMap->Count(); i++)
  245. {
  246. IR::PragmaInstr *inst = m_pragmaInstrToRecordMap->Item(i);
  247. throwMap[i].nativeBufferOffset = inst->m_offsetInBuffer;
  248. throwMap[i].statementIndex = inst->m_statementIndex;
  249. }
  250. m_func->GetJITOutput()->RecordThrowMap(throwMap, m_pragmaInstrToRecordMap->Count());
  251. }
  252. else
  253. {
  254. auto entryPointInfo = m_func->GetInProcJITEntryPointInfo();
  255. auto functionBody = entryPointInfo->GetFunctionBody();
  256. Js::SmallSpanSequenceIter iter;
  257. for (int32 i = 0; i < m_pragmaInstrToRecordMap->Count(); i++)
  258. {
  259. IR::PragmaInstr *inst = m_pragmaInstrToRecordMap->Item(i);
  260. functionBody->RecordNativeThrowMap(iter, inst->m_offsetInBuffer, inst->m_statementIndex, entryPointInfo, Js::LoopHeader::NoLoop);
  261. }
  262. }
  263. }
  264. BEGIN_CODEGEN_PHASE(m_func, Js::EmitterPhase);
  265. // Copy to permanent buffer.
  266. Assert(Math::FitsInDWord(codeSize));
  267. ushort xdataSize;
  268. ushort pdataCount;
  269. #ifdef _M_X64
  270. pdataCount = 1;
  271. xdataSize = (ushort)m_func->m_prologEncoder.SizeOfUnwindInfo();
  272. #elif _M_ARM
  273. pdataCount = (ushort)m_func->m_unwindInfo.GetPDataCount(codeSize);
  274. xdataSize = (UnwindInfoManager::MaxXdataBytes + 3) * pdataCount;
  275. #else
  276. xdataSize = 0;
  277. pdataCount = 0;
  278. #endif
  279. OUTPUT_VERBOSE_TRACE(Js::EmitterPhase, _u("PDATA count:%u\n"), pdataCount);
  280. OUTPUT_VERBOSE_TRACE(Js::EmitterPhase, _u("Size of XDATA:%u\n"), xdataSize);
  281. OUTPUT_VERBOSE_TRACE(Js::EmitterPhase, _u("Size of code:%u\n"), codeSize);
  282. TryCopyAndAddRelocRecordsForSwitchJumpTableEntries(m_encodeBuffer, codeSize, jumpTableListForSwitchStatement, totalJmpTableSizeInBytes);
  283. EmitBufferAllocation * alloc = m_func->GetJITOutput()->RecordNativeCodeSize(m_func, (DWORD)codeSize, pdataCount, xdataSize);
  284. if (!alloc->inPrereservedRegion)
  285. {
  286. m_func->GetThreadContextInfo()->ResetIsAllJITCodeInPreReservedRegion();
  287. }
  288. this->m_bailoutRecordMap->MapAddress([=](int index, LazyBailOutRecord* record)
  289. {
  290. this->m_encoderMD.AddLabelReloc((BYTE*)&record->instructionPointer);
  291. });
  292. // Relocs
  293. m_encoderMD.ApplyRelocs((size_t)alloc->allocation->address, codeSize, &bufferCRC, isSuccessBrShortAndLoopAlign);
  294. m_func->GetJITOutput()->RecordNativeCode(m_func, m_encodeBuffer, alloc);
  295. #if defined(_M_IX86) || defined(_M_X64)
  296. if (!JITManager::GetJITManager()->IsJITServer())
  297. {
  298. ValidateCRCOnFinalBuffer((BYTE *)alloc->allocation->address, codeSize, totalJmpTableSizeInBytes, m_encodeBuffer, initialCRCSeed, bufferCRC, isSuccessBrShortAndLoopAlign);
  299. }
  300. #endif
  301. #ifdef _M_X64
  302. m_func->m_prologEncoder.FinalizeUnwindInfo(
  303. (BYTE*)m_func->GetJITOutput()->GetCodeAddress(), (DWORD)codeSize);
  304. m_func->GetJITOutput()->RecordUnwindInfo(
  305. 0,
  306. m_func->m_prologEncoder.GetUnwindInfo(),
  307. m_func->m_prologEncoder.SizeOfUnwindInfo(),
  308. alloc->allocation->xdata.address,
  309. m_func->GetThreadContextInfo()->GetProcessHandle());
  310. #elif _M_ARM
  311. m_func->m_unwindInfo.EmitUnwindInfo(m_func->GetJITOutput(), alloc);
  312. if (m_func->IsOOPJIT())
  313. {
  314. size_t allocSize = XDataAllocator::GetAllocSize(alloc->allocation->xdata.pdataCount, alloc->allocation->xdata.xdataSize);
  315. BYTE * xprocXdata = NativeCodeDataNewArrayNoFixup(m_func->GetNativeCodeDataAllocator(), BYTE, allocSize);
  316. memcpy_s(xprocXdata, allocSize, alloc->allocation->xdata.address, allocSize);
  317. m_func->GetJITOutput()->RecordXData(xprocXdata);
  318. }
  319. else
  320. {
  321. XDataAllocator::Register(&alloc->allocation->xdata, m_func->GetJITOutput()->GetCodeAddress(), m_func->GetJITOutput()->GetCodeSize());
  322. m_func->GetInProcJITEntryPointInfo()->SetXDataInfo(&alloc->allocation->xdata);
  323. }
  324. m_func->GetJITOutput()->SetCodeAddress(m_func->GetJITOutput()->GetCodeAddress() | 0x1); // Set thumb mode
  325. #endif
  326. if (CONFIG_FLAG(OOPCFGRegistration))
  327. {
  328. m_func->GetThreadContextInfo()->SetValidCallTargetForCFG((PVOID)m_func->GetJITOutput()->GetCodeAddress());
  329. }
  330. const bool isSimpleJit = m_func->IsSimpleJit();
  331. if (this->m_inlineeFrameMap->Count() > 0 &&
  332. !(this->m_inlineeFrameMap->Count() == 1 && this->m_inlineeFrameMap->Item(0).record == nullptr))
  333. {
  334. if (!m_func->IsOOPJIT()) // in-proc JIT
  335. {
  336. m_func->GetInProcJITEntryPointInfo()->RecordInlineeFrameMap(m_inlineeFrameMap);
  337. }
  338. else // OOP JIT
  339. {
  340. NativeOffsetInlineeFrameRecordOffset* pairs = NativeCodeDataNewArrayZNoFixup(m_func->GetNativeCodeDataAllocator(), NativeOffsetInlineeFrameRecordOffset, this->m_inlineeFrameMap->Count());
  341. this->m_inlineeFrameMap->Map([&pairs](int i, NativeOffsetInlineeFramePair& p)
  342. {
  343. pairs[i].offset = p.offset;
  344. if (p.record)
  345. {
  346. pairs[i].recordOffset = NativeCodeData::GetDataChunk(p.record)->offset;
  347. }
  348. else
  349. {
  350. pairs[i].recordOffset = NativeOffsetInlineeFrameRecordOffset::InvalidRecordOffset;
  351. }
  352. });
  353. m_func->GetJITOutput()->RecordInlineeFrameOffsetsInfo(NativeCodeData::GetDataChunk(pairs)->offset, this->m_inlineeFrameMap->Count());
  354. }
  355. }
  356. if (this->m_bailoutRecordMap->Count() > 0)
  357. {
  358. m_func->GetInProcJITEntryPointInfo()->RecordBailOutMap(m_bailoutRecordMap);
  359. }
  360. if (this->m_func->pinnedTypeRefs != nullptr)
  361. {
  362. Assert(!isSimpleJit);
  363. int pinnedTypeRefCount = this->m_func->pinnedTypeRefs->Count();
  364. PinnedTypeRefsIDL* pinnedTypeRefs = nullptr;
  365. if (this->m_func->IsOOPJIT())
  366. {
  367. pinnedTypeRefs = (PinnedTypeRefsIDL*)midl_user_allocate(offsetof(PinnedTypeRefsIDL, typeRefs) + sizeof(void*)*pinnedTypeRefCount);
  368. if (!pinnedTypeRefs)
  369. {
  370. Js::Throw::OutOfMemory();
  371. }
  372. __analysis_assume(pinnedTypeRefs);
  373. pinnedTypeRefs->count = pinnedTypeRefCount;
  374. pinnedTypeRefs->isOOPJIT = true;
  375. this->m_func->GetJITOutput()->GetOutputData()->pinnedTypeRefs = pinnedTypeRefs;
  376. }
  377. else
  378. {
  379. pinnedTypeRefs = HeapNewStructPlus(offsetof(PinnedTypeRefsIDL, typeRefs) + sizeof(void*)*pinnedTypeRefCount - sizeof(PinnedTypeRefsIDL), PinnedTypeRefsIDL);
  380. pinnedTypeRefs->count = pinnedTypeRefCount;
  381. pinnedTypeRefs->isOOPJIT = false;
  382. }
  383. int index = 0;
  384. this->m_func->pinnedTypeRefs->Map([&pinnedTypeRefs, &index](void* typeRef) -> void
  385. {
  386. pinnedTypeRefs->typeRefs[index++] = ((JITType*)typeRef)->GetAddr();
  387. });
  388. if (PHASE_TRACE(Js::TracePinnedTypesPhase, this->m_func))
  389. {
  390. char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
  391. Output::Print(_u("PinnedTypes: function %s(%s) pinned %d types.\n"),
  392. this->m_func->GetJITFunctionBody()->GetDisplayName(), this->m_func->GetDebugNumberSet(debugStringBuffer), pinnedTypeRefCount);
  393. Output::Flush();
  394. }
  395. if (!this->m_func->IsOOPJIT())
  396. {
  397. m_func->GetInProcJITEntryPointInfo()->GetJitTransferData()->SetRuntimeTypeRefs(pinnedTypeRefs);
  398. }
  399. }
  400. // Save all equivalent type guards in a fixed size array on the JIT transfer data
  401. if (this->m_func->equivalentTypeGuards != nullptr)
  402. {
  403. AssertMsg(!PHASE_OFF(Js::EquivObjTypeSpecPhase, this->m_func), "Why do we have equivalent type guards if we don't do equivalent object type spec?");
  404. int equivalentTypeGuardsCount = this->m_func->equivalentTypeGuards->Count();
  405. if (this->m_func->IsOOPJIT())
  406. {
  407. auto& equivalentTypeGuardOffsets = this->m_func->GetJITOutput()->GetOutputData()->equivalentTypeGuardOffsets;
  408. size_t allocSize = offsetof(EquivalentTypeGuardOffsets, guards) + equivalentTypeGuardsCount * sizeof(EquivalentTypeGuardIDL);
  409. equivalentTypeGuardOffsets = (EquivalentTypeGuardOffsets*)midl_user_allocate(allocSize);
  410. if (equivalentTypeGuardOffsets == nullptr)
  411. {
  412. Js::Throw::OutOfMemory();
  413. }
  414. equivalentTypeGuardOffsets->count = equivalentTypeGuardsCount;
  415. int i = 0;
  416. this->m_func->equivalentTypeGuards->Map([&equivalentTypeGuardOffsets, &i](Js::JitEquivalentTypeGuard* srcGuard) -> void
  417. {
  418. equivalentTypeGuardOffsets->guards[i].offset = NativeCodeData::GetDataTotalOffset(srcGuard);
  419. auto cache = srcGuard->GetCache();
  420. equivalentTypeGuardOffsets->guards[i].cache.guardOffset = NativeCodeData::GetDataTotalOffset(cache->guard);
  421. equivalentTypeGuardOffsets->guards[i].cache.hasFixedValue = cache->hasFixedValue;
  422. equivalentTypeGuardOffsets->guards[i].cache.isLoadedFromProto = cache->isLoadedFromProto;
  423. equivalentTypeGuardOffsets->guards[i].cache.nextEvictionVictim = cache->nextEvictionVictim;
  424. equivalentTypeGuardOffsets->guards[i].cache.record.propertyCount = cache->record.propertyCount;
  425. equivalentTypeGuardOffsets->guards[i].cache.record.propertyOffset = NativeCodeData::GetDataTotalOffset(cache->record.properties);
  426. for (int j = 0; j < EQUIVALENT_TYPE_CACHE_SIZE; j++)
  427. {
  428. equivalentTypeGuardOffsets->guards[i].cache.types[j] = (intptr_t)PointerValue(cache->types[j]);
  429. }
  430. i++;
  431. });
  432. Assert(equivalentTypeGuardsCount == i);
  433. }
  434. else
  435. {
  436. Js::JitEquivalentTypeGuard** guards = HeapNewArrayZ(Js::JitEquivalentTypeGuard*, equivalentTypeGuardsCount);
  437. Js::JitEquivalentTypeGuard** dstGuard = guards;
  438. this->m_func->equivalentTypeGuards->Map([&dstGuard](Js::JitEquivalentTypeGuard* srcGuard) -> void
  439. {
  440. *dstGuard++ = srcGuard;
  441. });
  442. m_func->GetInProcJITEntryPointInfo()->GetJitTransferData()->SetEquivalentTypeGuards(guards, equivalentTypeGuardsCount);
  443. }
  444. }
  445. if (this->m_func->lazyBailoutProperties.Count() > 0)
  446. {
  447. int count = this->m_func->lazyBailoutProperties.Count();
  448. Js::PropertyId* lazyBailoutProperties = HeapNewArrayZ(Js::PropertyId, count);
  449. Js::PropertyId* dstProperties = lazyBailoutProperties;
  450. this->m_func->lazyBailoutProperties.Map([&](Js::PropertyId propertyId)
  451. {
  452. *dstProperties++ = propertyId;
  453. });
  454. m_func->GetInProcJITEntryPointInfo()->GetJitTransferData()->SetLazyBailoutProperties(lazyBailoutProperties, count);
  455. }
  456. // Save all property guards on the JIT transfer data in a map keyed by property ID. We will use this map when installing the entry
  457. // point to register each guard for invalidation.
  458. if (this->m_func->propertyGuardsByPropertyId != nullptr)
  459. {
  460. Assert(!isSimpleJit);
  461. AssertMsg(!(PHASE_OFF(Js::ObjTypeSpecPhase, this->m_func) && PHASE_OFF(Js::FixedMethodsPhase, this->m_func)),
  462. "Why do we have type guards if we don't do object type spec or fixed methods?");
  463. #if DBG
  464. int totalGuardCount = (this->m_func->singleTypeGuards != nullptr ? this->m_func->singleTypeGuards->Count() : 0)
  465. + (this->m_func->equivalentTypeGuards != nullptr ? this->m_func->equivalentTypeGuards->Count() : 0);
  466. Assert(totalGuardCount > 0);
  467. Assert(totalGuardCount == this->m_func->indexedPropertyGuardCount);
  468. #endif
  469. if (!this->m_func->IsOOPJIT())
  470. {
  471. int propertyCount = this->m_func->propertyGuardsByPropertyId->Count();
  472. Assert(propertyCount > 0);
  473. int guardSlotCount = 0;
  474. this->m_func->propertyGuardsByPropertyId->Map([&guardSlotCount](Js::PropertyId propertyId, Func::IndexedPropertyGuardSet* set) -> void
  475. {
  476. guardSlotCount += set->Count();
  477. });
  478. size_t typeGuardTransferSize = // Reserve enough room for:
  479. propertyCount * sizeof(Js::TypeGuardTransferEntry) + // each propertyId,
  480. propertyCount * sizeof(Js::JitIndexedPropertyGuard*) + // terminating nullptr guard for each propertyId,
  481. guardSlotCount * sizeof(Js::JitIndexedPropertyGuard*); // a pointer for each guard we counted above.
  482. // The extra room for sizeof(Js::TypePropertyGuardEntry) allocated by HeapNewPlus will be used for the terminating invalid propertyId.
  483. // Review (jedmiad): Skip zeroing? This is heap allocated so there shouldn't be any false recycler references.
  484. Js::TypeGuardTransferEntry* typeGuardTransferRecord = HeapNewPlusZ(typeGuardTransferSize, Js::TypeGuardTransferEntry);
  485. Func* func = this->m_func;
  486. Js::TypeGuardTransferEntry* dstEntry = typeGuardTransferRecord;
  487. this->m_func->propertyGuardsByPropertyId->Map([func, &dstEntry](Js::PropertyId propertyId, Func::IndexedPropertyGuardSet* srcSet) -> void
  488. {
  489. dstEntry->propertyId = propertyId;
  490. int guardIndex = 0;
  491. srcSet->Map([dstEntry, &guardIndex](Js::JitIndexedPropertyGuard* guard) -> void
  492. {
  493. dstEntry->guards[guardIndex++] = guard;
  494. });
  495. dstEntry->guards[guardIndex++] = nullptr;
  496. dstEntry = reinterpret_cast<Js::TypeGuardTransferEntry*>(&dstEntry->guards[guardIndex]);
  497. });
  498. dstEntry->propertyId = Js::Constants::NoProperty;
  499. dstEntry++;
  500. Assert(reinterpret_cast<char*>(dstEntry) <= reinterpret_cast<char*>(typeGuardTransferRecord) + typeGuardTransferSize + sizeof(Js::TypeGuardTransferEntry));
  501. m_func->GetInProcJITEntryPointInfo()->RecordTypeGuards(this->m_func->indexedPropertyGuardCount, typeGuardTransferRecord, typeGuardTransferSize);
  502. }
  503. else
  504. {
  505. Func* func = this->m_func;
  506. this->m_func->GetJITOutput()->GetOutputData()->propertyGuardCount = this->m_func->indexedPropertyGuardCount;
  507. auto entry = &this->m_func->GetJITOutput()->GetOutputData()->typeGuardEntries;
  508. this->m_func->propertyGuardsByPropertyId->Map([func, &entry](Js::PropertyId propertyId, Func::IndexedPropertyGuardSet* srcSet) -> void
  509. {
  510. auto count = srcSet->Count();
  511. (*entry) = (TypeGuardTransferEntryIDL*)midl_user_allocate(offsetof(TypeGuardTransferEntryIDL, guardOffsets) + count*sizeof(int));
  512. if (!*entry)
  513. {
  514. Js::Throw::OutOfMemory();
  515. }
  516. __analysis_assume(*entry);
  517. (*entry)->propId = propertyId;
  518. (*entry)->guardsCount = count;
  519. (*entry)->next = nullptr;
  520. auto& guardOffsets = (*entry)->guardOffsets;
  521. int guardIndex = 0;
  522. srcSet->Map([&guardOffsets, &guardIndex](Js::JitIndexedPropertyGuard* guard) -> void
  523. {
  524. guardOffsets[guardIndex++] = NativeCodeData::GetDataTotalOffset(guard);
  525. });
  526. Assert(guardIndex == count);
  527. entry = &(*entry)->next;
  528. });
  529. }
  530. }
  531. // Save all constructor caches on the JIT transfer data in a map keyed by property ID. We will use this map when installing the entry
  532. // point to register each cache for invalidation.
  533. if (this->m_func->ctorCachesByPropertyId != nullptr)
  534. {
  535. Assert(!isSimpleJit);
  536. AssertMsg(!(PHASE_OFF(Js::ObjTypeSpecPhase, this->m_func) && PHASE_OFF(Js::FixedMethodsPhase, this->m_func)),
  537. "Why do we have constructor cache guards if we don't do object type spec or fixed methods?");
  538. int propertyCount = this->m_func->ctorCachesByPropertyId->Count();
  539. Assert(propertyCount > 0);
  540. int cacheSlotCount = 0;
  541. this->m_func->ctorCachesByPropertyId->Map([&cacheSlotCount](Js::PropertyId propertyId, Func::CtorCacheSet* cacheSet) -> void
  542. {
  543. cacheSlotCount += cacheSet->Count();
  544. });
  545. if (m_func->IsOOPJIT())
  546. {
  547. Func* func = this->m_func;
  548. m_func->GetJITOutput()->GetOutputData()->ctorCachesCount = propertyCount;
  549. m_func->GetJITOutput()->GetOutputData()->ctorCacheEntries = (CtorCacheTransferEntryIDL**)midl_user_allocate(propertyCount * sizeof(CtorCacheTransferEntryIDL*));
  550. CtorCacheTransferEntryIDL** entries = m_func->GetJITOutput()->GetOutputData()->ctorCacheEntries;
  551. if (!entries)
  552. {
  553. Js::Throw::OutOfMemory();
  554. }
  555. __analysis_assume(entries);
  556. uint propIndex = 0;
  557. m_func->ctorCachesByPropertyId->Map([func, entries, &propIndex](Js::PropertyId propertyId, Func::CtorCacheSet* srcCacheSet) -> void
  558. {
  559. entries[propIndex] = (CtorCacheTransferEntryIDL*)midl_user_allocate(srcCacheSet->Count() * sizeof(intptr_t) + sizeof(CtorCacheTransferEntryIDL));
  560. if (!entries[propIndex])
  561. {
  562. Js::Throw::OutOfMemory();
  563. }
  564. __analysis_assume(entries[propIndex]);
  565. entries[propIndex]->propId = propertyId;
  566. int cacheIndex = 0;
  567. srcCacheSet->Map([entries, propIndex, &cacheIndex](intptr_t cache) -> void
  568. {
  569. entries[propIndex]->caches[cacheIndex++] = cache;
  570. });
  571. entries[propIndex]->cacheCount = cacheIndex;
  572. propIndex++;
  573. });
  574. }
  575. else
  576. {
  577. Assert(m_func->GetInProcJITEntryPointInfo()->GetConstructorCacheCount() > 0);
  578. size_t ctorCachesTransferSize = // Reserve enough room for:
  579. propertyCount * sizeof(Js::CtorCacheGuardTransferEntry) + // each propertyId,
  580. propertyCount * sizeof(Js::ConstructorCache*) + // terminating null cache for each propertyId,
  581. cacheSlotCount * sizeof(Js::JitIndexedPropertyGuard*); // a pointer for each cache we counted above.
  582. // The extra room for sizeof(Js::CtorCacheGuardTransferEntry) allocated by HeapNewPlus will be used for the terminating invalid propertyId.
  583. // Review (jedmiad): Skip zeroing? This is heap allocated so there shouldn't be any false recycler references.
  584. Js::CtorCacheGuardTransferEntry* ctorCachesTransferRecord = HeapNewPlusZ(ctorCachesTransferSize, Js::CtorCacheGuardTransferEntry);
  585. Func* func = this->m_func;
  586. Js::CtorCacheGuardTransferEntry* dstEntry = ctorCachesTransferRecord;
  587. this->m_func->ctorCachesByPropertyId->Map([func, &dstEntry](Js::PropertyId propertyId, Func::CtorCacheSet* srcCacheSet) -> void
  588. {
  589. dstEntry->propertyId = propertyId;
  590. int cacheIndex = 0;
  591. srcCacheSet->Map([dstEntry, &cacheIndex](intptr_t cache) -> void
  592. {
  593. dstEntry->caches[cacheIndex++] = cache;
  594. });
  595. dstEntry->caches[cacheIndex++] = 0;
  596. dstEntry = reinterpret_cast<Js::CtorCacheGuardTransferEntry*>(&dstEntry->caches[cacheIndex]);
  597. });
  598. dstEntry->propertyId = Js::Constants::NoProperty;
  599. dstEntry++;
  600. Assert(reinterpret_cast<char*>(dstEntry) <= reinterpret_cast<char*>(ctorCachesTransferRecord) + ctorCachesTransferSize + sizeof(Js::CtorCacheGuardTransferEntry));
  601. m_func->GetInProcJITEntryPointInfo()->RecordCtorCacheGuards(ctorCachesTransferRecord, ctorCachesTransferSize);
  602. }
  603. }
  604. m_func->GetJITOutput()->FinalizeNativeCode(m_func, alloc);
  605. END_CODEGEN_PHASE(m_func, Js::EmitterPhase);
  606. #if DBG_DUMP
  607. m_func->m_codeSize = codeSize;
  608. if (PHASE_DUMP(Js::EncoderPhase, m_func) || PHASE_DUMP(Js::BackEndPhase, m_func))
  609. {
  610. bool dumpIRAddressesValue = Js::Configuration::Global.flags.DumpIRAddresses;
  611. Js::Configuration::Global.flags.DumpIRAddresses = true;
  612. this->m_func->DumpHeader();
  613. m_instrNumber = 0;
  614. FOREACH_INSTR_IN_FUNC(instr, m_func)
  615. {
  616. __analysis_assume(m_instrNumber < instrCount);
  617. instr->DumpGlobOptInstrString();
  618. #ifdef _WIN64
  619. Output::Print(_u("%12IX "), m_offsetBuffer[m_instrNumber++] + (BYTE *)m_func->GetJITOutput()->GetCodeAddress());
  620. #else
  621. Output::Print(_u("%8IX "), m_offsetBuffer[m_instrNumber++] + (BYTE *)m_func->GetJITOutput()->GetCodeAddress());
  622. #endif
  623. instr->Dump();
  624. } NEXT_INSTR_IN_FUNC;
  625. Output::Flush();
  626. Js::Configuration::Global.flags.DumpIRAddresses = dumpIRAddressesValue;
  627. }
  628. if (PHASE_DUMP(Js::EncoderPhase, m_func) && Js::Configuration::Global.flags.Verbose && !m_func->IsOOPJIT())
  629. {
  630. m_func->GetInProcJITEntryPointInfo()->DumpNativeOffsetMaps();
  631. m_func->GetInProcJITEntryPointInfo()->DumpNativeThrowSpanSequence();
  632. this->DumpInlineeFrameMap(m_func->GetJITOutput()->GetCodeAddress());
  633. Output::Flush();
  634. }
  635. #endif
  636. }
  637. bool Encoder::DoTrackAllStatementBoundary() const
  638. {
  639. #if DBG_DUMP | defined(VTUNE_PROFILING)
  640. return this->m_func->DoRecordNativeMap();
  641. #else
  642. return false;
  643. #endif
  644. }
// Copies the pre-built jump tables for switch statements into the space that was
// reserved for them at the end of the code buffer, and registers a reloc entry
// for every table slot so each entry can later be fixed up to its final label
// address. No-op when there are no switch jump tables.
void Encoder::TryCopyAndAddRelocRecordsForSwitchJumpTableEntries(BYTE *codeStart, size_t codeSize, JmpTableList * jumpTableListForSwitchStatement, size_t totalJmpTableSizeInBytes)
{
    if (jumpTableListForSwitchStatement == nullptr)
    {
        return;
    }

    // The jump tables occupy the trailing totalJmpTableSizeInBytes bytes of the buffer.
    BYTE * jmpTableStartAddress = codeStart + codeSize - totalJmpTableSizeInBytes;
    EncoderMD * encoderMD = &m_encoderMD;

    jumpTableListForSwitchStatement->Map([&](uint index, BranchJumpTableWrapper * branchJumpTableWrapper) -> void
    {
        Assert(branchJumpTableWrapper != nullptr);

        void ** srcJmpTable = branchJumpTableWrapper->jmpTable;
        size_t jmpTableSizeInBytes = branchJumpTableWrapper->tableSize * sizeof(void*);

        AssertMsg(branchJumpTableWrapper->labelInstr != nullptr, "Label not yet created?");
        Assert(branchJumpTableWrapper->labelInstr->GetPC() == nullptr);

        // Remember where this table landed so indirect branches through it resolve.
        branchJumpTableWrapper->labelInstr->SetPC(jmpTableStartAddress);
        memcpy(jmpTableStartAddress, srcJmpTable, jmpTableSizeInBytes);

        for (int i = 0; i < branchJumpTableWrapper->tableSize; i++)
        {
            void * addressOfJmpTableEntry = jmpTableStartAddress + (i * sizeof(void*));
            Assert((ptrdiff_t) addressOfJmpTableEntry - (ptrdiff_t) jmpTableStartAddress < (ptrdiff_t) jmpTableSizeInBytes);
#if defined(_M_ARM32_OR_ARM64)
            encoderMD->AddLabelReloc((byte*) addressOfJmpTableEntry);
#else
            // Each slot currently holds an IR::LabelInstr*; record the reloc and zero
            // the slot until ApplyRelocs writes the final target address.
            encoderMD->AppendRelocEntry(RelocTypeLabelUse, addressOfJmpTableEntry, *(IR::LabelInstr**)addressOfJmpTableEntry);
            *((size_t*)addressOfJmpTableEntry) = 0;
#endif
        }

        jmpTableStartAddress += (jmpTableSizeInBytes);
    });

    // All tables together must consume exactly the reserved space.
    Assert(jmpTableStartAddress == codeStart + codeSize);
}
  677. uint32 Encoder::GetCurrentOffset() const
  678. {
  679. Assert(m_pc - m_encodeBuffer <= UINT_MAX); // encode buffer size is uint32
  680. return static_cast<uint32>(m_pc - m_encodeBuffer);
  681. }
  682. void Encoder::RecordInlineeFrame(Func* inlinee, uint32 currentOffset)
  683. {
  684. // The only restriction for not supporting loop bodies is that inlinee frame map is created on FunctionEntryPointInfo & not
  685. // the base class EntryPointInfo.
  686. if (!(this->m_func->IsLoopBody() && PHASE_OFF(Js::InlineInJitLoopBodyPhase, this->m_func)) && !this->m_func->IsSimpleJit())
  687. {
  688. InlineeFrameRecord* record = nullptr;
  689. if (inlinee->frameInfo && inlinee->m_hasInlineArgsOpt)
  690. {
  691. record = inlinee->frameInfo->record;
  692. Assert(record != nullptr);
  693. }
  694. if (m_inlineeFrameMap->Count() > 0)
  695. {
  696. // update existing record if the entry is the same.
  697. NativeOffsetInlineeFramePair& lastPair = m_inlineeFrameMap->Item(m_inlineeFrameMap->Count() - 1);
  698. if (lastPair.record == record)
  699. {
  700. lastPair.offset = currentOffset;
  701. return;
  702. }
  703. }
  704. NativeOffsetInlineeFramePair pair = { currentOffset, record };
  705. m_inlineeFrameMap->Add(pair);
  706. }
  707. }
#if defined(_M_IX86) || defined(_M_X64)
/*
* ValidateCRCOnFinalBuffer
* - Validates the CRC that is last computed (could be either the one after BranchShortening or after encoding itself)
* - We calculate the CRC for jump table and dictionary after computing the code section.
* - Also, all reloc data are computed towards the end - after computing the code section - because we don't have to deal with the changes relocs while operating on the code section.
* - The version of CRC that we are validating with, doesn't have Relocs applied but the final buffer does - So we have to make adjustments while calculating the final buffer's CRC.
*/
void Encoder::ValidateCRCOnFinalBuffer(_In_reads_bytes_(finalCodeSize) BYTE * finalCodeBufferStart, size_t finalCodeSize, size_t jumpTableSize, _In_reads_bytes_(finalCodeSize) BYTE * oldCodeBufferStart, uint initialCrcSeed, uint bufferCrcToValidate, BOOL isSuccessBrShortAndLoopAlign)
{
    RelocList * relocList = m_encoderMD.GetRelocList();

    BYTE * currentStartAddress = finalCodeBufferStart;
    BYTE * currentEndAddress = nullptr;
    size_t crcSizeToCompute = 0;

    // The jump table (if any) sits at the tail of the buffer; exclude it from the
    // code-section walk — its bytes are covered via the reloc path below.
    size_t finalCodeSizeWithoutJumpTable = finalCodeSize - jumpTableSize;

    uint finalBufferCRC = initialCrcSeed;

    BYTE * oldPtr = nullptr;

    if (relocList != nullptr)
    {
        // CRC the code in runs between reloc patch sites; for each patched field we
        // feed zero bytes instead of its current content, to match the pre-reloc CRC.
        for (int index = 0; index < relocList->Count(); index++)
        {
            EncodeRelocAndLabels * relocTuple = &relocList->Item(index);

            //We will deal with the jump table and dictionary entries along with other reloc records in ApplyRelocs()
            if ((BYTE*)m_encoderMD.GetRelocBufferAddress(relocTuple) >= oldCodeBufferStart && (BYTE*)m_encoderMD.GetRelocBufferAddress(relocTuple) < (oldCodeBufferStart + finalCodeSizeWithoutJumpTable))
            {
                // Translate the reloc site from the old buffer into the final buffer.
                BYTE* finalBufferRelocTuplePtr = (BYTE*)m_encoderMD.GetRelocBufferAddress(relocTuple) - oldCodeBufferStart + finalCodeBufferStart;
                Assert(finalBufferRelocTuplePtr >= finalCodeBufferStart && finalBufferRelocTuplePtr < (finalCodeBufferStart + finalCodeSizeWithoutJumpTable));
                uint relocDataSize = m_encoderMD.GetRelocDataSize(relocTuple);
                if (relocDataSize != 0)
                {
                    AssertMsg(oldPtr == nullptr || oldPtr < finalBufferRelocTuplePtr, "Assumption here is that the reloc list is strictly increasing in terms of bufferAddress");
                    oldPtr = finalBufferRelocTuplePtr;

                    currentEndAddress = finalBufferRelocTuplePtr;
                    crcSizeToCompute = currentEndAddress - currentStartAddress;

                    Assert(currentEndAddress >= currentStartAddress);

                    finalBufferCRC = CalculateCRC(finalBufferCRC, crcSizeToCompute, currentStartAddress);

                    // The patched field was zero before relocs were applied, so feed zeros.
                    for (uint i = 0; i < relocDataSize; i++)
                    {
                        finalBufferCRC = CalculateCRC(finalBufferCRC, 0);
                    }
                    currentStartAddress = currentEndAddress + relocDataSize;
                }
            }
        }
    }

    // CRC the tail of the code section after the last reloc site.
    currentEndAddress = finalCodeBufferStart + finalCodeSizeWithoutJumpTable;
    crcSizeToCompute = currentEndAddress - currentStartAddress;

    Assert(currentEndAddress >= currentStartAddress);

    finalBufferCRC = CalculateCRC(finalBufferCRC, crcSizeToCompute, currentStartAddress);

    //Include all offsets from the reloc records to the CRC.
    m_encoderMD.ApplyRelocs((size_t)finalCodeBufferStart, finalCodeSize, &finalBufferCRC, isSuccessBrShortAndLoopAlign, true);

    if (finalBufferCRC != bufferCrcToValidate)
    {
        Assert(false);
        Fatal();
    }
}
#endif
  766. /*
  767. * EnsureRelocEntryIntegrity
  768. * - We compute the target address as the processor would compute it and check if the target is within the final buffer's bounds.
  769. * - For relative addressing, Target = current m_pc + offset
  770. * - For absolute addressing, Target = direct address
  771. */
  772. void Encoder::EnsureRelocEntryIntegrity(size_t newBufferStartAddress, size_t codeSize, size_t oldBufferAddress, size_t relocAddress, uint offsetBytes, ptrdiff_t opndData, bool isRelativeAddr)
  773. {
  774. size_t targetBrAddress = 0;
  775. size_t newBufferEndAddress = newBufferStartAddress + codeSize;
  776. //Handle Dictionary addresses here - The target address will be in the dictionary.
  777. if (relocAddress < oldBufferAddress || relocAddress >= (oldBufferAddress + codeSize))
  778. {
  779. targetBrAddress = (size_t)(*(size_t*)relocAddress);
  780. }
  781. else
  782. {
  783. size_t newBufferRelocAddr = relocAddress - oldBufferAddress + newBufferStartAddress;
  784. if (isRelativeAddr)
  785. {
  786. targetBrAddress = (size_t)newBufferRelocAddr + offsetBytes + opndData;
  787. }
  788. else // Absolute Address
  789. {
  790. targetBrAddress = (size_t)opndData;
  791. }
  792. }
  793. if (targetBrAddress < newBufferStartAddress || targetBrAddress >= newBufferEndAddress)
  794. {
  795. Assert(false);
  796. Fatal();
  797. }
  798. }
// Folds a single data word into the running CRC. Uses the hardware CRC32-C
// instruction (SSE4.2) when the CPU supports it on x86/x64; otherwise falls
// back to the software CalculateCRC32 implementation.
uint Encoder::CalculateCRC(uint bufferCRC, size_t data)
{
#if defined(_WIN32) || defined(__SSE4_2__)
#if defined(_M_IX86)
    if (AutoSystemInfo::Data.SSE4_2Available())
    {
        return _mm_crc32_u32(bufferCRC, data);
    }
#elif defined(_M_X64)
    if (AutoSystemInfo::Data.SSE4_2Available())
    {
        //CRC32 always returns a 32-bit result
        return (uint)_mm_crc32_u64(bufferCRC, data);
    }
#endif
#endif
    // Software fallback (no SSE4.2, or non-x86 target).
    return CalculateCRC32(bufferCRC, data);
}
  817. uint Encoder::CalculateCRC(uint bufferCRC, size_t count, _In_reads_bytes_(count) void * buffer)
  818. {
  819. for (uint index = 0; index < count; index++)
  820. {
  821. bufferCRC = CalculateCRC(bufferCRC, *((BYTE*)buffer + index));
  822. }
  823. return bufferCRC;
  824. }
  825. void Encoder::ValidateCRC(uint bufferCRC, uint initialCRCSeed, _In_reads_bytes_(count) void* buffer, size_t count)
  826. {
  827. uint validationCRC = initialCRCSeed;
  828. validationCRC = CalculateCRC(validationCRC, count, buffer);
  829. if (validationCRC != bufferCRC)
  830. {
  831. //TODO: This throws internal error. Is this error type, Fine?
  832. Fatal();
  833. }
  834. }
#if defined(_M_IX86) || defined(_M_X64)
///----------------------------------------------------------------------------
///
/// EncoderMD::ShortenBranchesAndLabelAlign
/// We try to shorten branches if the label instr is within 8-bits target range (-128 to 127)
/// and fix the relocList accordingly.
/// Also align LoopTop Label and TryCatchLabel
///----------------------------------------------------------------------------
// Two-pass algorithm:
//   Pass 1 walks the reloc list, marks long branches whose displacement fits in
//   rel8, and adjusts label/reloc offsets and the side maps as bytes are saved
//   (or inserted for loop alignment).
//   Pass 2 copies the code into a fresh buffer, rewriting marked branches in
//   short form, patching inlinee-entry offsets and inserting alignment NOPs,
//   while threading a CRC over both the source and the shortened buffer.
// Returns TRUE and swaps *codeStart/*codeSize to the new buffer on success;
// returns FALSE (with maps reverted) when nothing changed or shortening failed.
BOOL
Encoder::ShortenBranchesAndLabelAlign(BYTE **codeStart, ptrdiff_t *codeSize, uint * pShortenedBufferCRC, uint bufferCrcToValidate, size_t jumpTableSize)
{
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    static uint32 globalTotalBytesSaved = 0, globalTotalBytesWithoutShortening = 0;
    static uint32 globalTotalBytesInserted = 0; // loop alignment nops
#endif

    uint32 brShortenedCount = 0;
    bool codeChange = false; // any overall BR shortened or label aligned ?

    BYTE* buffStart = *codeStart;
    BYTE* buffEnd = buffStart + *codeSize;
    ptrdiff_t newCodeSize = *codeSize;

    RelocList* relocList = m_encoderMD.GetRelocList();
    if (relocList == nullptr)
    {
        return false;
    }

#if DBG
    // Sanity check
    m_encoderMD.VerifyRelocList(buffStart, buffEnd);
#endif

    // Copy of original maps. Used to revert from BR shortening.
    OffsetList *m_origInlineeFrameRecords = nullptr,
        *m_origInlineeFrameMap = nullptr,
        *m_origPragmaInstrToRecordOffset = nullptr;

    OffsetList *m_origOffsetBuffer = nullptr;

    // we record the original maps, in case we have to revert.
    CopyMaps<false>(&m_origInlineeFrameRecords
        , &m_origInlineeFrameMap
        , &m_origPragmaInstrToRecordOffset
        , &m_origOffsetBuffer );

    // Here we mark BRs to be shortened and adjust Labels and relocList entries offsets.
    uint32 offsetBuffIndex = 0, pragmaInstToRecordOffsetIndex = 0, inlineeFrameRecordsIndex = 0, inlineeFrameMapIndex = 0;
    int32 totalBytesSaved = 0;

    // loop over all BRs, find the ones we can convert to short form
    for (int32 j = 0; j < relocList->Count(); j++)
    {
        IR::LabelInstr *targetLabel;
        int32 relOffset;
        uint32 bytesSaved = 0;
        BYTE* labelPc, *opcodeByte;
        BYTE* shortBrPtr, *fixedBrPtr; // without shortening

        EncodeRelocAndLabels &reloc = relocList->Item(j);

        // If not a long branch, just fix the reloc entry and skip.
        if (!reloc.isLongBr())
        {
            // if loop alignment is required, total bytes saved can change
            int32 newTotalBytesSaved = m_encoderMD.FixRelocListEntry(j, totalBytesSaved, buffStart, buffEnd);

            if (newTotalBytesSaved != totalBytesSaved)
            {
                AssertMsg(reloc.isAlignedLabel(), "Expecting aligned label.");
                // we aligned a loop, fix maps
                m_encoderMD.FixMaps((uint32)(reloc.getLabelOrigPC() - buffStart), totalBytesSaved, &inlineeFrameRecordsIndex, &inlineeFrameMapIndex, &pragmaInstToRecordOffsetIndex, &offsetBuffIndex);
                codeChange = true;
            }
            totalBytesSaved = newTotalBytesSaved;
            continue;
        }

        AssertMsg(reloc.isLongBr(), "Cannot shorten already shortened branch.");
        // long branch
        opcodeByte = reloc.getBrOpCodeByte();
        targetLabel = reloc.getBrTargetLabel();
        AssertMsg(targetLabel != nullptr, "Branch to non-existing label");
        labelPc = targetLabel->GetPC();

        // compute the new offset of that Br because of previous shortening/alignment
        shortBrPtr = fixedBrPtr = (BYTE*)reloc.m_ptr - totalBytesSaved;

        // JMP rel32 (1-byte opcode) saves 3 bytes when shortened to JMP rel8;
        // Jcc rel32 (2-byte 0x0F 8x opcode) saves 4 bytes as Jcc rel8.
        if (*opcodeByte == 0xe9 /* JMP rel32 */)
        {
            bytesSaved = 3;
        }
        else if (*opcodeByte >= 0x80 && *opcodeByte < 0x90 /* Jcc rel32 */)
        {
            Assert(*(opcodeByte - 1) == 0x0f);
            bytesSaved = 4;
            // Jcc rel8 is one byte shorter in opcode, fix Br ptr to point to start of rel8
            shortBrPtr--;
        }
        else
        {
            Assert(false);
        }

        // compute current distance to label
        if (labelPc >= (BYTE*) reloc.m_ptr)
        {
            // forward Br. We compare using the unfixed m_ptr, because the label is ahead and its Pc is not fixed it.
            relOffset = (int32)(labelPc - ((BYTE*)reloc.m_ptr + 4));
        }
        else
        {
            // backward Br. We compute relOffset after fixing the Br, since the label is already fixed.
            // We also include the 3-4 bytes saved after shortening the Br since the Br itself is included in the relative offset.
            relOffset = (int32)(labelPc - (shortBrPtr + 1));
        }

        // update Br offset (overwritten later if Br is shortened)
        reloc.m_ptr = fixedBrPtr;

        // can we shorten ?
        if (relOffset >= -128 && relOffset <= 127)
        {
            uint32 brOffset;

            brShortenedCount++;
            // update with shortened br offset
            reloc.m_ptr = shortBrPtr;

            // fix all maps entries from last shortened br to this one, before updating total bytes saved.
            brOffset = (uint32) ((BYTE*)reloc.m_origPtr - buffStart);
            m_encoderMD.FixMaps(brOffset, totalBytesSaved, &inlineeFrameRecordsIndex, &inlineeFrameMapIndex, &pragmaInstToRecordOffsetIndex, &offsetBuffIndex);
            codeChange = true;
            totalBytesSaved += bytesSaved;

            // mark br reloc entry as shortened
#ifdef _M_IX86
            reloc.setAsShortBr(targetLabel);
#else
            reloc.setAsShortBr();
#endif
        }
    }

    // Fix the rest of the maps, if needed.
    if (totalBytesSaved != 0)
    {
        m_encoderMD.FixMaps((uint32) -1, totalBytesSaved, &inlineeFrameRecordsIndex, &inlineeFrameMapIndex, &pragmaInstToRecordOffsetIndex, &offsetBuffIndex);
        codeChange = true;
        newCodeSize -= totalBytesSaved;
    }

    // no BR shortening or Label alignment happened, no need to copy code
    if (!codeChange)
        return codeChange;

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    globalTotalBytesWithoutShortening += (uint32)(*codeSize);
    globalTotalBytesSaved += (uint32)(*codeSize - newCodeSize);

    if (PHASE_TRACE(Js::BrShortenPhase, this->m_func))
    {
        OUTPUT_VERBOSE_TRACE(Js::BrShortenPhase, _u("func: %s, bytes saved: %d, bytes saved %%:%.2f, total bytes saved: %d, total bytes saved%%: %.2f, BR shortened: %d\n"),
            this->m_func->GetJITFunctionBody()->GetDisplayName(), (*codeSize - newCodeSize), ((float)*codeSize - newCodeSize) / *codeSize * 100,
            globalTotalBytesSaved, ((float)globalTotalBytesSaved) / globalTotalBytesWithoutShortening * 100 , brShortenedCount);
        Output::Flush();
    }
#endif

    // At this point BRs are marked to be shortened, and relocList offsets are adjusted to new instruction length.
    // Next, we re-write the code to shorten the BRs and adjust relocList offsets to point to new buffer.
    // We also write NOPs for aligned loops.
    BYTE* tmpBuffer = AnewArray(m_tempAlloc, BYTE, newCodeSize);

    uint srcBufferCrc = *pShortenedBufferCRC; //This has the intial Random CRC seed to start with.

    // start copying to new buffer
    // this can possibly be done during fixing, but there is no evidence it is an overhead to justify the complexity.
    BYTE *from = buffStart, *to = nullptr;
    BYTE *dst_p = (BYTE*)tmpBuffer;
    size_t dst_size = newCodeSize;
    size_t src_size;
    for (int32 i = 0; i < relocList->Count(); i++)
    {
        EncodeRelocAndLabels &reloc = relocList->Item(i);
        // shorten BR and copy
        if (reloc.isShortBr())
        {
            // validate that short BR offset is within 1 byte offset range.
            // This handles the rare case with loop alignment breaks br shortening.
            // Consider:
            //      BR $L1 // shortened
            //      ...
            // L2:         // aligned, and makes the BR $L1 non-shortable anymore
            //      ...
            //      BR $L2
            //      ...
            // L1:
            // In this case, we simply give up and revert the relocList.
            if(!reloc.validateShortBrTarget())
            {
                revertRelocList();
                // restore maps
                CopyMaps<true>(&m_origInlineeFrameRecords
                    , &m_origInlineeFrameMap
                    , &m_origPragmaInstrToRecordOffset
                    , &m_origOffsetBuffer
                    );
                return false;
            }

            // m_origPtr points to imm32 field in the original buffer
            BYTE *opcodeByte = (BYTE*)reloc.m_origPtr - 1;

            if (*opcodeByte == 0xe9 /* JMP rel32 */)
            {
                to = opcodeByte - 1;
            }
            else if (*opcodeByte >= 0x80 && *opcodeByte < 0x90 /* Jcc rel32 */)
            {
                Assert(*(opcodeByte - 1) == 0x0f);
                to = opcodeByte - 2;
            }
            else
            {
                Assert(false);
            }

            // Copy everything up to (but excluding) this branch's opcode.
            src_size = to - from + 1;
            AnalysisAssert(dst_size >= src_size);

            memcpy_s(dst_p, dst_size, from, src_size);

            srcBufferCrc = CalculateCRC(srcBufferCrc, (BYTE*)reloc.m_origPtr - from + 4, from);
            *pShortenedBufferCRC = CalculateCRC(*pShortenedBufferCRC, src_size, dst_p);

            dst_p += src_size;
            dst_size -= src_size;

            // fix the BR
            // write new opcode
            AnalysisAssert(dst_p < tmpBuffer + newCodeSize);
            *dst_p = (*opcodeByte == 0xe9) ? (BYTE)0xeb : (BYTE)(*opcodeByte - 0x10);
            *(dst_p + 1) = 0; // imm8

            *pShortenedBufferCRC = CalculateCRC(*pShortenedBufferCRC, 2, dst_p);

            dst_p += 2; // 1 byte for opcode + 1 byte for imm8
            dst_size -= 2;
            from = (BYTE*)reloc.m_origPtr + 4;
        }
        else if (reloc.m_type == RelocTypeInlineeEntryOffset)
        {
            // Copy up to the offset field, then write the inlinee entry offset in place.
            to = (BYTE*)reloc.m_origPtr - 1;
            CopyPartialBufferAndCalculateCRC(&dst_p, dst_size, from, to, pShortenedBufferCRC);

            *(size_t*)dst_p = reloc.GetInlineOffset();

            *pShortenedBufferCRC = CalculateCRC(*pShortenedBufferCRC, sizeof(size_t), dst_p);

            dst_p += sizeof(size_t);
            dst_size -= sizeof(size_t);

            srcBufferCrc = CalculateCRC(srcBufferCrc, (BYTE*)reloc.m_origPtr + sizeof(size_t) - from , from);

            from = (BYTE*)reloc.m_origPtr + sizeof(size_t);
        }
        // insert NOPs for aligned labels
        else if ((!PHASE_OFF(Js::LoopAlignPhase, m_func) && reloc.isAlignedLabel()) && reloc.getLabelNopCount() > 0)
        {
            IR::LabelInstr *label = reloc.getLabel();
            BYTE nop_count = reloc.getLabelNopCount();

            AssertMsg((BYTE*)label < buffStart || (BYTE*)label >= buffEnd, "Invalid label pointer.");
            AssertMsg((((uint32)(label->GetPC() - buffStart)) & 0xf) == 0, "Misaligned Label");

            to = reloc.getLabelOrigPC() - 1;
            CopyPartialBufferAndCalculateCRC(&dst_p, dst_size, from, to, pShortenedBufferCRC);
            srcBufferCrc = CalculateCRC(srcBufferCrc, to - from + 1, from);

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
            if (PHASE_TRACE(Js::LoopAlignPhase, this->m_func))
            {
                globalTotalBytesInserted += nop_count;

                OUTPUT_VERBOSE_TRACE(Js::LoopAlignPhase, _u("func: %s, bytes inserted: %d, bytes inserted %%:%.4f, total bytes inserted:%d, total bytes inserted %%:%.4f\n"),
                    this->m_func->GetJITFunctionBody()->GetDisplayName(), nop_count, (float)nop_count / newCodeSize * 100, globalTotalBytesInserted, (float)globalTotalBytesInserted / (globalTotalBytesWithoutShortening - globalTotalBytesSaved) * 100);
                Output::Flush();
            }
#endif
            BYTE * tmpDst_p = dst_p;
            InsertNopsForLabelAlignment(nop_count, &dst_p);

            *pShortenedBufferCRC = CalculateCRC(*pShortenedBufferCRC, nop_count, tmpDst_p);
            dst_size -= nop_count;
            from = to + 1;
        }
    }
    // copy last chunk
    //Exclude jumpTable content from CRC calculation.
    //Though jumpTable is not part of the encoded bytes, codeSize has jumpTableSize included in it.
    CopyPartialBufferAndCalculateCRC(&dst_p, dst_size, from, buffStart + *codeSize - 1, pShortenedBufferCRC, jumpTableSize);
    srcBufferCrc = CalculateCRC(srcBufferCrc, buffStart + *codeSize - from - jumpTableSize, from);

    m_encoderMD.UpdateRelocListWithNewBuffer(relocList, tmpBuffer, buffStart, buffEnd);

    // The source-buffer CRC must still match what the encoder computed originally.
    if (srcBufferCrc != bufferCrcToValidate)
    {
        Assert(false);
        Fatal();
    }

    // switch buffers
    *codeStart = tmpBuffer;
    *codeSize = newCodeSize;

    return true;
}
  1103. BYTE Encoder::FindNopCountFor16byteAlignment(size_t address)
  1104. {
  1105. return (16 - (BYTE) (address & 0xf)) % 16;
  1106. }
  1107. void Encoder::CopyPartialBufferAndCalculateCRC(BYTE ** ptrDstBuffer, size_t &dstSize, BYTE * srcStart, BYTE * srcEnd, uint* pBufferCRC, size_t jumpTableSize)
  1108. {
  1109. BYTE * destBuffer = *ptrDstBuffer;
  1110. size_t srcSize = srcEnd - srcStart + 1;
  1111. Assert(dstSize >= srcSize);
  1112. memcpy_s(destBuffer, dstSize, srcStart, srcSize);
  1113. Assert(srcSize >= jumpTableSize);
  1114. //Exclude the jump table content (which is at the end of the buffer) for calculating CRC - at this point.
  1115. *pBufferCRC = CalculateCRC(*pBufferCRC, srcSize - jumpTableSize, destBuffer);
  1116. *ptrDstBuffer += srcSize;
  1117. dstSize -= srcSize;
  1118. }
  1119. void Encoder::InsertNopsForLabelAlignment(int nopCount, BYTE ** ptrDstBuffer)
  1120. {
  1121. // write NOPs
  1122. for (int32 i = 0; i < nopCount; i++, (*ptrDstBuffer)++)
  1123. {
  1124. **ptrDstBuffer = 0x90;
  1125. }
  1126. }
  1127. void Encoder::revertRelocList()
  1128. {
  1129. RelocList* relocList = m_encoderMD.GetRelocList();
  1130. for (int32 i = 0; i < relocList->Count(); i++)
  1131. {
  1132. relocList->Item(i).revert();
  1133. }
  1134. }
// Saves or restores the native-offset side tables that branch shortening mutates.
// restore == false: allocates temp OffsetLists from m_tempAlloc and snapshots the
//   current offsets of the inlinee frame records, inlinee frame map, pragma-instr
//   records and (under DBG_DUMP) the per-instruction offset buffer.
// restore == true: copies the snapshotted offsets back into the live structures
//   and deletes the temp lists (used when branch shortening must be reverted).
// The out-params are named like members by convention at the call site; they are
// ordinary parameters here.
template <bool restore>
void Encoder::CopyMaps(OffsetList **m_origInlineeFrameRecords
    , OffsetList **m_origInlineeFrameMap
    , OffsetList **m_origPragmaInstrToRecordOffset
    , OffsetList **m_origOffsetBuffer
    )
{
    InlineeFrameRecords *recList = m_inlineeFrameRecords;
    InlineeFrameMap *mapList = m_inlineeFrameMap;
    PragmaInstrList *pInstrList = m_pragmaInstrToRecordOffset;

    OffsetList *origRecList, *origMapList, *origPInstrList;
    if (!restore)
    {
        // Snapshot mode: the caller must pass empty slots.
        Assert(*m_origInlineeFrameRecords == nullptr);
        Assert(*m_origInlineeFrameMap == nullptr);
        Assert(*m_origPragmaInstrToRecordOffset == nullptr);

        *m_origInlineeFrameRecords = origRecList = Anew(m_tempAlloc, OffsetList, m_tempAlloc);
        *m_origInlineeFrameMap = origMapList = Anew(m_tempAlloc, OffsetList, m_tempAlloc);
        *m_origPragmaInstrToRecordOffset = origPInstrList = Anew(m_tempAlloc, OffsetList, m_tempAlloc);

#if DBG_DUMP
        Assert((*m_origOffsetBuffer) == nullptr);
        *m_origOffsetBuffer = Anew(m_tempAlloc, OffsetList, m_tempAlloc);
#endif
    }
    else
    {
        Assert((*m_origInlineeFrameRecords) && (*m_origInlineeFrameMap) && (*m_origPragmaInstrToRecordOffset));
        origRecList = *m_origInlineeFrameRecords;
        origMapList = *m_origInlineeFrameMap;
        origPInstrList = *m_origPragmaInstrToRecordOffset;

        // Restore must see exactly the entry counts that were snapshotted.
        Assert(origRecList->Count() == recList->Count());
        Assert(origMapList->Count() == mapList->Count());
        Assert(origPInstrList->Count() == pInstrList->Count());

#if DBG_DUMP
        Assert(m_origOffsetBuffer);
        Assert((uint32)(*m_origOffsetBuffer)->Count() == m_instrNumber);
#endif
    }

    // Inlinee frame records: snapshot/restore inlineeStartOffset.
    for (int i = 0; i < recList->Count(); i++)
    {
        if (!restore)
        {
            origRecList->Add(recList->Item(i)->inlineeStartOffset);
        }
        else
        {
            recList->Item(i)->inlineeStartOffset = origRecList->Item(i);
        }
    }

    // Inlinee frame map: snapshot/restore each pair's native offset.
    for (int i = 0; i < mapList->Count(); i++)
    {
        if (!restore)
        {
            origMapList->Add(mapList->Item(i).offset);
        }
        else
        {
            mapList->Item(i).offset = origMapList->Item(i);
        }
    }

    // Pragma instructions: snapshot/restore the recorded buffer offsets.
    for (int i = 0; i < pInstrList->Count(); i++)
    {
        if (!restore)
        {
            origPInstrList->Add(pInstrList->Item(i)->m_offsetInBuffer);
        }
        else
        {
            pInstrList->Item(i)->m_offsetInBuffer = origPInstrList->Item(i);
        }
    }

    if (restore)
    {
        // Free the temp lists and clear the slots so a later snapshot can reuse them.
        (*m_origInlineeFrameRecords)->Delete();
        (*m_origInlineeFrameMap)->Delete();
        (*m_origPragmaInstrToRecordOffset)->Delete();
        (*m_origInlineeFrameRecords) = nullptr;
        (*m_origInlineeFrameMap) = nullptr;
        (*m_origPragmaInstrToRecordOffset) = nullptr;
    }

#if DBG_DUMP
    // Per-instruction offset buffer (dump builds only).
    for (uint i = 0; i < m_instrNumber; i++)
    {
        if (!restore)
        {
            (*m_origOffsetBuffer)->Add(m_offsetBuffer[i]);
        }
        else
        {
            m_offsetBuffer[i] = (*m_origOffsetBuffer)->Item(i);
        }
    }

    if (restore)
    {
        (*m_origOffsetBuffer)->Delete();
        (*m_origOffsetBuffer) = nullptr;
    }
#endif
}
  1234. #endif
// Records a lazy-bailout mapping from the native offset at which 'instr' was
// encoded to the instruction's bailout record, so the runtime can find the
// bailout info for this code offset later. Instructions whose bailout record
// was never allocated are skipped.
void Encoder::RecordBailout(IR::Instr* instr, uint32 currentOffset)
{
    BailOutInfo* bailoutInfo = instr->GetBailOutInfo();
    if (bailoutInfo->bailOutRecord == nullptr)
    {
        return;
    }
#if DBG_DUMP
    if (PHASE_DUMP(Js::LazyBailoutPhase, m_func))
    {
        Output::Print(_u("Offset: %u Instr: "), currentOffset);
        instr->Dump();
        Output::Print(_u("Bailout label: "));
        bailoutInfo->bailOutInstr->Dump();
    }
#endif
    // The bailout target must be a label; its address is stored alongside the record.
    Assert(bailoutInfo->bailOutInstr->IsLabelInstr());
    LazyBailOutRecord record(currentOffset, (BYTE*)bailoutInfo->bailOutInstr, bailoutInfo->bailOutRecord);
    m_bailoutRecordMap->Add(record);
}
  1255. #if DBG_DUMP
  1256. void Encoder::DumpInlineeFrameMap(size_t baseAddress)
  1257. {
  1258. Output::Print(_u("Inlinee frame info mapping\n"));
  1259. Output::Print(_u("---------------------------------------\n"));
  1260. m_inlineeFrameMap->Map([=](uint index, NativeOffsetInlineeFramePair& pair) {
  1261. Output::Print(_u("%Ix"), baseAddress + pair.offset);
  1262. Output::SkipToColumn(20);
  1263. if (pair.record)
  1264. {
  1265. pair.record->Dump();
  1266. }
  1267. else
  1268. {
  1269. Output::Print(_u("<NULL>"));
  1270. }
  1271. Output::Print(_u("\n"));
  1272. });
  1273. }
  1274. #endif