ByteCodeWriter.cpp 120 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "RuntimeByteCodePch.h"
  6. namespace Js
  7. {
  8. void ByteCodeWriter::Create()
  9. {
  10. m_loopNest = 0;
  11. m_byteCodeCount = 0;
  12. m_byteCodeWithoutLDACount = 0;
  13. m_byteCodeInLoopCount = 0;
  14. m_functionWrite = nullptr;
  15. m_pMatchingNode = nullptr;
  16. m_matchingNodeRefCount = 0;
  17. m_tmpRegCount = 0;
  18. DebugOnly(isInitialized = false);
  19. DebugOnly(isInUse = false);
  20. }
// One-time allocation of the writer's backing data structures out of the
// given arena. Must be called exactly once (asserted) before Begin().
// initCodeBufferSize sizes the main byte-code buffer; the auxiliary buffers
// start small and grow exponentially.
void ByteCodeWriter::InitData(ArenaAllocator* alloc, long initCodeBufferSize)
{
    Assert(!isInUse);
    Assert(!isInitialized);
    DebugOnly(isInitialized = true);
    // Label/jump bookkeeping used by End() to back-patch branch targets.
    m_labelOffsets = JsUtil::List<uint, ArenaAllocator>::New(alloc);
    m_jumpOffsets = JsUtil::List<JumpInfo, ArenaAllocator>::New(alloc);
    m_loopHeaders = JsUtil::List<LoopHeaderData, ArenaAllocator>::New(alloc);
    m_byteCodeData.Create(initCodeBufferSize, alloc);
    m_subexpressionNodesStack = Anew(alloc, JsUtil::Stack<SubexpressionNode>, alloc);
    // These data units have exponential growth strategy - let's start small and grow them
    m_auxiliaryData.Create(256, alloc);
    m_auxContextData.Create(256, alloc);
    // 17 is the initial capacity of the call-register -> LdFld-cache-index map.
    callRegToLdFldCacheIndexMap = Anew(alloc, CallRegToLdFldCacheIndexMap,
        alloc,
        17);
#ifdef BYTECODE_BRANCH_ISLAND
    // Branch-island state: long jumps get a separate patch list.
    useBranchIsland = true;
    inEnsureLongBranch = false;
    lastOpcode = Js::OpCode::FunctionEntry;
    this->UpdateNextBranchIslandOffset(0, 0);
    m_longJumpOffsets = JsUtil::List<JumpInfo, ArenaAllocator>::New(alloc);
#endif
}
  45. ///----------------------------------------------------------------------------
  46. ///
  47. /// Begin() configures this instance to generate byte-code for a specific
  48. /// JavascriptFunction:
  49. ///
  50. /// - Byte-code will be written until the caller uses End() to close and commit
  51. /// the stream to the given function, or Reset() to discard and reset to an
  52. /// empty state.
  53. ///
  54. /// - Each ByteCodeWriter may be used multiple times, but may only generate a
  55. /// single byte-code stream for a single function at a time.
  56. ///
  57. ///----------------------------------------------------------------------------
  58. void ByteCodeWriter::Begin(ByteCodeGenerator* byteCodeGenerator, FunctionBody* functionWrite, ArenaAllocator* alloc, bool doJitLoopBodies, bool hasLoop)
  59. {
  60. Assert(!isInUse);
  61. AssertMsg(m_functionWrite == nullptr, "Cannot nest Begin() calls");
  62. AssertMsg(functionWrite != nullptr, "Must have valid function to write");
  63. AssertMsg(functionWrite->GetByteCode() == nullptr, "Function should not already have a byte-code body");
  64. AssertMsg(functionWrite->GetLocalsCount() > 0, "Must always have R0 for return-value");
  65. DebugOnly(isInUse = true);
  66. m_functionWrite = functionWrite;
  67. m_doJitLoopBodies = doJitLoopBodies;
  68. m_doInterruptProbe = functionWrite->GetScriptContext()->GetThreadContext()->DoInterruptProbe(functionWrite);
  69. m_hasLoop = hasLoop;
  70. m_isInDebugMode = byteCodeGenerator->IsInDebugMode();
  71. }
// Back-patches every recorded jump site in byteBuffer: the placeholder written
// at encode time (the layout's distance-to-end-of-instruction) is replaced by
// the real relative offset to the destination label. T is the encoded offset
// width (JumpOffset or LongJumpOffset under BYTECODE_BRANCH_ISLAND).
template <typename T>
void ByteCodeWriter::PatchJumpOffset(JsUtil::List<JumpInfo, ArenaAllocator> * jumpOffset, byte * byteBuffer, uint byteCount)
{
    jumpOffset->Map([=](int index, JumpInfo& jumpInfo)
    {
        //
        // Read "labelID" stored at the offset within the byte-code.
        //
        uint jumpByteOffset = jumpInfo.patchOffset;
        AssertMsg(jumpByteOffset < byteCount - sizeof(T),
            "Must have valid jump site within byte-code to back-patch");
        unaligned T * pnBackPatch = reinterpret_cast<unaligned T *>(&byteBuffer[jumpByteOffset]);
        ByteCodeLabel labelID = jumpInfo.labelId;
        CheckLabel(labelID);
        // The encoder stored the byte distance from the patch slot to the end
        // of the instruction's layout; sanity-bounded below 0x20.
        uint offsetToEndOfLayoutByteSize = *pnBackPatch;
        Assert(offsetToEndOfLayoutByteSize < 0x20);
        //
        // Use "labelID" to lookup the destination offset, replacing the temporary data in the
        // byte-code.
        //
        uint labelByteOffset = m_labelOffsets->Item(labelID);
        AssertMsg(labelByteOffset != UINT_MAX, "ERROR: Destination labels must be marked before closing");
        // Relative offset is measured from the end of the jump instruction.
        int relativeJumpOffset = labelByteOffset - jumpByteOffset - offsetToEndOfLayoutByteSize;
#ifdef BYTECODE_BRANCH_ISLAND
        // Short jumps (the m_jumpOffsets list) must fit within the branch limit
        // when branch islands are in use; long jumps are unconstrained here.
        Assert(!useBranchIsland || (jumpOffset != m_jumpOffsets || (relativeJumpOffset < GetBranchLimit() && relativeJumpOffset >= -GetBranchLimit())));
#endif
        // The computed offset must be representable in the encoded width T.
        Assert((T)relativeJumpOffset == relativeJumpOffset);
        *pnBackPatch = (T)relativeJumpOffset;
    });
}
///----------------------------------------------------------------------------
///
/// End() completes generating byte-code for the given JavascriptFunction and
/// commits it to the function's body.
///
///----------------------------------------------------------------------------
#ifdef LOG_BYTECODE_AST_RATIO
void ByteCodeWriter::End(long currentAstSize, long maxAstSize)
#else
void ByteCodeWriter::End()
#endif
{
    Assert(isInUse);
    CheckOpen();
    // Terminate the stream so walkers always hit a well-defined end marker.
    Empty(OpCode::EndOfBlock);
    ByteBlock* finalByteCodeBlock;
    ScriptContext* scriptContext = m_functionWrite->GetScriptContext();
    // Copy the arena-backed byte-code into a recycler-allocated final block.
    m_byteCodeData.Copy(scriptContext->GetRecycler(), &finalByteCodeBlock);
    byte * byteBuffer = finalByteCodeBlock->GetBuffer();
    uint byteCount = m_byteCodeData.GetCurrentOffset();
    //
    // Update all branch targets with their actual label destinations.
    //
#ifdef BYTECODE_BRANCH_ISLAND
    if (useBranchIsland)
    {
        // Short and long jumps were recorded in separate lists; patch both.
        PatchJumpOffset<JumpOffset>(m_jumpOffsets, byteBuffer, byteCount);
        PatchJumpOffset<LongJumpOffset>(m_longJumpOffsets, byteBuffer, byteCount);
    }
    else
    {
        // Without branch islands every jump uses the long encoding.
        PatchJumpOffset<LongJumpOffset>(m_jumpOffsets, byteBuffer, byteCount);
    }
#else
    PatchJumpOffset<JumpOffset>(m_jumpOffsets, byteBuffer, byteCount);
#endif
    // Patch up the root object load inline cache with the start index
    uint rootObjectLoadInlineCacheStart = this->m_functionWrite->GetRootObjectLoadInlineCacheStart();
    rootObjectLoadInlineCacheOffsets.Map([=](size_t offset)
    {
        Assert(offset < byteCount - sizeof(int));
        unaligned uint * pnBackPatch = reinterpret_cast<unaligned uint *>(&byteBuffer[offset]);
        *pnBackPatch += rootObjectLoadInlineCacheStart;
    });
    // Patch up the root object load method inline cache with the start index
    uint rootObjectLoadMethodInlineCacheStart = this->m_functionWrite->GetRootObjectLoadMethodInlineCacheStart();
    rootObjectLoadMethodInlineCacheOffsets.Map([=](size_t offset)
    {
        Assert(offset < byteCount - sizeof(int));
        unaligned uint * pnBackPatch = reinterpret_cast<unaligned uint *>(&byteBuffer[offset]);
        *pnBackPatch += rootObjectLoadMethodInlineCacheStart;
    });
    // Patch up the root object store inline cache with the start index
    uint rootObjectStoreInlineCacheStart = this->m_functionWrite->GetRootObjectStoreInlineCacheStart();
    rootObjectStoreInlineCacheOffsets.Map([=](size_t offset)
    {
        Assert(offset < byteCount - sizeof(int));
        unaligned uint * pnBackPatch = reinterpret_cast<unaligned uint *>(&byteBuffer[offset]);
        *pnBackPatch += rootObjectStoreInlineCacheStart;
    });
    //
    // Store the final trimmed byte-code on the function.
    //
    ByteBlock* finalAuxiliaryBlock;
    ByteBlock* finalAuxiliaryContextBlock;
    m_auxiliaryData.Copy(m_functionWrite->GetScriptContext()->GetRecycler(), &finalAuxiliaryBlock);
    m_auxContextData.Copy(m_functionWrite->GetScriptContext()->GetRecycler(), &finalAuxiliaryContextBlock);
    m_functionWrite->AllocateInlineCache();
    m_functionWrite->AllocateObjectLiteralTypeArray();
    if (!PHASE_OFF(Js::ScriptFunctionWithInlineCachePhase, m_functionWrite) && !PHASE_OFF(Js::InlineApplyTargetPhase, m_functionWrite))
    {
        if (m_functionWrite->CanFunctionObjectHaveInlineCaches())
        {
            m_functionWrite->SetInlineCachesOnFunctionObject(true);
        }
    }
    // Loop headers are only materialized when loop bodies can be jitted and
    // the function has no finally (and no try, unless the in-try-catch phase
    // is enabled).
    if (this->DoJitLoopBodies() &&
        !this->m_functionWrite->GetFunctionBody()->GetHasFinally() &&
        !(this->m_functionWrite->GetFunctionBody()->GetHasTry() && PHASE_OFF(Js::JITLoopBodyInTryCatchPhase, this->m_functionWrite)))
    {
        AllocateLoopHeaders();
    }
    m_functionWrite->MarkScript(finalByteCodeBlock, finalAuxiliaryBlock, finalAuxiliaryContextBlock,
        m_byteCodeCount, m_byteCodeInLoopCount, m_byteCodeWithoutLDACount);
#if ENABLE_PROFILE_INFO
    m_functionWrite->LoadDynamicProfileInfo();
#endif
    JS_ETW(EventWriteJSCRIPT_BYTECODEGEN_METHOD(m_functionWrite->GetHostSourceContext(), m_functionWrite->GetScriptContext(), m_functionWrite->GetLocalFunctionId(), m_functionWrite->GetByteCodeCount(), this->GetTotalSize(), m_functionWrite->GetExternalDisplayName()));
#ifdef LOG_BYTECODE_AST_RATIO
    // log the bytecode AST ratio
    if (currentAstSize == maxAstSize)
    {
        float astBytecodeRatio = (float)currentAstSize / (float)byteCount;
        Output::Print(L"\tAST Bytecode ratio: %f\n", astBytecodeRatio);
    }
#endif
    // TODO: add validation for source mapping under #dbg
    //
    // Reset the writer to prepare for the next user.
    //
    Reset();
}
  204. void ByteCodeWriter::AllocateLoopHeaders()
  205. {
  206. m_functionWrite->AllocateLoopHeaders();
  207. m_loopHeaders->Map([this](int index, ByteCodeWriter::LoopHeaderData& data)
  208. {
  209. LoopHeader *loopHeader = m_functionWrite->GetLoopHeader(index);
  210. loopHeader->startOffset = data.startOffset;
  211. loopHeader->endOffset = data.endOffset;
  212. loopHeader->isNested = data.isNested;
  213. });
  214. }
///----------------------------------------------------------------------------
///
/// Reset() discards any current byte-code and resets to a known "empty" state:
/// - This method may be called at any time between Create() and Dispose().
///
///----------------------------------------------------------------------------
void ByteCodeWriter::Reset()
{
    DebugOnly(isInUse = false);
    Assert(isInitialized);
    // Rewind the three encode buffers (capacity is retained by the arena).
    m_byteCodeData.Reset();
    m_auxiliaryData.Reset();
    m_auxContextData.Reset();
#ifdef BYTECODE_BRANCH_ISLAND
    // Re-arm branch-island tracking for the next function.
    lastOpcode = Js::OpCode::FunctionEntry;
    this->UpdateNextBranchIslandOffset(0, 0);
    m_longJumpOffsets->Clear();
#endif
    // Drop all label/jump/loop bookkeeping.
    m_labelOffsets->Clear();
    m_jumpOffsets->Clear();
    m_loopHeaders->Clear();
    // The offset lists share m_labelOffsets' allocator (see their Prepend sites).
    rootObjectLoadInlineCacheOffsets.Clear(m_labelOffsets->GetAllocator());
    rootObjectStoreInlineCacheOffsets.Clear(m_labelOffsets->GetAllocator());
    rootObjectLoadMethodInlineCacheOffsets.Clear(m_labelOffsets->GetAllocator());
    callRegToLdFldCacheIndexMap->ResetNoDelete();
    // Detach from the function and zero per-function statistics/state.
    m_pMatchingNode = nullptr;
    m_matchingNodeRefCount = 0;
    m_functionWrite = nullptr;
    m_byteCodeCount = 0;
    m_byteCodeWithoutLDACount = 0;
    m_byteCodeInLoopCount = 0;
    m_loopNest = 0;
    m_currentDebuggerScope = nullptr;
}
  249. inline Js::RegSlot ByteCodeWriter::ConsumeReg(Js::RegSlot reg)
  250. {
  251. CheckReg(reg);
  252. Assert(this->m_functionWrite);
  253. return this->m_functionWrite->MapRegSlot(reg);
  254. }
// Debug check: a function must currently be open for writing
// (Begin() called, End()/Reset() not yet).
inline void ByteCodeWriter::CheckOpen()
{
    AssertMsg(m_functionWrite != nullptr, "Must Begin() a function to write byte-code into");
}
// Debug check: the opcode is a valid byte-code opcode, not back-end-only,
// and matches the layout the caller is about to encode.
inline void ByteCodeWriter::CheckOp(OpCode op, OpLayoutType layoutType)
{
    AssertMsg(OpCodeUtil::IsValidByteCodeOpcode(op), "Ensure valid OpCode");
#if ENABLE_NATIVE_CODEGEN
    AssertMsg(!OpCodeAttr::BackEndOnly(op), "Can't write back end only OpCode");
#endif
    AssertMsg(OpCodeUtil::GetOpCodeLayout(op) == layoutType, "Ensure correct layout for OpCode");
}
// Debug check: the label was created (DefineLabel) before being referenced;
// label IDs index into m_labelOffsets.
inline void ByteCodeWriter::CheckLabel(ByteCodeLabel labelID)
{
    AssertMsg(labelID < m_labelOffsets->Count(),
        "Label must be previously defined before being marked in the byte-code");
}
  272. inline void ByteCodeWriter::CheckReg(RegSlot registerID)
  273. {
  274. AssertMsg(registerID != Js::Constants::NoRegister, "bad register");
  275. if (registerID == Js::Constants::NoRegister)
  276. Js::Throw::InternalError();
  277. }
// Emits an opcode that carries no operands (OpLayoutType::Empty).
void ByteCodeWriter::Empty(OpCode op)
{
    CheckOpen();
    CheckOp(op, OpLayoutType::Empty);
    m_byteCodeData.Encode(op, this);
}
// Encodes a layout by trying size policies in increasing width
// (small -> medium -> large). The large policy must always succeed.
#define MULTISIZE_LAYOUT_WRITE(layout, ...) \
    if (!TryWrite##layout<SmallLayoutSizePolicy>(__VA_ARGS__) && !TryWrite##layout<MediumLayoutSizePolicy>(__VA_ARGS__)) \
    { \
        bool success = TryWrite##layout<LargeLayoutSizePolicy>(__VA_ARGS__); \
        Assert(success); \
    }
  290. template <typename SizePolicy>
  291. bool ByteCodeWriter::TryWriteReg1(OpCode op, RegSlot R0)
  292. {
  293. OpLayoutT_Reg1<SizePolicy> layout;
  294. if (SizePolicy::Assign(layout.R0, R0))
  295. {
  296. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  297. return true;
  298. }
  299. return false;
  300. }
  301. void ByteCodeWriter::Reg1(OpCode op, RegSlot R0)
  302. {
  303. CheckOpen();
  304. CheckOp(op, OpLayoutType::Reg1);
  305. Assert(OpCodeAttr::HasMultiSizeLayout(op));
  306. R0 = ConsumeReg(R0);
  307. MULTISIZE_LAYOUT_WRITE(Reg1, op, R0);
  308. }
// Tries to encode a Reg2WithICIndex layout under SizePolicy; returns false
// when any operand doesn't fit the policy's field widths.
template <typename SizePolicy>
bool ByteCodeWriter::TryWriteReg2WithICIndex(OpCode op, RegSlot R0, RegSlot R1, uint32 inlineCacheIndex, bool isRootLoad)
{
    OpLayoutT_Reg2WithICIndex<SizePolicy> layout;
    if (SizePolicy::Assign(layout.R0, R0) && SizePolicy::Assign(layout.R1, R1) && SizePolicy::Assign(layout.inlineCacheIndex, inlineCacheIndex))
    {
        size_t offset = m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
        if (isRootLoad)
        {
            // Record the byte offset of the encoded inlineCacheIndex field
            // (instruction start + opcode size + field offset within the layout)
            // so End() can rebase it by the root-object load-method cache start.
            size_t inlineCacheOffset = offset + OpCodeUtil::EncodedSize(op, SizePolicy::LayoutEnum)
                + offsetof(OpLayoutT_Reg2WithICIndex<SizePolicy>, inlineCacheIndex);
            rootObjectLoadMethodInlineCacheOffsets.Prepend(m_labelOffsets->GetAllocator(), inlineCacheOffset);
        }
        return true;
    }
    return false;
}
  326. template <typename SizePolicy>
  327. bool ByteCodeWriter::TryWriteReg2(OpCode op, RegSlot R0, RegSlot R1)
  328. {
  329. OpLayoutT_Reg2<SizePolicy> layout;
  330. if (SizePolicy::Assign(layout.R0, R0) && SizePolicy::Assign(layout.R1, R1))
  331. {
  332. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  333. return true;
  334. }
  335. return false;
  336. }
// Writes a two-register instruction, converting eligible opcodes to their
// profiled forms (StrictLdThis, BeginSwitch, LdLen_A) and appending the
// allocated profile id(s) after the layout when profiling applies.
void ByteCodeWriter::Reg2(OpCode op, RegSlot R0, RegSlot R1)
{
    CheckOpen();
    CheckOp(op, OpLayoutType::Reg2);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    // StrictLdThis is profiled without a profile id slot - just an opcode swap.
    if (DoDynamicProfileOpcode(CheckThisPhase) ||
        DoDynamicProfileOpcode(TypedArrayTypeSpecPhase) ||
        DoDynamicProfileOpcode(ArrayCheckHoistPhase))
    {
        if (op == OpCode::StrictLdThis)
        {
            op = OpCode::ProfiledStrictLdThis;
        }
    }
    R0 = ConsumeReg(R0);
    R1 = ConsumeReg(R1);
    // Consume any LdFld cache index previously recorded for R1 (the lookup
    // also removes the entry from the map).
    CacheIdUnit unit;
    unit.cacheId = Js::Constants::NoInlineCacheIndex;
    callRegToLdFldCacheIndexMap->TryGetValueAndRemove(R1, &unit);
    bool isProfiled = false;
    // NOTE(review): isProfiled2 and isReg2WithICIndex are never set to true
    // anywhere in this function - the Reg2WithICIndex and second-profile-id
    // paths below look unreachable here. TODO confirm whether a caller/branch
    // elsewhere was meant to drive them.
    bool isProfiled2 = false;
    bool isReg2WithICIndex = false;
    Js::ProfileId profileId = Js::Constants::NoProfileId;
    Js::ProfileId profileId2 = Js::Constants::NoProfileId;
    // BeginSwitch: profile the switch expression when the phase is on and a
    // profile id can be allocated.
    if (op == Js::OpCode::BeginSwitch && DoDynamicProfileOpcode(SwitchOptPhase) &&
        this->m_functionWrite->AllocProfiledSwitch(&profileId))
    {
        OpCodeUtil::ConvertNonCallOpToProfiled(op);
        isProfiled = true;
    }
    Assert(DoProfileNewScObjArrayOp(op) == false);
    Assert(DoProfileNewScObjectOp(op) == false);
    // LdLen_A: profiled as a load-element when any relevant type-spec phase is on.
    if (op == Js::OpCode::LdLen_A
        && (DoDynamicProfileOpcode(AggressiveIntTypeSpecPhase) ||
            DoDynamicProfileOpcode(FloatTypeSpecPhase) ||
            DoDynamicProfileOpcode(TypedArrayTypeSpecPhase) ||
            DoDynamicProfileOpcode(ArrayCheckHoistPhase))
        && this->m_functionWrite->AllocProfiledLdElemId(&profileId))
    {
        OpCodeUtil::ConvertNonCallOpToProfiled(op);
        isProfiled = true;
    }
    if (isReg2WithICIndex)
    {
        MULTISIZE_LAYOUT_WRITE(Reg2WithICIndex, op, R0, R1, unit.cacheId, unit.isRootObjectCache);
    }
    else
    {
        MULTISIZE_LAYOUT_WRITE(Reg2, op, R0, R1);
    }
    // Profile ids are appended immediately after the encoded layout.
    if (isProfiled)
    {
        m_byteCodeData.Encode(&profileId, sizeof(Js::ProfileId));
        if (isProfiled2)
        {
            m_byteCodeData.Encode(&profileId2, sizeof(Js::ProfileId));
        }
    }
}
  396. template <typename SizePolicy>
  397. bool ByteCodeWriter::TryWriteReg3(OpCode op, RegSlot R0, RegSlot R1, RegSlot R2)
  398. {
  399. OpLayoutT_Reg3<SizePolicy> layout;
  400. if (SizePolicy::Assign(layout.R0, R0) && SizePolicy::Assign(layout.R1, R1) && SizePolicy::Assign(layout.R2, R2))
  401. {
  402. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  403. return true;
  404. }
  405. return false;
  406. }
  407. void ByteCodeWriter::Reg3(OpCode op, RegSlot R0, RegSlot R1, RegSlot R2)
  408. {
  409. CheckOpen();
  410. CheckOp(op, OpLayoutType::Reg3);
  411. Assert(OpCodeAttr::HasMultiSizeLayout(op));
  412. R0 = ConsumeReg(R0);
  413. R1 = ConsumeReg(R1);
  414. R2 = ConsumeReg(R2);
  415. ProfileId profileId = 0;
  416. bool isProfiled = false;
  417. if ((DoDynamicProfileOpcode(FloatTypeSpecPhase) && (op == Js::OpCode::Div_A || op == Js::OpCode::Rem_A)) &&
  418. this->m_functionWrite->AllocProfiledDivOrRem(&profileId))
  419. {
  420. isProfiled = true;
  421. OpCodeUtil::ConvertNonCallOpToProfiled(op);
  422. }
  423. MULTISIZE_LAYOUT_WRITE(Reg3, op, R0, R1, R2);
  424. if (isProfiled)
  425. {
  426. m_byteCodeData.Encode(&profileId, sizeof(Js::ProfileId));
  427. }
  428. }
  429. template <typename SizePolicy>
  430. bool ByteCodeWriter::TryWriteReg3C(OpCode op, RegSlot R0, RegSlot R1, RegSlot R2, CacheId cacheId)
  431. {
  432. OpLayoutT_Reg3C<SizePolicy> layout;
  433. if (SizePolicy::Assign(layout.R0, R0) && SizePolicy::Assign(layout.R1, R1) && SizePolicy::Assign(layout.R2, R2)
  434. && SizePolicy::Assign(layout.inlineCacheIndex, cacheId))
  435. {
  436. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  437. return true;
  438. }
  439. return false;
  440. }
  441. void ByteCodeWriter::Reg3C(OpCode op, RegSlot R0, RegSlot R1, RegSlot R2, uint cacheId)
  442. {
  443. CheckOpen();
  444. CheckOp(op, OpLayoutType::Reg3C);
  445. Assert(OpCodeAttr::HasMultiSizeLayout(op));
  446. R0 = ConsumeReg(R0);
  447. R1 = ConsumeReg(R1);
  448. R2 = ConsumeReg(R2);
  449. MULTISIZE_LAYOUT_WRITE(Reg3C, op, R0, R1, R2, cacheId);
  450. }
  451. template <typename SizePolicy>
  452. bool ByteCodeWriter::TryWriteReg4(OpCode op, RegSlot R0, RegSlot R1, RegSlot R2, RegSlot R3)
  453. {
  454. OpLayoutT_Reg4<SizePolicy> layout;
  455. if (SizePolicy::Assign(layout.R0, R0) && SizePolicy::Assign(layout.R1, R1) && SizePolicy::Assign(layout.R2, R2)
  456. && SizePolicy::Assign(layout.R3, R3))
  457. {
  458. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  459. return true;
  460. }
  461. return false;
  462. }
void ByteCodeWriter::Reg4(OpCode op, RegSlot R0, RegSlot R1, RegSlot R2, RegSlot R3)
{
    // Emits a four-register instruction.
    CheckOpen();
    CheckOp(op, OpLayoutType::Reg4);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    R0 = ConsumeReg(R0);
    R1 = ConsumeReg(R1);
    R2 = ConsumeReg(R2);
    R3 = ConsumeReg(R3);
    MULTISIZE_LAYOUT_WRITE(Reg4, op, R0, R1, R2, R3);
}
  474. template <typename SizePolicy>
  475. bool ByteCodeWriter::TryWriteReg2B1(OpCode op, RegSlot R0, RegSlot R1, uint8 B2)
  476. {
  477. OpLayoutT_Reg2B1<SizePolicy> layout;
  478. if (SizePolicy::Assign(layout.R0, R0) && SizePolicy::Assign(layout.R1, R1) && SizePolicy::Assign(layout.B2, B2))
  479. {
  480. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  481. return true;
  482. }
  483. return false;
  484. }
void ByteCodeWriter::Reg2B1(OpCode op, RegSlot R0, RegSlot R1, uint8 B2)
{
    // Emits a two-register instruction with one immediate byte operand.
    CheckOpen();
    CheckOp(op, OpLayoutType::Reg2B1);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    R0 = ConsumeReg(R0);
    R1 = ConsumeReg(R1);
    MULTISIZE_LAYOUT_WRITE(Reg2B1, op, R0, R1, B2);
}
  494. template <typename SizePolicy>
  495. bool ByteCodeWriter::TryWriteReg3B1(OpCode op, RegSlot R0, RegSlot R1, RegSlot R2, uint8 B3)
  496. {
  497. OpLayoutT_Reg3B1<SizePolicy> layout;
  498. if (SizePolicy::Assign(layout.R0, R0) && SizePolicy::Assign(layout.R1, R1) && SizePolicy::Assign(layout.R2, R2)
  499. && SizePolicy::Assign(layout.B3, B3))
  500. {
  501. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  502. return true;
  503. }
  504. return false;
  505. }
void ByteCodeWriter::Reg3B1(OpCode op, RegSlot R0, RegSlot R1, RegSlot R2, uint8 B3)
{
    // Emits a three-register instruction with one immediate byte operand.
    CheckOpen();
    CheckOp(op, OpLayoutType::Reg3B1);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    R0 = ConsumeReg(R0);
    R1 = ConsumeReg(R1);
    R2 = ConsumeReg(R2);
    MULTISIZE_LAYOUT_WRITE(Reg3B1, op, R0, R1, R2, B3);
}
  516. template <typename SizePolicy>
  517. bool ByteCodeWriter::TryWriteReg5(OpCode op, RegSlot R0, RegSlot R1, RegSlot R2, RegSlot R3, RegSlot R4)
  518. {
  519. OpLayoutT_Reg5<SizePolicy> layout;
  520. if (SizePolicy::Assign(layout.R0, R0) && SizePolicy::Assign(layout.R1, R1) && SizePolicy::Assign(layout.R2, R2)
  521. && SizePolicy::Assign(layout.R3, R3) && SizePolicy::Assign(layout.R4, R4))
  522. {
  523. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  524. return true;
  525. }
  526. return false;
  527. }
void ByteCodeWriter::Reg5(OpCode op, RegSlot R0, RegSlot R1, RegSlot R2, RegSlot R3, RegSlot R4)
{
    // Emits a five-register instruction.
    CheckOpen();
    CheckOp(op, OpLayoutType::Reg5);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    R0 = ConsumeReg(R0);
    R1 = ConsumeReg(R1);
    R2 = ConsumeReg(R2);
    R3 = ConsumeReg(R3);
    R4 = ConsumeReg(R4);
    MULTISIZE_LAYOUT_WRITE(Reg5, op, R0, R1, R2, R3, R4);
}
  540. template <typename SizePolicy>
  541. bool ByteCodeWriter::TryWriteUnsigned1(OpCode op, uint C1)
  542. {
  543. OpLayoutT_Unsigned1<SizePolicy> layout;
  544. if (SizePolicy::Assign(layout.C1, C1))
  545. {
  546. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  547. return true;
  548. }
  549. return false;
  550. }
void ByteCodeWriter::Unsigned1(OpCode op, uint C1)
{
    // Emits an instruction carrying a single unsigned immediate operand.
    CheckOpen();
    CheckOp(op, OpLayoutType::Unsigned1);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    MULTISIZE_LAYOUT_WRITE(Unsigned1, op, C1);
}
void ByteCodeWriter::ArgIn0(RegSlot reg)
{
    // Emits OpCode::ArgIn0 with 'reg' as its only operand, via the Reg1 path.
    // Only valid when the function has at least one declared in-param.
    AssertMsg(0 < m_functionWrite->GetInParamsCount(),
        "Ensure source arg was declared in prologue");
    Reg1(OpCode::ArgIn0, reg);
}
  564. template void ByteCodeWriter::ArgOut<true>(ArgSlot arg, RegSlot reg, ProfileId callSiteId);
  565. template void ByteCodeWriter::ArgOut<false>(ArgSlot arg, RegSlot reg, ProfileId callSiteId);
  566. template <typename SizePolicy>
  567. bool ByteCodeWriter::TryWriteArg(OpCode op, ArgSlot arg, RegSlot reg)
  568. {
  569. OpLayoutT_Arg<SizePolicy> layout;
  570. if (SizePolicy::Assign(layout.Arg, arg) && SizePolicy::Assign(layout.Reg, reg))
  571. {
  572. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  573. return true;
  574. }
  575. return false;
  576. }
template <bool isVar>
void ByteCodeWriter::ArgOut(ArgSlot arg, RegSlot reg, ProfileId callSiteId)
{
    // Writes an outgoing call argument. isVar selects ArgOut_A vs
    // ArgOut_ANonVar; only the var form can be upgraded to ProfiledArgOut_A.
    CheckOpen();
    Assert(OpCodeAttr::HasMultiSizeLayout(OpCode::ArgOut_A) && OpCodeAttr::HasMultiSizeLayout(OpCode::ArgOut_ANonVar));
    // Note: don't "consume" the arg slot, as the passed-in value is the final one.
    reg = ConsumeReg(reg);
    OpCode op;
    if (isVar)
    {
        op = OpCode::ArgOut_A;
    }
    else
    {
        // Non-var arguments are never profiled: emit and return immediately.
        op = OpCode::ArgOut_ANonVar;
        MULTISIZE_LAYOUT_WRITE(Arg, op, arg, reg);
        return;
    }
    // Upgrade to ProfiledArgOut_A only when inline profiling is active, the
    // argument position is small enough for constant-argument inlining, the
    // source register falls inside the function's constant-register range,
    // a call-site id is available, and we're not in debug mode.
    if (DoDynamicProfileOpcode(InlinePhase)
        && arg > 0 && arg < Js::Constants::MaximumArgumentCountForConstantArgumentInlining
        && (reg > FunctionBody::FirstRegSlot && reg < m_functionWrite->GetConstantCount())
        && callSiteId != Js::Constants::NoProfileId
        && !m_isInDebugMode // We don't inline in debug mode, so no need to emit ProfiledArgOut_A
        )
    {
        Assert((reg > FunctionBody::FirstRegSlot && reg < m_functionWrite->GetConstantCount()));
        MULTISIZE_LAYOUT_WRITE(Arg, Js::OpCode::ProfiledArgOut_A, arg, reg);
        // The profiled form is followed by the call-site id.
        m_byteCodeData.Encode(&callSiteId, sizeof(Js::ProfileId));
    }
    else
    {
        MULTISIZE_LAYOUT_WRITE(Arg, op, arg, reg);
        return;
    }
}
  612. template <typename SizePolicy>
  613. bool ByteCodeWriter::TryWriteArgNoSrc(OpCode op, ArgSlot arg)
  614. {
  615. OpLayoutT_ArgNoSrc<SizePolicy> layout;
  616. if (SizePolicy::Assign(layout.Arg, arg))
  617. {
  618. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  619. return true;
  620. }
  621. return false;
  622. }
void ByteCodeWriter::ArgOutEnv(ArgSlot arg)
{
    // Emits ArgOut_Env for slot 'arg'; this layout carries no source register.
    CheckOpen();
    Assert(OpCodeAttr::HasMultiSizeLayout(OpCode::ArgOut_Env));
    MULTISIZE_LAYOUT_WRITE(ArgNoSrc, OpCode::ArgOut_Env, arg);
}
void ByteCodeWriter::Br(ByteCodeLabel labelID)
{
    // Convenience overload: unconditional branch to labelID.
    Br(OpCode::Br, labelID);
}
// For switch case - default branching
void ByteCodeWriter::Br(OpCode op, ByteCodeLabel labelID)
{
    // Emits a branch with the fixed-size Br layout. The jump offset field is
    // seeded with its own distance from the end of the layout; AddJumpOffset
    // records this site so it can be patched once the label is resolved.
    CheckOpen();
    CheckOp(op, OpLayoutType::Br);
    CheckLabel(labelID);
    Assert(!OpCodeAttr::HasMultiSizeLayout(op));
    size_t const offsetOfRelativeJumpOffsetFromEnd = sizeof(OpLayoutBr) - offsetof(OpLayoutBr, RelativeJumpOffset);
    OpLayoutBr data;
    data.RelativeJumpOffset = offsetOfRelativeJumpOffsetFromEnd;
    m_byteCodeData.Encode(op, &data, sizeof(data), this);
    AddJumpOffset(op, labelID, offsetOfRelativeJumpOffsetFromEnd);
}
void ByteCodeWriter::BrS(OpCode op, ByteCodeLabel labelID, byte val)
{
    // Emits a branch carrying one extra byte operand (BrS layout).
    CheckOpen();
    CheckOp(op, OpLayoutType::BrS);
    CheckLabel(labelID);
    Assert(!OpCodeAttr::HasMultiSizeLayout(op));
    // Seed the jump offset with its distance from the layout end and record
    // the site for later label fix-up.
    size_t const offsetOfRelativeJumpOffsetFromEnd = sizeof(OpLayoutBrS) - offsetof(OpLayoutBrS, RelativeJumpOffset);
    OpLayoutBrS data;
    data.RelativeJumpOffset = offsetOfRelativeJumpOffsetFromEnd;
    data.val = val;
    m_byteCodeData.Encode(op, &data, sizeof(data), this);
    AddJumpOffset(op, labelID, offsetOfRelativeJumpOffsetFromEnd);
}
  659. template <typename SizePolicy>
  660. bool ByteCodeWriter::TryWriteBrReg1(OpCode op, ByteCodeLabel labelID, RegSlot R1)
  661. {
  662. OpLayoutT_BrReg1<SizePolicy> layout;
  663. if (SizePolicy::Assign(layout.R1, R1))
  664. {
  665. size_t const offsetOfRelativeJumpOffsetFromEnd = sizeof(OpLayoutT_BrReg1<SizePolicy>) - offsetof(OpLayoutT_BrReg1<SizePolicy>, RelativeJumpOffset);
  666. layout.RelativeJumpOffset = offsetOfRelativeJumpOffsetFromEnd;
  667. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  668. AddJumpOffset(op, labelID, offsetOfRelativeJumpOffsetFromEnd);
  669. return true;
  670. }
  671. return false;
  672. }
void ByteCodeWriter::BrReg1(OpCode op, ByteCodeLabel labelID, RegSlot R1)
{
    // Emits a branch instruction that tests one register.
    CheckOpen();
    CheckOp(op, OpLayoutType::BrReg1);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    CheckLabel(labelID);
    R1 = ConsumeReg(R1);
    MULTISIZE_LAYOUT_WRITE(BrReg1, op, labelID, R1);
}
  682. template <typename SizePolicy>
  683. bool ByteCodeWriter::TryWriteBrReg2(OpCode op, ByteCodeLabel labelID, RegSlot R1, RegSlot R2)
  684. {
  685. OpLayoutT_BrReg2<SizePolicy> layout;
  686. if (SizePolicy::Assign(layout.R1, R1) && SizePolicy::Assign(layout.R2, R2))
  687. {
  688. size_t const offsetOfRelativeJumpOffsetFromEnd = sizeof(OpLayoutT_BrReg2<SizePolicy>) - offsetof(OpLayoutT_BrReg2<SizePolicy>, RelativeJumpOffset);
  689. layout.RelativeJumpOffset = offsetOfRelativeJumpOffsetFromEnd;
  690. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  691. AddJumpOffset(op, labelID, offsetOfRelativeJumpOffsetFromEnd);
  692. return true;
  693. }
  694. return false;
  695. }
void ByteCodeWriter::BrReg2(OpCode op, ByteCodeLabel labelID, RegSlot R1, RegSlot R2)
{
    // Emits a branch instruction that tests two registers.
    CheckOpen();
    CheckOp(op, OpLayoutType::BrReg2);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    CheckLabel(labelID);
    R1 = ConsumeReg(R1);
    R2 = ConsumeReg(R2);
    MULTISIZE_LAYOUT_WRITE(BrReg2, op, labelID, R1, R2);
}
void ByteCodeWriter::BrProperty(OpCode op, ByteCodeLabel labelID, RegSlot instance, PropertyIdIndexType index)
{
    // Emits a branch keyed on a property of 'instance' (fixed-size layout).
    CheckOpen();
    CheckOp(op, OpLayoutType::BrProperty);
    Assert(!OpCodeAttr::HasMultiSizeLayout(op));
    CheckLabel(labelID);
    instance = ConsumeReg(instance);
    // Seed the jump offset with its distance from the layout end and record
    // the site for later label fix-up.
    size_t const offsetOfRelativeJumpOffsetFromEnd = sizeof(OpLayoutBrProperty) - offsetof(OpLayoutBrProperty, RelativeJumpOffset);
    OpLayoutBrProperty data;
    data.RelativeJumpOffset = offsetOfRelativeJumpOffsetFromEnd;
    data.Instance = instance;
    data.PropertyIdIndex = index;
    m_byteCodeData.Encode(op, &data, sizeof(data), this);
    AddJumpOffset(op, labelID, offsetOfRelativeJumpOffsetFromEnd);
}
void ByteCodeWriter::BrLocalProperty(OpCode op, ByteCodeLabel labelID, PropertyIdIndexType index)
{
    // Emits a branch keyed on a property id index, with no instance register.
    CheckOpen();
    CheckOp(op, OpLayoutType::BrLocalProperty);
    Assert(!OpCodeAttr::HasMultiSizeLayout(op));
    CheckLabel(labelID);
    // Seed the jump offset with its distance from the layout end and record
    // the site for later label fix-up.
    size_t const offsetOfRelativeJumpOffsetFromEnd = sizeof(OpLayoutBrLocalProperty) - offsetof(OpLayoutBrLocalProperty, RelativeJumpOffset);
    OpLayoutBrLocalProperty data;
    data.RelativeJumpOffset = offsetOfRelativeJumpOffsetFromEnd;
    data.PropertyIdIndex = index;
    m_byteCodeData.Encode(op, &data, sizeof(data), this);
    AddJumpOffset(op, labelID, offsetOfRelativeJumpOffsetFromEnd);
}
void ByteCodeWriter::BrEnvProperty(OpCode op, ByteCodeLabel labelID, PropertyIdIndexType index, int32 slotIndex)
{
    // Emits a branch keyed on a property id index plus an environment slot index.
    CheckOpen();
    CheckOp(op, OpLayoutType::BrEnvProperty);
    Assert(!OpCodeAttr::HasMultiSizeLayout(op));
    CheckLabel(labelID);
    // Seed the jump offset with its distance from the layout end and record
    // the site for later label fix-up.
    size_t const offsetOfRelativeJumpOffsetFromEnd = sizeof(OpLayoutBrEnvProperty) - offsetof(OpLayoutBrEnvProperty, RelativeJumpOffset);
    OpLayoutBrEnvProperty data;
    data.RelativeJumpOffset = offsetOfRelativeJumpOffsetFromEnd;
    data.SlotIndex = slotIndex;
    data.PropertyIdIndex = index;
    m_byteCodeData.Encode(op, &data, sizeof(data), this);
    AddJumpOffset(op, labelID, offsetOfRelativeJumpOffsetFromEnd);
}
bool ByteCodeWriter::DoDynamicProfileOpcode(Phase tag, bool noHeuristics) const
{
    // Decides whether to emit the profiled form of an opcode for the given
    // phase. Always false when profile info support is compiled out.
#if ENABLE_PROFILE_INFO
    if (!DynamicProfileInfo::IsEnabled(tag, this->m_functionWrite))
    {
        return false;
    }
    // Other heuristics
    switch (tag)
    {
    case Phase::InlinePhase:
        // Do profile opcode everywhere if we are an inline candidate
        // Otherwise, only in loops if the function has loop
#pragma prefast(suppress:6236, "DevDiv bug 830883. False positive when PHASE_OFF is #defined as '(false)'.")
        return PHASE_FORCE(Phase::InlinePhase, this->m_functionWrite) ||
            (!this->m_functionWrite->GetDontInline() &&
            (noHeuristics || !this->m_hasLoop || (this->m_loopNest != 0) ||
            !(PHASE_OFF(InlineOutsideLoopsPhase, this->m_functionWrite))));
    default:
        // All other phases: profile whenever the phase is enabled.
        return true;
    }
#else
    return false;
#endif
}
  773. bool ByteCodeWriter::ShouldIncrementCallSiteId(OpCode op)
  774. {
  775. if ((DoProfileCallOp(op) && DoDynamicProfileOpcode(InlinePhase)) ||
  776. (DoProfileNewScObjArrayOp(op) && (DoDynamicProfileOpcode(NativeArrayPhase, true) || DoDynamicProfileOpcode(InlinePhase, true))) ||
  777. (DoProfileNewScObjectOp(op) && (DoDynamicProfileOpcode(InlinePhase, true) || DoDynamicProfileOpcode(FixedNewObjPhase, true))))
  778. {
  779. return true;
  780. }
  781. return false;
  782. }
void ByteCodeWriter::StartCall(OpCode op, ArgSlot ArgCount)
{
    // Emits a StartCall instruction carrying the outgoing argument count,
    // using the fixed OpLayoutStartCall layout.
    CheckOpen();
    CheckOp(op, OpLayoutType::StartCall);
    OpLayoutStartCall data;
    data.ArgCount = ArgCount;
    m_byteCodeData.Encode(op, &data, sizeof(data), this);
}
  791. template <typename SizePolicy>
  792. bool ByteCodeWriter::TryWriteCallIExtended(OpCode op, RegSlot returnValueRegister, RegSlot functionRegister, ArgSlot givenArgCount, CallIExtendedOptions options, uint32 spreadArgsOffset)
  793. {
  794. OpLayoutT_CallIExtended<SizePolicy> layout;
  795. if (SizePolicy::Assign(layout.Return, returnValueRegister) && SizePolicy::Assign(layout.Function, functionRegister)
  796. && SizePolicy::Assign(layout.ArgCount, givenArgCount) && SizePolicy::Assign(layout.Options, options)
  797. && SizePolicy::Assign(layout.SpreadAuxOffset, spreadArgsOffset))
  798. {
  799. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  800. return true;
  801. }
  802. return false;
  803. }
  804. template <typename SizePolicy>
  805. bool ByteCodeWriter::TryWriteCallIExtendedWithICIndex(OpCode op, RegSlot returnValueRegister, RegSlot functionRegister, ArgSlot givenArgCount, InlineCacheIndex inlineCacheIndex, bool isRootLoad, CallIExtendedOptions options, uint32 spreadArgsOffset)
  806. {
  807. OpLayoutT_CallIExtendedWithICIndex<SizePolicy> layout;
  808. if (SizePolicy::Assign(layout.Return, returnValueRegister) && SizePolicy::Assign(layout.Function, functionRegister)
  809. && SizePolicy::Assign(layout.ArgCount, givenArgCount) && SizePolicy::Assign(layout.inlineCacheIndex, inlineCacheIndex)
  810. && SizePolicy::Assign(layout.Options, options) && SizePolicy::Assign(layout.SpreadAuxOffset, spreadArgsOffset))
  811. {
  812. size_t offset = m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  813. if (isRootLoad)
  814. {
  815. size_t inlineCacheOffset = offset + OpCodeUtil::EncodedSize(op, SizePolicy::LayoutEnum)
  816. + offsetof(OpLayoutT_CallIExtendedWithICIndex<SizePolicy>, inlineCacheIndex);
  817. rootObjectLoadMethodInlineCacheOffsets.Prepend(m_labelOffsets->GetAllocator(), inlineCacheOffset);
  818. }
  819. return true;
  820. }
  821. return false;
  822. }
  823. template <typename SizePolicy>
  824. bool ByteCodeWriter::TryWriteCallIExtendedFlags(OpCode op, RegSlot returnValueRegister, RegSlot functionRegister, ArgSlot givenArgCount, CallIExtendedOptions options, uint32 spreadArgsOffset, CallFlags callFlags)
  825. {
  826. OpLayoutT_CallIExtendedFlags<SizePolicy> layout;
  827. if (SizePolicy::Assign(layout.Return, returnValueRegister) && SizePolicy::Assign(layout.Function, functionRegister)
  828. && SizePolicy::Assign(layout.ArgCount, givenArgCount) && SizePolicy::Assign(layout.Options, options)
  829. && SizePolicy::Assign(layout.SpreadAuxOffset, spreadArgsOffset) && SizePolicy::Assign(layout.callFlags, callFlags))
  830. {
  831. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  832. return true;
  833. }
  834. return false;
  835. }
  836. template <typename SizePolicy>
  837. bool ByteCodeWriter::TryWriteCallIExtendedFlagsWithICIndex(OpCode op, RegSlot returnValueRegister, RegSlot functionRegister, ArgSlot givenArgCount, InlineCacheIndex inlineCacheIndex, bool isRootLoad, CallIExtendedOptions options, uint32 spreadArgsOffset, CallFlags callFlags)
  838. {
  839. OpLayoutT_CallIExtendedFlagsWithICIndex<SizePolicy> layout;
  840. if (SizePolicy::Assign(layout.Return, returnValueRegister) && SizePolicy::Assign(layout.Function, functionRegister)
  841. && SizePolicy::Assign(layout.ArgCount, givenArgCount) && SizePolicy::Assign(layout.inlineCacheIndex, inlineCacheIndex)
  842. && SizePolicy::Assign(layout.Options, options) && SizePolicy::Assign(layout.SpreadAuxOffset, spreadArgsOffset)
  843. && SizePolicy::Assign(layout.callFlags, callFlags))
  844. {
  845. size_t offset = m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  846. if (isRootLoad)
  847. {
  848. size_t inlineCacheOffset = offset + OpCodeUtil::EncodedSize(op, SizePolicy::LayoutEnum)
  849. + offsetof(OpLayoutT_CallIExtendedFlagsWithICIndex<SizePolicy>, inlineCacheIndex);
  850. rootObjectLoadMethodInlineCacheOffsets.Prepend(m_labelOffsets->GetAllocator(), inlineCacheOffset);
  851. }
  852. return true;
  853. }
  854. return false;
  855. }
void ByteCodeWriter::CallIExtended(OpCode op, RegSlot returnValueRegister, RegSlot functionRegister, ArgSlot givenArgCount, CallIExtendedOptions options, const void *buffer, uint byteCount, ProfileId callSiteId, CallFlags callFlags)
{
    // Emits an extended call (e.g. with spread args), optionally converting
    // 'op' to a profiled variant and appending one or two ProfileIds.
    CheckOpen();
    bool hasCallFlags = !(callFlags == CallFlags_None);
    if (hasCallFlags)
    {
        CheckOp(op, OpLayoutType::CallIExtendedFlags);
    }
    else
    {
        CheckOp(op, OpLayoutType::CallIExtended);
    }
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    // givenArgCount could be <, ==, or > than Function's "InParams" count
    if (returnValueRegister != Js::Constants::NoRegister)
    {
        returnValueRegister = ConsumeReg(returnValueRegister);
    }
    functionRegister = ConsumeReg(functionRegister);
    // CallISpread is not going to use the ldFld cache index, but still remove it from the map as we expect
    // the entry for a cache index to be removed once we have seen the corresponding call.
    CacheIdUnit unit;
    unit.cacheId = Js::Constants::NoInlineCacheIndex;
    callRegToLdFldCacheIndexMap->TryGetValueAndRemove(functionRegister, &unit);
    bool isProfiled = false, isProfiled2 = false;
    ProfileId profileId = callSiteId, profileId2 = Constants::NoProfileId;
    // NOTE(review): isCallWithICIndex is never set to true in this overload
    // (unlike CallI), so the *WithICIndex emit paths below are currently
    // unreachable — confirm whether this is intentional.
    bool isCallWithICIndex = false;
    if (DoProfileCallOp(op))
    {
        // Prefer call-site (inline) profiling; fall back to return-type
        // profiling when type-spec phases are enabled.
        if (DoDynamicProfileOpcode(InlinePhase) &&
            callSiteId != Js::Constants::NoProfileId)
        {
            op = Js::OpCodeUtil::ConvertCallOpToProfiled(op);
            isProfiled = true;
        }
        else if ((DoDynamicProfileOpcode(AggressiveIntTypeSpecPhase) || DoDynamicProfileOpcode(FloatTypeSpecPhase)) &&
            this->m_functionWrite->AllocProfiledReturnTypeId(&profileId))
        {
            op = Js::OpCodeUtil::ConvertCallOpToProfiledReturnType(op);
            isProfiled = true;
        }
    }
    else if (DoProfileNewScObjArrayOp(op) &&
        (DoDynamicProfileOpcode(NativeArrayPhase, true) || DoDynamicProfileOpcode(InlinePhase, true)) &&
        callSiteId != Js::Constants::NoProfileId &&
        this->m_functionWrite->AllocProfiledArrayCallSiteId(&profileId2))
    {
        // Array constructor calls carry a second profile id (array call site).
        OpCodeUtil::ConvertNonCallOpToProfiled(op);
        isProfiled = true;
        isProfiled2 = true;
    }
    else if (DoProfileNewScObjectOp(op) && (DoDynamicProfileOpcode(InlinePhase, true) || DoDynamicProfileOpcode(FixedNewObjPhase, true)) &&
        callSiteId != Js::Constants::NoProfileId)
    {
        OpCodeUtil::ConvertNonCallOpToProfiled(op);
        isProfiled = true;
    }
    uint spreadArgsOffset = 0;
    if (options & CallIExtended_SpreadArgs)
    {
        // Spread argument data is stored out-of-line in the auxiliary section.
        Assert(buffer != nullptr && byteCount > 0);
        spreadArgsOffset = InsertAuxiliaryData(buffer, byteCount);
    }
    if (isCallWithICIndex)
    {
        if (hasCallFlags == true)
        {
            MULTISIZE_LAYOUT_WRITE(CallIExtendedFlagsWithICIndex, op, returnValueRegister, functionRegister, givenArgCount, unit.cacheId, unit.isRootObjectCache, options, spreadArgsOffset, callFlags);
        }
        else
        {
            MULTISIZE_LAYOUT_WRITE(CallIExtendedWithICIndex, op, returnValueRegister, functionRegister, givenArgCount, unit.cacheId, unit.isRootObjectCache, options, spreadArgsOffset);
        }
    }
    else
    {
        if (hasCallFlags == true)
        {
            MULTISIZE_LAYOUT_WRITE(CallIExtendedFlags, op, returnValueRegister, functionRegister, givenArgCount, options, spreadArgsOffset, callFlags);
        }
        else
        {
            MULTISIZE_LAYOUT_WRITE(CallIExtended, op, returnValueRegister, functionRegister, givenArgCount, options, spreadArgsOffset);
        }
    }
    // Profiled forms are followed by their ProfileId(s).
    if (isProfiled)
    {
        m_byteCodeData.Encode(&profileId, sizeof(Js::ProfileId));
        if (isProfiled2)
        {
            m_byteCodeData.Encode(&profileId2, sizeof(Js::ProfileId));
        }
    }
}
  950. template <typename SizePolicy>
  951. bool ByteCodeWriter::TryWriteCallIWithICIndex(OpCode op, RegSlot returnValueRegister, RegSlot functionRegister, ArgSlot givenArgCount, InlineCacheIndex inlineCacheIndex, bool isRootLoad)
  952. {
  953. OpLayoutT_CallIWithICIndex<SizePolicy> layout;
  954. if (SizePolicy::Assign(layout.Return, returnValueRegister) && SizePolicy::Assign(layout.Function, functionRegister)
  955. && SizePolicy::Assign(layout.ArgCount, givenArgCount) && SizePolicy::Assign(layout.inlineCacheIndex, inlineCacheIndex))
  956. {
  957. size_t offset = m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  958. if (isRootLoad)
  959. {
  960. size_t inlineCacheOffset = offset + OpCodeUtil::EncodedSize(op, SizePolicy::LayoutEnum)
  961. + offsetof(OpLayoutT_CallIWithICIndex<SizePolicy>, inlineCacheIndex);
  962. rootObjectLoadMethodInlineCacheOffsets.Prepend(m_labelOffsets->GetAllocator(), inlineCacheOffset);
  963. }
  964. return true;
  965. }
  966. return false;
  967. }
  968. template <typename SizePolicy>
  969. bool ByteCodeWriter::TryWriteCallIFlagsWithICIndex(OpCode op, RegSlot returnValueRegister, RegSlot functionRegister, ArgSlot givenArgCount, InlineCacheIndex inlineCacheIndex, bool isRootLoad, CallFlags callFlags)
  970. {
  971. OpLayoutT_CallIFlagsWithICIndex<SizePolicy> layout;
  972. if (SizePolicy::Assign(layout.Return, returnValueRegister) && SizePolicy::Assign(layout.Function, functionRegister)
  973. && SizePolicy::Assign(layout.ArgCount, givenArgCount) && SizePolicy::Assign(layout.inlineCacheIndex, inlineCacheIndex)
  974. && SizePolicy::Assign(layout.callFlags, callFlags))
  975. {
  976. size_t offset = m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  977. if (isRootLoad)
  978. {
  979. size_t inlineCacheOffset = offset + OpCodeUtil::EncodedSize(op, SizePolicy::LayoutEnum)
  980. + offsetof(OpLayoutT_CallIFlagsWithICIndex<SizePolicy>, inlineCacheIndex);
  981. rootObjectLoadMethodInlineCacheOffsets.Prepend(m_labelOffsets->GetAllocator(), inlineCacheOffset);
  982. }
  983. return true;
  984. }
  985. return false;
  986. }
  987. template <typename SizePolicy>
  988. bool ByteCodeWriter::TryWriteCallI(OpCode op, RegSlot returnValueRegister, RegSlot functionRegister, ArgSlot givenArgCount)
  989. {
  990. OpLayoutT_CallI<SizePolicy> layout;
  991. if (SizePolicy::Assign(layout.Return, returnValueRegister) && SizePolicy::Assign(layout.Function, functionRegister)
  992. && SizePolicy::Assign(layout.ArgCount, givenArgCount))
  993. {
  994. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  995. return true;
  996. }
  997. return false;
  998. }
  999. template <typename SizePolicy>
  1000. bool ByteCodeWriter::TryWriteCallIFlags(OpCode op, RegSlot returnValueRegister, RegSlot functionRegister, ArgSlot givenArgCount, CallFlags callFlags)
  1001. {
  1002. OpLayoutT_CallIFlags<SizePolicy> layout;
  1003. if (SizePolicy::Assign(layout.Return, returnValueRegister) && SizePolicy::Assign(layout.Function, functionRegister)
  1004. && SizePolicy::Assign(layout.ArgCount, givenArgCount) && SizePolicy::Assign(layout.callFlags, callFlags))
  1005. {
  1006. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1007. return true;
  1008. }
  1009. return false;
  1010. }
void ByteCodeWriter::RemoveEntryForRegSlotFromCacheIdMap(RegSlot regSlot)
{
    // Drops any pending ldFld cache-index entry recorded for this register,
    // mirroring the removal CallI/CallIExtended perform for their function
    // register. The retrieved value is discarded.
    regSlot = ConsumeReg(regSlot);
    CacheIdUnit unit;
    unit.cacheId = Js::Constants::NoInlineCacheIndex;
    callRegToLdFldCacheIndexMap->TryGetValueAndRemove(regSlot, &unit);
}
void ByteCodeWriter::CallI(OpCode op, RegSlot returnValueRegister, RegSlot functionRegister, ArgSlot givenArgCount, ProfileId callSiteId, CallFlags callFlags)
{
    // Emits a call instruction, selecting among CallI / CallIFlags /
    // CallIWithICIndex / CallIFlagsWithICIndex and converting 'op' to a
    // profiled variant when dynamic profiling applies. Profiled forms are
    // followed by one or two ProfileIds.
    CheckOpen();
    bool hasCallFlags = !(callFlags == CallFlags_None);
    if (hasCallFlags == true)
    {
        CheckOp(op, OpLayoutType::CallIFlags);
    }
    else
    {
        CheckOp(op, OpLayoutType::CallI);
    }
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    // givenArgCount could be <, ==, or > than Function's "InParams" count
    if (returnValueRegister != Js::Constants::NoRegister)
    {
        returnValueRegister = ConsumeReg(returnValueRegister);
    }
    functionRegister = ConsumeReg(functionRegister);
    bool isProfiled = false;
    bool isProfiled2 = false;
    bool isCallWithICIndex = false;
    ProfileId profileId = callSiteId;
    ProfileId profileId2 = Constants::NoProfileId;
    // Consume (and remove) the ldFld cache index recorded for the function
    // register, if any; it selects the *WithICIndex layouts below.
    CacheIdUnit unit;
    unit.cacheId = Js::Constants::NoInlineCacheIndex;
    callRegToLdFldCacheIndexMap->TryGetValueAndRemove(functionRegister, &unit);
    if (DoProfileCallOp(op))
    {
        // Prefer call-site (inline) profiling; fall back to return-type
        // profiling when type-spec phases are enabled.
        if (DoDynamicProfileOpcode(InlinePhase) &&
            callSiteId != Js::Constants::NoProfileId)
        {
            if (unit.cacheId == Js::Constants::NoInlineCacheIndex)
            {
                op = Js::OpCodeUtil::ConvertCallOpToProfiled(op);
                isProfiled = true;
            }
            else
            {
                isCallWithICIndex = true;
                op = Js::OpCodeUtil::ConvertCallOpToProfiled(op, true);
                isProfiled = true;
            }
        }
        else if ((DoDynamicProfileOpcode(AggressiveIntTypeSpecPhase) || DoDynamicProfileOpcode(FloatTypeSpecPhase)) &&
            this->m_functionWrite->AllocProfiledReturnTypeId(&profileId))
        {
            op = Js::OpCodeUtil::ConvertCallOpToProfiledReturnType(op);
            isProfiled = true;
        }
    }
    else if (DoProfileNewScObjArrayOp(op) &&
        (DoDynamicProfileOpcode(NativeArrayPhase, true) || DoDynamicProfileOpcode(InlinePhase, true)) &&
        callSiteId != Js::Constants::NoProfileId &&
        this->m_functionWrite->AllocProfiledArrayCallSiteId(&profileId2))
    {
        // Array constructor calls carry a second profile id (array call site).
        OpCodeUtil::ConvertNonCallOpToProfiled(op);
        isProfiled = true;
        isProfiled2 = true;
    }
    else if (DoProfileNewScObjectOp(op) &&
        (DoDynamicProfileOpcode(InlinePhase, true) || DoDynamicProfileOpcode(FixedNewObjPhase, true)) &&
        callSiteId != Js::Constants::NoProfileId)
    {
        if (unit.cacheId == Js::Constants::NoInlineCacheIndex)
        {
            OpCodeUtil::ConvertNonCallOpToProfiled(op);
            isProfiled = true;
        }
        else
        {
            isCallWithICIndex = true;
            OpCodeUtil::ConvertNonCallOpToProfiledWithICIndex(op);
            isProfiled = true;
        }
    }
    // Pick the layout matching the flags / inline-cache-index combination.
    if (isCallWithICIndex)
    {
        if (hasCallFlags == true)
        {
            MULTISIZE_LAYOUT_WRITE(CallIFlagsWithICIndex, op, returnValueRegister, functionRegister, givenArgCount, unit.cacheId, unit.isRootObjectCache, callFlags);
        }
        else
        {
            MULTISIZE_LAYOUT_WRITE(CallIWithICIndex, op, returnValueRegister, functionRegister, givenArgCount, unit.cacheId, unit.isRootObjectCache);
        }
    }
    else
    {
        if (hasCallFlags == true)
        {
            MULTISIZE_LAYOUT_WRITE(CallIFlags, op, returnValueRegister, functionRegister, givenArgCount, callFlags);
        }
        else
        {
            MULTISIZE_LAYOUT_WRITE(CallI, op, returnValueRegister, functionRegister, givenArgCount);
        }
    }
    // Profiled forms are followed by their ProfileId(s).
    if (isProfiled)
    {
        m_byteCodeData.Encode(&profileId, sizeof(Js::ProfileId));
        if (isProfiled2)
        {
            m_byteCodeData.Encode(&profileId2, sizeof(Js::ProfileId));
        }
    }
}
  1125. template <typename SizePolicy>
  1126. bool ByteCodeWriter::TryWriteElementI(OpCode op, RegSlot Value, RegSlot Instance, RegSlot Element)
  1127. {
  1128. OpLayoutT_ElementI<SizePolicy> layout;
  1129. if (SizePolicy::Assign(layout.Value, Value) && SizePolicy::Assign(layout.Instance, Instance)
  1130. && SizePolicy::Assign(layout.Element, Element))
  1131. {
  1132. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1133. return true;
  1134. }
  1135. return false;
  1136. }
void ByteCodeWriter::Element(OpCode op, RegSlot Value, RegSlot Instance, RegSlot Element, bool instanceAtReturnRegOK)
{
    // Emits an indexed element access (Value/Instance/Element registers),
    // substituting the strict-mode delete opcode and/or profiled ld/st
    // variants where applicable.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementI);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    Value = ConsumeReg(Value);
    Instance = ConsumeReg(Instance);
    Element = ConsumeReg(Element);
    // Strict-mode code deletes elements with the strict variant.
    if (this->m_functionWrite->GetIsStrictMode())
    {
        if (op == OpCode::DeleteElemI_A)
        {
            op = OpCode::DeleteElemIStrict_A;
        }
    }
    bool isProfiledLayout = false;
    Js::ProfileId profileId = Js::Constants::NoProfileId;
    Assert(instanceAtReturnRegOK || Instance != 0);
    if (DoDynamicProfileOpcode(AggressiveIntTypeSpecPhase) ||
        DoDynamicProfileOpcode(FloatTypeSpecPhase) ||
        DoDynamicProfileOpcode(TypedArrayTypeSpecPhase) ||
        DoDynamicProfileOpcode(ArrayCheckHoistPhase))
    {
        OpCode newop;
        switch (op)
        {
        case OpCode::LdElemI_A:
            // Loads use their own LdElem profile id pool...
            newop = OpCode::ProfiledLdElemI_A;
            if (this->m_functionWrite->AllocProfiledLdElemId(&profileId))
            {
                isProfiledLayout = true;
                op = newop;
            }
            break;
        case Js::OpCode::StElemI_A:
            newop = OpCode::ProfiledStElemI_A;
            goto StoreCommon;
        case Js::OpCode::StElemI_A_Strict:
            newop = OpCode::ProfiledStElemI_A_Strict;
        StoreCommon:
            // ...while both store forms share the StElem id allocation path.
            if (this->m_functionWrite->AllocProfiledStElemId(&profileId))
            {
                isProfiledLayout = true;
                op = newop;
            }
            break;
        }
    }
    MULTISIZE_LAYOUT_WRITE(ElementI, op, Value, Instance, Element);
    // Profiled layouts are followed by the allocated ProfileId.
    if (isProfiledLayout)
    {
        Assert(profileId != Js::Constants::NoProfileId);
        m_byteCodeData.Encode(&profileId, sizeof(Js::ProfileId));
    }
}
  1192. template <typename SizePolicy>
  1193. bool ByteCodeWriter::TryWriteElementUnsigned1(OpCode op, RegSlot Value, RegSlot Instance, uint32 Element)
  1194. {
  1195. OpLayoutT_ElementUnsigned1<SizePolicy> layout;
  1196. if (SizePolicy::Assign(layout.Value, Value) && SizePolicy::Assign(layout.Instance, Instance)
  1197. && SizePolicy::Assign(layout.Element, Element))
  1198. {
  1199. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1200. return true;
  1201. }
  1202. return false;
  1203. }
void ByteCodeWriter::ElementUnsigned1(OpCode op, RegSlot Value, RegSlot Instance, uint32 Element)
{
    // Emits an element access whose index is a uint32 constant baked into the
    // instruction rather than a register operand.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementUnsigned1);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    Value = ConsumeReg(Value);
    Instance = ConsumeReg(Instance);
    MULTISIZE_LAYOUT_WRITE(ElementUnsigned1, op, Value, Instance, Element);
}
  1213. template <typename SizePolicy>
  1214. bool ByteCodeWriter::TryWriteElementScopedC(OpCode op, RegSlot value, PropertyIdIndexType propertyIdIndex)
  1215. {
  1216. OpLayoutT_ElementScopedC<SizePolicy> layout;
  1217. if (SizePolicy::Assign(layout.Value, value)
  1218. && SizePolicy::Assign(layout.PropertyIdIndex, propertyIdIndex))
  1219. {
  1220. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1221. return true;
  1222. }
  1223. return false;
  1224. }
void ByteCodeWriter::ScopedProperty(OpCode op, RegSlot value, PropertyIdIndexType propertyIdIndex)
{
    // Emits a scoped field access (ElementScopedC layout) identified by a
    // property-id index.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementScopedC);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    value = ConsumeReg(value);
#if DBG
    // Debug-only whitelist of opcodes that legitimately use this layout.
    switch (op)
    {
    case OpCode::ScopedDeleteFld:
    case OpCode::ScopedEnsureNoRedeclFld:
    case OpCode::ScopedInitFunc:
        break;
    default:
        AssertMsg(false, "The specified OpCode is not intended for scoped field-access");
        break;
    }
#endif
    if (this->m_functionWrite->GetIsStrictMode())
    {
        // Strict mode uses a distinct delete opcode.
        if (op == OpCode::ScopedDeleteFld)
        {
            op = OpCode::ScopedDeleteFldStrict;
        }
    }
    MULTISIZE_LAYOUT_WRITE(ElementScopedC, op, value, propertyIdIndex);
}
  1252. template <typename SizePolicy>
  1253. bool ByteCodeWriter::TryWriteElementC(OpCode op, RegSlot value, RegSlot instance, PropertyIdIndexType propertyIdIndex)
  1254. {
  1255. OpLayoutT_ElementC<SizePolicy> layout;
  1256. if (SizePolicy::Assign(layout.Value, value) && SizePolicy::Assign(layout.Instance, instance)
  1257. && SizePolicy::Assign(layout.PropertyIdIndex, propertyIdIndex))
  1258. {
  1259. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1260. return true;
  1261. }
  1262. return false;
  1263. }
void ByteCodeWriter::Property(OpCode op, RegSlot value, RegSlot instance, PropertyIdIndexType propertyIdIndex)
{
    // Emits a (non-patchable) field access against an explicit instance,
    // identified by a property-id index (ElementC layout).
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementC);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    value = ConsumeReg(value);
    instance = ConsumeReg(instance);
#if DBG
    // Debug-only whitelist of opcodes that legitimately use this layout.
    switch (op)
    {
    case OpCode::InitSetFld:
    case OpCode::InitGetFld:
    case OpCode::InitClassMemberGet:
    case OpCode::InitClassMemberSet:
    case OpCode::InitProto:
    case OpCode::DeleteFld:
    case OpCode::DeleteRootFld:
    case OpCode::LdElemUndefScoped:
    case OpCode::StFuncExpr:
        break;
    default:
        AssertMsg(false, "The specified OpCode is not intended for field-access");
        break;
    }
#endif
    if (this->m_functionWrite->GetIsStrictMode())
    {
        // Strict mode swaps the delete opcodes for their strict variants.
        if (op == OpCode::DeleteFld)
        {
            op = OpCode::DeleteFldStrict;
        }
        else if (op == OpCode::DeleteRootFld)
        {
            // We will reach here when in the language service mode, since in that mode we have skipped that error.
            op = OpCode::DeleteRootFldStrict;
        }
    }
    MULTISIZE_LAYOUT_WRITE(ElementC, op, value, instance, propertyIdIndex);
}
  1303. template <typename SizePolicy>
  1304. bool ByteCodeWriter::TryWriteElementSlot(OpCode op, RegSlot value, RegSlot instance, int32 slotId)
  1305. {
  1306. OpLayoutT_ElementSlot<SizePolicy> layout;
  1307. if (SizePolicy::Assign(layout.Value, value) && SizePolicy::Assign(layout.Instance, instance)
  1308. && SizePolicy::Assign(layout.SlotIndex, slotId))
  1309. {
  1310. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1311. return true;
  1312. }
  1313. return false;
  1314. }
void ByteCodeWriter::Slot(OpCode op, RegSlot value, RegSlot instance, int32 slotId)
{
    // Emits a slot access (ElementSlot layout): value <-> instance's slot array
    // at slotId. Unprofiled overload.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementSlot);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    value = ConsumeReg(value);
    instance = ConsumeReg(instance);
#if DBG
    // Debug-only whitelist of opcodes that legitimately use this layout.
    switch (op)
    {
#if ENABLE_NATIVE_CODEGEN
    case OpCode::LdSlotArr:
    case OpCode::StSlot:
    case OpCode::StSlotChkUndecl:
#endif
    case OpCode::StObjSlot:
    case OpCode::StObjSlotChkUndecl:
        break;
    default:
        AssertMsg(false, "The specified OpCode is not intended for slot access");
        break;
    }
#endif
    MULTISIZE_LAYOUT_WRITE(ElementSlot, op, value, instance, slotId);
}
void ByteCodeWriter::Slot(OpCode op, RegSlot value, RegSlot instance, int32 slotId, ProfileId profileId)
{
    // Profiled overload: LdSlot/LdObjSlot are converted to their Profiled*
    // forms when int/float type-spec profiling is enabled and a valid profile
    // id was supplied; the id is then appended after the layout.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementSlot);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    value = ConsumeReg(value);
    instance = ConsumeReg(instance);
    switch (op)
    {
    case OpCode::LdSlot:
    case OpCode::LdObjSlot:
        if ((DoDynamicProfileOpcode(AggressiveIntTypeSpecPhase) || DoDynamicProfileOpcode(FloatTypeSpecPhase)) &&
            profileId != Constants::NoProfileId)
        {
            OpCodeUtil::ConvertNonCallOpToProfiled(op);
        }
        break;
    default:
        AssertMsg(false, "The specified OpCode is not intended for slot access");
        break;
    }
    MULTISIZE_LAYOUT_WRITE(ElementSlot, op, value, instance, slotId);
    if (OpCodeAttr::IsProfiledOp(op))
    {
        // The profile id trails the layout only when the opcode was profiled.
        m_byteCodeData.Encode(&profileId, sizeof(Js::ProfileId));
    }
}
  1367. template <typename SizePolicy>
  1368. bool ByteCodeWriter::TryWriteElementSlotI1(OpCode op, RegSlot value, int32 slotId)
  1369. {
  1370. OpLayoutT_ElementSlotI1<SizePolicy> layout;
  1371. if (SizePolicy::Assign(layout.Value, value)
  1372. && SizePolicy::Assign(layout.SlotIndex, slotId))
  1373. {
  1374. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1375. return true;
  1376. }
  1377. return false;
  1378. }
void ByteCodeWriter::SlotI1(OpCode op, RegSlot value, int32 slotId)
{
    // Emits a single-index slot access (ElementSlotI1 layout), e.g. against the
    // local scope or the environment. Unprofiled overload.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementSlotI1);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    value = ConsumeReg(value);
#if DBG
    // Debug-only whitelist of opcodes that legitimately use this layout.
    switch (op)
    {
    case OpCode::LdEnvObj:
    case OpCode::StLocalSlot:
    case OpCode::StLocalObjSlot:
    case OpCode::StLocalSlotChkUndecl:
    case OpCode::StLocalObjSlotChkUndecl:
    {
        break;
    }
    default:
    {
        AssertMsg(false, "The specified OpCode is not intended for slot access");
        break;
    }
    }
#endif
    MULTISIZE_LAYOUT_WRITE(ElementSlotI1, op, value, slotId);
}
void ByteCodeWriter::SlotI1(OpCode op, RegSlot value, int32 slotId, ProfileId profileId)
{
    // Profiled overload: LdLocalSlot/LdLocalObjSlot are converted to their
    // Profiled* forms when type-spec profiling is on and the profile id is
    // valid; the id is then appended after the layout.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementSlotI1);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    value = ConsumeReg(value);
    switch (op)
    {
    case OpCode::LdLocalSlot:
    case OpCode::LdLocalObjSlot:
        if ((DoDynamicProfileOpcode(AggressiveIntTypeSpecPhase) || DoDynamicProfileOpcode(FloatTypeSpecPhase)) &&
            profileId != Constants::NoProfileId)
        {
            OpCodeUtil::ConvertNonCallOpToProfiled(op);
        }
        break;
    default:
    {
        AssertMsg(false, "The specified OpCode is not intended for slot access");
        break;
    }
    }
    MULTISIZE_LAYOUT_WRITE(ElementSlotI1, op, value, slotId);
    if (OpCodeAttr::IsProfiledOp(op))
    {
        // The profile id trails the layout only when the opcode was profiled.
        m_byteCodeData.Encode(&profileId, sizeof(Js::ProfileId));
    }
}
  1433. template <typename SizePolicy>
  1434. bool ByteCodeWriter::TryWriteElementSlotI2(OpCode op, RegSlot value, int32 slotId1, int32 slotId2)
  1435. {
  1436. OpLayoutT_ElementSlotI2<SizePolicy> layout;
  1437. if (SizePolicy::Assign(layout.Value, value)
  1438. && SizePolicy::Assign(layout.SlotIndex1, slotId1)
  1439. && SizePolicy::Assign(layout.SlotIndex2, slotId2))
  1440. {
  1441. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1442. return true;
  1443. }
  1444. return false;
  1445. }
void ByteCodeWriter::SlotI2(OpCode op, RegSlot value, int32 slotId1, int32 slotId2)
{
    // Emits a two-index slot access (ElementSlotI2 layout), e.g. scope index
    // plus slot index for inner/environment scopes. Unprofiled overload.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementSlotI2);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    value = ConsumeReg(value);
#if DBG
    // Debug-only whitelist of opcodes that legitimately use this layout.
    switch (op)
    {
    case OpCode::StInnerSlot:
    case OpCode::StInnerSlotChkUndecl:
    case OpCode::StInnerObjSlot:
    case OpCode::StInnerObjSlotChkUndecl:
    case OpCode::StEnvSlot:
    case OpCode::StEnvSlotChkUndecl:
    case OpCode::StEnvObjSlot:
    case OpCode::StEnvObjSlotChkUndecl:
    {
        break;
    }
    default:
    {
        AssertMsg(false, "The specified OpCode is not intended for slot access");
        break;
    }
    }
#endif
    MULTISIZE_LAYOUT_WRITE(ElementSlotI2, op, value, slotId1, slotId2);
}
void ByteCodeWriter::SlotI2(OpCode op, RegSlot value, int32 slotId1, int32 slotId2, ProfileId profileId)
{
    // Profiled overload: inner/environment slot loads are converted to their
    // Profiled* forms when type-spec profiling is on and the profile id is
    // valid; the id is then appended after the layout.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementSlotI2);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    value = ConsumeReg(value);
    switch (op)
    {
    case OpCode::LdInnerSlot:
    case OpCode::LdInnerObjSlot:
    case OpCode::LdEnvSlot:
    case OpCode::LdEnvObjSlot:
        if ((DoDynamicProfileOpcode(AggressiveIntTypeSpecPhase) || DoDynamicProfileOpcode(FloatTypeSpecPhase)) &&
            profileId != Constants::NoProfileId)
        {
            OpCodeUtil::ConvertNonCallOpToProfiled(op);
        }
        break;
    default:
    {
        AssertMsg(false, "The specified OpCode is not intended for slot access");
        break;
    }
    }
    MULTISIZE_LAYOUT_WRITE(ElementSlotI2, op, value, slotId1, slotId2);
    if (OpCodeAttr::IsProfiledOp(op))
    {
        // The profile id trails the layout only when the opcode was profiled.
        m_byteCodeData.Encode(&profileId, sizeof(Js::ProfileId));
    }
}
  1505. template <typename SizePolicy>
  1506. bool ByteCodeWriter::TryWriteElementU(OpCode op, RegSlot instance, PropertyIdIndexType index)
  1507. {
  1508. OpLayoutT_ElementU<SizePolicy> layout;
  1509. if (SizePolicy::Assign(layout.Instance, instance) && SizePolicy::Assign(layout.PropertyIdIndex, index))
  1510. {
  1511. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1512. return true;
  1513. }
  1514. return false;
  1515. }
void ByteCodeWriter::ElementU(OpCode op, RegSlot instance, PropertyIdIndexType index)
{
    // Emits an instance + property-id-index instruction (ElementU layout);
    // there is no value register in this layout.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementU);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    instance = ConsumeReg(instance);
    MULTISIZE_LAYOUT_WRITE(ElementU, op, instance, index);
}
  1524. template <typename SizePolicy>
  1525. bool ByteCodeWriter::TryWriteElementScopedU(OpCode op, PropertyIdIndexType index)
  1526. {
  1527. OpLayoutT_ElementScopedU<SizePolicy> layout;
  1528. if (SizePolicy::Assign(layout.PropertyIdIndex, index))
  1529. {
  1530. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1531. return true;
  1532. }
  1533. return false;
  1534. }
void ByteCodeWriter::ElementScopedU(OpCode op, PropertyIdIndexType index)
{
    // Emits a scoped instruction that carries only a property-id index
    // (ElementScopedU layout); no register operands.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementScopedU);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    MULTISIZE_LAYOUT_WRITE(ElementScopedU, op, index);
}
  1542. template <typename SizePolicy>
  1543. bool ByteCodeWriter::TryWriteElementRootU(OpCode op, PropertyIdIndexType index)
  1544. {
  1545. OpLayoutT_ElementRootU<SizePolicy> layout;
  1546. if (SizePolicy::Assign(layout.PropertyIdIndex, index))
  1547. {
  1548. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1549. return true;
  1550. }
  1551. return false;
  1552. }
void ByteCodeWriter::ElementRootU(OpCode op, PropertyIdIndexType index)
{
    // Emits a root-object instruction that carries only a property-id index
    // (ElementRootU layout); no register operands.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementRootU);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    MULTISIZE_LAYOUT_WRITE(ElementRootU, op, index);
}
template <typename SizePolicy>
bool ByteCodeWriter::TryWriteElementRootCP(OpCode op, RegSlot value, uint cacheId, bool isLoadMethod, bool isStore)
{
    // Encodes a root-object patchable field access. On success it also records
    // the byte offset where the inline cache index was written, so the index
    // can be rebased later (see the embedded comment below).
    Assert(!isLoadMethod || !isStore);
    OpLayoutT_ElementRootCP<SizePolicy> layout;
    if (SizePolicy::Assign(layout.Value, value) && SizePolicy::Assign(layout.inlineCacheIndex, cacheId))
    {
        size_t offset = m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
        // Byte offset of the inlineCacheIndex field inside the emitted instruction.
        size_t inlineCacheOffset = offset + OpCodeUtil::EncodedSize(op, SizePolicy::LayoutEnum)
            + offsetof(OpLayoutT_ElementRootCP<SizePolicy>, inlineCacheIndex);
        // Root object inline cache index are given out from 0, but it will be at index after
        // all the plain inline cache. Store the offset of the inline cache index to patch it up later.
        // Three separate lists: store / load-method / plain load accesses.
        SListBase<size_t> * rootObjectInlineCacheOffsets = isStore ?
            &rootObjectStoreInlineCacheOffsets : isLoadMethod ? &rootObjectLoadMethodInlineCacheOffsets : &rootObjectLoadInlineCacheOffsets;
        rootObjectInlineCacheOffsets->Prepend(this->m_labelOffsets->GetAllocator(), inlineCacheOffset);
        return true;
    }
    return false;
}
void ByteCodeWriter::PatchableRootProperty(OpCode op, RegSlot value, uint cacheId, bool isLoadMethod, bool isStore, bool registerCacheIdForCall)
{
    // Emits a patchable (inline-cached) field access against the root object,
    // converting opcodes to their Profiled* variants when the relevant dynamic
    // profile phases are enabled.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementRootCP);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    Assert(!isLoadMethod || !isStore);
    value = ConsumeReg(value);
    switch (op)
    {
    case OpCode::LdRootFld:
    case OpCode::LdRootFldForTypeOf:
        if (DoDynamicProfileOpcode(AggressiveIntTypeSpecPhase) ||
            DoDynamicProfileOpcode(FloatTypeSpecPhase) ||
            DoDynamicProfileOpcode(ObjTypeSpecPhase) ||
            DoDynamicProfileOpcode(InlinePhase) ||
            DoDynamicProfileOpcode(ProfileBasedFldFastPathPhase))
        {
            OpCodeUtil::ConvertNonCallOpToProfiled(op);
        }
        break;
    case OpCode::LdRootMethodFld:
        if (registerCacheIdForCall)
        {
            // Remember this register's ld-fld cache id so a later call through
            // it can be associated with the cache.
            CacheIdUnit unit(cacheId, true);
            Assert(!callRegToLdFldCacheIndexMap->TryGetValue(value, &unit));
            callRegToLdFldCacheIndexMap->Add(value, unit);
        }
        // fall-through: LdRootMethodFld shares the profiled conversion below.
    case OpCode::StRootFld:
    case OpCode::StRootFldStrict:
    case OpCode::InitRootFld:
        if (DoDynamicProfileOpcode(ProfileBasedFldFastPathPhase) ||
            DoDynamicProfileOpcode(InlinePhase) ||
            DoDynamicProfileOpcode(ObjTypeSpecPhase))
        {
            OpCodeUtil::ConvertNonCallOpToProfiled(op);
        }
        break;
    case OpCode::InitRootLetFld:
    case OpCode::InitRootConstFld:
        break;
    default:
        AssertMsg(false, "The specified OpCode is not intended for patchable root field-access");
        break;
    }
    MULTISIZE_LAYOUT_WRITE(ElementRootCP, op, value, cacheId, isLoadMethod, isStore);
}
  1625. template <typename SizePolicy>
  1626. bool ByteCodeWriter::TryWriteElementP(OpCode op, RegSlot value, CacheId cacheId)
  1627. {
  1628. OpLayoutT_ElementP<SizePolicy> layout;
  1629. if (SizePolicy::Assign(layout.Value, value)
  1630. && SizePolicy::Assign(layout.inlineCacheIndex, cacheId))
  1631. {
  1632. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1633. return true;
  1634. }
  1635. return false;
  1636. }
void ByteCodeWriter::ElementP(OpCode op, RegSlot value, uint cacheId, bool isCtor, bool registerCacheIdForCall)
{
    // Emits a base-less patchable (inline-cached) field access — the instance
    // is implicit (local scope / scope chain) — converting opcodes to their
    // Profiled* variants when the relevant dynamic profile phases are enabled.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementP);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    value = ConsumeReg(value);
    switch (op)
    {
    case OpCode::ScopedLdFld:
    case OpCode::ScopedLdFldForTypeOf:
    case OpCode::ScopedStFld:
    case OpCode::ConsoleScopedStFld:
    case OpCode::ScopedStFldStrict:
        break;
    case OpCode::LdLocalFld:
        if (isCtor) // The symbol loaded by this LdFld will be used as a constructor
        {
            if (registerCacheIdForCall)
            {
                // Associate this register's ld-fld cache id with a later call.
                CacheIdUnit unit(cacheId);
                Assert(!callRegToLdFldCacheIndexMap->TryGetValue(value, &unit));
                callRegToLdFldCacheIndexMap->Add(value, unit);
            }
        }
        if (DoDynamicProfileOpcode(AggressiveIntTypeSpecPhase) ||
            DoDynamicProfileOpcode(FloatTypeSpecPhase) ||
            DoDynamicProfileOpcode(ObjTypeSpecPhase) ||
            DoDynamicProfileOpcode(InlinePhase) ||
            DoDynamicProfileOpcode(ProfileBasedFldFastPathPhase))
        {
            OpCodeUtil::ConvertNonCallOpToProfiled(op);
        }
        break;
    case OpCode::LdLocalMethodFld:
        if (registerCacheIdForCall)
        {
            CacheIdUnit unit(cacheId);
            Assert(!callRegToLdFldCacheIndexMap->TryGetValue(value, &unit));
            callRegToLdFldCacheIndexMap->Add(value, unit);
        }
        // fall-through
    case OpCode::StLocalFld:
    case OpCode::InitLocalFld:
        if (DoDynamicProfileOpcode(ProfileBasedFldFastPathPhase) ||
            DoDynamicProfileOpcode(InlinePhase) ||
            DoDynamicProfileOpcode(ObjTypeSpecPhase))
        {
            OpCodeUtil::ConvertNonCallOpToProfiled(op);
        }
        break;
    case OpCode::InitLocalLetFld:
    case OpCode::InitUndeclLocalLetFld:
    case OpCode::InitUndeclLocalConstFld:
        break;
    default:
        AssertMsg(false, "The specified OpCode not intended for base-less patchable field access");
        break;
    }
    MULTISIZE_LAYOUT_WRITE(ElementP, op, value, cacheId);
}
  1697. template <typename SizePolicy>
  1698. bool ByteCodeWriter::TryWriteElementPIndexed(OpCode op, RegSlot value, uint32 scopeIndex, CacheId cacheId)
  1699. {
  1700. OpLayoutT_ElementPIndexed<SizePolicy> layout;
  1701. if (SizePolicy::Assign(layout.Value, value)
  1702. && SizePolicy::Assign(layout.inlineCacheIndex, cacheId)
  1703. && SizePolicy::Assign(layout.scopeIndex, scopeIndex))
  1704. {
  1705. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1706. return true;
  1707. }
  1708. return false;
  1709. }
  1710. void ByteCodeWriter::ElementPIndexed(OpCode op, RegSlot value, uint32 scopeIndex, uint cacheId)
  1711. {
  1712. CheckOpen();
  1713. CheckOp(op, OpLayoutType::ElementPIndexed);
  1714. Assert(OpCodeAttr::HasMultiSizeLayout(op));
  1715. value = ConsumeReg(value);
  1716. switch (op)
  1717. {
  1718. case OpCode::InitInnerFld:
  1719. case OpCode::InitInnerLetFld:
  1720. case OpCode::InitUndeclLetFld:
  1721. case OpCode::InitUndeclConstFld:
  1722. break;
  1723. break;
  1724. default:
  1725. AssertMsg(false, "The specified OpCode not intended for base-less patchable inner field access");
  1726. break;
  1727. }
  1728. MULTISIZE_LAYOUT_WRITE(ElementPIndexed, op, value, scopeIndex, cacheId);
  1729. }
  1730. template <typename SizePolicy>
  1731. bool ByteCodeWriter::TryWriteElementCP(OpCode op, RegSlot value, RegSlot instance, CacheId cacheId)
  1732. {
  1733. OpLayoutT_ElementCP<SizePolicy> layout;
  1734. if (SizePolicy::Assign(layout.Value, value) && SizePolicy::Assign(layout.Instance, instance)
  1735. && SizePolicy::Assign(layout.inlineCacheIndex, cacheId))
  1736. {
  1737. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1738. return true;
  1739. }
  1740. return false;
  1741. }
void ByteCodeWriter::PatchableProperty(OpCode op, RegSlot value, RegSlot instance, uint cacheId, bool isCtor, bool registerCacheIdForCall)
{
    // Emits a patchable (inline-cached) field access against an explicit
    // instance, converting opcodes to their Profiled* variants when the
    // relevant dynamic profile phases are enabled.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementCP);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    value = ConsumeReg(value);
    instance = ConsumeReg(instance);
    switch (op)
    {
    case OpCode::LdFldForTypeOf:
    case OpCode::LdFld:
        if (isCtor) // The symbol loaded by this LdFld will be used as a constructor
        {
            if (registerCacheIdForCall)
            {
                // Associate this register's ld-fld cache id with a later call.
                CacheIdUnit unit(cacheId);
                Assert(!callRegToLdFldCacheIndexMap->TryGetValue(value, &unit));
                callRegToLdFldCacheIndexMap->Add(value, unit);
            }
        }
        // fall-through: the load opcodes share the profiled conversion below.
    case OpCode::LdFldForCallApplyTarget:
        if (DoDynamicProfileOpcode(AggressiveIntTypeSpecPhase) ||
            DoDynamicProfileOpcode(FloatTypeSpecPhase) ||
            DoDynamicProfileOpcode(ObjTypeSpecPhase) ||
            DoDynamicProfileOpcode(InlinePhase) ||
            DoDynamicProfileOpcode(ProfileBasedFldFastPathPhase))
        {
            OpCodeUtil::ConvertNonCallOpToProfiled(op);
        }
        break;
    case OpCode::LdMethodFld:
        if (registerCacheIdForCall)
        {
            CacheIdUnit unit(cacheId);
            Assert(!callRegToLdFldCacheIndexMap->TryGetValue(value, &unit));
            callRegToLdFldCacheIndexMap->Add(value, unit);
        }
        // fall-through
    case OpCode::StFld:
    case OpCode::StFldStrict:
    case OpCode::InitFld:
        if (DoDynamicProfileOpcode(ProfileBasedFldFastPathPhase) ||
            DoDynamicProfileOpcode(InlinePhase) ||
            DoDynamicProfileOpcode(ObjTypeSpecPhase))
        {
            OpCodeUtil::ConvertNonCallOpToProfiled(op);
        }
        break;
    case OpCode::InitLetFld:
    case OpCode::InitConstFld:
    case OpCode::InitClassMember:
    case OpCode::ScopedLdMethodFld:
        break;
    default:
        AssertMsg(false, "The specified OpCode is not intended for patchable field-access");
        break;
    }
    MULTISIZE_LAYOUT_WRITE(ElementCP, op, value, instance, cacheId);
}
  1801. template <typename SizePolicy>
  1802. bool ByteCodeWriter::TryWriteElementC2(OpCode op, RegSlot value, RegSlot instance, PropertyIdIndexType propertyIdIndex, RegSlot value2)
  1803. {
  1804. OpLayoutT_ElementC2<SizePolicy> layout;
  1805. if (SizePolicy::Assign(layout.Value, value) && SizePolicy::Assign(layout.Instance, instance)
  1806. && SizePolicy::Assign(layout.PropertyIdIndex, propertyIdIndex) && SizePolicy::Assign(layout.Value2, value2))
  1807. {
  1808. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1809. return true;
  1810. }
  1811. return false;
  1812. }
void ByteCodeWriter::PatchablePropertyWithThisPtr(OpCode op, RegSlot value, RegSlot instance, RegSlot thisInstance, uint cacheId, bool isCtor, bool registerCacheIdForCall)
{
    // Emits a patchable super-property access that carries an extra `this`
    // instance register (ElementC2 layout), converting opcodes to their
    // Profiled* variants when the relevant dynamic profile phases are enabled.
    CheckOpen();
    CheckOp(op, OpLayoutType::ElementC2);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    value = ConsumeReg(value);
    instance = ConsumeReg(instance);
    thisInstance = ConsumeReg(thisInstance);
    switch (op)
    {
    case OpCode::LdSuperFld:
        if (isCtor) // The symbol loaded by this LdSuperFld will be used as a constructor
        {
            if (registerCacheIdForCall)
            {
                // Associate this register's ld-fld cache id with a later call.
                CacheIdUnit unit(cacheId);
                Assert(!callRegToLdFldCacheIndexMap->TryGetValue(value, &unit));
                callRegToLdFldCacheIndexMap->Add(value, unit);
            }
        }
        if (DoDynamicProfileOpcode(AggressiveIntTypeSpecPhase) ||
            DoDynamicProfileOpcode(FloatTypeSpecPhase) ||
            DoDynamicProfileOpcode(ObjTypeSpecPhase) ||
            DoDynamicProfileOpcode(InlinePhase) ||
            DoDynamicProfileOpcode(ProfileBasedFldFastPathPhase))
        {
            OpCodeUtil::ConvertNonCallOpToProfiled(op);
        }
        break;
    case OpCode::StSuperFld:
        if (DoDynamicProfileOpcode(ProfileBasedFldFastPathPhase) ||
            DoDynamicProfileOpcode(InlinePhase) ||
            DoDynamicProfileOpcode(ObjTypeSpecPhase))
        {
            OpCodeUtil::ConvertNonCallOpToProfiled(op);
        }
        break;
    default:
        AssertMsg(false, "The specified OpCode is not intended for patchable super field-access");
        break;
    }
    // Note the operand order: cacheId occupies the PropertyIdIndex field and
    // thisInstance occupies Value2 in the ElementC2 layout.
    MULTISIZE_LAYOUT_WRITE(ElementC2, op, value, instance, cacheId, thisInstance);
}
  1856. template <typename SizePolicy>
  1857. bool ByteCodeWriter::TryWriteElementScopedC2(OpCode op, RegSlot value, PropertyIdIndexType propertyIdIndex, RegSlot value2)
  1858. {
  1859. OpLayoutT_ElementScopedC2<SizePolicy> layout;
  1860. if (SizePolicy::Assign(layout.Value, value)
  1861. && SizePolicy::Assign(layout.PropertyIdIndex, propertyIdIndex) && SizePolicy::Assign(layout.Value2, value2))
  1862. {
  1863. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1864. return true;
  1865. }
  1866. return false;
  1867. }
  1868. void ByteCodeWriter::ScopedProperty2(OpCode op, RegSlot value, PropertyIdIndexType propertyIdIndex, RegSlot value2)
  1869. {
  1870. CheckOpen();
  1871. CheckOp(op, OpLayoutType::ElementScopedC2);
  1872. Assert(OpCodeAttr::HasMultiSizeLayout(op));
  1873. value = ConsumeReg(value);
  1874. value2 = ConsumeReg(value2);
  1875. switch (op)
  1876. {
  1877. case OpCode::ScopedLdInst:
  1878. break;
  1879. default:
  1880. AssertMsg(false, "The specified OpCode is not intended for field-access with a second instance");
  1881. break;
  1882. }
  1883. MULTISIZE_LAYOUT_WRITE(ElementScopedC2, op, value, propertyIdIndex, value2);
  1884. }
  1885. template <typename SizePolicy>
  1886. bool ByteCodeWriter::TryWriteClass(OpCode op, RegSlot constructor, RegSlot extends)
  1887. {
  1888. OpLayoutT_Class<SizePolicy> layout;
  1889. if (SizePolicy::Assign(layout.Constructor, constructor) && SizePolicy::Assign(layout.Extends, extends))
  1890. {
  1891. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1892. return true;
  1893. }
  1894. return false;
  1895. }
void ByteCodeWriter::InitClass(RegSlot constructor, RegSlot extends)
{
    // Emits InitClass. `extends` may be Js::Constants::NoRegister (no heritage
    // clause), in which case it is written through without ConsumeReg.
    Assert(OpCodeAttr::HasMultiSizeLayout(Js::OpCode::InitClass));
    CheckOpen();
    constructor = ConsumeReg(constructor);
    if (extends != Js::Constants::NoRegister)
    {
        extends = ConsumeReg(extends);
    }
    MULTISIZE_LAYOUT_WRITE(Class, Js::OpCode::InitClass, constructor, extends);
}
  1907. void ByteCodeWriter::NewFunction(RegSlot destinationRegister, uint index, bool isGenerator)
  1908. {
  1909. CheckOpen();
  1910. destinationRegister = ConsumeReg(destinationRegister);
  1911. OpCode opcode = isGenerator ?
  1912. OpCode::NewScGenFunc :
  1913. this->m_functionWrite->DoStackNestedFunc() ?
  1914. OpCode::NewStackScFunc : OpCode::NewScFunc;
  1915. Assert(OpCodeAttr::HasMultiSizeLayout(opcode));
  1916. MULTISIZE_LAYOUT_WRITE(ElementSlotI1, opcode, destinationRegister, index);
  1917. }
  1918. void ByteCodeWriter::NewInnerFunction(RegSlot destinationRegister, uint index, RegSlot environmentRegister, bool isGenerator)
  1919. {
  1920. CheckOpen();
  1921. destinationRegister = ConsumeReg(destinationRegister);
  1922. environmentRegister = ConsumeReg(environmentRegister);
  1923. OpCode opcode = isGenerator ?
  1924. OpCode::NewInnerScGenFunc :
  1925. this->m_functionWrite->DoStackNestedFunc() ?
  1926. OpCode::NewInnerStackScFunc : OpCode::NewInnerScFunc;
  1927. Assert(OpCodeAttr::HasMultiSizeLayout(opcode));
  1928. MULTISIZE_LAYOUT_WRITE(ElementSlot, opcode, destinationRegister, environmentRegister, index);
  1929. }
  1930. template <typename SizePolicy>
  1931. bool ByteCodeWriter::TryWriteReg1Unsigned1(OpCode op, RegSlot R0, uint C1)
  1932. {
  1933. OpLayoutT_Reg1Unsigned1<SizePolicy> layout;
  1934. if (SizePolicy::Assign(layout.R0, R0) && SizePolicy::Assign(layout.C1, C1))
  1935. {
  1936. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1937. return true;
  1938. }
  1939. return false;
  1940. }
void ByteCodeWriter::Reg1Unsigned1(OpCode op, RegSlot R0, uint C1)
{
    // Emits a one-register + unsigned-constant instruction. Array-literal
    // creation opcodes may be upgraded to Profiled* variants with an array
    // call-site id appended after the layout.
    CheckOpen();
    CheckOp(op, OpLayoutType::Reg1Unsigned1);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    R0 = ConsumeReg(R0);
    ProfileId profileId = Constants::NoProfileId;
    // Profile only when the opcode is a NewScArray-style op, native-array
    // profiling is on, and a call-site id is still available.
    bool isProfiled = DoProfileNewScArrayOp(op) &&
        DoDynamicProfileOpcode(NativeArrayPhase, true) &&
        this->m_functionWrite->AllocProfiledArrayCallSiteId(&profileId);
    if (isProfiled)
    {
        OpCodeUtil::ConvertNonCallOpToProfiled(op);
    }
    MULTISIZE_LAYOUT_WRITE(Reg1Unsigned1, op, R0, C1);
    if (isProfiled)
    {
        // The call-site id trails the layout for the profiled variant.
        m_byteCodeData.Encode(&profileId, sizeof(Js::ProfileId));
    }
}
void ByteCodeWriter::W1(OpCode op, ushort C1)
{
    // Emits a fixed-size (non-multi-size) instruction carrying a single
    // ushort constant.
    CheckOpen();
    CheckOp(op, OpLayoutType::W1);
    Assert(!OpCodeAttr::HasMultiSizeLayout(op));
    OpLayoutW1 data;
    data.C1 = C1;
    m_byteCodeData.Encode(op, &data, sizeof(data), this);
}
void ByteCodeWriter::Reg1Int2(OpCode op, RegSlot R0, int C1, int C2)
{
    // Emits a fixed-size (non-multi-size) instruction with one register and
    // two int constants.
    CheckOpen();
    CheckOp(op, OpLayoutType::Reg1Int2);
    Assert(!OpCodeAttr::HasMultiSizeLayout(op));
    R0 = ConsumeReg(R0);
    OpLayoutReg1Int2 data;
    data.R0 = R0;
    data.C1 = C1;
    data.C2 = C2;
    m_byteCodeData.Encode(op, &data, sizeof(data), this);
}
  1982. template <typename SizePolicy>
  1983. bool ByteCodeWriter::TryWriteReg2Int1(OpCode op, RegSlot R0, RegSlot R1, int C1)
  1984. {
  1985. OpLayoutT_Reg2Int1<SizePolicy> layout;
  1986. if (SizePolicy::Assign(layout.R0, R0) && SizePolicy::Assign(layout.R1, R1) && SizePolicy::Assign(layout.C1, C1))
  1987. {
  1988. m_byteCodeData.EncodeT<SizePolicy::LayoutEnum>(op, &layout, sizeof(layout), this);
  1989. return true;
  1990. }
  1991. return false;
  1992. }
void ByteCodeWriter::Reg2Int1(OpCode op, RegSlot R0, RegSlot R1, int C1)
{
    // Emits a two-register + int-constant instruction; LdThis is upgraded to
    // ProfiledLdThis when the relevant dynamic profile phases are enabled.
    CheckOpen();
    CheckOp(op, OpLayoutType::Reg2Int1);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    if (DoDynamicProfileOpcode(CheckThisPhase) ||
        DoDynamicProfileOpcode(TypedArrayTypeSpecPhase) ||
        DoDynamicProfileOpcode(ArrayCheckHoistPhase))
    {
        if (op == OpCode::LdThis)
        {
            op = OpCode::ProfiledLdThis;
        }
    }
    R0 = ConsumeReg(R0);
    R1 = ConsumeReg(R1);
    MULTISIZE_LAYOUT_WRITE(Reg2Int1, op, R0, R1, C1);
}
void ByteCodeWriter::Num3(OpCode op, RegSlot C0, RegSlot C1, RegSlot C2)
{
    // Writes a Reg3 layout from raw operand values. Note: unlike the other
    // register writers here, operands are NOT passed through ConsumeReg —
    // presumably they are numeric values, not live registers; verify at call sites.
    CheckOpen();
    CheckOp(op, OpLayoutType::Reg3);
    Assert(OpCodeAttr::HasMultiSizeLayout(op));
    MULTISIZE_LAYOUT_WRITE(Reg3, op, C0, C1, C2);
}
  2018. int ByteCodeWriter::AuxNoReg(OpCode op, const void* buffer, int byteCount, int C1)
  2019. {
  2020. CheckOpen();
  2021. //
  2022. // Write the buffer's contents
  2023. //
  2024. int currentOffset = InsertAuxiliaryData(buffer, byteCount);
  2025. //
  2026. // Write OpCode to create new auxiliary data
  2027. //
  2028. OpLayoutAuxNoReg data;
  2029. data.Offset = currentOffset;
  2030. data.C1 = C1;
  2031. m_byteCodeData.Encode(op, &data, sizeof(data), this);
  2032. return currentOffset;
  2033. }
  2034. void ByteCodeWriter::AuxNoReg(OpCode op, uint byteOffset, int C1)
  2035. {
  2036. CheckOpen();
  2037. //
  2038. // Write the buffer's contents
  2039. //
  2040. Assert(byteOffset < m_auxiliaryData.GetCurrentOffset());
  2041. OpLayoutAuxNoReg data;
  2042. data.Offset = byteOffset;
  2043. data.C1 = C1;
  2044. m_byteCodeData.Encode(op, &data, sizeof(data), this);
  2045. }
  2046. int ByteCodeWriter::Auxiliary(OpCode op, RegSlot destinationRegister, const void* buffer, int byteCount, int C1)
  2047. {
  2048. CheckOpen();
  2049. destinationRegister = ConsumeReg(destinationRegister);
  2050. //
  2051. // Write the buffer's contents
  2052. //
  2053. int currentOffset = InsertAuxiliaryData(buffer, byteCount);
  2054. //
  2055. // Write OpCode to create new auxiliary data
  2056. //
  2057. ProfileId profileId = Constants::NoProfileId;
  2058. if (DoProfileNewScArrayOp(op) &&
  2059. DoDynamicProfileOpcode(NativeArrayPhase, true) &&
  2060. this->m_functionWrite->AllocProfiledArrayCallSiteId(&profileId))
  2061. {
  2062. OpCodeUtil::ConvertNonCallOpToProfiled(op);
  2063. OpLayoutDynamicProfile<OpLayoutAuxiliary> data;
  2064. data.R0 = destinationRegister;
  2065. data.Offset = currentOffset;
  2066. data.C1 = C1;
  2067. data.profileId = profileId;
  2068. m_byteCodeData.Encode(op, &data, sizeof(data), this);
  2069. }
  2070. else
  2071. {
  2072. OpLayoutAuxiliary data;
  2073. data.R0 = destinationRegister;
  2074. data.Offset = currentOffset;
  2075. data.C1 = C1;
  2076. m_byteCodeData.Encode(op, &data, sizeof(data), this);
  2077. }
  2078. return currentOffset;
  2079. }
  2080. void ByteCodeWriter::Auxiliary(OpCode op, RegSlot destinationRegister, uint byteOffset, int C1)
  2081. {
  2082. CheckOpen();
  2083. destinationRegister = ConsumeReg(destinationRegister);
  2084. //
  2085. // Write the buffer's contents
  2086. //
  2087. Assert(byteOffset < m_auxiliaryData.GetCurrentOffset());
  2088. OpLayoutAuxiliary data;
  2089. data.R0 = destinationRegister;
  2090. data.Offset = byteOffset;
  2091. data.C1 = C1;
  2092. m_byteCodeData.Encode(op, &data, sizeof(data), this);
  2093. }
  2094. int ByteCodeWriter::Reg2Aux(OpCode op, RegSlot R0, RegSlot R1, const void* buffer, int byteCount, int C1)
  2095. {
  2096. CheckOpen();
  2097. R0 = ConsumeReg(R0);
  2098. R1 = ConsumeReg(R1);
  2099. //
  2100. // Write the buffer's contents
  2101. //
  2102. int currentOffset = InsertAuxiliaryData(buffer, byteCount);
  2103. //
  2104. // Write OpCode to create new auxiliary data
  2105. //
  2106. OpLayoutReg2Aux data;
  2107. data.R0 = R0;
  2108. data.R1 = R1;
  2109. data.Offset = currentOffset;
  2110. data.C1 = C1;
  2111. m_byteCodeData.Encode(op, &data, sizeof(data), this);
  2112. return currentOffset;
  2113. }
  2114. void ByteCodeWriter::Reg2Aux(OpCode op, RegSlot R0, RegSlot R1, uint byteOffset, int C1)
  2115. {
  2116. CheckOpen();
  2117. R0 = ConsumeReg(R0);
  2118. R1 = ConsumeReg(R1);
  2119. //
  2120. // Write the buffer's contents
  2121. //
  2122. Assert(byteOffset < m_auxiliaryData.GetCurrentOffset());
  2123. OpLayoutReg2Aux data;
  2124. data.R0 = R0;
  2125. data.R1 = R1;
  2126. data.Offset = byteOffset;
  2127. data.C1 = C1;
  2128. m_byteCodeData.Encode(op, &data, sizeof(data), this);
  2129. }
  2130. void ByteCodeWriter::AuxiliaryContext(OpCode op, RegSlot destinationRegister, const void* buffer, int byteCount, Js::RegSlot C1)
  2131. {
  2132. CheckOpen();
  2133. destinationRegister = ConsumeReg(destinationRegister);
  2134. C1 = ConsumeReg(C1);
  2135. //
  2136. // Write the buffer's contents
  2137. //
  2138. int currentOffset = m_auxContextData.GetCurrentOffset();
  2139. if (byteCount > 0)
  2140. {
  2141. m_auxContextData.Encode(buffer, byteCount);
  2142. }
  2143. //
  2144. // Write OpCode to create new auxiliary data
  2145. //
  2146. OpLayoutAuxiliary data;
  2147. data.R0 = destinationRegister;
  2148. data.Offset = currentOffset;
  2149. data.C1 = C1;
  2150. m_byteCodeData.Encode(op, &data, sizeof(data), this);
  2151. }
  2152. uint ByteCodeWriter::InsertAuxiliaryData(const void* buffer, uint byteCount)
  2153. {
  2154. uint offset = m_auxiliaryData.GetCurrentOffset();
  2155. if (byteCount > 0)
  2156. {
  2157. m_auxiliaryData.Encode(buffer, byteCount);
  2158. }
  2159. return offset;
  2160. }
  2161. ByteCodeLabel ByteCodeWriter::DefineLabel()
  2162. {
  2163. #if defined(_M_X64_OR_ARM64)
  2164. if (m_labelOffsets->Count() == INT_MAX)
  2165. {
  2166. // Reach our limit
  2167. Js::Throw::OutOfMemory();
  2168. }
  2169. #else
  2170. // 32-bit machine don't have enough address space to get to INT_MAX
  2171. Assert(m_labelOffsets->Count() < INT_MAX);
  2172. #endif
  2173. //
  2174. // Allocate a new label:
  2175. // - All label locations start as "undefined: -1". Once the label's location is marked in
  2176. // the byte-code, this will be updated.
  2177. //
  2178. return (ByteCodeLabel)m_labelOffsets->Add(UINT_MAX);
  2179. }
  2180. void ByteCodeWriter::MarkLabel(ByteCodeLabel labelID)
  2181. {
  2182. CheckOpen();
  2183. CheckLabel(labelID);
  2184. #ifdef BYTECODE_BRANCH_ISLAND
  2185. if (useBranchIsland)
  2186. {
  2187. // If we are going to emit a branch island, it should be before the label.
  2188. EnsureLongBranch(Js::OpCode::Label);
  2189. }
  2190. #endif
  2191. //
  2192. // Define the label as the current offset within the byte-code.
  2193. //
  2194. AssertMsg(m_labelOffsets->Item(labelID) == UINT_MAX, "A label may only be defined at one location");
  2195. m_labelOffsets->SetExistingItem(labelID, m_byteCodeData.GetCurrentOffset());
  2196. }
// Records a branch patch site for the jump instruction just emitted.
// 'fieldByteOffsetFromEnd' is the distance (in bytes) from the end of the
// instruction back to its "Offset" field. Back branches that are already out
// of short range are routed through a BrLong immediately; everything else is
// queued in m_jumpOffsets for back-patching at close.
void ByteCodeWriter::AddJumpOffset(Js::OpCode op, ByteCodeLabel labelId, uint fieldByteOffsetFromEnd) // Offset of "Offset" field in OpLayout, in bytes
{
    AssertMsg(fieldByteOffsetFromEnd < 100, "Ensure valid field offset");
    CheckOpen();
    CheckLabel(labelId);

    // Byte-code offset of the field that will be patched with the displacement.
    uint jumpByteOffset = m_byteCodeData.GetCurrentOffset() - fieldByteOffsetFromEnd;

#ifdef BYTECODE_BRANCH_ISLAND
    if (useBranchIsland)
    {
        // Any Jump might need a long jump, account for that emit the branch island earlier.
        // Even if it is a back edge and we are going to emit a long jump, we will still
        // emit a branch around any way.
        this->nextBranchIslandOffset -= LongBranchSize;

        uint labelOffset = m_labelOffsets->Item(labelId);
        if (labelOffset != UINT_MAX)
        {
            // Back branch, see if it needs to be long
            Assert(labelOffset < m_byteCodeData.GetCurrentOffset());
            LongJumpOffset jumpOffset = labelOffset - m_byteCodeData.GetCurrentOffset();
            if (jumpOffset < -GetBranchLimit())
            {
                // Target is beyond short-branch range: retarget the original
                // jump at a nearby BrLong that carries the full displacement.
                // Create the long jump label and add the original jump offset to the list first
                ByteCodeLabel longJumpLabel = this->DefineLabel();
                JumpInfo jumpInfo = { longJumpLabel, jumpByteOffset };
                m_jumpOffsets->Add(jumpInfo);

                // Emit the jump around (if necessary)
                ByteCodeLabel jumpAroundLabel = (ByteCodeLabel)-1;
                if (OpCodeAttr::HasFallThrough(op))
                {
                    // emit jump around.
                    jumpAroundLabel = this->DefineLabel();
                    this->Br(jumpAroundLabel);
                }

                // emit the long jump
                this->MarkLabel(longJumpLabel);
                this->BrLong(Js::OpCode::BrLong, labelId);

                if (jumpAroundLabel != (ByteCodeLabel)-1)
                {
                    this->MarkLabel(jumpAroundLabel);
                }
                return;
            }
        }
    }
#endif
    //
    // Branch targets are created in two passes:
    // - In the instruction stream, write "labelID" into "OpLayoutBrC.Offset". Record this
    //   location in "m_jumpOffsets" to be patched later.
    // - When the byte-code is closed, update all "OpLayoutBrC.Offset"'s with their actual
    //   destinations.
    //
    JumpInfo jumpInfo = { labelId, jumpByteOffset };
    m_jumpOffsets->Add(jumpInfo);
}
  2252. #ifdef BYTECODE_BRANCH_ISLAND
// Returns the short-branch displacement limit in bytes: SHRT_MAX + 1 by
// default, overridable via a test flag (clamped to [64, SHRT_MAX + 1]).
int32 ByteCodeWriter::GetBranchLimit()
{
#ifdef BYTECODE_TESTING
    if (Js::Configuration::Global.flags.IsEnabled(Js::ByteCodeBranchLimitFlag))
    {
        // minimum 64
        return min(max(Js::Configuration::Global.flags.ByteCodeBranchLimit, 64), SHRT_MAX + 1);
    }
#endif
    return SHRT_MAX + 1;
}
  2264. void ByteCodeWriter::AddLongJumpOffset(ByteCodeLabel labelId, uint fieldByteOffsetFromEnd) // Offset of "Offset" field in OpLayout, in bytes
  2265. {
  2266. Assert(useBranchIsland);
  2267. AssertMsg(fieldByteOffsetFromEnd < 100, "Ensure valid field offset");
  2268. //
  2269. // Branch targets are created in two passes:
  2270. // - In the instruction stream, write "labelID" into "OpLayoutBrC.Offset". Record this
  2271. // location in "m_jumpOffsets" to be patched later.
  2272. // - When the byte-code is closed, update all "OpLayoutBrC.Offset"'s with their actual
  2273. // destinations.
  2274. //
  2275. uint jumpByteOffset = m_byteCodeData.GetCurrentOffset() - fieldByteOffsetFromEnd;
  2276. JumpInfo jumpInfo = { labelId, jumpByteOffset };
  2277. m_longJumpOffsets->Add(jumpInfo);
  2278. }
  2279. void ByteCodeWriter::BrLong(OpCode op, ByteCodeLabel labelID)
  2280. {
  2281. Assert(useBranchIsland);
  2282. CheckOpen();
  2283. CheckOp(op, OpLayoutType::BrLong);
  2284. CheckLabel(labelID);
  2285. Assert(!OpCodeAttr::HasMultiSizeLayout(op));
  2286. size_t const offsetOfRelativeJumpOffsetFromEnd = sizeof(OpLayoutBrLong) - offsetof(OpLayoutBrLong, RelativeJumpOffset);
  2287. OpLayoutBrLong data;
  2288. data.RelativeJumpOffset = offsetOfRelativeJumpOffsetFromEnd;
  2289. m_byteCodeData.Encode(op, &data, sizeof(data), this);
  2290. AddLongJumpOffset(labelID, offsetOfRelativeJumpOffsetFromEnd);
  2291. }
// Recomputes the byte-code offset at which the next branch island must be
// emitted.
// firstUnknownJumpInfo: index into m_jumpOffsets of the first jump whose
//     target label is still unmarked.
// firstUnknownJumpOffset: byte-code offset of that jump's patch site.
void ByteCodeWriter::UpdateNextBranchIslandOffset(uint firstUnknownJumpInfo, uint firstUnknownJumpOffset)
{
    this->firstUnknownJumpInfo = firstUnknownJumpInfo;

    // We will need to emit the next branch from the first branch + branch limit.
    // But leave room for the jump around and one extra byte code instruction.
    // Also account for all the long branches we may have to emit as well.
    this->nextBranchIslandOffset = firstUnknownJumpOffset + GetBranchLimit()
        - JumpAroundSize - MaxLayoutSize - MaxOpCodeSize - LongBranchSize * (m_jumpOffsets->Count() - firstUnknownJumpInfo);
}
// Emits a "branch island" — a cluster of BrLong instructions — when the
// byte-code has grown close enough to the short-branch limit that pending
// jumps to unmarked labels risk going out of range. 'op' is the opcode about
// to be emitted; any island is flushed before it.
void ByteCodeWriter::EnsureLongBranch(Js::OpCode op)
{
    Assert(useBranchIsland);
    int currentOffset = this->m_byteCodeData.GetCurrentOffset();

    // See if we need to emit branch island yet, and avoid recursion.
    if (currentOffset < this->nextBranchIslandOffset || this->inEnsureLongBranch)
    {
        lastOpcode = op;
        return;
    }

    // Leave actually may continue right after, it is only no fall through in the JIT.
    bool needBranchAround = OpCodeAttr::HasFallThrough(lastOpcode) || lastOpcode == Js::OpCode::Leave;
    lastOpcode = op;

    // If we are about to emit a no fall through op and the last was has fall through
    // then just emit the no fall through op, and then we can skip the branch around.
    // Except at label or StatementBoundary, we always want to emit before them.
    if ((needBranchAround && !OpCodeAttr::HasFallThrough(op))
        && op != Js::OpCode::StatementBoundary && op != Js::OpCode::Label)
    {
        return;
    }

    // Walk the pending jumps from the first whose target is still unmarked,
    // converting the endangered ones into long branches.
    ByteCodeLabel branchAroundLabel = (Js::ByteCodeLabel)-1;
    bool foundUnknown = m_jumpOffsets->MapUntilFrom(firstUnknownJumpInfo,
        [=, &branchAroundLabel, &currentOffset](int index, JumpInfo& jumpInfo)
    {
        //
        // Read "labelID" stored at the offset within the byte-code.
        //
        uint jumpByteOffset = jumpInfo.patchOffset;
        AssertMsg(jumpByteOffset <= this->m_byteCodeData.GetCurrentOffset() - sizeof(JumpOffset),
            "Must have valid jump site within byte-code to back-patch");

        ByteCodeLabel labelID = jumpInfo.labelId;
        CheckLabel(labelID);

        // See if the label has been marked yet.
        uint const labelByteOffset = m_labelOffsets->Item(labelID);
        if (labelByteOffset != UINT_MAX)
        {
            // If a label is already defined, then it should be short
            // (otherwise we should have emitted a branch island for it already).
            Assert((int)labelByteOffset - (int)jumpByteOffset < GetBranchLimit()
                && (int)labelByteOffset - (int)jumpByteOffset >= -GetBranchLimit());
            return false;
        }

        this->UpdateNextBranchIslandOffset(index, jumpByteOffset);

        // Flush all the jump that are half of the way to the limit as well so we don't have
        // as many jump around of branch island.
        int flushNextBranchIslandOffset = this->nextBranchIslandOffset - GetBranchLimit() / 2;
        if (currentOffset < flushNextBranchIslandOffset)
        {
            // No need to for long branch yet. Terminate the loop.
            return true;
        }

        if (labelID == branchAroundLabel)
        {
            // Let's not flush the branchAroundLabel.
            // Should happen very rarely and mostly when the branch limit is very small.
            // This should be the last short jump we have just emitted (below).
            Assert(index == m_jumpOffsets->Count() - 1);
            Assert(currentOffset < this->nextBranchIslandOffset);
            return true;
        }

        // Emit long branch
        // Prevent recursion when we emit byte code here
        this->inEnsureLongBranch = true;

        // Create the branch label and update the jumpInfo.
        // Need to update the jumpInfo before we add the branch island as that might resize the m_jumpOffsets list.
        ByteCodeLabel longBranchLabel = this->DefineLabel();
        jumpInfo.labelId = longBranchLabel;

        // Emit the branch around if it hasn't been emitted already
        if (branchAroundLabel == (Js::ByteCodeLabel)-1 && needBranchAround)
        {
            branchAroundLabel = this->DefineLabel();
            this->Br(Js::OpCode::Br, branchAroundLabel);
            Assert(this->m_byteCodeData.GetCurrentOffset() - currentOffset == JumpAroundSize);
            currentOffset += JumpAroundSize;
            // Continue to count the jumpAroundSize, because we may have to emit
            // yet another branch island right after if the jumpAroundSize is included.
        }

        // Emit the long branch
        this->MarkLabel(longBranchLabel);
        this->BrLong(Js::OpCode::BrLong, labelID);
        this->inEnsureLongBranch = false;

        Assert(this->m_byteCodeData.GetCurrentOffset() - currentOffset == LongBranchSize);
        currentOffset += LongBranchSize;
        return false;
    });

    if (!foundUnknown)
    {
        // Nothing is found, just set the next branch island from the current offset
        this->UpdateNextBranchIslandOffset(this->m_jumpOffsets->Count(), currentOffset);
    }

    if (branchAroundLabel != (Js::ByteCodeLabel)-1)
    {
        // Make the branch around label if we needed one
        this->MarkLabel(branchAroundLabel);
    }
}
  2398. #endif
  2399. void ByteCodeWriter::StartStatement(ParseNode* node, uint32 tmpRegCount)
  2400. {
  2401. if (m_pMatchingNode)
  2402. {
  2403. if (m_pMatchingNode == node)
  2404. {
  2405. m_matchingNodeRefCount++;
  2406. }
  2407. return;
  2408. }
  2409. #ifdef BYTECODE_BRANCH_ISLAND
  2410. if (useBranchIsland)
  2411. {
  2412. // If we are going to emit a branch island, it should be before the statement start
  2413. this->EnsureLongBranch(Js::OpCode::StatementBoundary);
  2414. }
  2415. #endif
  2416. m_pMatchingNode = node;
  2417. m_beginCodeSpan = m_byteCodeData.GetCurrentOffset();
  2418. if (m_isInDebugMode && m_tmpRegCount != tmpRegCount)
  2419. {
  2420. Unsigned1(OpCode::EmitTmpRegCount, tmpRegCount);
  2421. m_tmpRegCount = tmpRegCount;
  2422. }
  2423. }
  2424. void ByteCodeWriter::EndStatement(ParseNode* node)
  2425. {
  2426. AssertMsg(m_pMatchingNode, "EndStatement unmatched to StartStatement");
  2427. if (m_pMatchingNode != node)
  2428. {
  2429. return;
  2430. }
  2431. else if (m_matchingNodeRefCount > 0)
  2432. {
  2433. m_matchingNodeRefCount--;
  2434. return;
  2435. }
  2436. if (m_byteCodeData.GetCurrentOffset() != m_beginCodeSpan)
  2437. {
  2438. if (m_isInDebugMode)
  2439. {
  2440. FunctionBody::StatementMap* pCurrentStatement = FunctionBody::StatementMap::New(this->m_functionWrite->GetScriptContext()->GetRecycler());
  2441. if (pCurrentStatement)
  2442. {
  2443. pCurrentStatement->sourceSpan.begin = node->ichMin;
  2444. pCurrentStatement->sourceSpan.end = node->ichLim;
  2445. pCurrentStatement->byteCodeSpan.begin = m_beginCodeSpan;
  2446. pCurrentStatement->byteCodeSpan.end = m_byteCodeData.GetCurrentOffset() - 1;
  2447. m_functionWrite->RecordStatementMap(pCurrentStatement);
  2448. }
  2449. }
  2450. else
  2451. {
  2452. StatementData currentStatement;
  2453. currentStatement.sourceBegin = node->ichMin;
  2454. currentStatement.bytecodeBegin = m_beginCodeSpan;
  2455. m_functionWrite->RecordStatementMap(spanIter, &currentStatement);
  2456. }
  2457. }
  2458. m_pMatchingNode = nullptr;
  2459. }
  2460. void ByteCodeWriter::StartSubexpression(ParseNode* node)
  2461. {
  2462. if (!m_isInDebugMode || !m_pMatchingNode) // Subexpression not in debug mode or not enclosed in regular statement
  2463. {
  2464. return;
  2465. }
  2466. #ifdef BYTECODE_BRANCH_ISLAND
  2467. // If we are going to emit a branch island, it should be before the statement start
  2468. this->EnsureLongBranch(Js::OpCode::StatementBoundary);
  2469. #endif
  2470. m_subexpressionNodesStack->Push(SubexpressionNode(node, m_byteCodeData.GetCurrentOffset()));
  2471. }
// Ends a subexpression begun by StartSubexpression on the same node (debug
// mode only), recording a statement map flagged as a subexpression.
void ByteCodeWriter::EndSubexpression(ParseNode* node)
{
    if (!m_isInDebugMode || m_subexpressionNodesStack->Empty() || m_subexpressionNodesStack->Peek().node != node)
    {
        return;
    }
    // NOTE(review): this compares against m_beginCodeSpan (the enclosing
    // statement's begin, set by StartStatement), not the subexpression's own
    // begin offset — and when the condition is false the stack entry is never
    // popped. Confirm both are intentional before changing.
    if (m_byteCodeData.GetCurrentOffset() != m_beginCodeSpan)
    {
        FunctionBody::StatementMap* pCurrentStatement = FunctionBody::StatementMap::New(this->m_functionWrite->GetScriptContext()->GetRecycler());
        if (pCurrentStatement)
        {
            pCurrentStatement->sourceSpan.begin = node->ichMin;
            pCurrentStatement->sourceSpan.end = node->ichLim;

            // The byte-code span starts where StartSubexpression recorded it.
            SubexpressionNode subexpressionNode = m_subexpressionNodesStack->Pop();
            pCurrentStatement->byteCodeSpan.begin = subexpressionNode.beginCodeSpan;
            pCurrentStatement->byteCodeSpan.end = m_byteCodeData.GetCurrentOffset() - 1;
            pCurrentStatement->isSubexpression = true;
            m_functionWrite->RecordStatementMap(pCurrentStatement);
        }
    }
}
  2493. // Pushes a new debugger scope onto the stack. This information is used when determining
  2494. // what the current scope is for tracking of let/const initialization offsets (for detecting
  2495. // dead zones).
  2496. void ByteCodeWriter::PushDebuggerScope(Js::DebuggerScope* debuggerScope)
  2497. {
  2498. Assert(debuggerScope);
  2499. debuggerScope->SetParentScope(m_currentDebuggerScope);
  2500. m_currentDebuggerScope = debuggerScope;
  2501. OUTPUT_VERBOSE_TRACE(Js::DebuggerPhase, L"PushDebuggerScope() - Pushed scope 0x%p of type %d.\n", m_currentDebuggerScope, m_currentDebuggerScope->scopeType);
  2502. }
// Pops the current debugger scope from the stack, restoring its parent.
void ByteCodeWriter::PopDebuggerScope()
{
    Assert(m_currentDebuggerScope);
    // NOTE: the trace below dereferences m_currentDebuggerScope; the null
    // check that follows only guards the pop itself.
    OUTPUT_VERBOSE_TRACE(Js::DebuggerPhase, L"PopDebuggerScope() - Popped scope 0x%p of type %d.\n", m_currentDebuggerScope, m_currentDebuggerScope->scopeType);
    if (m_currentDebuggerScope != nullptr)
    {
        m_currentDebuggerScope = m_currentDebuggerScope->GetParentScope();
    }
}
  2513. DebuggerScope* ByteCodeWriter::RecordStartScopeObject(DiagExtraScopesType scopeType, RegSlot scopeLocation, int* index)
  2514. {
  2515. if (scopeLocation != Js::Constants::NoRegister)
  2516. {
  2517. scopeLocation = ConsumeReg(scopeLocation);
  2518. }
  2519. DebuggerScope* debuggerScope = m_functionWrite->RecordStartScopeObject(scopeType, m_byteCodeData.GetCurrentOffset(), scopeLocation, index);
  2520. PushDebuggerScope(debuggerScope);
  2521. return debuggerScope;
  2522. }
  2523. void ByteCodeWriter::AddPropertyToDebuggerScope(
  2524. DebuggerScope* debuggerScope,
  2525. RegSlot location,
  2526. Js::PropertyId propertyId,
  2527. bool shouldConsumeRegister /*= true*/,
  2528. DebuggerScopePropertyFlags flags /*= DebuggerScopePropertyFlags_None*/,
  2529. bool isFunctionDeclaration /*= false*/)
  2530. {
  2531. Assert(debuggerScope);
  2532. // Activation object doesn't use register and slot array location represents the
  2533. // index in the array. Only need to consume for register slots.
  2534. if (shouldConsumeRegister)
  2535. {
  2536. Assert(location != Js::Constants::NoRegister);
  2537. location = ConsumeReg(location);
  2538. }
  2539. debuggerScope->AddProperty(location, propertyId, flags);
  2540. // Only need to update properties in debug mode (even for slot array, which is tracked in non-debug mode,
  2541. // since the offset is only used for debugging).
  2542. if (this->m_isInDebugMode && isFunctionDeclaration)
  2543. {
  2544. AssertMsg(this->m_currentDebuggerScope, "Function declarations can only be added in a block scope.");
  2545. AssertMsg(debuggerScope == this->m_currentDebuggerScope
  2546. || debuggerScope == this->m_currentDebuggerScope->siblingScope,
  2547. "Function declarations should always be added to the current scope.");
  2548. // If this is a function declaration, it doesn't have a dead zone region so
  2549. // we just update its byte code initialization offset to the start of the block.
  2550. this->UpdateDebuggerPropertyInitializationOffset(
  2551. debuggerScope,
  2552. location,
  2553. propertyId,
  2554. false /*shouldConsumeRegister*/, // Register would have already been consumed above, if needed.
  2555. debuggerScope->GetStart(),
  2556. isFunctionDeclaration);
  2557. }
  2558. }
  2559. void ByteCodeWriter::RecordEndScopeObject()
  2560. {
  2561. Assert(this->m_currentDebuggerScope);
  2562. m_functionWrite->RecordEndScopeObject(this->m_currentDebuggerScope, m_byteCodeData.GetCurrentOffset() - 1);
  2563. PopDebuggerScope();
  2564. }
  2565. void ByteCodeWriter::UpdateDebuggerPropertyInitializationOffset(
  2566. Js::DebuggerScope* currentDebuggerScope,
  2567. Js::RegSlot location,
  2568. Js::PropertyId propertyId,
  2569. bool shouldConsumeRegister/* = true*/,
  2570. int byteCodeOffset/* = Constants::InvalidOffset*/,
  2571. bool isFunctionDeclaration /*= false*/)
  2572. {
  2573. #if DBG
  2574. bool isInDebugMode = m_isInDebugMode
  2575. #if DBG_DUMP
  2576. || Js::Configuration::Global.flags.Debug
  2577. #endif // DBG_DUMP
  2578. ;
  2579. AssertMsg(isInDebugMode, "Property offsets should only ever be updated in debug mode (not used in non-debug).");
  2580. #endif // DBG
  2581. Assert(currentDebuggerScope);
  2582. if (shouldConsumeRegister)
  2583. {
  2584. Assert(location != Js::Constants::NoRegister);
  2585. location = ConsumeReg(location);
  2586. }
  2587. if (byteCodeOffset == Constants::InvalidOffset)
  2588. {
  2589. // Use the current offset if no offset is passed in.
  2590. byteCodeOffset = this->m_byteCodeData.GetCurrentOffset();
  2591. }
  2592. // Search through the scope chain starting with the current up through the parents to see if the
  2593. // property can be found and updated.
  2594. while (currentDebuggerScope != nullptr)
  2595. {
  2596. if (currentDebuggerScope->UpdatePropertyInitializationOffset(location, propertyId, byteCodeOffset, isFunctionDeclaration))
  2597. {
  2598. break;
  2599. }
  2600. currentDebuggerScope = currentDebuggerScope->GetParentScope();
  2601. }
  2602. }
  2603. void ByteCodeWriter::RecordFrameDisplayRegister(RegSlot slot)
  2604. {
  2605. slot = ConsumeReg(slot);
  2606. m_functionWrite->RecordFrameDisplayRegister(slot);
  2607. }
  2608. void ByteCodeWriter::RecordObjectRegister(RegSlot slot)
  2609. {
  2610. slot = ConsumeReg(slot);
  2611. m_functionWrite->RecordObjectRegister(slot);
  2612. }
  2613. void ByteCodeWriter::RecordStatementAdjustment(FunctionBody::StatementAdjustmentType type)
  2614. {
  2615. if (m_isInDebugMode)
  2616. {
  2617. m_functionWrite->RecordStatementAdjustment(m_byteCodeData.GetCurrentOffset(), type);
  2618. }
  2619. }
  2620. void ByteCodeWriter::RecordCrossFrameEntryExitRecord(bool isEnterBlock)
  2621. {
  2622. if (m_isInDebugMode)
  2623. {
  2624. m_functionWrite->RecordCrossFrameEntryExitRecord(m_byteCodeData.GetCurrentOffset(), isEnterBlock);
  2625. }
  2626. }
  2627. void ByteCodeWriter::RecordForInOrOfCollectionScope()
  2628. {
  2629. if (m_isInDebugMode && this->m_currentDebuggerScope != nullptr)
  2630. {
  2631. this->m_currentDebuggerScope->UpdatePropertiesInForInOrOfCollectionScope();
  2632. }
  2633. }
  2634. uint ByteCodeWriter::EnterLoop(Js::ByteCodeLabel loopEntrance)
  2635. {
  2636. #ifdef BYTECODE_BRANCH_ISLAND
  2637. if (useBranchIsland)
  2638. {
  2639. // If we are going to emit a branch island, it should be before the loop header
  2640. this->EnsureLongBranch(Js::OpCode::StatementBoundary);
  2641. }
  2642. #endif
  2643. uint loopId = m_functionWrite->IncrLoopCount();
  2644. Assert((uint)m_loopHeaders->Count() == loopId);
  2645. m_loopHeaders->Add(LoopHeaderData(m_byteCodeData.GetCurrentOffset(), 0, m_loopNest > 0));
  2646. m_loopNest++;
  2647. m_functionWrite->SetHasNestedLoop(m_loopNest > 1);
  2648. Js::OpCode loopBodyOpcode = Js::OpCode::LoopBodyStart;
  2649. #if ENABLE_PROFILE_INFO
  2650. if (Js::DynamicProfileInfo::EnableImplicitCallFlags(GetFunctionWrite()))
  2651. {
  2652. this->Unsigned1(Js::OpCode::ProfiledLoopStart, loopId);
  2653. loopBodyOpcode = Js::OpCode::ProfiledLoopBodyStart;
  2654. }
  2655. #endif
  2656. this->MarkLabel(loopEntrance);
  2657. if (this->DoJitLoopBodies() || this->DoInterruptProbes())
  2658. {
  2659. this->Unsigned1(loopBodyOpcode, loopId);
  2660. }
  2661. return loopId;
  2662. }
  2663. void ByteCodeWriter::ExitLoop(uint loopId)
  2664. {
  2665. #if ENABLE_PROFILE_INFO
  2666. if (Js::DynamicProfileInfo::EnableImplicitCallFlags(GetFunctionWrite()))
  2667. {
  2668. this->Unsigned1(Js::OpCode::ProfiledLoopEnd, loopId);
  2669. }
  2670. #endif
  2671. Assert(m_loopNest > 0);
  2672. m_loopNest--;
  2673. m_loopHeaders->Item(loopId).endOffset = m_byteCodeData.GetCurrentOffset();
  2674. }
  2675. void ByteCodeWriter::IncreaseByteCodeCount()
  2676. {
  2677. m_byteCodeCount++;
  2678. if (m_loopNest > 0)
  2679. {
  2680. m_byteCodeInLoopCount++;
  2681. }
  2682. }
  2683. void ByteCodeWriter::Data::Create(uint initSize, ArenaAllocator* tmpAlloc)
  2684. {
  2685. //
  2686. // Allocate the initial byte-code block to write into.
  2687. //
  2688. tempAllocator = tmpAlloc;
  2689. AssertMsg(head == nullptr, "Missing dispose?");
  2690. currentOffset = 0;
  2691. head = Anew(tempAllocator, DataChunk, tempAllocator, initSize);
  2692. current = head;
  2693. }
  2694. void ByteCodeWriter::Data::Reset()
  2695. {
  2696. currentOffset = 0;
  2697. DataChunk* currentChunk = head;
  2698. while (currentChunk)
  2699. {
  2700. // reset to the starting point
  2701. currentChunk->Reset();
  2702. currentChunk = currentChunk->nextChunk;
  2703. }
  2704. current = head;
  2705. }
  2706. void ByteCodeWriter::Data::SetCurrent(uint offset, DataChunk* currChunk)
  2707. {
  2708. this->current = currChunk;
  2709. this->currentOffset = offset;
  2710. }
/// Copies its contents to a final contiguous section of memory.
/// 'alloc' is the recycler that owns the resulting ByteBlock; '*finalBlock'
/// receives the block, or nullptr when nothing was written.
void ByteCodeWriter::Data::Copy(Recycler* alloc, ByteBlock ** finalBlock)
{
    AssertMsg(finalBlock != nullptr, "Must have valid storage");

    uint cbFinalData = GetCurrentOffset();
    if (cbFinalData == 0)
    {
        // Nothing was written; no block is produced.
        *finalBlock = nullptr;
    }
    else
    {
        ByteBlock* finalByteCodeBlock = ByteBlock::New(alloc, /*initialContent*/nullptr, cbFinalData);

        // Walk the chunk chain, appending each chunk's bytes until all
        // cbFinalData bytes have been copied.
        DataChunk* currentChunk = head;
        size_t bytesLeftToCopy = cbFinalData;
        byte* currentDest = finalByteCodeBlock->GetBuffer();
        while (true)
        {
            if (bytesLeftToCopy <= currentChunk->GetSize())
            {
                // Final (possibly partial) chunk: copy the remainder and stop.
                js_memcpy_s(currentDest, bytesLeftToCopy, currentChunk->GetBuffer(), bytesLeftToCopy);
                break;
            }

            // Whole chunk fits in what remains; copy it all and advance.
            js_memcpy_s(currentDest, bytesLeftToCopy, currentChunk->GetBuffer(), currentChunk->GetSize());
            bytesLeftToCopy -= currentChunk->GetSize();
            currentDest += currentChunk->GetSize();
            currentChunk = currentChunk->nextChunk;
            AssertMsg(currentChunk, "We are copying more data than we have!");
        }

        *finalBlock = finalByteCodeBlock;
    }
}
// Encodes just the opcode for the small layout. Opcodes that fit in one
// byte are written directly; larger values are written as an
// ExtendedOpcodePrefix byte followed by the opcode's low byte. Returns the
// offset of the instruction's first byte.
template <>
__inline uint ByteCodeWriter::Data::EncodeT<SmallLayout>(OpCode op, ByteCodeWriter* writer)
{
#ifdef BYTECODE_BRANCH_ISLAND
    if (writer->useBranchIsland)
    {
        // Give the branch-island machinery a chance to flush before this op.
        writer->EnsureLongBranch(op);
    }
#endif

    Assert(op < Js::OpCode::ByteCodeLast);
    Assert(!OpCodeAttr::BackEndOnly(op));
    uint offset;
    if (op <= Js::OpCode::MaxByteSizedOpcodes)
    {
        // Opcode fits in a single byte.
        byte byteop = (byte)op;
        offset = Write(&byteop, sizeof(byte));
    }
    else
    {
        // Extended opcode: prefix byte, then the opcode's low byte.
        byte byteop = (byte)Js::OpCode::ExtendedOpcodePrefix;
        offset = Write(&byteop, sizeof(byte));
        byteop = (byte)op;
        Write(&byteop, sizeof(byte));
    }

    // Ld_A is excluded from the m_byteCodeWithoutLDACount statistic.
    if (op != Js::OpCode::Ld_A)
    {
        writer->m_byteCodeWithoutLDACount++;
    }
    writer->IncreaseByteCodeCount();
    return offset;
}
  2773. template <LayoutSize layoutSize>
  2774. __inline uint ByteCodeWriter::Data::EncodeT(OpCode op, ByteCodeWriter* writer)
  2775. {
  2776. #ifdef BYTECODE_BRANCH_ISLAND
  2777. if (writer->useBranchIsland)
  2778. {
  2779. writer->EnsureLongBranch(op);
  2780. }
  2781. #endif
  2782. Assert(op < Js::OpCode::ByteCodeLast);
  2783. Assert(!OpCodeAttr::BackEndOnly(op));
  2784. Assert(OpCodeAttr::HasMultiSizeLayout(op));
  2785. CompileAssert(layoutSize != SmallLayout);
  2786. const byte exop = (byte)((op <= Js::OpCode::MaxByteSizedOpcodes) ?
  2787. (layoutSize == LargeLayout ? Js::OpCode::LargeLayoutPrefix : Js::OpCode::MediumLayoutPrefix) :
  2788. (layoutSize == LargeLayout ? Js::OpCode::ExtendedLargeLayoutPrefix : Js::OpCode::ExtendedMediumLayoutPrefix));
  2789. uint offset = Write(&exop, sizeof(byte));
  2790. Write(&op, sizeof(byte));
  2791. if (op != Js::OpCode::Ld_A)
  2792. {
  2793. writer->m_byteCodeWithoutLDACount++;
  2794. }
  2795. writer->IncreaseByteCodeCount();
  2796. return offset;
  2797. }
  2798. template <LayoutSize layoutSize>
  2799. __inline uint ByteCodeWriter::Data::EncodeT(OpCode op, const void* rawData, int byteSize, ByteCodeWriter* writer)
  2800. {
  2801. AssertMsg((rawData != nullptr) && (byteSize < 100), "Ensure valid data for opcode");
  2802. uint offset = EncodeT<layoutSize>(op, writer);
  2803. Write(rawData, byteSize);
  2804. return offset;
  2805. }
  2806. __inline uint ByteCodeWriter::Data::Encode(const void* rawData, int byteSize)
  2807. {
  2808. AssertMsg(rawData != nullptr, "Ensure valid data for opcode");
  2809. return Write(rawData, byteSize);
  2810. }
  2811. __inline uint ByteCodeWriter::Data::Write(__in_bcount(byteSize) const void* data, __in uint byteSize)
  2812. {
  2813. // Simple case where the current chunk has enough space.
  2814. uint bytesFree = current->RemainingBytes();
  2815. if (bytesFree >= byteSize)
  2816. {
  2817. current->WriteUnsafe(data, byteSize);
  2818. }
  2819. else
  2820. {
  2821. SlowWrite(data, byteSize);
  2822. }
  2823. uint offset = currentOffset;
  2824. currentOffset = offset + byteSize;
  2825. return offset;
  2826. }
  2827. /// Requires buffer extension.
  2828. __declspec(noinline) void ByteCodeWriter::Data::SlowWrite(__in_bcount(byteSize) const void* data, __in uint byteSize)
  2829. {
  2830. AssertMsg(byteSize > current->RemainingBytes(), "We should not need an extension if there is enough space in the current chunk");
  2831. uint bytesLeftToWrite = byteSize;
  2832. byte* dataToBeWritten = (byte*)data;
  2833. // the next chunk may already be created in the case that we are patching bytecode.
  2834. // If so, we want to move the pointer to the beginning of the buffer
  2835. if (current->nextChunk)
  2836. {
  2837. current->nextChunk->SetCurrentOffset(0);
  2838. }
  2839. while (true)
  2840. {
  2841. uint bytesFree = current->RemainingBytes();
  2842. if (bytesFree >= bytesLeftToWrite)
  2843. {
  2844. current->WriteUnsafe(dataToBeWritten, bytesLeftToWrite);
  2845. break;
  2846. }
  2847. current->WriteUnsafe(dataToBeWritten, bytesFree);
  2848. bytesLeftToWrite -= bytesFree;
  2849. dataToBeWritten += bytesFree;
  2850. // Create a new chunk when needed
  2851. if (!current->nextChunk)
  2852. {
  2853. AddChunk(bytesLeftToWrite);
  2854. }
  2855. current = current->nextChunk;
  2856. }
  2857. }
  2858. void ByteCodeWriter::Data::AddChunk(uint byteSize)
  2859. {
  2860. AssertMsg(current->nextChunk == nullptr, "Do we really need to grow?");
  2861. // For some data elements i.e. bytecode we have a good initial size and
  2862. // therefore, we use a conservative growth strategy - and grow by a fixed size.
  2863. uint newSize = fixedGrowthPolicy ? max(byteSize, static_cast<uint>(3 * AutoSystemInfo::PageSize)) : max(byteSize, static_cast<uint>(current->GetSize() * 2));
  2864. DataChunk* newChunk = Anew(tempAllocator, DataChunk, tempAllocator, newSize);
  2865. current->nextChunk = newChunk;
  2866. }
  2867. #if DBG_DUMP
// Debug-dump statistic: number of bytes written so far to the main bytecode stream.
uint ByteCodeWriter::ByteCodeDataSize()
{
    return m_byteCodeData.GetCurrentOffset();
}
// Debug-dump statistic: number of bytes written so far to the auxiliary data stream.
uint ByteCodeWriter::AuxiliaryDataSize()
{
    return m_auxiliaryData.GetCurrentOffset();
}
// Debug-dump statistic: number of bytes written so far to the auxiliary context data stream.
uint ByteCodeWriter::AuxiliaryContextDataSize()
{
    return m_auxContextData.GetCurrentOffset();
}
  2880. #endif
  2881. } // namespace Js