LinearScanMD.cpp 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "BackEnd.h"
  6. #include "SCCLiveness.h"
  7. extern const IRType RegTypes[RegNumCount];
  8. LinearScanMD::LinearScanMD(Func *func)
  9. : helperSpillSlots(nullptr),
  10. maxOpHelperSpilledLiveranges(0),
  11. func(func)
  12. {
  13. this->byteableRegsBv.ClearAll();
  14. FOREACH_REG(reg)
  15. {
  16. if (LinearScan::GetRegAttribs(reg) & RA_BYTEABLE)
  17. {
  18. this->byteableRegsBv.Set(reg);
  19. }
  20. } NEXT_REG;
  21. memset(this->xmmSymTable128, 0, sizeof(this->xmmSymTable128));
  22. memset(this->xmmSymTable64, 0, sizeof(this->xmmSymTable64));
  23. memset(this->xmmSymTable32, 0, sizeof(this->xmmSymTable32));
  24. }
  25. BitVector
  26. LinearScanMD::FilterRegIntSizeConstraints(BitVector regsBv, BitVector sizeUsageBv) const
  27. {
  28. // Requires byte-able reg?
  29. if (sizeUsageBv.Test(1))
  30. {
  31. regsBv.And(this->byteableRegsBv);
  32. }
  33. return regsBv;
  34. }
  35. bool
  36. LinearScanMD::FitRegIntSizeConstraints(RegNum reg, BitVector sizeUsageBv) const
  37. {
  38. // Requires byte-able reg?
  39. return !sizeUsageBv.Test(1) || this->byteableRegsBv.Test(reg);
  40. }
  41. bool
  42. LinearScanMD::FitRegIntSizeConstraints(RegNum reg, IRType type) const
  43. {
  44. // Requires byte-able reg?
  45. return TySize[type] != 1 || this->byteableRegsBv.Test(reg);
  46. }
  47. StackSym *
  48. LinearScanMD::EnsureSpillSymForXmmReg(RegNum reg, Func *func, IRType type)
  49. {
  50. Assert(REGNUM_ISXMMXREG(reg));
  51. __analysis_assume(reg - FIRST_XMM_REG < XMM_REGCOUNT);
  52. StackSym *sym;
  53. if (type == TyFloat32)
  54. {
  55. sym = this->xmmSymTable32[reg - FIRST_XMM_REG];
  56. }
  57. else if (type == TyFloat64)
  58. {
  59. sym = this->xmmSymTable64[reg - FIRST_XMM_REG];
  60. }
  61. else
  62. {
  63. Assert(IRType_IsSimd128(type));
  64. sym = this->xmmSymTable128[reg - FIRST_XMM_REG];
  65. }
  66. if (sym == nullptr)
  67. {
  68. sym = StackSym::New(type, func);
  69. func->StackAllocate(sym, TySize[type]);
  70. __analysis_assume(reg - FIRST_XMM_REG < XMM_REGCOUNT);
  71. if (type == TyFloat32)
  72. {
  73. this->xmmSymTable32[reg - FIRST_XMM_REG] = sym;
  74. }
  75. else if (type == TyFloat64)
  76. {
  77. this->xmmSymTable64[reg - FIRST_XMM_REG] = sym;
  78. }
  79. else
  80. {
  81. Assert(IRType_IsSimd128(type));
  82. this->xmmSymTable128[reg - FIRST_XMM_REG] = sym;
  83. }
  84. }
  85. return sym;
  86. }
  87. void
  88. LinearScanMD::LegalizeConstantUse(IR::Instr * instr, IR::Opnd * opnd)
  89. {
  90. Assert(opnd->IsAddrOpnd() || opnd->IsIntConstOpnd());
  91. intptr value = opnd->IsAddrOpnd() ? (intptr)opnd->AsAddrOpnd()->m_address : opnd->AsIntConstOpnd()->GetValue();
  92. if (value == 0
  93. && instr->m_opcode == Js::OpCode::MOV
  94. && !instr->GetDst()->IsRegOpnd()
  95. && TySize[opnd->GetType()] >= 4)
  96. {
  97. Assert(this->linearScan->instrUseRegs.IsEmpty());
  98. // MOV doesn't have a imm8 encoding for 32-bit/64-bit assignment, so if we have a register available,
  99. // we should hoist it and generate xor reg, reg and MOV dst, reg
  100. BitVector regsBv;
  101. regsBv.Copy(this->linearScan->activeRegs);
  102. regsBv.ComplimentAll();
  103. regsBv.And(this->linearScan->int32Regs);
  104. regsBv.Minus(this->linearScan->tempRegs); // Avoid tempRegs
  105. BVIndex regIndex = regsBv.GetNextBit();
  106. if (regIndex != BVInvalidIndex)
  107. {
  108. instr->HoistSrc1(Js::OpCode::MOV, (RegNum)regIndex);
  109. this->linearScan->instrUseRegs.Set(regIndex);
  110. this->func->m_regsUsed.Set(regIndex);
  111. // If we are in a loop, we need to mark the register being used by the loop so that
  112. // reload to that register will not be hoisted out of the loop
  113. this->linearScan->RecordLoopUse(nullptr, (RegNum)regIndex);
  114. }
  115. }
  116. }
  117. void
  118. LinearScanMD::InsertOpHelperSpillAndRestores(SList<OpHelperBlock> *opHelperBlockList)
  119. {
  120. if (maxOpHelperSpilledLiveranges)
  121. {
  122. Assert(!helperSpillSlots);
  123. helperSpillSlots = AnewArrayZ(linearScan->GetTempAlloc(), StackSym *, maxOpHelperSpilledLiveranges);
  124. }
  125. FOREACH_SLIST_ENTRY(OpHelperBlock, opHelperBlock, opHelperBlockList)
  126. {
  127. InsertOpHelperSpillsAndRestores(opHelperBlock);
  128. }
  129. NEXT_SLIST_ENTRY;
  130. }
// Inserts the save (right after the helper label) and, when the lifetime is
// reloaded, the restore (right before the helper end instruction) for each
// lifetime spilled across a single opHelper block.
//
// XMM lifetimes use per-register cached spill syms (EnsureSpillSymForXmmReg);
// integer lifetimes share the lazily-allocated helperSpillSlots array, indexed
// in discovery order within this block.
void
LinearScanMD::InsertOpHelperSpillsAndRestores(const OpHelperBlock& opHelperBlock)
{
    // Index into helperSpillSlots for integer spills in this block.
    uint32 index = 0;

    FOREACH_SLIST_ENTRY(OpHelperSpilledLifetime, opHelperSpilledLifetime, &opHelperBlock.spilledLifetime)
    {
        // Use the original sym as spill slot if this is an inlinee arg
        StackSym* sym = nullptr;
        if (opHelperSpilledLifetime.spillAsArg)
        {
            sym = opHelperSpilledLifetime.lifetime->sym;
            AnalysisAssert(sym);
            Assert(sym->IsAllocated());
        }
        if (RegTypes[opHelperSpilledLifetime.reg] == TyFloat64)
        {
            // Floating-point / SIMD register: spill at the lifetime's own width.
            IRType type = opHelperSpilledLifetime.lifetime->sym->GetType();
            IR::RegOpnd *regOpnd = IR::RegOpnd::New(nullptr, opHelperSpilledLifetime.reg, type, this->func);
            if (!sym)
            {
                sym = EnsureSpillSymForXmmReg(regOpnd->GetReg(), this->func, type);
            }
            // Save: [sym] = reg, inserted immediately after the helper label.
            IR::Instr *pushInstr = IR::Instr::New(LowererMDArch::GetAssignOp(type), IR::SymOpnd::New(sym, type, this->func), regOpnd, this->func);
            opHelperBlock.opHelperLabel->InsertAfter(pushInstr);
            pushInstr->CopyNumber(opHelperBlock.opHelperLabel);
            if (opHelperSpilledLifetime.reload)
            {
                // Restore: reg = [sym], inserted just before the helper end.
                IR::Instr *popInstr = IR::Instr::New(LowererMDArch::GetAssignOp(type), regOpnd, IR::SymOpnd::New(sym, type, this->func), this->func);
                opHelperBlock.opHelperEndInstr->InsertBefore(popInstr);
                popInstr->CopyNumber(opHelperBlock.opHelperEndInstr);
            }
        }
        else
        {
            // Integer register: use a shared machine-register-sized slot.
            Assert(helperSpillSlots);
            Assert(index < maxOpHelperSpilledLiveranges);
            if (!sym)
            {
                // Lazily allocate only as many slots as we really need.
                if (!helperSpillSlots[index])
                {
                    helperSpillSlots[index] = StackSym::New(TyMachReg, func);
                }
                sym = helperSpillSlots[index];
                index++;
                Assert(sym);
                func->StackAllocate(sym, MachRegInt);
            }
            IR::RegOpnd * regOpnd = IR::RegOpnd::New(nullptr, opHelperSpilledLifetime.reg, sym->GetType(), func);
            // Save after the label's next instruction; restore before helper end.
            LowererMD::CreateAssign(IR::SymOpnd::New(sym, sym->GetType(), func), regOpnd, opHelperBlock.opHelperLabel->m_next);
            if (opHelperSpilledLifetime.reload)
            {
                LowererMD::CreateAssign(regOpnd, IR::SymOpnd::New(sym, sym->GetType(), func), opHelperBlock.opHelperEndInstr);
            }
        }
    }
    NEXT_SLIST_ENTRY;
}
  189. void
  190. LinearScanMD::EndOfHelperBlock(uint32 helperSpilledLiveranges)
  191. {
  192. if (helperSpilledLiveranges > maxOpHelperSpilledLiveranges)
  193. {
  194. maxOpHelperSpilledLiveranges = helperSpilledLiveranges;
  195. }
  196. }
// Lowers a bailout call site: saves live register values into argument-slot
// syms, loads the bailout record (and optional branch condition) into the
// amd64 argument registers, and routes the helper call through rax.
//
// instr                 - the bailout instruction; its src1 is the helper call opnd.
// registerSaveSyms      - per-register array (indexed by reg - 1) of syms holding
//                         each register's value, or null if the register is dead.
// registerSaveSymsCount - number of entries in registerSaveSyms.
void
LinearScanMD::GenerateBailOut(IR::Instr * instr, __in_ecount(registerSaveSymsCount) StackSym ** registerSaveSyms, uint registerSaveSymsCount)
{
    Func *const func = instr->m_func;
    BailOutInfo *const bailOutInfo = instr->GetBailOutInfo();
    // Remember the instruction before any code we insert; advanced below to
    // become the first inserted instruction (used for RecordUse placement).
    IR::Instr *firstInstr = instr->m_prev;
    // Save registers used for parameters, and rax, if necessary, into the shadow space allocated for register parameters:
    //     mov [rsp + 16], rdx
    //     mov [rsp + 8], rcx
    //     mov [rsp], rax
    // Start at rdx only when a branch condition will occupy rdx; walk down to rax.
    for(RegNum reg = bailOutInfo->branchConditionOpnd ? RegRDX : RegRCX;
        reg != RegNOREG;
        reg = static_cast<RegNum>(reg - 1))
    {
        StackSym *const stackSym = registerSaveSyms[reg - 1];
        if(!stackSym)
        {
            // Register not live at the bailout; nothing to save.
            continue;
        }
        const IRType regType = RegTypes[reg];
        Lowerer::InsertMove(
            IR::SymOpnd::New(func->m_symTable->GetArgSlotSym(static_cast<Js::ArgSlot>(reg)), regType, func),
            IR::RegOpnd::New(stackSym, reg, regType, func),
            instr);
    }
    if(bailOutInfo->branchConditionOpnd)
    {
        // Pass in the branch condition
        //     mov rdx, condition
        IR::Instr *const newInstr =
            Lowerer::InsertMove(
                IR::RegOpnd::New(nullptr, RegRDX, bailOutInfo->branchConditionOpnd->GetType(), func),
                bailOutInfo->branchConditionOpnd,
                instr);
        linearScan->SetSrcRegs(newInstr);
    }
    // Pass in the bailout record
    //     mov rcx, bailOutRecord
    Lowerer::InsertMove(
        IR::RegOpnd::New(nullptr, RegRCX, TyMachPtr, func),
        IR::AddrOpnd::New(bailOutInfo->bailOutRecord, IR::AddrOpndKindDynamicBailOutRecord, func, true),
        instr);
    // firstInstr now points at the first instruction inserted above.
    firstInstr = firstInstr->m_next;
    for(uint i = 0; i < registerSaveSymsCount; i++)
    {
        StackSym *const stackSym = registerSaveSyms[i];
        if(!stackSym)
        {
            continue;
        }
        // Record the use on the lifetime in case it spilled afterwards. Spill loads will be inserted before 'firstInstr', that
        // is, before the register saves are done.
        this->linearScan->RecordUse(stackSym->scratch.linearScan.lifetime, firstInstr, nullptr, true);
    }
    // Load the bailout target into rax
    //     mov rax, BailOut
    //     call rax
    Assert(instr->GetSrc1()->IsHelperCallOpnd());
    Lowerer::InsertMove(IR::RegOpnd::New(nullptr, RegRAX, TyMachPtr, func), instr->GetSrc1(), instr);
    instr->ReplaceSrc1(IR::RegOpnd::New(nullptr, RegRAX, TyMachPtr, func));
}
  258. // Gets the InterpreterStackFrame pointer into RAX.
  259. // Restores the live stack locations followed by the live registers from
  260. // the interpreter's register slots.
  261. // RecordDefs each live register that is restored.
  262. //
  263. // Generates the following code:
  264. //
  265. // MOV rax, param0
  266. // MOV rax, [rax + JavascriptGenerator::GetFrameOffset()]
  267. //
  268. // for each live stack location, sym
  269. //
  270. // MOV rcx, [rax + regslot offset]
  271. // MOV sym(stack location), rcx
  272. //
  273. // for each live register, sym (rax is restore last if it is live)
  274. //
  275. // MOV sym(register), [rax + regslot offset]
  276. //
IR::Instr *
LinearScanMD::GenerateBailInForGeneratorYield(IR::Instr * resumeLabelInstr, BailOutInfo * bailOutInfo)
{
    // All restore code is inserted before instrAfter, i.e. right after the
    // resume label; instrAfter itself is returned to the caller unchanged.
    IR::Instr * instrAfter = resumeLabelInstr->m_next;
    // rax: pointer to the InterpreterStackFrame; rcx: scratch for stack restores.
    IR::RegOpnd * raxRegOpnd = IR::RegOpnd::New(nullptr, RegRAX, TyMachPtr, this->func);
    IR::RegOpnd * rcxRegOpnd = IR::RegOpnd::New(nullptr, RegRCX, TyVar, this->func);

    // MOV rax, param0 (the generator object)
    StackSym * sym = StackSym::NewParamSlotSym(1, this->func);
    this->func->SetArgOffset(sym, LowererMD::GetFormalParamOffset() * MachPtr);
    IR::SymOpnd * symOpnd = IR::SymOpnd::New(sym, TyMachPtr, this->func);
    LinearScan::InsertMove(raxRegOpnd, symOpnd, instrAfter);

    // MOV rax, [rax + JavascriptGenerator::GetFrameOffset()] -- the interpreter frame
    IR::IndirOpnd * indirOpnd = IR::IndirOpnd::New(raxRegOpnd, Js::JavascriptGenerator::GetFrameOffset(), TyMachPtr, this->func);
    LinearScan::InsertMove(raxRegOpnd, indirOpnd, instrAfter);

    // rax points to the frame, restore stack syms and registers except rax, restore rax last
    IR::Instr * raxRestoreInstr = nullptr;
    // Two insertion cursors: stack restores go before register restores so
    // rax/rcx stay free while spilled syms are being restored.
    IR::Instr * instrInsertStackSym = instrAfter;
    IR::Instr * instrInsertRegSym = instrAfter;

    // Yield bail-ins only deal with var-typed bytecode syms; nothing else
    // should be live here.
    Assert(bailOutInfo->capturedValues.constantValues.Empty());
    Assert(bailOutInfo->capturedValues.copyPropSyms.Empty());
    Assert(bailOutInfo->liveLosslessInt32Syms->IsEmpty());
    Assert(bailOutInfo->liveFloat64Syms->IsEmpty());

    // Restores one sym from the interpreter frame's register slot at regSlot.
    auto restoreSymFn = [this, &raxRegOpnd, &rcxRegOpnd, &raxRestoreInstr, &instrInsertStackSym, &instrInsertRegSym](Js::RegSlot regSlot, StackSym* stackSym)
    {
        Assert(stackSym->IsVar());

        int32 offset = regSlot * sizeof(Js::Var) + Js::InterpreterStackFrame::GetOffsetOfLocals();
        IR::Opnd * srcOpnd = IR::IndirOpnd::New(raxRegOpnd, offset, stackSym->GetType(), this->func);
        Lifetime * lifetime = stackSym->scratch.linearScan.lifetime;

        if (lifetime->isSpilled)
        {
            // stack restores require an extra register since we can't move an indir directly to an indir on amd64
            IR::SymOpnd * dstOpnd = IR::SymOpnd::New(stackSym, stackSym->GetType(), this->func);
            LinearScan::InsertMove(rcxRegOpnd, srcOpnd, instrInsertStackSym);
            LinearScan::InsertMove(dstOpnd, rcxRegOpnd, instrInsertStackSym);
        }
        else
        {
            // register restores must come after stack restores so that we have RAX and RCX free to
            // use for stack restores and further RAX must be restored last since it holds the
            // pointer to the InterpreterStackFrame from which we are restoring values.
            // We must also track these restores using RecordDef in case the symbols are spilled.
            IR::RegOpnd * dstRegOpnd = IR::RegOpnd::New(stackSym, stackSym->GetType(), this->func);
            dstRegOpnd->SetReg(lifetime->reg);
            IR::Instr * instr = LinearScan::InsertMove(dstRegOpnd, srcOpnd, instrInsertRegSym);

            if (instrInsertRegSym == instrInsertStackSym)
            {
                // this is the first register sym, make sure we don't insert stack stores
                // after this instruction so we can ensure rax and rcx remain free to use
                // for restoring spilled stack syms.
                instrInsertStackSym = instr;
            }

            if (lifetime->reg == RegRAX)
            {
                // ensure rax is restored last
                Assert(instrInsertRegSym != instrInsertStackSym);
                instrInsertRegSym = instr;

                if (raxRestoreInstr != nullptr)
                {
                    AssertMsg(false, "this is unexpected until copy prop is enabled");
                    // rax was mapped to multiple bytecode registers. Obviously only the first
                    // restore we do will work so change all following stores to `mov rax, rax`.
                    // We still need to keep them around for RecordDef in case the corresponding
                    // dst sym is spilled later on.
                    raxRestoreInstr->FreeSrc1();
                    raxRestoreInstr->SetSrc1(raxRegOpnd);
                }
                raxRestoreInstr = instr;
            }

            this->linearScan->RecordDef(lifetime, instr, 0);
        }
    };

    // Restore every bytecode sym live on entry to the resume point.
    FOREACH_BITSET_IN_SPARSEBV(symId, bailOutInfo->byteCodeUpwardExposedUsed)
    {
        StackSym* stackSym = this->func->m_symTable->FindStackSym(symId);
        restoreSymFn(stackSym->GetByteCodeRegSlot(), stackSym);
    }
    NEXT_BITSET_IN_SPARSEBV;

    // Also restore any captured arguments-object syms.
    if (bailOutInfo->capturedValues.argObjSyms)
    {
        FOREACH_BITSET_IN_SPARSEBV(symId, bailOutInfo->capturedValues.argObjSyms)
        {
            StackSym* stackSym = this->func->m_symTable->FindStackSym(symId);
            restoreSymFn(stackSym->GetByteCodeRegSlot(), stackSym);
        }
        NEXT_BITSET_IN_SPARSEBV;
    }

    // ArgOut syms live past locals in the interpreter frame's slot array.
    Js::RegSlot localsCount = this->func->GetJnFunction()->GetLocalsCount();
    bailOutInfo->IterateArgOutSyms([localsCount, &restoreSymFn](uint, uint argOutSlotOffset, StackSym* sym) {
        restoreSymFn(localsCount + argOutSlotOffset, sym);
    });

    return instrAfter;
}
  367. uint LinearScanMD::GetRegisterSaveIndex(RegNum reg)
  368. {
  369. if (RegTypes[reg] == TyFloat64)
  370. {
  371. // make room for maximum XMM reg size
  372. Assert(reg >= RegXMM0);
  373. return (reg - RegXMM0) * (sizeof(SIMDValue) / sizeof(Js::Var)) + RegXMM0;
  374. }
  375. else
  376. {
  377. return reg;
  378. }
  379. }
  380. RegNum LinearScanMD::GetRegisterFromSaveIndex(uint offset)
  381. {
  382. return (RegNum)(offset >= RegXMM0 ? (offset - RegXMM0) / (sizeof(SIMDValue) / sizeof(Js::Var)) + RegXMM0 : offset);
  383. }