// LinearScanMD.cpp — ARM machine-dependent support for the linear scan register allocator.
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "Backend.h"
  6. #include "SccLiveness.h"
  7. extern const IRType RegTypes[RegNumCount];
  8. LinearScanMD::LinearScanMD(Func *func)
  9. : helperSpillSlots(nullptr),
  10. func(func),
  11. maxOpHelperSpilledLiveranges(0)
  12. {
  13. }
  14. void
  15. LinearScanMD::Init(LinearScan *linearScan)
  16. {
  17. LinearScanMDShared::Init(linearScan);
  18. Func *func = linearScan->func;
  19. RegNum localsReg = func->GetLocalsPointer();
  20. if (localsReg != RegSP)
  21. {
  22. func->m_regsUsed.Set(localsReg);
  23. }
  24. memset(this->vfpSymTable, 0, sizeof(this->vfpSymTable));
  25. }
  26. StackSym *
  27. LinearScanMD::EnsureSpillSymForVFPReg(RegNum reg, Func *func)
  28. {
  29. Assert(REGNUM_ISVFPREG(reg));
  30. __analysis_assume(reg - RegD0 < VFP_REGCOUNT);
  31. StackSym *sym = this->vfpSymTable[reg - RegD0];
  32. if (sym == nullptr)
  33. {
  34. sym = StackSym::New(TyFloat64, func);
  35. func->StackAllocate(sym, MachRegDouble);
  36. __analysis_assume(reg - RegD0 < VFP_REGCOUNT);
  37. this->vfpSymTable[reg - RegD0] = sym;
  38. }
  39. return sym;
  40. }
  41. bool
  42. LinearScanMD::IsAllocatable(RegNum reg, Func *func) const
  43. {
  44. return reg != func->GetLocalsPointer();
  45. }
  46. BitVector
  47. LinearScanMD::FilterRegIntSizeConstraints(BitVector regsBv, BitVector sizeUsageBv) const
  48. {
  49. return regsBv;
  50. }
  51. bool
  52. LinearScanMD::FitRegIntSizeConstraints(RegNum reg, BitVector sizeUsageBv) const
  53. {
  54. return true;
  55. }
  56. bool
  57. LinearScanMD::FitRegIntSizeConstraints(RegNum reg, IRType type) const
  58. {
  59. return true;
  60. }
  61. void
  62. LinearScanMD::InsertOpHelperSpillAndRestores(SList<OpHelperBlock> *opHelperBlockList)
  63. {
  64. if (maxOpHelperSpilledLiveranges)
  65. {
  66. Assert(!helperSpillSlots);
  67. helperSpillSlots = AnewArrayZ(linearScan->GetTempAlloc(), StackSym *, maxOpHelperSpilledLiveranges);
  68. }
  69. FOREACH_SLIST_ENTRY(OpHelperBlock, opHelperBlock, opHelperBlockList)
  70. {
  71. InsertOpHelperSpillsAndRestores(opHelperBlock);
  72. }
  73. NEXT_SLIST_ENTRY;
  74. }
// For one helper block, insert instructions that save each spilled lifetime's
// register right after the helper label and, when the lifetime is reloaded,
// restore it just before the block's end instruction. VFP (double) registers
// go through per-register cached syms via FSTR/FLDR; integer registers share
// the lazily populated helperSpillSlots pool via STR/LDR.
void
LinearScanMD::InsertOpHelperSpillsAndRestores(const OpHelperBlock& opHelperBlock)
{
    // Next unused slot in the shared integer spill-slot pool for this block.
    uint32 index = 0;

    FOREACH_SLIST_ENTRY(OpHelperSpilledLifetime, opHelperSpilledLifetime, &opHelperBlock.spilledLifetime)
    {
        // Use the original sym as spill slot if this is an inlinee arg
        StackSym* sym = nullptr;
        if (opHelperSpilledLifetime.spillAsArg)
        {
            sym = opHelperSpilledLifetime.lifetime->sym;
            AnalysisAssert(sym);
            Assert(sym->IsAllocated());
        }

        if (RegTypes[opHelperSpilledLifetime.reg] == TyFloat64)
        {
            // Double-precision register: store to its spill sym on entry...
            IR::RegOpnd * regOpnd = IR::RegOpnd::New(nullptr, opHelperSpilledLifetime.reg, TyMachDouble, this->func);

            if (!sym)
            {
                sym = EnsureSpillSymForVFPReg(regOpnd->GetReg(), this->func);
            }

            IR::Instr * pushInstr = IR::Instr::New(Js::OpCode::FSTR, IR::SymOpnd::New(sym, TyMachDouble, this->func), regOpnd, this->func);
            opHelperBlock.opHelperLabel->InsertAfter(pushInstr);
            pushInstr->CopyNumber(opHelperBlock.opHelperLabel);
            if (opHelperSpilledLifetime.reload)
            {
                // ...and reload it immediately before the block ends.
                IR::Instr * popInstr = IR::Instr::New(Js::OpCode::FLDR, regOpnd, IR::SymOpnd::New(sym, TyMachDouble, this->func), this->func);
                opHelperBlock.opHelperEndInstr->InsertBefore(popInstr);
                popInstr->CopyNumber(opHelperBlock.opHelperEndInstr);
            }
        }
        else
        {
            Assert(helperSpillSlots);
            Assert(index < maxOpHelperSpilledLiveranges);

            if (!sym)
            {
                // Lazily allocate only as many slots as we really need.
                if (!helperSpillSlots[index])
                {
                    helperSpillSlots[index] = StackSym::New(TyMachReg, func);
                }
                sym = helperSpillSlots[index];
                index++;

                Assert(sym);
                // NOTE(review): StackAllocate runs even when this pool sym was
                // created by an earlier helper block — presumably idempotent
                // for an already-allocated sym; confirm against StackAllocate.
                func->StackAllocate(sym, MachRegInt);
            }

            IR::RegOpnd * regOpnd = IR::RegOpnd::New(sym, opHelperSpilledLifetime.reg, sym->GetType(), func);

            // Save on entry to the helper block; legalize the store in case
            // the operand form needs fixing up for this target.
            IR::Instr * saveInstr = IR::Instr::New(Js::OpCode::STR, IR::SymOpnd::New(sym, sym->GetType(), func), regOpnd, func);
            opHelperBlock.opHelperLabel->InsertAfter(saveInstr);
            saveInstr->CopyNumber(opHelperBlock.opHelperLabel);
            this->LegalizeDef(saveInstr);

            if (opHelperSpilledLifetime.reload)
            {
                // Restore before control leaves the helper block.
                IR::Instr * restoreInstr = IR::Instr::New(Js::OpCode::LDR, regOpnd, IR::SymOpnd::New(sym, sym->GetType(), func), func);
                opHelperBlock.opHelperEndInstr->InsertBefore(restoreInstr);
                restoreInstr->CopyNumber(opHelperBlock.opHelperEndInstr);
                this->LegalizeUse(restoreInstr, restoreInstr->GetSrc1());
            }
        }
    }
    NEXT_SLIST_ENTRY;
}
  138. void
  139. LinearScanMD::EndOfHelperBlock(uint32 helperSpilledLiveranges)
  140. {
  141. if (helperSpilledLiveranges > maxOpHelperSpilledLiveranges)
  142. {
  143. maxOpHelperSpilledLiveranges = helperSpilledLiveranges;
  144. }
  145. }
  146. void
  147. LinearScanMD::LegalizeDef(IR::Instr * instr)
  148. {
  149. if (instr->m_opcode == Js::OpCode::ArgOut_A_InlineBuiltIn)
  150. {
  151. // ArgOut_A_InlineBuiltIn pseudo instruction is kept through register allocator only to use for bailout as is,
  152. // and thus it must not be changed here by legalization.
  153. // It is removed in peeps, so only place to special case it is in register allocator.
  154. return;
  155. }
  156. // Legalize opcodes, etc., but do not expand symbol/indirs with large offsets
  157. // because we can't safely do this until all loads and stores are in place.
  158. LegalizeMD::LegalizeDst(instr);
  159. }
  160. void
  161. LinearScanMD::LegalizeUse(IR::Instr * instr, IR::Opnd * opnd)
  162. {
  163. if (instr->m_opcode == Js::OpCode::ArgOut_A_InlineBuiltIn)
  164. {
  165. // ArgOut_A_InlineBuiltIn pseudo instruction is kept through register allocator only to use for bailout as is,
  166. // and thus it must not be changed here by legalization.
  167. // It is removed in peeps, so only place to special case it is in register allocator.
  168. return;
  169. }
  170. // Legalize opcodes, etc., but do not expand symbol/indirs with large offsets
  171. // because we can't safely do this until all loads and stores are in place.
  172. if (opnd == instr->GetSrc1())
  173. {
  174. LegalizeMD::LegalizeSrc(instr, opnd, 1);
  175. }
  176. else
  177. {
  178. LegalizeMD::LegalizeSrc(instr, opnd, 2);
  179. }
  180. }
// Emit the machine-dependent bailout sequence immediately before 'instr'
// (the bailout helper call):
//  - saves r0/r1 (when live here) and lr into the thread's register save
//    space, addressing it through SCRATCH_REG,
//  - moves the branch condition, if any, into r1,
//  - loads the BailOutRecord address into r0 (indirectly through the native
//    code data block when OOP-JITting),
//  - records uses on the saved lifetimes so spill loads land before the
//    saves, and
//  - redirects the helper call through lr.
void
LinearScanMD::GenerateBailOut(
    IR::Instr * instr,
    __in_ecount(registerSaveSymsCount) StackSym ** registerSaveSyms,
    uint registerSaveSymsCount)
{
    Func *const func = instr->m_func;
    BailOutInfo *const bailOutInfo = instr->GetBailOutInfo();
    // Remember the instruction preceding the sequence we are about to emit;
    // after emission, firstInstr->m_next is the first emitted instruction.
    IR::Instr *firstInstr = instr->m_prev;
    // Base of the per-thread register save area; indexed by (reg - 1) below.
    Js::Var *const registerSaveSpace = (Js::Var*)func->GetThreadContextInfo()->GetBailOutRegisterSaveSpaceAddr();

    const auto LoadRegSaveSpaceIntoScratch = [&](const RegNum reg)
    {
        // Load the register save space address for the specified register into the scratch register:
        //     ldimm SCRATCH_REG, regSaveSpace
        LinearScan::InsertMove(
            IR::RegOpnd::New(nullptr, SCRATCH_REG, TyMachPtr, func),
            IR::AddrOpnd::New(&registerSaveSpace[reg - 1], IR::AddrOpndKindDynamicMisc, func),
            instr);
    };

    const auto SaveReg = [&](const RegNum reg)
    {
        Assert(registerSaveSyms[reg - 1]);

        // Store the register through the scratch pointer:
        //     LoadRegSaveSpaceIntoScratch(reg)
        //     mov [SCRATCH_REG], reg
        LoadRegSaveSpaceIntoScratch(reg);
        const IRType regType = RegTypes[reg];
        LinearScan::InsertMove(
            IR::IndirOpnd::New(
                IR::RegOpnd::New(nullptr, SCRATCH_REG, TyMachPtr, func),
                0,
                regType,
                func),
            IR::RegOpnd::New(registerSaveSyms[reg - 1], reg, regType, func),
            instr);
    };

    // Save registers used for parameters, and lr, if necessary, into the register save space
    if(bailOutInfo->branchConditionOpnd && registerSaveSyms[RegR1 - 1] && registerSaveSyms[RegR0 - 1])
    {
        // Save r0 and r1 with one store pair:
        //     LoadRegSaveSpaceIntoScratch(RegR0)
        //     STP r0, r1, [SCRATCH_REG]
        // (relies on r0's and r1's save slots being adjacent)
        LoadRegSaveSpaceIntoScratch(RegR0);
        IR::Instr *instrSTP = IR::Instr::New(Js::OpCode::STP,
                                             IR::IndirOpnd::New(IR::RegOpnd::New(nullptr, SCRATCH_REG, TyMachPtr, func), 0, TyMachReg, func),
                                             IR::RegOpnd::New(registerSaveSyms[RegR0 - 1], RegR0, RegTypes[RegR0], func),
                                             IR::RegOpnd::New(registerSaveSyms[RegR1 - 1], RegR1, RegTypes[RegR1], func),
                                             func);
        instr->InsertBefore(instrSTP);
        instrSTP->CopyNumber(instr);
    }
    else if(bailOutInfo->branchConditionOpnd && registerSaveSyms[RegR1 - 1])
    {
        SaveReg(RegR1);
    }
    else if(registerSaveSyms[RegR0 - 1])
    {
        SaveReg(RegR0);
    }

    if(registerSaveSyms[RegLR - 1])
    {
        SaveReg(RegLR);
    }

    if(bailOutInfo->branchConditionOpnd)
    {
        // Pass in the branch condition
        //     mov r1, condition
        IR::Instr *const newInstr =
            LinearScan::InsertMove(
                IR::RegOpnd::New(nullptr, RegR1, bailOutInfo->branchConditionOpnd->GetType(), func),
                bailOutInfo->branchConditionOpnd,
                instr);
        linearScan->SetSrcRegs(newInstr);
    }

    if (func->IsOOPJIT())
    {
        // OOP JIT: the record lives in the native data block, so load its
        // address indirectly and offset to the record:
        //     ldimm r0, dataAddr
        intptr_t nativeDataAddr = func->GetWorkItem()->GetWorkItemData()->nativeDataAddr;
        IR::RegOpnd * r0 = IR::RegOpnd::New(nullptr, RegR0, TyMachPtr, func);
        LinearScan::InsertMove(r0, IR::AddrOpnd::New(nativeDataAddr, IR::AddrOpndKindDynamicNativeCodeDataRef, func), instr);

        //     mov r0, [r0]
        LinearScan::InsertMove(r0, IR::IndirOpnd::New(r0, 0, TyMachPtr, func), instr);

        //     lea r0, [r0 + bailoutRecord_offset]
        unsigned int bailoutRecordOffset = NativeCodeData::GetDataTotalOffset(bailOutInfo->bailOutRecord);
        LinearScan::InsertLea(
            r0,
            IR::IndirOpnd::New(r0, bailoutRecordOffset, TyUint32,
#if DBG
                               NativeCodeData::GetDataDescription(bailOutInfo->bailOutRecord, func->m_alloc),
#endif
                               this->func), instr);
    }
    else
    {
        // Pass in the bailout record
        //     ldimm r0, bailOutRecord
        LinearScan::InsertMove(
            IR::RegOpnd::New(nullptr, RegR0, TyMachPtr, func),
            IR::AddrOpnd::New(bailOutInfo->bailOutRecord, IR::AddrOpndKindDynamicBailOutRecord, func, true),
            instr);
    }

    // Step to the first instruction emitted above.
    firstInstr = firstInstr->m_next;
    for(uint i = 0; i < registerSaveSymsCount; i++)
    {
        StackSym *const stackSym = registerSaveSyms[i];
        if(!stackSym)
        {
            continue;
        }

        // Record the use on the lifetime in case it spilled afterwards. Spill loads will be inserted before 'firstInstr', that
        // is, before the register saves are done.
        this->linearScan->RecordUse(stackSym->scratch.linearScan.lifetime, firstInstr, nullptr, true);
    }

    // Load the bailout target into lr
    //     ldimm lr, BailOut
    //     blx lr
    Assert(instr->GetSrc1()->IsHelperCallOpnd());
    LinearScan::InsertMove(IR::RegOpnd::New(nullptr, RegLR, TyMachPtr, func), instr->GetSrc1(), instr);
    instr->ReplaceSrc1(IR::RegOpnd::New(nullptr, RegLR, TyMachPtr, func));
}
  300. uint LinearScanMD::GetRegisterSaveIndex(RegNum reg)
  301. {
  302. return reg;
  303. }
  304. // static
  305. RegNum LinearScanMD::GetRegisterFromSaveIndex(uint offset)
  306. {
  307. return (RegNum)offset;
  308. }
  309. RegNum LinearScanMD::GetParamReg(IR::SymOpnd *symOpnd, Func *func)
  310. {
  311. /* TODO - Add ARM32 support according to register calling convention */
  312. return RegNOREG;
  313. }