LinearScanMD.cpp 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "Backend.h"
  6. #include "SccLiveness.h"
  7. extern const IRType RegTypes[RegNumCount];
// Machine-dependent (amd64) half of the linear-scan register allocator.
// The constructor caches which registers are byte-addressable (needed to
// satisfy 1-byte operand constraints) and clears the lazily-populated
// per-XMM-register spill-symbol tables (one table per spill width).
LinearScanMD::LinearScanMD(Func *func)
    : helperSpillSlots(nullptr),
      maxOpHelperSpilledLiveranges(0),
      func(func)
{
    // Collect every register flagged RA_BYTEABLE into a bit-vector so the
    // size-constraint queries below reduce to a single bit test.
    this->byteableRegsBv.ClearAll();
    FOREACH_REG(reg)
    {
        if (LinearScan::GetRegAttribs(reg) & RA_BYTEABLE)
        {
            this->byteableRegsBv.Set(reg);
        }
    } NEXT_REG;

    // XMM spill symbols are created on demand (see EnsureSpillSymForXmmReg);
    // start with all table entries empty.
    memset(this->xmmSymTable128, 0, sizeof(this->xmmSymTable128));
    memset(this->xmmSymTable64, 0, sizeof(this->xmmSymTable64));
    memset(this->xmmSymTable32, 0, sizeof(this->xmmSymTable32));
}
  25. BitVector
  26. LinearScanMD::FilterRegIntSizeConstraints(BitVector regsBv, BitVector sizeUsageBv) const
  27. {
  28. // Requires byte-able reg?
  29. if (sizeUsageBv.Test(1))
  30. {
  31. regsBv.And(this->byteableRegsBv);
  32. }
  33. return regsBv;
  34. }
  35. bool
  36. LinearScanMD::FitRegIntSizeConstraints(RegNum reg, BitVector sizeUsageBv) const
  37. {
  38. // Requires byte-able reg?
  39. return !sizeUsageBv.Test(1) || this->byteableRegsBv.Test(reg);
  40. }
  41. bool
  42. LinearScanMD::FitRegIntSizeConstraints(RegNum reg, IRType type) const
  43. {
  44. // Requires byte-able reg?
  45. return TySize[type] != 1 || this->byteableRegsBv.Test(reg);
  46. }
  47. StackSym *
  48. LinearScanMD::EnsureSpillSymForXmmReg(RegNum reg, Func *func, IRType type)
  49. {
  50. Assert(REGNUM_ISXMMXREG(reg));
  51. __analysis_assume(reg - FIRST_XMM_REG < XMM_REGCOUNT);
  52. StackSym *sym;
  53. if (type == TyFloat32)
  54. {
  55. sym = this->xmmSymTable32[reg - FIRST_XMM_REG];
  56. }
  57. else if (type == TyFloat64)
  58. {
  59. sym = this->xmmSymTable64[reg - FIRST_XMM_REG];
  60. }
  61. else
  62. {
  63. Assert(IRType_IsSimd128(type));
  64. sym = this->xmmSymTable128[reg - FIRST_XMM_REG];
  65. }
  66. if (sym == nullptr)
  67. {
  68. sym = StackSym::New(type, func);
  69. func->StackAllocate(sym, TySize[type]);
  70. __analysis_assume(reg - FIRST_XMM_REG < XMM_REGCOUNT);
  71. if (type == TyFloat32)
  72. {
  73. this->xmmSymTable32[reg - FIRST_XMM_REG] = sym;
  74. }
  75. else if (type == TyFloat64)
  76. {
  77. this->xmmSymTable64[reg - FIRST_XMM_REG] = sym;
  78. }
  79. else
  80. {
  81. Assert(IRType_IsSimd128(type));
  82. this->xmmSymTable128[reg - FIRST_XMM_REG] = sym;
  83. }
  84. }
  85. return sym;
  86. }
// Legalizes a constant source operand on amd64. Specifically targets
// "MOV [mem], 0" with a 32/64-bit operand: MOV-to-memory has no imm8 form
// at those sizes, so if a free int32 register exists the zero is hoisted
// into it (materialized as "xor reg, reg") and the register is stored
// instead of the immediate.
void
LinearScanMD::LegalizeConstantUse(IR::Instr * instr, IR::Opnd * opnd)
{
    Assert(opnd->IsAddrOpnd() || opnd->IsIntConstOpnd());
    intptr_t value = opnd->IsAddrOpnd() ? (intptr_t)opnd->AsAddrOpnd()->m_address : opnd->AsIntConstOpnd()->GetValue();
    if (value == 0
        && instr->m_opcode == Js::OpCode::MOV
        && !instr->GetDst()->IsRegOpnd()
        && TySize[opnd->GetType()] >= 4)
    {
        Assert(this->linearScan->instrUseRegs.IsEmpty());

        // MOV doesn't have an imm8 encoding for 32-bit/64-bit assignment, so if we have a register available,
        // we should hoist it and generate xor reg, reg and MOV dst, reg
        // Candidate registers: int32 registers that are not currently live,
        // not reserved for call setup, and not temp registers.
        BitVector regsBv;
        regsBv.Copy(this->linearScan->activeRegs);
        regsBv.Or(this->linearScan->callSetupRegs);
        regsBv.ComplimentAll();
        regsBv.And(this->linearScan->int32Regs);
        regsBv.Minus(this->linearScan->tempRegs); // Avoid tempRegs
        BVIndex regIndex = regsBv.GetNextBit();
        if (regIndex != BVInvalidIndex)
        {
            // Hoist the constant into the chosen register and mark that
            // register as used by this instruction.
            instr->HoistSrc1(Js::OpCode::MOV, (RegNum)regIndex);
            this->linearScan->instrUseRegs.Set(regIndex);
            this->func->m_regsUsed.Set(regIndex);

            // If we are in a loop, we need to mark the register being used by the loop so that
            // reload to that register will not be hoisted out of the loop
            this->linearScan->RecordLoopUse(nullptr, (RegNum)regIndex);
        }
    }
}
// Inserts spill/restore code around every op-helper (slow-path) block.
// First sizes a shared, zero-initialized array of integer spill slots to
// the worst case recorded by EndOfHelperBlock, then processes each helper
// block in turn.
void
LinearScanMD::InsertOpHelperSpillAndRestores(SList<OpHelperBlock> *opHelperBlockList)
{
    if (maxOpHelperSpilledLiveranges)
    {
        Assert(!helperSpillSlots);
        // The slot array is shared by all helper blocks; entries are filled
        // lazily in InsertOpHelperSpillsAndRestores.
        helperSpillSlots = AnewArrayZ(linearScan->GetTempAlloc(), StackSym *, maxOpHelperSpilledLiveranges);
    }

    FOREACH_SLIST_ENTRY(OpHelperBlock, opHelperBlock, opHelperBlockList)
    {
        InsertOpHelperSpillsAndRestores(opHelperBlock);
    }
    NEXT_SLIST_ENTRY;
}
// Inserts the spills (right after the helper label) and reloads (right
// before the helper block's end instruction) for every lifetime spilled
// around a single op-helper block. XMM lifetimes spill to per-register
// cached symbols; integer lifetimes share the helperSpillSlots array.
void
LinearScanMD::InsertOpHelperSpillsAndRestores(const OpHelperBlock& opHelperBlock)
{
    uint32 index = 0;

    FOREACH_SLIST_ENTRY(OpHelperSpilledLifetime, opHelperSpilledLifetime, &opHelperBlock.spilledLifetime)
    {
        // Use the original sym as spill slot if this is an inlinee arg
        StackSym* sym = nullptr;
        if (opHelperSpilledLifetime.spillAsArg)
        {
            sym = opHelperSpilledLifetime.lifetime->sym;
            AnalysisAssert(sym);
            Assert(sym->IsAllocated());
        }

        if (RegTypes[opHelperSpilledLifetime.reg] == TyFloat64)
        {
            // XMM register: spill at the lifetime's exact width
            // (float32/float64/simd128) using a cached per-register symbol.
            IRType type = opHelperSpilledLifetime.lifetime->sym->GetType();
            IR::RegOpnd *regOpnd = IR::RegOpnd::New(nullptr, opHelperSpilledLifetime.reg, type, this->func);

            if (!sym)
            {
                sym = EnsureSpillSymForXmmReg(regOpnd->GetReg(), this->func, type);
            }

            // Store right after the helper label; number the new instruction
            // like the label so debug dumps stay consistent.
            IR::Instr *pushInstr = IR::Instr::New(LowererMDArch::GetAssignOp(type), IR::SymOpnd::New(sym, type, this->func), regOpnd, this->func);
            opHelperBlock.opHelperLabel->InsertAfter(pushInstr);
            pushInstr->CopyNumber(opHelperBlock.opHelperLabel);
            if (opHelperSpilledLifetime.reload)
            {
                // Reload just before the helper block's end instruction.
                IR::Instr *popInstr = IR::Instr::New(LowererMDArch::GetAssignOp(type), regOpnd, IR::SymOpnd::New(sym, type, this->func), this->func);
                opHelperBlock.opHelperEndInstr->InsertBefore(popInstr);
                popInstr->CopyNumber(opHelperBlock.opHelperEndInstr);
            }
        }
        else
        {
            // Integer register: take the next slot from the shared array sized
            // by maxOpHelperSpilledLiveranges.
            Assert(helperSpillSlots);
            Assert(index < maxOpHelperSpilledLiveranges);

            if (!sym)
            {
                // Lazily allocate only as many slots as we really need.
                if (!helperSpillSlots[index])
                {
                    helperSpillSlots[index] = StackSym::New(TyMachReg, func);
                }

                sym = helperSpillSlots[index];
                index++;

                Assert(sym);
                func->StackAllocate(sym, MachRegInt);
            }

            IR::RegOpnd * regOpnd = IR::RegOpnd::New(nullptr, opHelperSpilledLifetime.reg, sym->GetType(), func);
            LowererMD::CreateAssign(IR::SymOpnd::New(sym, sym->GetType(), func), regOpnd, opHelperBlock.opHelperLabel->m_next);
            if (opHelperSpilledLifetime.reload)
            {
                LowererMD::CreateAssign(regOpnd, IR::SymOpnd::New(sym, sym->GetType(), func), opHelperBlock.opHelperEndInstr);
            }
        }
    }
    NEXT_SLIST_ENTRY;
}
  190. void
  191. LinearScanMD::EndOfHelperBlock(uint32 helperSpilledLiveranges)
  192. {
  193. if (helperSpilledLiveranges > maxOpHelperSpilledLiveranges)
  194. {
  195. maxOpHelperSpilledLiveranges = helperSpilledLiveranges;
  196. }
  197. }
// Generates the machine-dependent bailout sequence in front of 'instr'
// (the bailout helper call): saves RAX and the argument registers into the
// shadow (home) space, loads the branch condition and the bailout record
// into the argument registers, records register uses with the linear scan,
// and finally indirects the helper call through RAX.
void
LinearScanMD::GenerateBailOut(IR::Instr * instr, __in_ecount(registerSaveSymsCount) StackSym ** registerSaveSyms, uint registerSaveSymsCount)
{
    Func *const func = instr->m_func;
    BailOutInfo *const bailOutInfo = instr->GetBailOutInfo();
    IR::Instr *firstInstr = instr->m_prev;

    // Code analysis doesn't do inter-procesure analysis and cannot infer the value of registerSaveSymsCount,
    // but the passed in registerSaveSymsCount is static value RegNumCount-1, so reg-1 in below loop is always a valid index.
    __analysis_assume(static_cast<int>(registerSaveSymsCount) == static_cast<int>(RegNumCount-1));
    Assert(static_cast<int>(registerSaveSymsCount) == static_cast<int>(RegNumCount-1));

    // Save registers used for parameters, and rax, if necessary, into the shadow space allocated for register parameters:
    //     mov [rsp + 16], RegArg1     (if branchConditionOpnd)
    //     mov [rsp + 8], RegArg0
    //     mov [rsp], rax
    const RegNum regs[3] = { RegRAX, RegArg0, RegArg1 };
    for (int i = (bailOutInfo->branchConditionOpnd ? 2 : 1); i >= 0; i--)
    {
        RegNum reg = regs[i];
        // registerSaveSyms is indexed by (reg - 1); a null entry means the
        // register holds nothing that needs saving.
        StackSym *const stackSym = registerSaveSyms[reg - 1];
        if(!stackSym)
        {
            continue;
        }

        const IRType regType = RegTypes[reg];
        Lowerer::InsertMove(
            IR::SymOpnd::New(func->m_symTable->GetArgSlotSym(static_cast<Js::ArgSlot>(i + 1)), regType, func),
            IR::RegOpnd::New(stackSym, reg, regType, func),
            instr);
    }

    if(bailOutInfo->branchConditionOpnd)
    {
        // Pass in the branch condition
        //     mov RegArg1, condition
        IR::Instr *const newInstr =
            Lowerer::InsertMove(
                IR::RegOpnd::New(nullptr, RegArg1, bailOutInfo->branchConditionOpnd->GetType(), func),
                bailOutInfo->branchConditionOpnd,
                instr);
        linearScan->SetSrcRegs(newInstr);
    }

    if (!func->IsOOPJIT())
    {
        // In-proc JIT: the bailout record address is a compile-time constant.
        // Pass in the bailout record
        //     mov RegArg0, bailOutRecord
        Lowerer::InsertMove(
            IR::RegOpnd::New(nullptr, RegArg0, TyMachPtr, func),
            IR::AddrOpnd::New(bailOutInfo->bailOutRecord, IR::AddrOpndKindDynamicBailOutRecord, func, true),
            instr);
    }
    else
    {
        // OOP JIT: the record lives in the native-code data blob, so load the
        // blob pointer, dereference it, then add the record's offset.
        //     move RegArg0, dataAddr
        Lowerer::InsertMove(
            IR::RegOpnd::New(nullptr, RegArg0, TyMachPtr, func),
            IR::AddrOpnd::New(func->GetWorkItem()->GetWorkItemData()->nativeDataAddr, IR::AddrOpndKindDynamicNativeCodeDataRef, func),
            instr);

        //     mov RegArg0, [RegArg0]
        Lowerer::InsertMove(
            IR::RegOpnd::New(nullptr, RegArg0, TyMachPtr, func),
            IR::IndirOpnd::New(IR::RegOpnd::New(nullptr, RegArg0, TyVar, this->func), 0, TyMachPtr, func),
            instr);

        //     lea RegArg0, [RegArg0 + bailoutRecord_offset]
        int bailoutRecordOffset = NativeCodeData::GetDataTotalOffset(bailOutInfo->bailOutRecord);
        Lowerer::InsertLea(IR::RegOpnd::New(nullptr, RegArg0, TyVar, this->func),
            IR::IndirOpnd::New(IR::RegOpnd::New(nullptr, RegArg0, TyVar, this->func), bailoutRecordOffset, TyMachPtr,
#if DBG
            NativeCodeData::GetDataDescription(bailOutInfo->bailOutRecord, func->m_alloc),
#endif
            this->func), instr);
    }

    // 'firstInstr' now points at the first instruction generated above.
    firstInstr = firstInstr->m_next;
    for(uint i = 0; i < registerSaveSymsCount; i++)
    {
        StackSym *const stackSym = registerSaveSyms[i];
        if(!stackSym)
        {
            continue;
        }

        // Record the use on the lifetime in case it spilled afterwards. Spill loads will be inserted before 'firstInstr', that
        // is, before the register saves are done.
        this->linearScan->RecordUse(stackSym->scratch.linearScan.lifetime, firstInstr, nullptr, true);
    }

    // Load the bailout target into rax
    //     mov rax, BailOut
    //     call rax
    Assert(instr->GetSrc1()->IsHelperCallOpnd());
    Lowerer::InsertMove(IR::RegOpnd::New(nullptr, RegRAX, TyMachPtr, func), instr->GetSrc1(), instr);
    instr->ReplaceSrc1(IR::RegOpnd::New(nullptr, RegRAX, TyMachPtr, func));
}
// Gets the InterpreterStackFrame pointer into RAX.
// Restores the live stack locations followed by the live registers from
// the interpreter's register slots.
// RecordDefs each live register that is restored.
//
// Generates the following code:
//
// MOV rax, param0
// MOV rax, [rax + JavascriptGenerator::GetFrameOffset()]
//
// for each live stack location, sym
//
// MOV rcx, [rax + regslot offset]
// MOV sym(stack location), rcx
//
// for each live register, sym (rax is restored last if it is live)
//
// MOV sym(register), [rax + regslot offset]
//
IR::Instr *
LinearScanMD::GenerateBailInForGeneratorYield(IR::Instr * resumeLabelInstr, BailOutInfo * bailOutInfo)
{
    IR::Instr * instrAfter = resumeLabelInstr->m_next;

    IR::RegOpnd * raxRegOpnd = IR::RegOpnd::New(nullptr, RegRAX, TyMachPtr, this->func);
    IR::RegOpnd * rcxRegOpnd = IR::RegOpnd::New(nullptr, RegRCX, TyVar, this->func);

    // rax = first formal param (presumably the generator object — its frame
    // is read below via JavascriptGenerator::GetFrameOffset()).
    StackSym * sym = StackSym::NewParamSlotSym(1, this->func);
    this->func->SetArgOffset(sym, LowererMD::GetFormalParamOffset() * MachPtr);
    IR::SymOpnd * symOpnd = IR::SymOpnd::New(sym, TyMachPtr, this->func);
    LinearScan::InsertMove(raxRegOpnd, symOpnd, instrAfter);

    // rax = interpreter frame pointer loaded from the generator object.
    IR::IndirOpnd * indirOpnd = IR::IndirOpnd::New(raxRegOpnd, Js::JavascriptGenerator::GetFrameOffset(), TyMachPtr, this->func);
    LinearScan::InsertMove(raxRegOpnd, indirOpnd, instrAfter);

    // rax points to the frame, restore stack syms and registers except rax, restore rax last
    IR::Instr * raxRestoreInstr = nullptr;
    IR::Instr * instrInsertStackSym = instrAfter;
    IR::Instr * instrInsertRegSym = instrAfter;

    // No captured constants/copy-props and no specialized (int32/float64)
    // syms are expected to be live at a yield point.
    Assert(bailOutInfo->capturedValues.constantValues.Empty());
    Assert(bailOutInfo->capturedValues.copyPropSyms.Empty());
    Assert(bailOutInfo->liveLosslessInt32Syms->IsEmpty());
    Assert(bailOutInfo->liveFloat64Syms->IsEmpty());

    // Restores one sym from its interpreter register slot. Mutates the
    // captured insertion points so that all stack restores stay before all
    // register restores, and rax's own restore stays last.
    auto restoreSymFn = [this, &raxRegOpnd, &rcxRegOpnd, &raxRestoreInstr, &instrInsertStackSym, &instrInsertRegSym](Js::RegSlot regSlot, StackSym* stackSym)
    {
        Assert(stackSym->IsVar());

        // Source: this sym's slot within the interpreter frame's locals area.
        int32 offset = regSlot * sizeof(Js::Var) + Js::InterpreterStackFrame::GetOffsetOfLocals();
        IR::Opnd * srcOpnd = IR::IndirOpnd::New(raxRegOpnd, offset, stackSym->GetType(), this->func);
        Lifetime * lifetime = stackSym->scratch.linearScan.lifetime;

        if (lifetime->isSpilled)
        {
            // stack restores require an extra register since we can't move an indir directly to an indir on amd64
            IR::SymOpnd * dstOpnd = IR::SymOpnd::New(stackSym, stackSym->GetType(), this->func);
            LinearScan::InsertMove(rcxRegOpnd, srcOpnd, instrInsertStackSym);
            LinearScan::InsertMove(dstOpnd, rcxRegOpnd, instrInsertStackSym);
        }
        else
        {
            // register restores must come after stack restores so that we have RAX and RCX free to
            // use for stack restores and further RAX must be restored last since it holds the
            // pointer to the InterpreterStackFrame from which we are restoring values.
            // We must also track these restores using RecordDef in case the symbols are spilled.
            IR::RegOpnd * dstRegOpnd = IR::RegOpnd::New(stackSym, stackSym->GetType(), this->func);
            dstRegOpnd->SetReg(lifetime->reg);
            IR::Instr * instr = LinearScan::InsertMove(dstRegOpnd, srcOpnd, instrInsertRegSym);

            if (instrInsertRegSym == instrInsertStackSym)
            {
                // this is the first register sym, make sure we don't insert stack stores
                // after this instruction so we can ensure rax and rcx remain free to use
                // for restoring spilled stack syms.
                instrInsertStackSym = instr;
            }

            if (lifetime->reg == RegRAX)
            {
                // ensure rax is restored last
                Assert(instrInsertRegSym != instrInsertStackSym);
                instrInsertRegSym = instr;

                if (raxRestoreInstr != nullptr)
                {
                    AssertMsg(false, "this is unexpected until copy prop is enabled");
                    // rax was mapped to multiple bytecode registers. Obviously only the first
                    // restore we do will work so change all following stores to `mov rax, rax`.
                    // We still need to keep them around for RecordDef in case the corresponding
                    // dst sym is spilled later on.
                    raxRestoreInstr->FreeSrc1();
                    raxRestoreInstr->SetSrc1(raxRegOpnd);
                }

                raxRestoreInstr = instr;
            }

            this->linearScan->RecordDef(lifetime, instr, 0);
        }
    };

    // Restore every bytecode-upward-exposed sym...
    FOREACH_BITSET_IN_SPARSEBV(symId, bailOutInfo->byteCodeUpwardExposedUsed)
    {
        StackSym* stackSym = this->func->m_symTable->FindStackSym(symId);
        restoreSymFn(stackSym->GetByteCodeRegSlot(), stackSym);
    }
    NEXT_BITSET_IN_SPARSEBV;

    // ...plus any tracked arguments-object syms...
    if (bailOutInfo->capturedValues.argObjSyms)
    {
        FOREACH_BITSET_IN_SPARSEBV(symId, bailOutInfo->capturedValues.argObjSyms)
        {
            StackSym* stackSym = this->func->m_symTable->FindStackSym(symId);
            restoreSymFn(stackSym->GetByteCodeRegSlot(), stackSym);
        }
        NEXT_BITSET_IN_SPARSEBV;
    }

    // ...and argout syms, whose interpreter slots follow the locals.
    Js::RegSlot localsCount = this->func->GetJITFunctionBody()->GetLocalsCount();
    bailOutInfo->IterateArgOutSyms([localsCount, &restoreSymFn](uint, uint argOutSlotOffset, StackSym* sym) {
        restoreSymFn(localsCount + argOutSlotOffset, sym);
    });

    return instrAfter;
}
  396. uint LinearScanMD::GetRegisterSaveIndex(RegNum reg)
  397. {
  398. if (RegTypes[reg] == TyFloat64)
  399. {
  400. // make room for maximum XMM reg size
  401. Assert(reg >= RegXMM0);
  402. return (reg - RegXMM0) * (sizeof(SIMDValue) / sizeof(Js::Var)) + RegXMM0;
  403. }
  404. else
  405. {
  406. return reg;
  407. }
  408. }
  409. RegNum LinearScanMD::GetRegisterFromSaveIndex(uint offset)
  410. {
  411. return (RegNum)(offset >= RegXMM0 ? (offset - RegXMM0) / (sizeof(SIMDValue) / sizeof(Js::Var)) + RegXMM0 : offset);
  412. }
  413. RegNum LinearScanMD::GetParamReg(IR::SymOpnd *symOpnd, Func *func)
  414. {
  415. RegNum reg = RegNOREG;
  416. StackSym *paramSym = symOpnd->m_sym->AsStackSym();
  417. if (func->GetJITFunctionBody()->IsAsmJsMode() && !func->IsLoopBody())
  418. {
  419. // Asm.js function only have 1 implicit param as they have no CallInfo, and they have float/SIMD params.
  420. // Asm.js loop bodies however are called like normal JS functions.
  421. if (IRType_IsFloat(symOpnd->GetType()) || IRType_IsSimd(symOpnd->GetType()))
  422. {
  423. switch (paramSym->GetParamSlotNum())
  424. {
  425. case 1:
  426. reg = RegXMM1;
  427. break;
  428. case 2:
  429. reg = RegXMM2;
  430. break;
  431. case 3:
  432. reg = RegXMM3;
  433. break;
  434. }
  435. }
  436. else
  437. {
  438. if (paramSym->IsImplicitParamSym())
  439. {
  440. switch (paramSym->GetParamSlotNum())
  441. {
  442. case 1:
  443. reg = RegArg0;
  444. break;
  445. default:
  446. Assert(UNREACHED);
  447. }
  448. }
  449. else
  450. {
  451. switch (paramSym->GetParamSlotNum())
  452. {
  453. case 1:
  454. reg = RegArg1;
  455. break;
  456. case 2:
  457. reg = RegArg2;
  458. break;
  459. case 3:
  460. reg = RegArg3;
  461. break;
  462. }
  463. }
  464. }
  465. }
  466. else // Non-Asm.js
  467. {
  468. Assert(symOpnd->GetType() == TyVar || IRType_IsNativeInt(symOpnd->GetType()));
  469. if (paramSym->IsImplicitParamSym())
  470. {
  471. switch (paramSym->GetParamSlotNum())
  472. {
  473. case 1:
  474. reg = RegArg0;
  475. break;
  476. case 2:
  477. reg = RegArg1;
  478. break;
  479. }
  480. }
  481. else
  482. {
  483. switch (paramSym->GetParamSlotNum())
  484. {
  485. case 1:
  486. reg = RegArg2;
  487. break;
  488. case 2:
  489. reg = RegArg3;
  490. break;
  491. }
  492. }
  493. }
  494. return reg;
  495. }