|
|
@@ -186,7 +186,7 @@ LowererMDArch::LoadHeapArgsCached(IR::Instr *instrArgs)
|
|
|
{
|
|
|
instrArgs->m_opcode = Js::OpCode::MOV;
|
|
|
instrArgs->ReplaceSrc1(IR::AddrOpnd::NewNull(func));
|
|
|
-
|
|
|
+
|
|
|
if (PHASE_TRACE1(Js::StackArgFormalsOptPhase) && func->GetJITFunctionBody()->GetInParamsCount() > 1)
|
|
|
{
|
|
|
Output::Print(_u("StackArgFormals : %s (%d) :Removing Heap Arguments object creation in Lowerer. \n"), instrArgs->m_func->GetJITFunctionBody()->GetDisplayName(), instrArgs->m_func->GetFunctionNumber());
|
|
|
@@ -203,7 +203,7 @@ LowererMDArch::LoadHeapArgsCached(IR::Instr *instrArgs)
|
|
|
// s2 = actual argument count
|
|
|
// s1 = current function
|
|
|
// dst = JavascriptOperators::LoadArguments(s1, s2, s3, s4, s5, s6, s7)
|
|
|
-
|
|
|
+
|
|
|
// s7 = formals are let decls
|
|
|
IR::Opnd * formalsAreLetDecls = IR::IntConstOpnd::New((IntConstType)(instrArgs->m_opcode == Js::OpCode::LdLetHeapArgsCached), TyUint8, func);
|
|
|
this->LoadHelperArgument(instrArgs, formalsAreLetDecls);
|
|
|
@@ -317,7 +317,7 @@ LowererMDArch::LoadHeapArguments(IR::Instr *instrArgs, bool force /* = false */,
|
|
|
// s2 = actual argument count
|
|
|
// s1 = current function
|
|
|
// dst = JavascriptOperators::LoadHeapArguments(s1, s2, s3, s4, s5, s6, s7)
|
|
|
-
|
|
|
+
|
|
|
// s7 = formals are let decls
|
|
|
this->LoadHelperArgument(instrArgs, IR::IntConstOpnd::New(instrArgs->m_opcode == Js::OpCode::LdLetHeapArguments ? TRUE : FALSE, TyUint8, func));
|
|
|
|
|
|
@@ -612,7 +612,14 @@ LowererMDArch::LowerCallIDynamic(IR::Instr *callInstr, IR::Instr*saveThisArgOutI
|
|
|
|
|
|
IR::Opnd *funcObjOpnd = callInstr->UnlinkSrc1();
|
|
|
GeneratePreCall(callInstr, funcObjOpnd, insertBeforeInstrForCFG);
|
|
|
- LowerCall(callInstr, 0);
|
|
|
+
|
|
|
+ // Normally for dynamic calls we move 4 args to registers and push remaining
|
|
|
+ // args onto stack (Windows convention, and unchanged on xplat). We need to
|
|
|
+    // manually home 4 args. Inlinees lower differently and follow the platform ABI.
|
|
|
+ // So we need to manually home actualArgsCount + 2 args (function, callInfo).
|
|
|
+ const uint32 homeArgs = callInstr->m_func->IsInlinee() ?
|
|
|
+ callInstr->m_func->actualCount + 2 : 4;
|
|
|
+ LowerCall(callInstr, homeArgs);
|
|
|
|
|
|
return callInstr;
|
|
|
}
|
|
|
@@ -752,7 +759,7 @@ LowererMDArch::LowerCallI(IR::Instr * callInstr, ushort callFlags, bool isHelper
|
|
|
else if (insertBeforeInstrForCFG != nullptr)
|
|
|
{
|
|
|
RegNum dstReg = insertBeforeInstrForCFG->GetDst()->AsRegOpnd()->GetReg();
|
|
|
- AssertMsg(dstReg == RegR8 || dstReg == RegR9, "NewScObject should insert the first Argument in R8/R9 only based on Spread call or not.");
|
|
|
+ AssertMsg(dstReg == RegArg2 || dstReg == RegArg3, "NewScObject should insert the first Argument in RegArg2/RegArg3 only based on Spread call or not.");
|
|
|
insertBeforeInstrForCFGCheck = insertBeforeInstrForCFG;
|
|
|
}
|
|
|
|
|
|
@@ -810,6 +817,23 @@ LowererMDArch::LowerCallPut(IR::Instr *callInstr)
|
|
|
return nullptr;
|
|
|
}
|
|
|
|
|
|
+static inline IRType ExtendHelperArg(IRType type)
|
|
|
+{
|
|
|
+#ifdef __clang__
|
|
|
+ // clang expects caller to extend arg size to int
|
|
|
+ switch (type)
|
|
|
+ {
|
|
|
+ case TyInt8:
|
|
|
+ case TyInt16:
|
|
|
+ return TyInt32;
|
|
|
+ case TyUint8:
|
|
|
+ case TyUint16:
|
|
|
+ return TyUint32;
|
|
|
+ }
|
|
|
+#endif
|
|
|
+ return type;
|
|
|
+}
|
|
|
+
|
|
|
IR::Instr *
|
|
|
LowererMDArch::LowerCall(IR::Instr * callInstr, uint32 argCount)
|
|
|
{
|
|
|
@@ -857,22 +881,85 @@ LowererMDArch::LowerCall(IR::Instr * callInstr, uint32 argCount)
|
|
|
//
|
|
|
|
|
|
AssertMsg(this->helperCallArgsCount >= 0, "Fatal. helper call arguments ought to be positive");
|
|
|
- AssertMsg(this->helperCallArgsCount < 255, "Too many helper call arguments");
|
|
|
+ AssertMsg(this->helperCallArgsCount < MaxArgumentsToHelper && MaxArgumentsToHelper < 255, "Too many helper call arguments");
|
|
|
|
|
|
uint16 argsLeft = static_cast<uint16>(this->helperCallArgsCount);
|
|
|
|
|
|
+ // Sys V x64 ABI assigns int and xmm arg registers separately.
|
|
|
+ // e.g. args: int, double, int, double, int, double
|
|
|
+ // Windows: int0, xmm1, int2, xmm3, stack, stack
|
|
|
+ // Sys V: int0, xmm0, int1, xmm1, int2, xmm2
|
|
|
+#ifdef _WIN32
|
|
|
+#define _V_ARG_INDEX(index) index
|
|
|
+#else
|
|
|
+ uint16 _vindex[MaxArgumentsToHelper];
|
|
|
+ {
|
|
|
+ uint16 intIndex = 1, doubleIndex = 1, stackIndex = IntArgRegsCount + 1;
|
|
|
+ for (int i = 0; i < this->helperCallArgsCount; i++)
|
|
|
+ {
|
|
|
+ IR::Opnd * helperSrc = this->helperCallArgs[this->helperCallArgsCount - 1 - i];
|
|
|
+ IRType type = helperSrc->GetType();
|
|
|
+ if (IRType_IsFloat(type) || IRType_IsSimd128(type))
|
|
|
+ {
|
|
|
+ if (doubleIndex <= XmmArgRegsCount)
|
|
|
+ {
|
|
|
+ _vindex[i] = doubleIndex++;
|
|
|
+ }
|
|
|
+ else
|
|
|
+ {
|
|
|
+ _vindex[i] = stackIndex++;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ else
|
|
|
+ {
|
|
|
+ if (intIndex <= IntArgRegsCount)
|
|
|
+ {
|
|
|
+ _vindex[i] = intIndex++;
|
|
|
+ }
|
|
|
+ else
|
|
|
+ {
|
|
|
+ _vindex[i] = stackIndex++;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+#define _V_ARG_INDEX(index) _vindex[(index) - 1]
|
|
|
+#endif
|
|
|
+
|
|
|
while (argsLeft > 0)
|
|
|
{
|
|
|
IR::Opnd * helperSrc = this->helperCallArgs[this->helperCallArgsCount - argsLeft];
|
|
|
- StackSym * helperSym = m_func->m_symTable->GetArgSlotSym(static_cast<uint16>(argsLeft));
|
|
|
- helperSym->m_type = helperSrc->GetType();
|
|
|
+ uint16 index = _V_ARG_INDEX(argsLeft);
|
|
|
+ StackSym * helperSym = m_func->m_symTable->GetArgSlotSym(index);
|
|
|
+ helperSym->m_type = ExtendHelperArg(helperSrc->GetType());
|
|
|
Lowerer::InsertMove(
|
|
|
- this->GetArgSlotOpnd(argsLeft, helperSym),
|
|
|
+ this->GetArgSlotOpnd(index, helperSym, /*isHelper*/true),
|
|
|
helperSrc,
|
|
|
callInstr);
|
|
|
--argsLeft;
|
|
|
}
|
|
|
|
|
|
+#ifndef _WIN32
|
|
|
+ // Manually home args
|
|
|
+ if (argCount > 0)
|
|
|
+ {
|
|
|
+ static const RegNum s_argRegs[IntArgRegsCount] = {
|
|
|
+ #define REG_INT_ARG(Index, Name) Reg ## Name,
|
|
|
+ #include "RegList.h"
|
|
|
+ };
|
|
|
+
|
|
|
+ const int callArgCount = static_cast<int>(argCount) + this->helperCallArgsCount;
|
|
|
+ const int argRegs = min(callArgCount, static_cast<int>(IntArgRegsCount));
|
|
|
+ for (int i = argRegs - 1; i >= 0; i--)
|
|
|
+ {
|
|
|
+ StackSym * sym = this->m_func->m_symTable->GetArgSlotSym(static_cast<uint16>(i + 1));
|
|
|
+ Lowerer::InsertMove(
|
|
|
+ IR::SymOpnd::New(sym, TyMachReg, this->m_func),
|
|
|
+ IR::RegOpnd::New(nullptr, s_argRegs[i], TyMachReg, this->m_func),
|
|
|
+ callInstr);
|
|
|
+ }
|
|
|
+ }
|
|
|
+#endif
|
|
|
|
|
|
//
|
|
|
// load the address into a register because we cannot directly access 64 bit constants
|
|
|
@@ -909,7 +996,7 @@ LowererMDArch::LowerCall(IR::Instr * callInstr, uint32 argCount)
|
|
|
// the first 4 arguments go in registers and the rest are on stack.
|
|
|
//
|
|
|
IR::Opnd *
|
|
|
-LowererMDArch::GetArgSlotOpnd(uint16 index, StackSym * argSym)
|
|
|
+LowererMDArch::GetArgSlotOpnd(uint16 index, StackSym * argSym, bool isHelper /*= false*/)
|
|
|
{
|
|
|
Assert(index != 0);
|
|
|
|
|
|
@@ -934,51 +1021,38 @@ LowererMDArch::GetArgSlotOpnd(uint16 index, StackSym * argSym)
|
|
|
}
|
|
|
|
|
|
IRType type = argSym ? argSym->GetType() : TyMachReg;
|
|
|
- if (argPosition <= 4)
|
|
|
- {
|
|
|
- RegNum reg = RegNOREG;
|
|
|
+ const bool isFloatArg = IRType_IsFloat(type) || IRType_IsSimd128(type);
|
|
|
+ RegNum reg = RegNOREG;
|
|
|
|
|
|
- if (IRType_IsFloat(type) || IRType_IsSimd128(type))
|
|
|
+ if (!isFloatArg && argPosition <= IntArgRegsCount)
|
|
|
+ {
|
|
|
+ switch (argPosition)
|
|
|
{
|
|
|
- switch (argPosition)
|
|
|
- {
|
|
|
- case 4:
|
|
|
- reg = RegXMM3;
|
|
|
- break;
|
|
|
- case 3:
|
|
|
- reg = RegXMM2;
|
|
|
- break;
|
|
|
- case 2:
|
|
|
- reg = RegXMM1;
|
|
|
- break;
|
|
|
- case 1:
|
|
|
- reg = RegXMM0;
|
|
|
- break;
|
|
|
- default:
|
|
|
- Assume(UNREACHED);
|
|
|
- }
|
|
|
+#define REG_INT_ARG(Index, Name) \
|
|
|
+ case ((Index) + 1): \
|
|
|
+ reg = Reg ## Name; \
|
|
|
+ break;
|
|
|
+#include "RegList.h"
|
|
|
+ default:
|
|
|
+ Assume(UNREACHED);
|
|
|
}
|
|
|
- else
|
|
|
+ }
|
|
|
+ else if (isFloatArg && argPosition <= XmmArgRegsCount)
|
|
|
+ {
|
|
|
+ switch (argPosition)
|
|
|
{
|
|
|
- switch (argPosition)
|
|
|
- {
|
|
|
- case 4:
|
|
|
- reg = RegR9;
|
|
|
- break;
|
|
|
- case 3:
|
|
|
- reg = RegR8;
|
|
|
- break;
|
|
|
- case 2:
|
|
|
- reg = RegRDX;
|
|
|
- break;
|
|
|
- case 1:
|
|
|
- reg = RegRCX;
|
|
|
- break;
|
|
|
- default:
|
|
|
- Assume(UNREACHED);
|
|
|
- }
|
|
|
+#define REG_XMM_ARG(Index, Name) \
|
|
|
+ case ((Index) + 1): \
|
|
|
+ reg = Reg ## Name; \
|
|
|
+ break;
|
|
|
+#include "RegList.h"
|
|
|
+ default:
|
|
|
+ Assume(UNREACHED);
|
|
|
}
|
|
|
+ }
|
|
|
|
|
|
+ if (reg != RegNOREG)
|
|
|
+ {
|
|
|
IR::RegOpnd *regOpnd = IR::RegOpnd::New(argSym, reg, type, m_func);
|
|
|
regOpnd->m_isCallArg = true;
|
|
|
|
|
|
@@ -988,12 +1062,17 @@ LowererMDArch::GetArgSlotOpnd(uint16 index, StackSym * argSym)
|
|
|
{
|
|
|
if (argSym == nullptr)
|
|
|
{
|
|
|
- argSym = this->m_func->m_symTable->GetArgSlotSym(static_cast<uint16>(index));
|
|
|
+ argSym = this->m_func->m_symTable->GetArgSlotSym(index);
|
|
|
}
|
|
|
|
|
|
- //
|
|
|
- // More than 4 arguments. Assign them to appropriate slots
|
|
|
- //
|
|
|
+#ifndef _WIN32
|
|
|
+ // helper does not home args, adjust stack offset
|
|
|
+ if (isHelper)
|
|
|
+ {
|
|
|
+ const uint16 argIndex = index - IntArgRegsCount;
|
|
|
+ argSym->m_offset = (argIndex - 1) * MachPtr;
|
|
|
+ }
|
|
|
+#endif
|
|
|
|
|
|
argSlotOpnd = IR::SymOpnd::New(argSym, type, this->m_func);
|
|
|
}
|
|
|
@@ -1403,8 +1482,8 @@ LowererMDArch::LowerEntryInstr(IR::EntryInstr * entryInstr)
|
|
|
|
|
|
if (Lowerer::IsArgSaveRequired(this->m_func))
|
|
|
{
|
|
|
- if (argSlotsForFunctionsCalled < 4)
|
|
|
- argSlotsForFunctionsCalled = 4;
|
|
|
+ if (argSlotsForFunctionsCalled < IntArgRegsCount)
|
|
|
+ argSlotsForFunctionsCalled = IntArgRegsCount;
|
|
|
}
|
|
|
else
|
|
|
{
|
|
|
@@ -1489,10 +1568,11 @@ LowererMDArch::LowerEntryInstr(IR::EntryInstr * entryInstr)
|
|
|
IR::Instr *movRax0 = nullptr;
|
|
|
IR::Opnd *raxOpnd = nullptr;
|
|
|
|
|
|
- if (this->m_func->HasArgumentSlot() && (this->m_func->IsStackArgsEnabled() ||
|
|
|
- this->m_func->IsJitInDebugMode() ||
|
|
|
- // disabling apply inlining leads to explicit load from the zero-inited slot
|
|
|
- this->m_func->GetJITFunctionBody()->IsInlineApplyDisabled())
|
|
|
+ if ((this->m_func->HasArgumentSlot() &&
|
|
|
+ (this->m_func->IsStackArgsEnabled() ||
|
|
|
+ this->m_func->IsJitInDebugMode() ||
|
|
|
+ // disabling apply inlining leads to explicit load from the zero-inited slot
|
|
|
+ this->m_func->GetJITFunctionBody()->IsInlineApplyDisabled()))
|
|
|
#ifdef BAILOUT_INJECTION
|
|
|
|| Js::Configuration::Global.flags.IsEnabled(Js::BailOutFlag)
|
|
|
|| Js::Configuration::Global.flags.IsEnabled(Js::BailOutAtEveryLineFlag)
|
|
|
@@ -1551,6 +1631,7 @@ LowererMDArch::LowerEntryInstr(IR::EntryInstr * entryInstr)
|
|
|
firstPrologInstr->InsertBefore(IR::PragmaInstr::New(Js::OpCode::PrologStart, 0, m_func));
|
|
|
lastPrologInstr->InsertAfter(IR::PragmaInstr::New(Js::OpCode::PrologEnd, 0, m_func));
|
|
|
|
|
|
+#ifdef _WIN32 // home registers
|
|
|
//
|
|
|
// Now store all the arguments in the register in the stack slots
|
|
|
//
|
|
|
@@ -1631,6 +1712,7 @@ LowererMDArch::LowerEntryInstr(IR::EntryInstr * entryInstr)
|
|
|
this->MovArgFromReg2Stack(entryInstr, RegR8, 3);
|
|
|
this->MovArgFromReg2Stack(entryInstr, RegR9, 4);
|
|
|
}
|
|
|
+#endif // _WIN32
|
|
|
|
|
|
IntConstType frameSize = Js::Constants::MinStackJIT + stackArgsSize + stackLocalsSize + savedRegSize;
|
|
|
this->GeneratePrologueStackProbe(entryInstr, frameSize);
|
|
|
@@ -1761,14 +1843,14 @@ LowererMDArch::GeneratePrologueStackProbe(IR::Instr *entryInstr, IntConstType fr
|
|
|
|
|
|
IR::RegOpnd *target;
|
|
|
{
|
|
|
- // MOV rdx, scriptContext
|
|
|
+ // MOV RegArg1, scriptContext
|
|
|
this->lowererMD->CreateAssign(
|
|
|
- IR::RegOpnd::New(nullptr, RegRDX, TyMachReg, m_func),
|
|
|
+ IR::RegOpnd::New(nullptr, RegArg1, TyMachReg, m_func),
|
|
|
this->lowererMD->m_lowerer->LoadScriptContextOpnd(insertInstr), insertInstr);
|
|
|
|
|
|
- // MOV rcx, frameSize
|
|
|
+ // MOV RegArg0, frameSize
|
|
|
this->lowererMD->CreateAssign(
|
|
|
- IR::RegOpnd::New(nullptr, RegRCX, TyMachReg, this->m_func),
|
|
|
+ IR::RegOpnd::New(nullptr, RegArg0, TyMachReg, this->m_func),
|
|
|
IR::AddrOpnd::New((void*)frameSize, IR::AddrOpndKindConstant, this->m_func), insertInstr);
|
|
|
|
|
|
// MOV rax, ThreadContext::ProbeCurrentStack
|
|
|
@@ -2543,7 +2625,7 @@ LowererMDArch::EmitLoadInt32(IR::Instr *instrLoad, bool conversionFromObjectAllo
|
|
|
// Need to bail out instead of calling a helper
|
|
|
return true;
|
|
|
}
|
|
|
-
|
|
|
+
|
|
|
if (conversionFromObjectAllowed)
|
|
|
{
|
|
|
lowererMD->m_lowerer->LowerUnaryHelperMem(instrLoad, IR::HelperConv_ToInt32);
|
|
|
@@ -2907,23 +2989,24 @@ LowererMDArch::LowerEHRegionReturn(IR::Instr * insertBeforeInstr, IR::Opnd * tar
|
|
|
// Load the continuation address into the return register.
|
|
|
insertBeforeInstr->InsertBefore(IR::Instr::New(Js::OpCode::MOV, retReg, targetOpnd, this->m_func));
|
|
|
|
|
|
- // MOV r8, spillSize
|
|
|
- IR::Instr *movR8 = IR::Instr::New(Js::OpCode::LdSpillSize,
|
|
|
- IR::RegOpnd::New(nullptr, RegR8, TyMachReg, m_func),
|
|
|
+ // MOV REG_EH_SPILL_SIZE, spillSize
|
|
|
+ IR::Instr *movSpillSize = IR::Instr::New(Js::OpCode::LdSpillSize,
|
|
|
+ IR::RegOpnd::New(nullptr, REG_EH_SPILL_SIZE, TyMachReg, m_func),
|
|
|
m_func);
|
|
|
- insertBeforeInstr->InsertBefore(movR8);
|
|
|
+ insertBeforeInstr->InsertBefore(movSpillSize);
|
|
|
|
|
|
|
|
|
- // MOV r9, argsSize
|
|
|
- IR::Instr *movR9 = IR::Instr::New(Js::OpCode::LdArgSize,
|
|
|
- IR::RegOpnd::New(nullptr, RegR9, TyMachReg, m_func),
|
|
|
+ // MOV REG_EH_ARGS_SIZE, argsSize
|
|
|
+ IR::Instr *movArgsSize = IR::Instr::New(Js::OpCode::LdArgSize,
|
|
|
+ IR::RegOpnd::New(nullptr, REG_EH_ARGS_SIZE, TyMachReg, m_func),
|
|
|
m_func);
|
|
|
- insertBeforeInstr->InsertBefore(movR9);
|
|
|
+ insertBeforeInstr->InsertBefore(movArgsSize);
|
|
|
|
|
|
- // MOV rcx, amd64_ReturnFromCallWithFakeFrame
|
|
|
- // PUSH rcx
|
|
|
+ // MOV REG_EH_TARGET, amd64_ReturnFromCallWithFakeFrame
|
|
|
+ // PUSH REG_EH_TARGET
|
|
|
// RET
|
|
|
- IR::Opnd *endCallWithFakeFrame = endCallWithFakeFrame = IR::RegOpnd::New(nullptr, RegRCX, TyMachReg, m_func);
|
|
|
+ IR::Opnd *endCallWithFakeFrame = endCallWithFakeFrame =
|
|
|
+ IR::RegOpnd::New(nullptr, REG_EH_TARGET, TyMachReg, m_func);
|
|
|
IR::Instr *movTarget = IR::Instr::New(Js::OpCode::MOV,
|
|
|
endCallWithFakeFrame,
|
|
|
IR::HelperCallOpnd::New(IR::HelperOp_ReturnFromCallWithFakeFrame, m_func),
|