@@ -11,7 +11,9 @@ BackwardPass::BackwardPass(Func * func, GlobOpt * globOpt, Js::Phase tag)
    : func(func), globOpt(globOpt), tag(tag), currentPrePassLoop(nullptr), tempAlloc(nullptr),
    preOpBailOutInstrToProcess(nullptr),
    considerSymAsRealUseInNoImplicitCallUses(nullptr),
-    isCollectionPass(false), currentRegion(nullptr)
+    isCollectionPass(false), currentRegion(nullptr),
+    collectionPassSubPhase(CollectionPassSubPhase::None),
+    isLoopPrepass(false)
{
    // Those are the only two phase dead store will be used currently
    Assert(tag == Js::BackwardPhase || tag == Js::DeadStorePhase);
@@ -47,7 +49,7 @@ BackwardPass::DoSetDead(IR::Opnd * opnd, bool isDead) const
bool
BackwardPass::DoByteCodeUpwardExposedUsed() const
{
-    return
+    return
        !this->func->GetJITFunctionBody()->IsAsmJsMode() &&
        (
            (this->tag == Js::DeadStorePhase && this->func->hasBailout) ||
@@ -555,6 +557,12 @@ BackwardPass::MergeSuccBlocksInfo(BasicBlock * block)
                {
                    Assert(blockSucc->GetDataUseCount() != 0);
                    deleteData = (blockSucc->DecrementDataUseCount() == 0);
+                    if (blockSucc->GetFirstInstr()->m_next->m_opcode == Js::OpCode::SpeculatedLoadFence)
+                    {
+                        // We hold on to data for these blocks until the arena gets cleared due to unusual data lifetimes.
+                        deleteData = false;
+                        blockSucc->IncrementDataUseCount();
+                    }
                }

#if DBG
@@ -633,6 +641,7 @@ BackwardPass::MergeSuccBlocksInfo(BasicBlock * block)
            Assert((blockSucc->tempObjectVerifyTracker != nullptr)
                || (blockSucc->isLoopHeader && (this->IsPrePass() || blockSucc->loop->IsDescendentOrSelf(block->loop)))
                || !this->DoMarkTempObjectVerify());
+
            if (blockSucc->upwardExposedUses != nullptr)
            {
                upwardExposedUses->Or(blockSucc->upwardExposedUses);
@@ -1123,7 +1132,10 @@ BackwardPass::MergeSuccBlocksInfo(BasicBlock * block)
            // blocks with data intact. Delete the block data now.
            Assert(block->backwardPassCurrentLoop);
            Assert(block->backwardPassCurrentLoop->hasDeadStoreCollectionPass);
-            Assert(!block->backwardPassCurrentLoop->hasDeadStorePrepass);
+            // There are two situations where we might still be keeping data around: either we have
+            // not yet run the prepass, or we are retaining the data because the block starts with a
+            // speculation-cancel (SpeculatedLoadFence) instruction, whose data has a longer lifetime.
+            Assert(!block->backwardPassCurrentLoop->hasDeadStorePrepass || block->GetFirstInstr()->m_next->m_opcode == Js::OpCode::SpeculatedLoadFence);

            DeleteBlockData(block);
        }
@@ -1405,6 +1417,23 @@ BackwardPass::ProcessLoopCollectionPass(BasicBlock *const lastBlock)
        currentPrePassLoop = collectionPassLoop;
        Assert(IsPrePass());

+        // This is also where we do the additional step of tracking which opnds are used in memory
+        // dereferences inside the loop, and thus need masking to protect against speculative
+        // side-channel (Spectre) attacks. The approach is fairly conservative: we track sets of
+        // symbols whose values are derived from one another inside the loop. This lets the second
+        // pass later on determine whether a particular operation generating a symbol can avoid the
+        // Spectre masking overhead, since a symbol that is never dereferenced in the loop can
+        // instead be masked on the loop's out-edges, which should be significantly cheaper than
+        // masking it on every iteration.
+        AssertMsg(collectionPassLoop->symClusterList == nullptr, "clusterList should not have been initialized yet!");
+        // The typedef is needed to work around tokenization issues with preprocessor macros that
+        // show up when a macro argument contains multiple template parameters.
+#ifndef _M_ARM
+        typedef SegmentClusterList<SymID, JitArenaAllocator> symClusterListType;
+        collectionPassLoop->symClusterList = JitAnew(this->func->m_fg->alloc, symClusterListType, this->func->m_fg->alloc, 256);
+        collectionPassLoop->internallyDereferencedSyms = JitAnew(this->func->m_fg->alloc, BVSparse<JitArenaAllocator>, this->func->m_fg->alloc);
+#endif
+
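To make the bookkeeping above concrete: `SegmentClusterList` (defined elsewhere in this change, not in this diff) is used here like a union-find over `SymID`s, where `Merge()` joins two symbols' sets and `Consolidate()` flattens the structure. A minimal sketch of that idea, with hypothetical names and none of the arena allocation:

```cpp
#include <vector>
#include <numeric>

// Union-find over symbol IDs: Merge() joins two symbols' sets and
// Consolidate() flattens every entry to point directly at its set's root.
struct MiniClusterList
{
    std::vector<unsigned> parent;

    explicit MiniClusterList(unsigned count) : parent(count)
    {
        std::iota(parent.begin(), parent.end(), 0u); // every sym starts in its own set
    }

    unsigned Root(unsigned id)
    {
        while (parent[id] != id) { id = parent[id]; }
        return id;
    }

    void Merge(unsigned a, unsigned b) { parent[Root(b)] = Root(a); }

    void Consolidate()
    {
        for (unsigned i = 0; i < parent.size(); ++i) { parent[i] = Root(i); }
    }
};
```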
        // First pass
        BasicBlock *firstInnerLoopHeader = nullptr;
        {
@@ -1415,6 +1444,10 @@ BackwardPass::ProcessLoopCollectionPass(BasicBlock *const lastBlock)
            }
#endif

+            // We want to be able to disambiguate this in ProcessBlock
+            CollectionPassSubPhase prevCollectionPassSubPhase = this->collectionPassSubPhase;
+            this->collectionPassSubPhase = CollectionPassSubPhase::FirstPass;
+
            FOREACH_BLOCK_BACKWARD_IN_RANGE_DEAD_OR_ALIVE(block, lastBlock, nullptr)
            {
                ProcessBlock(block);
@@ -1431,6 +1464,8 @@ BackwardPass::ProcessLoopCollectionPass(BasicBlock *const lastBlock)
                }
            } NEXT_BLOCK_BACKWARD_IN_RANGE_DEAD_OR_ALIVE;

+            this->collectionPassSubPhase = prevCollectionPassSubPhase;
+
#if DBG_DUMP
            if(IsTraceEnabled())
            {
@@ -1439,6 +1474,49 @@ BackwardPass::ProcessLoopCollectionPass(BasicBlock *const lastBlock)
#endif
        }

+#ifndef _M_ARM
+        // Now that we have generated the base data structures for the Spectre handling, we can
+        // cross-reference them to get the full set of symbols that may be dereferenced in the
+        // loop, and hence which ones are safe under speculation.
+#if DBG_DUMP
+        if (PHASE_TRACE(Js::SpeculationPropagationAnalysisPhase, this->func))
+        {
+            Output::Print(_u("Analysis Results for loop %u:\n"), collectionPassLoop->GetLoopNumber());
+            Output::Print(_u("ClusterList pre-consolidation: "));
+            collectionPassLoop->symClusterList->Dump();
+        }
+#endif // DBG_DUMP
+        collectionPassLoop->symClusterList->Consolidate();
+#if DBG_DUMP
+        if (PHASE_TRACE(Js::SpeculationPropagationAnalysisPhase, this->func))
+        {
+            Output::Print(_u("ClusterList post-consolidation: "));
+            collectionPassLoop->symClusterList->Dump();
+            Output::Print(_u("Internally dereferenced syms pre-propagation: "));
+            collectionPassLoop->internallyDereferencedSyms->Dump();
+        }
+#endif // DBG_DUMP
+        collectionPassLoop->symClusterList->Map<BVSparse<JitArenaAllocator>*, true>([](SymID index, SymID containingSetRoot, BVSparse<JitArenaAllocator>* bv){
+            if (bv->Test(index))
+            {
+                bv->Set(containingSetRoot);
+            }
+        }, collectionPassLoop->internallyDereferencedSyms);
+        collectionPassLoop->symClusterList->Map<BVSparse<JitArenaAllocator>*, true>([](SymID index, SymID containingSetRoot, BVSparse<JitArenaAllocator>* bv){
+            if (bv->Test(containingSetRoot))
+            {
+                bv->Set(index);
+            }
+        }, collectionPassLoop->internallyDereferencedSyms);
+#if DBG_DUMP
+        if (PHASE_TRACE(Js::SpeculationPropagationAnalysisPhase, this->func))
+        {
+            Output::Print(_u("Internally dereferenced syms post-propagation: "));
+            collectionPassLoop->internallyDereferencedSyms->Dump();
+        }
+#endif // DBG_DUMP
+#endif // !defined(_M_ARM)
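The two `Map` calls above implement a two-pass closure over the clusters: first each dereferenced member's bit is pushed up to its cluster root, then the root's bit is pushed back down to every member. A sketch of the same idea on the hypothetical `MiniClusterList` from earlier (STL-based, not the real bit-vector types):

```cpp
#include <set>

// After these two passes, a symbol's bit is set iff ANY symbol in its
// cluster was marked as dereferenced inside the loop.
void PropagateDerefBits(MiniClusterList& clusters, std::set<unsigned>& derefed)
{
    // Pass 1: member -> root.
    for (unsigned id = 0; id < clusters.parent.size(); ++id)
    {
        if (derefed.count(id) != 0) { derefed.insert(clusters.Root(id)); }
    }
    // Pass 2: root -> member.
    for (unsigned id = 0; id < clusters.parent.size(); ++id)
    {
        if (derefed.count(clusters.Root(id)) != 0) { derefed.insert(id); }
    }
}
```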
+
        // Second pass, only needs to run if there are any inner loops, to propagate collected information into those loops
        if(firstInnerLoopHeader)
        {
@@ -1449,6 +1527,10 @@ BackwardPass::ProcessLoopCollectionPass(BasicBlock *const lastBlock)
            }
#endif

+            // We want to be able to disambiguate this in ProcessBlock
+            CollectionPassSubPhase prevCollectionPassSubPhase = this->collectionPassSubPhase;
+            this->collectionPassSubPhase = CollectionPassSubPhase::SecondPass;
+
            FOREACH_BLOCK_BACKWARD_IN_RANGE_DEAD_OR_ALIVE(block, lastBlock, firstInnerLoopHeader)
            {
                Loop *const loop = block->loop;
@@ -1471,6 +1553,8 @@ BackwardPass::ProcessLoopCollectionPass(BasicBlock *const lastBlock)
                ProcessBlock(block);
            } NEXT_BLOCK_BACKWARD_IN_RANGE_DEAD_OR_ALIVE;

+            this->collectionPassSubPhase = prevCollectionPassSubPhase;
+
#if DBG_DUMP
            if(IsTraceEnabled())
            {
@@ -1494,6 +1578,9 @@ BackwardPass::ProcessLoop(BasicBlock * lastBlock)

    Loop *loop = lastBlock->loop;

+    bool prevIsLoopPrepass = this->isLoopPrepass;
+    this->isLoopPrepass = true;
+
    // This code doesn't work quite as intended. It is meant to capture fields that are live out of a loop to limit the
    // number of implicit call bailouts the forward pass must create (only compiler throughput optimization, no impact
    // on emitted code), but because it looks only at the lexically last block in the loop, it does the right thing only
@@ -1560,6 +1647,8 @@ BackwardPass::ProcessLoop(BasicBlock * lastBlock)
    __analysis_assume(lastBlock);
    lastBlock->loop->hasDeadStorePrepass = true;

+    this->isLoopPrepass = prevIsLoopPrepass;
+
#if DBG_DUMP
    if (this->IsTraceEnabled())
    {
@@ -2679,6 +2768,7 @@ BackwardPass::ProcessBlock(BasicBlock * block)
            }
        }

+
        if(!IsCollectionPass())
        {
            this->MarkTempProcessInstr(instr);
@@ -2711,11 +2801,540 @@ BackwardPass::ProcessBlock(BasicBlock * block)

        if(IsCollectionPass())
        {
+#ifndef _M_ARM
+            if (this->collectionPassSubPhase == CollectionPassSubPhase::FirstPass)
+            {
+                // In the collection pass we do multiple passes over loops. In these passes we keep
+                // track of sets of symbols, so that we know whether they are used in ways that
+                // require protecting them from side-channel attacks.
+                IR::Opnd const * src1 = instr->GetSrc1();
+                IR::Opnd const * src2 = instr->GetSrc2();
+                IR::Opnd const * dest = instr->GetDst();
+                // By default, the marking works as follows:
+                // 1. Symbols appearing directly on an instruction are marked as part of the same set.
+                // 2. Symbols used in IndirOpnds on an instruction are marked as dereferenced.
+                // 3. Symbols used as sources of certain instructions are marked as dereferenced.
+                // 4. Non-type-specialized symbols are generally marked as dereferenced.
+
+                // First, find a symbol associated with this instruction to serve as the target
+                // symid for the merge operations. This simplifies the later code.
+                auto getAnyDirectSymID = [](IR::Opnd const* opnd)
+                {
+                    SymID temp = SymID_Invalid;
+                    if (opnd == nullptr)
+                    {
+                        return temp;
+                    }
+
+                    switch (opnd->m_kind)
+                    {
+                    case IR::OpndKind::OpndKindInvalid:
+                        AssertOrFailFastMsg(false, "There should be no invalid operand kinds at this point...");
+                        break;
+                    case IR::OpndKind::OpndKindIntConst:
+                    case IR::OpndKind::OpndKindInt64Const:
+                    case IR::OpndKind::OpndKindFloatConst:
+                    case IR::OpndKind::OpndKindFloat32Const:
+                    case IR::OpndKind::OpndKindSimd128Const:
+                        // Nothing to do here, no symbols involved
+                        break;
+                    case IR::OpndKind::OpndKindHelperCall:
+                        // HelperCall operands carry no symbols
+                        break;
+                    case IR::OpndKind::OpndKindSym:
+                        temp = opnd->AsSymOpnd()->m_sym->m_id;
+                        break;
+                    case IR::OpndKind::OpndKindReg:
+                        temp = opnd->AsRegOpnd()->m_sym->m_id;
+                        break;
+                    case IR::OpndKind::OpndKindAddr:
+                        // Should be constant, so nothing to do
+                        break;
+                    case IR::OpndKind::OpndKindIndir:
+                        // IndirOpnds don't themselves have symbols
+                        break;
+                    case IR::OpndKind::OpndKindLabel:
+                        // Should be constant, so not an issue
+                        break;
+                    case IR::OpndKind::OpndKindMemRef:
+                        // References a fixed memory address, so no symbols involved
+                        break;
+                    case IR::OpndKind::OpndKindRegBV:
+                        // No individual symbols to track
+                        break;
+                    case IR::OpndKind::OpndKindList:
+                        // Since it's a list of RegOpnds, we just need to look at the first
+                        {
+                            IR::ListOpnd const* list = opnd->AsListOpnd();
+                            if (list->Count() > 0)
+                            {
+                                temp = list->Item(0)->m_sym->m_id;
+                            }
+                        }
+                        break;
+                    default:
+                        AssertOrFailFastMsg(false, "This should be unreachable - if we've added another OpndKind, add proper handling for it");
+                        break;
+                    }
+                    return temp;
+                };
+
+                SymID destSymID = getAnyDirectSymID(dest);
+
+                if (destSymID == SymID_Invalid)
+                {
+                    // There is no assignment to a symbol here. Since this pass marks symbols that
+                    // belong to the same set through assignment or computation, the lack of a
+                    // destination means there are no set joins to do. We may still need a pass
+                    // over the source operands to mark dereferences, but that's simpler.
+                }
+                else
+                {
+                    // We have a target, so add to its set any symbols that appear as top-level
+                    // operands of the instruction.
+                    auto addSymbolToSet = [](IR::Opnd const* opnd, Loop::LoopSymClusterList* scl, SymID targetSymID)
+                    {
+                        if (opnd == nullptr)
+                        {
+                            return;
+                        }
+                        switch (opnd->m_kind)
+                        {
+                        case IR::OpndKind::OpndKindInvalid:
+                            AssertOrFailFastMsg(false, "There should be no invalid operand kinds at this point...");
+                            break;
+                        case IR::OpndKind::OpndKindIntConst:
+                        case IR::OpndKind::OpndKindInt64Const:
+                        case IR::OpndKind::OpndKindFloatConst:
+                        case IR::OpndKind::OpndKindFloat32Const:
+                        case IR::OpndKind::OpndKindSimd128Const:
+                            // Nothing to do here, no symbols involved
+                            break;
+                        case IR::OpndKind::OpndKindHelperCall:
+                            // HelperCall operands carry no symbols
+                            break;
+                        case IR::OpndKind::OpndKindSym:
+                            scl->Merge(targetSymID, opnd->AsSymOpnd()->m_sym->m_id);
+                            break;
+                        case IR::OpndKind::OpndKindReg:
+                            scl->Merge(targetSymID, opnd->AsRegOpnd()->m_sym->m_id);
+                            break;
+                        case IR::OpndKind::OpndKindAddr:
+                            // Should be constant, so nothing to do
+                            break;
+                        case IR::OpndKind::OpndKindIndir:
+                            // IndirOpnds don't themselves have symbols
+                            break;
+                        case IR::OpndKind::OpndKindLabel:
+                            // Should be constant, so not an issue
+                            break;
+                        case IR::OpndKind::OpndKindMemRef:
+                            // References a fixed memory address, so no symbols involved
+                            break;
+                        case IR::OpndKind::OpndKindRegBV:
+                            // No individual symbols to track
+                            break;
+                        case IR::OpndKind::OpndKindList:
+                            // Needs iteration, but is straightforward beyond that
+                            {
+                                IR::ListOpnd const* list = opnd->AsListOpnd();
+                                for (int iter = 0; iter < list->Count(); iter++)
+                                {
+                                    scl->Merge(targetSymID, list->Item(iter)->m_sym->m_id);
+                                }
+                            }
+                            break;
+                        default:
+                            AssertOrFailFastMsg(false, "This should be unreachable - if we've added another OpndKind, add proper handling for it");
+                            break;
+                        }
+                    };
+                    addSymbolToSet(src1, this->currentPrePassLoop->symClusterList, destSymID);
+                    addSymbolToSet(src2, this->currentPrePassLoop->symClusterList, destSymID);
+                }
+
+                // Now the second part: symbols used in IndirOpnds get marked as dereferenced.
+                // This is just a matter of updating a bit vector, so it's fairly straightforward.
+                auto markDereferences = [](IR::Opnd const* opnd, BVSparse<JitArenaAllocator>* bv)
+                {
+                    if (opnd == nullptr)
+                    {
+                        return;
+                    }
+                    switch (opnd->m_kind)
+                    {
+                    case IR::OpndKind::OpndKindInvalid:
+                        AssertOrFailFastMsg(false, "There should be no invalid operand kinds at this point...");
+                        break;
+                    case IR::OpndKind::OpndKindIntConst:
+                    case IR::OpndKind::OpndKindInt64Const:
+                    case IR::OpndKind::OpndKindFloatConst:
+                    case IR::OpndKind::OpndKindFloat32Const:
+                    case IR::OpndKind::OpndKindSimd128Const:
+                        // Nothing to do here, no symbols involved
+                        break;
+                    case IR::OpndKind::OpndKindHelperCall:
+                        // HelperCall operands carry no symbols
+                        break;
+                    case IR::OpndKind::OpndKindSym:
+                        // If it's not type-specialized, we may dereference it.
+                        if (!(opnd->GetValueType().IsNotObject()))
+                        {
+                            bv->Set(opnd->AsSymOpnd()->m_sym->m_id);
+                        }
+                        break;
+                    case IR::OpndKind::OpndKindReg:
+                        // If it's not type-specialized, we may dereference it.
+                        if (!(opnd->GetValueType().IsNotObject()) && !opnd->AsRegOpnd()->m_sym->IsTypeSpec())
+                        {
+                            bv->Set(opnd->AsRegOpnd()->m_sym->m_id);
+                        }
+                        break;
+                    case IR::OpndKind::OpndKindAddr:
+                        // Should be constant, so nothing to do
+                        break;
+                    case IR::OpndKind::OpndKindIndir:
+                        // Need to handle each component
+                        {
+                            IR::IndirOpnd const* indirOpnd = opnd->AsIndirOpnd();
+                            if (indirOpnd->GetBaseOpnd())
+                            {
+                                bv->Set(indirOpnd->GetBaseOpnd()->m_sym->m_id);
+                            }
+                            if (indirOpnd->GetIndexOpnd())
+                            {
+                                bv->Set(indirOpnd->GetIndexOpnd()->m_sym->m_id);
+                            }
+                        }
+                        break;
+                    case IR::OpndKind::OpndKindLabel:
+                        // Should be constant, so not an issue
+                        break;
+                    case IR::OpndKind::OpndKindMemRef:
+                        // References a fixed memory address, so no symbols involved
+                        break;
+                    case IR::OpndKind::OpndKindRegBV:
+                        // No individual symbols to track
+                        break;
+                    case IR::OpndKind::OpndKindList:
+                        // Needs iteration, but is straightforward beyond that
+                        {
+                            IR::ListOpnd const* list = opnd->AsListOpnd();
+                            for (int iter = 0; iter < list->Count(); iter++)
+                            {
+                                // Same handling as OpndKindReg, since ListOpndType is RegOpnd
+                                if (!(list->Item(iter)->GetValueType().IsNotObject()) && !list->Item(iter)->m_sym->IsTypeSpec())
+                                {
+                                    bv->Set(list->Item(iter)->m_sym->m_id);
+                                }
+                            }
+                        }
+                        break;
+                    default:
+                        AssertOrFailFastMsg(false, "This should be unreachable - if we've added another OpndKind, add proper handling for it");
+                        break;
+                    }
+                };
+                markDereferences(dest, this->currentPrePassLoop->internallyDereferencedSyms);
+                markDereferences(src1, this->currentPrePassLoop->internallyDereferencedSyms);
+                markDereferences(src2, this->currentPrePassLoop->internallyDereferencedSyms);
+
+                auto explicitlyMarkDereferenced = [](IR::Opnd const* opnd, BVSparse<JitArenaAllocator>* bv)
+                {
+                    if (opnd == nullptr)
+                    {
+                        return;
+                    }
+                    switch (opnd->m_kind)
+                    {
+                    case IR::OpndKind::OpndKindInvalid:
+                        AssertOrFailFastMsg(false, "There should be no invalid operand kinds at this point...");
+                        break;
+                    case IR::OpndKind::OpndKindIntConst:
+                    case IR::OpndKind::OpndKindInt64Const:
+                    case IR::OpndKind::OpndKindFloatConst:
+                    case IR::OpndKind::OpndKindFloat32Const:
+                    case IR::OpndKind::OpndKindSimd128Const:
+                        // Nothing to do here, no symbols involved
+                        break;
+                    case IR::OpndKind::OpndKindHelperCall:
+                        // HelperCall operands carry no symbols
+                        break;
+                    case IR::OpndKind::OpndKindSym:
+                        // The instruction using this operand means that we may dereference the
+                        // symbol, regardless of type specialization
+                        bv->Set(opnd->AsSymOpnd()->m_sym->m_id);
+                        break;
+                    case IR::OpndKind::OpndKindReg:
+                        // The instruction using this operand means that we may dereference the
+                        // symbol, regardless of type specialization
+                        bv->Set(opnd->AsRegOpnd()->m_sym->m_id);
+                        break;
+                    case IR::OpndKind::OpndKindAddr:
+                        // Should be constant, so nothing to do
+                        break;
+                    case IR::OpndKind::OpndKindIndir:
+                        // Need to handle each component
+                        {
+                            IR::IndirOpnd const* indirOpnd = opnd->AsIndirOpnd();
+                            if (indirOpnd->GetBaseOpnd())
+                            {
+                                bv->Set(indirOpnd->GetBaseOpnd()->m_sym->m_id);
+                            }
+                            if (indirOpnd->GetIndexOpnd())
+                            {
+                                bv->Set(indirOpnd->GetIndexOpnd()->m_sym->m_id);
+                            }
+                        }
+                        break;
+                    case IR::OpndKind::OpndKindLabel:
+                        // Should be constant, so not an issue
+                        break;
+                    case IR::OpndKind::OpndKindMemRef:
+                        // References a fixed memory address, so no symbols involved
+                        break;
+                    case IR::OpndKind::OpndKindRegBV:
+                        // No individual symbols to track
+                        break;
+                    case IR::OpndKind::OpndKindList:
+                        // Needs iteration, but is straightforward beyond that
+                        {
+                            IR::ListOpnd const* list = opnd->AsListOpnd();
+                            for (int iter = 0; iter < list->Count(); iter++)
+                            {
+                                // The instruction using this operand means that we may dereference
+                                // the symbol, regardless of type specialization
+                                bv->Set(list->Item(iter)->m_sym->m_id);
+                            }
+                        }
+                        break;
+                    default:
+                        AssertOrFailFastMsg(false, "This should be unreachable - if we've added another OpndKind, add proper handling for it");
+                        break;
+                    }
+                };
+                // Some specific instructions also dereference things; there are only a few of
+                // them, so we handle them explicitly.
+                switch (instr->m_opcode)
+                {
+                case Js::OpCode::StArrInlineItem_CI4:
+                case Js::OpCode::StArrItemC_CI4:
+                case Js::OpCode::StArrItemI_CI4:
+                case Js::OpCode::StArrSegElemC:
+                case Js::OpCode::StArrSegItem_A:
+                case Js::OpCode::StArrSegItem_CI4:
+                case Js::OpCode::StArrViewElem:
+                case Js::OpCode::StAtomicWasm:
+                case Js::OpCode::StElemC:
+                case Js::OpCode::StElemI_A:
+                case Js::OpCode::StElemI_A_Strict:
+                case Js::OpCode::StEnvObjSlot:
+                case Js::OpCode::StEnvObjSlotChkUndecl:
+                case Js::OpCode::StFld:
+                case Js::OpCode::StFldStrict:
+                case Js::OpCode::StFuncExpr:
+                case Js::OpCode::StInnerObjSlot:
+                case Js::OpCode::StInnerObjSlotChkUndecl:
+                case Js::OpCode::StInnerSlot:
+                case Js::OpCode::StInnerSlotChkUndecl:
+                case Js::OpCode::StLocalFld:
+                case Js::OpCode::StLocalFuncExpr:
+                case Js::OpCode::StLocalObjSlot:
+                case Js::OpCode::StLocalObjSlotChkUndecl:
+                case Js::OpCode::StLocalSlot:
+                case Js::OpCode::StLocalSlotChkUndecl:
+                case Js::OpCode::StLoopBodyCount:
+                case Js::OpCode::StModuleSlot:
+                case Js::OpCode::StObjSlot:
+                case Js::OpCode::StObjSlotChkUndecl:
+                case Js::OpCode::StParamObjSlot:
+                case Js::OpCode::StParamObjSlotChkUndecl:
+                case Js::OpCode::StParamSlot:
+                case Js::OpCode::StParamSlotChkUndecl:
+                case Js::OpCode::StRootFld:
+                case Js::OpCode::StRootFldStrict:
+                case Js::OpCode::StSlot:
+                case Js::OpCode::StSlotBoxTemp:
+                case Js::OpCode::StSlotChkUndecl:
+                case Js::OpCode::StSuperFld:
+                case Js::OpCode::ProfiledStElemI_A:
+                case Js::OpCode::ProfiledStElemI_A_Strict:
+                case Js::OpCode::ProfiledStFld:
+                case Js::OpCode::ProfiledStFldStrict:
+                case Js::OpCode::ProfiledStLocalFld:
+                case Js::OpCode::ProfiledStRootFld:
+                case Js::OpCode::ProfiledStRootFldStrict:
+                case Js::OpCode::ProfiledStSuperFld:
+                    // Unfortunately, being fed into a store means that we could have aliasing, and
+                    // the consequence is that the value may be re-read and then dereferenced. Note
+                    // that we could handle this case by poisoning any array symbol we store to on
+                    // the way out, but the aliasing problem would remain.
+                case Js::OpCode::ArgOut_A:
+                case Js::OpCode::ArgOut_ANonVar:
+                case Js::OpCode::ArgOut_A_Dynamic:
+                case Js::OpCode::ArgOut_A_FixupForStackArgs:
+                case Js::OpCode::ArgOut_A_FromStackArgs:
+                case Js::OpCode::ProfiledArgOut_A:
+                    // Getting passed to another function is a boundary that we can't analyze over.
+                case Js::OpCode::Ret:
+                    // Return arcs are pretty short in speculation, so we have to assume that we may
+                    // be returning to a situation that will dereference the symbol. Note that we
+                    // will not hit this path in normal jitted code, but it's more common in
+                    // jit-loop-body code.
+                    explicitlyMarkDereferenced(instr->GetSrc1(), this->currentPrePassLoop->internallyDereferencedSyms);
+                    break;
+                default:
+                    // most instructions don't have this sort of behavior
+                    break;
+                }
+            }
+#endif
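A worked example of the first-pass marking rules, using the hypothetical `MiniClusterList` and `PropagateDerefBits` sketches from earlier (the SymIDs and IR shapes here are made up for illustration):

```cpp
#include <set>

// Hypothetical SymIDs standing in for JIT temps.
enum : unsigned { t1 = 1, t2, t3, t4, t5, t6, symCount };

void FirstPassMarkingExample()
{
    MiniClusterList clusters(symCount);
    std::set<unsigned> derefed;

    // t1 = Add t2, t3  -> rule 1: dst and srcs join one set
    clusters.Merge(t1, t2);
    clusters.Merge(t1, t3);

    // t4 = LdElemI [t5 + t6]  -> rule 2: indir base and index are dereferenced
    derefed.insert(t5);
    derefed.insert(t6);

    // If a later instruction computed t5 from t1 (say t5 = Add t1, t4), the
    // clusters would merge, and PropagateDerefBits() would then mark t1
    // through t5 as transitively dereferenced.
}
```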
+            // Continue normal CollectionPass behavior
            continue;
        }

        if (this->tag == Js::DeadStorePhase)
        {
+#ifndef _M_ARM
+            if(block->loop && !this->isLoopPrepass)
+            {
+                // In the second pass, we mark each instruction we visit as safe or unsafe.
+                //
+                // This is all based on the information gathered in the previous pass. The symbol
+                // sets have been cross-referenced, so the bit vector now holds a complete list of
+                // which symbols are dereferenced in the loop, directly or indirectly, and we can
+                // check whether a particular instruction creates such a symbol. If it doesn't,
+                // then we do not mask its destination, as that isn't necessary to create a safe
+                // program.
+                //
+                // Note that if we avoid doing the masking here, we need to do it on the loop's
+                // out-edges instead - otherwise an unsafe use of the symbol could happen after
+                // the loop and not get caught.
+
+                // This helper marks loop out-edges for a particular symbol's set.
+                static void (*addOutEdgeMasking)(SymID, Loop*, JitArenaAllocator*) = [](SymID symID, Loop* loop, JitArenaAllocator *alloc) -> void
+                {
+                    // There are rare cases where we have no out-edges (the only way to leave the
+                    // loop is via a return inside the jit loop body); in that case we don't need
+                    // to mask any symbols on the out-edges, and only the store cases matter.
+                    if(loop->outwardSpeculationMaskInstrs == nullptr)
+                    {
+                        return;
+                    }
+                    BVSparse<JitArenaAllocator> *syms = JitAnew(alloc, BVSparse<JitArenaAllocator>, alloc);
+                    // We only need to do this for stack syms, and only for those that are upward-
+                    // exposed in the block feeding the masking block, but it must cover every
+                    // symbol that a mask-skipped load may have been written to.
+                    loop->symClusterList->MapSet<BVSparse<JitArenaAllocator>*>(symID, [](SymID a, BVSparse<JitArenaAllocator> *symbols) {
+                        symbols->Set(a);
+                    }, syms);
+                    FOREACH_BITSET_IN_SPARSEBV(curSymID, syms)
+                    {
+                        if (!loop->GetFunc()->m_symTable->Find(curSymID)->IsStackSym())
+                        {
+                            syms->Clear(curSymID);
+                        }
+                    } NEXT_BITSET_IN_SPARSEBV;
+                    if (syms->IsEmpty())
+                    {
+                        // If no stack syms remain, we have nothing to mask
+                        return;
+                    }
+                    // Now that we have a bitvector of syms to try to mask on the out-edges, we go
+                    // over the list of out-edge mask instructions.
+                    FOREACH_SLIST_ENTRY(IR::ByteCodeUsesInstr*, bcuInstr, loop->outwardSpeculationMaskInstrs)
+                    {
+                        // Get the upwardExposed information for the previous block
+                        IR::LabelInstr *blockLabel = bcuInstr->m_prev->AsLabelInstr();
+                        BasicBlock* maskingBlock = blockLabel->GetBasicBlock();
+                        // Since it's possible we have a multi-level loop structure (each level with
+                        // its own mask instructions and dereferenced-symbol list), we may be able to
+                        // avoid masking some symbols on interior-loop -> exterior-loop edges if they
+                        // are not dereferenced in the exterior loop. This does mean, however, that
+                        // we then need to mask them further out.
+                        Loop* maskingBlockLoop = maskingBlock->loop;
+                        if (maskingBlockLoop != nullptr && !maskingBlockLoop->internallyDereferencedSyms->Test(symID))
+                        {
+                            addOutEdgeMasking(symID, maskingBlockLoop, alloc);
+                            continue;
+                        }
+                        // Instead of looking at the previous block (inside the loop), which may be
+                        // cleaned up or may yet be processed for dead stores, we can look at the
+                        // mask/cmov block, which we keep from being cleaned up, and which is always
+                        // handled before the loop is looked at (in this phase), since it is placed
+                        // after the loop.
+                        AssertOrFailFast(maskingBlock->upwardExposedUses);
+                        AssertOrFailFast(maskingBlock->upwardExposedFields);
+                        BVSparse<JitArenaAllocator> *symsToMask = JitAnew(alloc, BVSparse<JitArenaAllocator>, alloc);
+                        symsToMask->Or(maskingBlock->upwardExposedUses);
+                        symsToMask->Or(maskingBlock->upwardExposedFields);
+                        symsToMask->And(syms);
+                        // If nothing is exposed, we have nothing to mask, and nothing to do here.
+                        if (!symsToMask->IsEmpty())
+                        {
+                            if (bcuInstr->GetByteCodeUpwardExposedUsed() == nullptr)
+                            {
+                                // This will initialize the internal structure properly
+                                bcuInstr->SetBV(JitAnew(bcuInstr->m_func->m_alloc, BVSparse<JitArenaAllocator>, bcuInstr->m_func->m_alloc));
+                            }
+#if DBG_DUMP
+                            if (PHASE_TRACE(Js::SpeculationPropagationAnalysisPhase, loop->topFunc))
+                            {
+                                Output::Print(_u("Adding symbols to out-edge masking for loop %u outward block %u:\n"), loop->GetLoopNumber(), maskingBlock->GetBlockNum());
+                                symsToMask->Dump();
+                            }
+#endif
+                            // Add the syms to the mask set
+                            const_cast<BVSparse<JitArenaAllocator> *>(bcuInstr->GetByteCodeUpwardExposedUsed())->Or(symsToMask);
+                        }
+                    } NEXT_SLIST_ENTRY;
+                };
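The recursion in the middle of this helper (re-invoking `addOutEdgeMasking` on `maskingBlockLoop`) is the subtle part: a mask skipped on an inner loop's out-edge must reappear somewhere further out. A simplified sketch of the deferral logic, using hypothetical structures (the real code walks the enclosing loop's own out-edge mask instructions rather than just its parent):

```cpp
#include <set>

struct MiniLoop
{
    MiniLoop* parent;                 // enclosing loop, or nullptr
    std::set<unsigned> derefedSyms;   // cluster-closed deref set for this loop
};

// Walk outward until we find a loop that actually dereferences the symbol
// (mask on its edges) or we leave all loops (mask on that final out-edge).
void AddOutEdgeMask(unsigned sym, MiniLoop* edgeTargetLoop)
{
    if (edgeTargetLoop != nullptr && edgeTargetLoop->derefedSyms.count(sym) == 0)
    {
        // The enclosing loop never dereferences it either; defer outward again.
        AddOutEdgeMask(sym, edgeTargetLoop->parent);
        return;
    }
    // ... record sym in the speculation fence for this edge here ...
}
```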
+                switch (instr->m_opcode)
+                {
+                case Js::OpCode::LdElemI_A:
+                case Js::OpCode::ProfiledLdElemI_A:
+                {
+                    IR::Opnd* dest = instr->GetDst();
+                    if (dest->IsRegOpnd())
+                    {
+                        SymID symid = dest->AsRegOpnd()->m_sym->m_id;
+                        if (!block->loop->internallyDereferencedSyms->Test(symid))
+                        {
+                            instr->SetIsSafeToSpeculate(true);
+                            addOutEdgeMasking(symid, block->loop, this->tempAlloc);
+#if DBG_DUMP
+                            if (PHASE_TRACE(Js::SpeculationPropagationAnalysisPhase, this->func))
+                            {
+                                Output::Print(_u("Marking instruction as safe:\n"));
+                                instr->highlight = 0x0f;
+                                instr->Dump();
+                            }
+#endif
+                        }
+                    }
+                    else if (dest->IsSymOpnd())
+                    {
+                        SymID symid = dest->AsSymOpnd()->m_sym->m_id;
+                        if (!block->loop->internallyDereferencedSyms->Test(symid))
+                        {
+                            instr->SetIsSafeToSpeculate(true);
+                            addOutEdgeMasking(symid, block->loop, this->tempAlloc);
+#if DBG_DUMP
+                            if (PHASE_TRACE(Js::SpeculationPropagationAnalysisPhase, this->func))
+                            {
+                                Output::Print(_u("Marking instruction as safe:\n"));
+                                instr->highlight = 0x0f;
+                                instr->Dump();
+                            }
+#endif
+                        }
+                    }
+                }
+                break;
+                default:
+                    // Most instructions don't need any particular handling here, as they don't
+                    // get any masking regardless.
+                    break;
+                }
+            }
+#endif
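The rule the `LdElemI_A` handling above implements can be stated as a tiny predicate over the earlier sketch types (hypothetical names; assumes the deref set was already cluster-closed by the propagation step):

```cpp
#include <set>

// A load destination may skip per-iteration masking iff nothing in the loop
// dereferences it, directly or via any symbol in its cluster. The caller is
// then responsible for masking it once on the loop's out-edges instead.
bool CanDeferMaskToOutEdges(unsigned destSym, const std::set<unsigned>& derefedInLoop)
{
    // derefedInLoop is assumed cluster-closed (see PropagateDerefBits above),
    // so a single membership test suffices.
    return derefedInLoop.count(destSym) == 0;
}
```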
        switch(instr->m_opcode)
        {
        case Js::OpCode::LdSlot:
@@ -2943,6 +3562,102 @@ BackwardPass::ProcessBlock(BasicBlock * block)
        }
        NEXT_INSTR_BACKWARD_IN_BLOCK_EDITING;

+#ifndef _M_ARM
+    if (this->tag == Js::DeadStorePhase
+        // We don't need the masking blocks in asmjs/wasm mode
+        && !block->GetFirstInstr()->m_func->GetJITFunctionBody()->IsAsmJsMode()
+        && !block->GetFirstInstr()->m_func->GetJITFunctionBody()->IsWasmFunction()
+        && !block->isDead
+        && !block->isDeleted)
+    {
+        FOREACH_PREDECESSOR_BLOCK(blockPred, block)
+        {
+            // Now we need to handle loop out-edges. These need blocks inserted to prevent loads
+            // of those symbols during speculation; the easiest way to do this is to CMOV them
+            // with a flag that we always know will be false, as this introduces a dependency on
+            // the register that can't be speculated past (currently).
+            //
+            // Note that we're doing this backwards - looking from the target into the loop. We do
+            // it this way because we're going backwards over the blocks anyway; a block inserted
+            // after the branch may be impossible to handle correctly.
+            if (!blockPred->isDead && !blockPred->isDeleted && blockPred->loop != nullptr)
+            {
+                Loop* targetLoop = block->loop;
+                Loop* startingLoop = blockPred->loop;
+                bool addMaskingBlock = false;
+                if (targetLoop == nullptr)
+                {
+                    // If we're leaving to a non-looping context, we definitely want the masking block
+                    addMaskingBlock = true;
+                }
+                else if (targetLoop == startingLoop)
+                {
+                    // If we're still inside the same loop, we don't want a masking block
+                    addMaskingBlock = false;
+                }
+                else
+                {
+                    // We want a masking block if we're going to a loop enclosing the current one.
+                    Loop* loopTest = targetLoop;
+                    addMaskingBlock = true;
+                    while (loopTest != nullptr)
+                    {
+                        if (loopTest == startingLoop)
+                        {
+                            // The target loop is a child of the starting loop, so don't mask on the way
+                            addMaskingBlock = false;
+                            break;
+                        }
+                        loopTest = loopTest->parent;
+                    }
+                }
+                if (addMaskingBlock)
+                {
+                    // Avoid masking on the way out of a masking block - we're already masking this jmp
+                    if (block->GetFirstInstr()->m_next->m_opcode == Js::OpCode::SpeculatedLoadFence)
+                    {
+                        addMaskingBlock = false;
+                    }
+                }
+                if (addMaskingBlock)
+                {
+                    // The masking is architecture dependent, so we just mark the block here and
+                    // leave the actual generation of the masking code to the Lowerer.
+                    // Code generated here:
+                    // newTarget:
+                    //   syms = targetedloadfence syms
+                    //   jmp oldTarget
+
+                    // We need to increment the data use count since we're changing a successor.
+                    blockPred->IncrementDataUseCount();
+                    BasicBlock *newBlock = this->func->m_fg->InsertAirlockBlock(this->func->m_fg->FindEdge(blockPred, block));
+                    LABELNAMESET(newBlock->GetFirstInstr()->AsLabelInstr(), "Loop out-edge masking block");
+                    // This is a bit of a misuse of ByteCodeUsesInstr - we're using it as just a
+                    // bitvector that we can add things to.
+                    IR::ByteCodeUsesInstr* masker = IR::ByteCodeUsesInstr::New(newBlock->GetFirstInstr());
+                    masker->m_opcode = Js::OpCode::SpeculatedLoadFence;
+                    // Add the one instruction we need to this block
+                    newBlock->GetFirstInstr()->InsertAfter(masker);
+                    // We need to initialize the data for this block so that later stages of the
+                    // dead-store pass work properly. Setting the use count to 0 makes
+                    // MergeSuccBlocksInfo create the structures.
+                    newBlock->SetDataUseCount(0);
+                    // If we inserted an airlock compensation block, we need to set the use count on that too.
+                    if (newBlock->prev && newBlock->prev->isAirLockCompensationBlock)
+                    {
+                        newBlock->prev->SetDataUseCount(0);
+                    }
+                    if (startingLoop->outwardSpeculationMaskInstrs == nullptr)
+                    {
+                        startingLoop->outwardSpeculationMaskInstrs = JitAnew(this->func->m_fg->alloc, SList<IR::ByteCodeUsesInstr*>, this->func->m_fg->alloc);
+                    }
+                    // We fill in the instruction's symbol set later, so add it to the loop's list of such instructions.
+                    startingLoop->outwardSpeculationMaskInstrs->Prepend(masker);
+                }
+            }
+        } NEXT_PREDECESSOR_BLOCK;
+    }
+#endif
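The fence the Lowerer eventually emits for these blocks is architecture specific and not part of this diff, but the general shape of speculation masking is to make a loaded value data-dependent on the condition that guards it. A generic, hypothetical C++ illustration of that idea (not ChakraCore code):

```cpp
#include <cstdint>
#include <cstddef>

// Branchless index-and-value masking: when the bounds check fails, mask is
// all-zeroes, so the access reads index 0 and the result is forced to 0 -
// no attacker-chosen address is dereferenced, even under misspeculation,
// because the mask is computed as data rather than via a predicted branch.
uint64_t maskedLoad(const uint64_t* base, size_t index, size_t length)
{
    uint64_t mask = (uint64_t)0 - (uint64_t)(index < length);
    return base[index & mask] & mask;
}
```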
+
    EndIntOverflowDoesNotMatterRange();

    if (!this->IsPrePass() && !block->isDead && block->isLoopHeader)
@@ -4866,7 +5581,7 @@ BackwardPass::InsertTypeTransition(IR::Instr *instrInsertBefore, StackSym *objSym
    int newCount;
    Js::PropertyIndex inlineSlotCapacity;
    Js::PropertyIndex newInlineSlotCapacity;
-    bool needSlotAdjustment =
+    bool needSlotAdjustment =
        JITTypeHandler::NeedSlotAdjustment(initialType->GetTypeHandler(), finalType->GetTypeHandler(), &oldCount, &newCount, &inlineSlotCapacity, &newInlineSlotCapacity);
    if (needSlotAdjustment)
    {