diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp
index d9bce2e665759..db4f43d206c91 100644
--- a/src/coreclr/jit/block.cpp
+++ b/src/coreclr/jit/block.cpp
@@ -285,6 +285,25 @@ bool BasicBlock::IsFirstColdBlock(Compiler* compiler) const
     return (this == compiler->fgFirstColdBlock);
 }
 
+//------------------------------------------------------------------------
+// CanRemoveJumpToNext: determine if jump to the next block can be omitted
+//
+// Arguments:
+//    compiler - current compiler instance
+//
+// Returns:
+//    true if the peephole optimization is enabled,
+//    and block is a BBJ_ALWAYS to the next block that we can fall through into
+//
+bool BasicBlock::CanRemoveJumpToNext(Compiler* compiler)
+{
+    assert(KindIs(BBJ_ALWAYS));
+    const bool tryJumpOpt = compiler->opts.OptimizationEnabled() || ((bbFlags & BBF_NONE_QUIRK) != 0);
+    const bool skipJump = tryJumpOpt && JumpsToNext() && !hasAlign() && ((bbFlags & BBF_KEEP_BBJ_ALWAYS) == 0) &&
+                          !compiler->fgInDifferentRegions(this, bbJumpDest);
+    return skipJump;
+}
+
 //------------------------------------------------------------------------
 // checkPredListOrder: see if pred list is properly ordered
 //
@@ -719,10 +738,6 @@ void BasicBlock::dspJumpKind()
             printf(" (return)");
             break;
 
-        case BBJ_NONE:
-            // For fall-through blocks, print nothing.
-            break;
-
         case BBJ_ALWAYS:
             if (bbFlags & BBF_KEEP_BBJ_ALWAYS)
             {
@@ -981,7 +996,7 @@ BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler) const
 
 //------------------------------------------------------------------------
 // GetUniqueSucc: Returns the unique successor of a block, if one exists.
-// Only considers BBJ_ALWAYS and BBJ_NONE block types.
+// Only considers BBJ_ALWAYS block types.
 //
 // Arguments:
 //    None.
@@ -991,18 +1006,7 @@ BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler) const
 //
 BasicBlock* BasicBlock::GetUniqueSucc() const
 {
-    if (bbJumpKind == BBJ_ALWAYS)
-    {
-        return bbJumpDest;
-    }
-    else if (bbJumpKind == BBJ_NONE)
-    {
-        return bbNext;
-    }
-    else
-    {
-        return nullptr;
-    }
+    return KindIs(BBJ_ALWAYS) ? bbJumpDest : nullptr;
 }
 
 // Static vars.
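For readers following along outside the JIT tree, the shape of the new CanRemoveJumpToNext predicate can be modeled in isolation. The sketch below is a minimal, compilable stand-in (the Block struct and flag bit positions are illustrative assumptions, not the JIT's real types):

    #include <cassert>
    #include <cstdint>

    enum BBKind { BBJ_ALWAYS, BBJ_COND, BBJ_RETURN };
    constexpr uint64_t BBF_NONE_QUIRK      = 1ull << 43;
    constexpr uint64_t BBF_KEEP_BBJ_ALWAYS = 1ull << 8; // illustrative bit position

    struct Block
    {
        BBKind   kind;
        Block*   next;   // lexical successor
        Block*   target; // jump destination
        uint64_t flags;
        bool     hasAlign;
    };

    // Mirrors the logic above: a BBJ_ALWAYS may drop its jump only when it targets
    // its lexical successor and nothing (alignment, EH pairing, hot/cold splitting)
    // requires the branch instruction to physically exist.
    bool canRemoveJumpToNext(const Block* b, bool optimizationsEnabled, bool inDifferentRegions)
    {
        assert(b->kind == BBJ_ALWAYS);
        const bool tryJumpOpt = optimizationsEnabled || ((b->flags & BBF_NONE_QUIRK) != 0);
        return tryJumpOpt && (b->target == b->next) && !b->hasAlign &&
               ((b->flags & BBF_KEEP_BBJ_ALWAYS) == 0) && !inDifferentRegions;
    }

    int main()
    {
        Block b2{BBJ_RETURN, nullptr, nullptr, 0, false};
        Block b1{BBJ_ALWAYS, &b2, &b2, BBF_NONE_QUIRK, false};
        assert(canRemoveJumpToNext(&b1, /* opts */ false, /* regions */ false));
        return 0;
    }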
@@ -1120,7 +1124,6 @@ bool BasicBlock::bbFallsThrough() const
         case BBJ_SWITCH:
             return false;
 
-        case BBJ_NONE:
         case BBJ_COND:
             return true;
 
@@ -1156,7 +1159,6 @@ unsigned BasicBlock::NumSucc() const
         case BBJ_EHCATCHRET:
         case BBJ_EHFILTERRET:
         case BBJ_LEAVE:
-        case BBJ_NONE:
             return 1;
 
         case BBJ_COND:
@@ -1215,9 +1217,6 @@ BasicBlock* BasicBlock::GetSucc(unsigned i) const
         case BBJ_LEAVE:
             return bbJumpDest;
 
-        case BBJ_NONE:
-            return bbNext;
-
         case BBJ_COND:
             if (i == 0)
             {
@@ -1283,7 +1282,6 @@ unsigned BasicBlock::NumSucc(Compiler* comp)
         case BBJ_EHCATCHRET:
         case BBJ_EHFILTERRET:
         case BBJ_LEAVE:
-        case BBJ_NONE:
             return 1;
 
         case BBJ_COND:
@@ -1340,9 +1338,6 @@ BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp)
         case BBJ_LEAVE:
             return bbJumpDest;
 
-        case BBJ_NONE:
-            return bbNext;
-
         case BBJ_COND:
             if (i == 0)
             {
@@ -1619,7 +1614,7 @@ BasicBlock* BasicBlock::New(Compiler* compiler, BBjumpKinds jumpKind, BasicBlock
     block->bbJumpKind = jumpKind;
     block->bbJumpDest = jumpDest;
 
-    if (jumpKind == BBJ_THROW)
+    if (block->KindIs(BBJ_THROW))
     {
         block->bbSetRunRarely();
     }
diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h
index 805dbe4a30ca7..aab4f8f77ac31 100644
--- a/src/coreclr/jit/block.h
+++ b/src/coreclr/jit/block.h
@@ -65,7 +65,6 @@ enum BBjumpKinds : BYTE
     BBJ_EHCATCHRET,  // block ends with a leave out of a catch (only #if defined(FEATURE_EH_FUNCLETS))
     BBJ_THROW,       // block ends with 'throw'
     BBJ_RETURN,      // block ends with 'ret'
-    BBJ_NONE,        // block flows into the next one (no jump)
     BBJ_ALWAYS,      // block always jumps to the target
     BBJ_LEAVE,       // block always jumps to the target, maybe out of guarded region. Only used until importing.
     BBJ_CALLFINALLY, // block always calls the target finally
@@ -83,7 +82,6 @@ const char* const BBjumpKindNames[] = {
     "BBJ_EHCATCHRET",
     "BBJ_THROW",
     "BBJ_RETURN",
-    "BBJ_NONE",
     "BBJ_ALWAYS",
     "BBJ_LEAVE",
     "BBJ_CALLFINALLY",
@@ -430,6 +428,10 @@ enum BasicBlockFlags : unsigned __int64
     BBF_RECURSIVE_TAILCALL             = MAKE_BBFLAG(40), // Block has recursive tailcall that may turn into a loop
     BBF_NO_CSE_IN                      = MAKE_BBFLAG(41), // Block should kill off any incoming CSE
     BBF_CAN_ADD_PRED                   = MAKE_BBFLAG(42), // Ok to add pred edge to this block, even when "safe" edge creation disabled
+    BBF_NONE_QUIRK                     = MAKE_BBFLAG(43), // Block was created as a BBJ_ALWAYS to the next block,
+                                                          // and should be treated as if it falls through.
+                                                          // This is just to reduce diffs from removing BBJ_NONE.
+                                                          // (TODO: Remove this quirk after refactoring Compiler::fgFindInsertPoint)
 
     // The following are sets of flags.
 
@@ -459,7 +461,7 @@ enum BasicBlockFlags : unsigned __int64
     // TODO: Should BBF_RUN_RARELY be added to BBF_SPLIT_GAINED ?
 
     BBF_SPLIT_GAINED = BBF_DONT_REMOVE | BBF_HAS_JMP | BBF_BACKWARD_JUMP | BBF_HAS_IDX_LEN | BBF_HAS_MD_IDX_LEN | BBF_PROF_WEIGHT | \
-                       BBF_HAS_NEWOBJ | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END | BBF_HAS_NULLCHECK | BBF_HAS_HISTOGRAM_PROFILE | BBF_HAS_MDARRAYREF | BBF_NEEDS_GCPOLL,
+                       BBF_HAS_NEWOBJ | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END | BBF_HAS_NULLCHECK | BBF_HAS_HISTOGRAM_PROFILE | BBF_HAS_MDARRAYREF | BBF_NEEDS_GCPOLL | BBF_NONE_QUIRK,
 
     // Flags that must be propagated to a new block if code is copied from a block to a new block. These are flags that
 // limit processing of a block if the code in question doesn't exist. This is conservative; we might not
@@ -599,6 +601,8 @@ struct BasicBlock : private LIR::Range
 
     bool IsFirstColdBlock(Compiler* compiler) const;
 
+    bool CanRemoveJumpToNext(Compiler* compiler);
+
     unsigned GetJumpOffs() const
     {
         return bbJumpOffs;
@@ -729,7 +733,7 @@ struct BasicBlock : private LIR::Range
     unsigned dspPreds();                 // Print the predecessors (bbPreds)
     void dspSuccs(Compiler* compiler);   // Print the successors. The 'compiler' argument determines whether EH
                                          // regions are printed: see NumSucc() for details.
-    void dspJumpKind();                  // Print the block jump kind (e.g., BBJ_NONE, BBJ_COND, etc.).
+    void dspJumpKind();                  // Print the block jump kind (e.g., BBJ_ALWAYS, BBJ_COND, etc.).
 
     // Print a simple basic block header for various output, including a list of predecessors and successors.
     void dspBlockHeader(Compiler* compiler, bool showKind = true, bool showFlags = false, bool showPreds = true);
@@ -1771,12 +1775,6 @@ inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)
             m_end   = &m_succs[1];
             break;
 
-        case BBJ_NONE:
-            m_succs[0] = block->bbNext;
-            m_begin    = &m_succs[0];
-            m_end      = &m_succs[1];
-            break;
-
         case BBJ_COND:
             m_succs[0] = block->bbNext;
             m_begin    = &m_succs[0];
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index e617e57651e8f..ed9ab2791455b 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -379,6 +379,15 @@ void CodeGen::genMarkLabelsForCodegen()
         switch (block->GetJumpKind())
         {
             case BBJ_ALWAYS: // This will also handle the BBJ_ALWAYS of a BBJ_CALLFINALLY/BBJ_ALWAYS pair.
+            {
+                // If we can skip this jump, don't create a label for the target
+                if (block->CanRemoveJumpToNext(compiler))
+                {
+                    break;
+                }
+
+                FALLTHROUGH;
+            }
             case BBJ_COND:
             case BBJ_EHCATCHRET:
                 JITDUMP("  " FMT_BB " : branch target\n", block->GetJumpDest()->bbNum);
@@ -422,7 +431,6 @@ void CodeGen::genMarkLabelsForCodegen()
             case BBJ_EHFILTERRET:
             case BBJ_RETURN:
             case BBJ_THROW:
-            case BBJ_NONE:
                 break;
 
             default:
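The label-marking change above follows directly from the peephole: if a jump will not be emitted, its target needs no label. A compilable model of that decision, under the assumption that removability is already known per block (simplified stand-in types, not the emitter's real API):

    #include <cassert>
    #include <set>

    enum Kind { ALWAYS, COND, RETURN_ };
    struct Blk { Kind kind; Blk* next; Blk* target; bool removableJump; };

    // Only predecessors that will actually emit a branch force a label on the target.
    void markLabels(Blk* first, std::set<Blk*>& labels)
    {
        for (Blk* b = first; b != nullptr; b = b->next)
        {
            if ((b->kind == ALWAYS) && b->removableJump)
                continue; // jump will be skipped: contributes no label
            if ((b->kind == ALWAYS || b->kind == COND) && (b->target != nullptr))
                labels.insert(b->target);
        }
    }

    int main()
    {
        Blk b2{RETURN_, nullptr, nullptr, false};
        Blk b1{ALWAYS, &b2, &b2, true}; // jump-to-next, removable
        std::set<Blk*> labels;
        markLabels(&b1, labels);
        assert(labels.empty()); // no label was needed for b2
        return 0;
    }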
diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp
index 2c82990d2bb34..56e62fcbba34e 100644
--- a/src/coreclr/jit/codegenlinear.cpp
+++ b/src/coreclr/jit/codegenlinear.cpp
@@ -599,6 +599,7 @@ void CodeGen::genCodeForBBlist()
         noway_assert(genStackLevel == 0);
 
 #ifdef TARGET_AMD64
+        bool emitNop = false;
         // On AMD64, we need to generate a NOP after a call that is the last instruction of the block, in several
         // situations, to support proper exception handling semantics. This is mostly to ensure that when the stack
         // walker computes an instruction pointer for a frame, that instruction pointer is in the correct EH region.
@@ -621,6 +622,11 @@ void CodeGen::genCodeForBBlist()
             switch (block->GetJumpKind())
             {
                 case BBJ_ALWAYS:
+                    // We might skip generating the jump via a peephole optimization.
+                    // If that happens, make sure a NOP is emitted as the last instruction in the block.
+                    emitNop = true;
+                    break;
+
                 case BBJ_THROW:
                 case BBJ_CALLFINALLY:
                 case BBJ_EHCATCHRET:
@@ -631,20 +637,6 @@ void CodeGen::genCodeForBBlist()
                 case BBJ_EHFAULTRET:
                 case BBJ_EHFILTERRET:
                     // These are the "epilog follows" case, handled in the emitter.
-
-                    break;
-
-                case BBJ_NONE:
-                    if (block->IsLast())
-                    {
-                        // Call immediately before the end of the code; we should never get here.
-                        instGen(INS_BREAKPOINT); // This should never get executed
-                    }
-                    else
-                    {
-                        // We need the NOP
-                        instGen(INS_nop);
-                    }
                     break;
 
                 case BBJ_COND:
@@ -734,13 +726,26 @@ void CodeGen::genCodeForBBlist()
 
 #endif // !FEATURE_EH_FUNCLETS
 
-            case BBJ_NONE:
             case BBJ_SWITCH:
                 break;
 
             case BBJ_ALWAYS:
-#ifdef TARGET_XARCH
             {
+                // Peephole optimization: If this block jumps to the next one, skip emitting the jump
+                // (unless we are jumping between hot/cold sections, or if we need the jump for EH reasons)
+                // (Skip this if optimizations are disabled, unless the block shouldn't have a jump in the first place)
+                if (block->CanRemoveJumpToNext(compiler))
+                {
+#ifdef TARGET_AMD64
+                    if (emitNop)
+                    {
+                        instGen(INS_nop);
+                    }
+#endif // TARGET_AMD64
+
+                    break;
+                }
+#ifdef TARGET_XARCH
                 // If a block was selected to place an alignment instruction because it ended
                 // with a jump, do not remove jumps from such blocks.
                 // Do not remove a jump between hot and cold regions.
@@ -755,10 +760,10 @@ void CodeGen::genCodeForBBlist()
 #endif // TARGET_AMD64
 
                 inst_JMP(EJ_jmp, block->GetJumpDest(), isRemovableJmpCandidate);
-            }
 #else
                 inst_JMP(EJ_jmp, block->GetJumpDest());
 #endif // TARGET_XARCH
+            }
 
                 FALLTHROUGH;
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index c04f19eaad0a0..694b0fdc9aff1 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -5279,8 +5279,9 @@ PhaseStatus Compiler::placeLoopAlignInstructions()
             }
         }
 
-        // If there is an unconditional jump (which is not part of callf/always pair)
-        if (opts.compJitHideAlignBehindJmp && block->KindIs(BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail())
+        // If there is an unconditional jump (which is not part of callf/always pair, and isn't to the next block)
+        if (opts.compJitHideAlignBehindJmp && block->KindIs(BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail() &&
+            ((block->bbFlags & BBF_NONE_QUIRK) == 0))
         {
             // Track the lower weight blocks
             if (block->bbWeight < minBlockSoFar)
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index d3eabe34466c4..ac3d76cacc3b6 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -5903,7 +5903,10 @@ class Compiler
 
     bool fgIsThrow(GenTree* tree);
 
+public:
     bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2);
+
+private:
     bool fgIsBlockCold(BasicBlock* block);
 
     GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper);
@@ -6483,7 +6486,7 @@ class Compiler
     {
         if (lpHead->NextIs(lpEntry))
         {
-            assert(lpHead->bbFallsThrough());
+            assert(lpHead->bbFallsThrough() || lpHead->JumpsToNext());
             assert(lpTop == lpEntry);
             return true;
         }
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 26f4392326a0d..563574c084aa8 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -622,10 +622,6 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func)
 
             return BasicBlockVisit::Continue;
 
-        case BBJ_NONE:
-            RETURN_ON_ABORT(func(bbNext));
-            return VisitEHSuccs(comp, func);
-
         case BBJ_COND:
             RETURN_ON_ABORT(func(bbNext));
 
@@ -687,9 +683,6 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func)
         case BBJ_ALWAYS:
             return func(bbJumpDest);
 
-        case BBJ_NONE:
-            return func(bbNext);
-
         case BBJ_COND:
             RETURN_ON_ABORT(func(bbNext));
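With the BBJ_NONE cases gone from the compiler.hpp visitors above, every regular successor is explicit. A minimal sketch of the post-change enumeration (illustrative types only, not the JIT's BasicBlockVisit machinery):

    #include <vector>

    enum Kind { ALWAYS, COND, SWITCH_ };
    struct Blk
    {
        Kind              kind;
        Blk*              next;   // lexical successor (fall-through of BBJ_COND)
        Blk*              target; // explicit jump destination
        std::vector<Blk*> switchTargets;
    };

    // BBJ_ALWAYS contributes its target, BBJ_COND its fall-through plus its target,
    // BBJ_SWITCH its table; there is no "implicit bbNext" case left to enumerate.
    template <typename F>
    void visitRegularSuccs(Blk* b, F func)
    {
        switch (b->kind)
        {
            case ALWAYS:
                func(b->target);
                break;
            case COND:
                func(b->next);
                func(b->target);
                break;
            case SWITCH_:
                for (Blk* t : b->switchTargets)
                    func(t);
                break;
        }
    }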
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index a1875ee4ab2bd..f4afeb1ebffe3 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -201,9 +201,9 @@ void Compiler::fgInit()
 // that they do not mess up the order of things added to this block and
 // inadvertently change semantics.
 //
-// We maintain the invariant that a scratch BB ends with BBJ_NONE or
-// BBJ_ALWAYS, so that when adding independent bits of initialization,
-// callers can generally append to the fgFirstBB block without worring
+// We maintain the invariant that a scratch BB ends with BBJ_ALWAYS,
+// so that when adding independent bits of initialization,
+// callers can generally append to the fgFirstBB block without worrying
 // about what code is there already.
 //
 // Can be called at any time, and can be called multiple times.
@@ -218,7 +218,8 @@ bool Compiler::fgEnsureFirstBBisScratch()
 
     assert(fgFirstBBScratch == nullptr);
 
-    BasicBlock* block = BasicBlock::New(this, BBJ_NONE);
+    BasicBlock* block = BasicBlock::New(this, BBJ_ALWAYS, fgFirstBB);
+    block->bbFlags |= BBF_NONE_QUIRK;
 
     if (fgFirstBB != nullptr)
     {
@@ -291,7 +292,7 @@ bool Compiler::fgFirstBBisScratch()
         // Normally, the first scratch block is a fall-through block. However, if the block after it was an empty
         // BBJ_ALWAYS block, it might get removed, and the code that removes it will make the first scratch block
         // a BBJ_ALWAYS block.
-        assert(fgFirstBBScratch->KindIs(BBJ_NONE, BBJ_ALWAYS));
+        assert(fgFirstBBScratch->KindIs(BBJ_ALWAYS));
 
         return true;
     }
@@ -2932,7 +2933,12 @@ void Compiler::fgLinkBasicBlocks()
             case BBJ_ALWAYS:
             case BBJ_LEAVE:
             {
-                BasicBlock* const jumpDest = fgLookupBB(curBBdesc->GetJumpOffs());
+                // Avoid fgLookupBB overhead for blocks that jump to next block
+                // (curBBdesc cannot be the last block if it jumps to the next block)
+                const bool jumpsToNext = (curBBdesc->GetJumpOffs() == curBBdesc->bbCodeOffsEnd);
+                assert(!(curBBdesc->IsLast() && jumpsToNext));
+                BasicBlock* const jumpDest = jumpsToNext ? curBBdesc->Next() : fgLookupBB(curBBdesc->GetJumpOffs());
+
                 // Redundantly use SetJumpKindAndTarget() instead of SetJumpDest() just this once,
                 // so we don't break the HasInitializedJumpDest() invariant of SetJumpDest().
                 curBBdesc->SetJumpKindAndTarget(curBBdesc->GetJumpKind(), jumpDest);
@@ -2943,7 +2949,7 @@ void Compiler::fgLinkBasicBlocks()
                     fgMarkBackwardJump(curBBdesc->GetJumpDest(), curBBdesc);
                 }
 
-                // Is the next block reachable?
+                // Can this block fall through into the next block?
                 //
                 if (curBBdesc->KindIs(BBJ_ALWAYS, BBJ_LEAVE))
                 {
@@ -2954,14 +2960,12 @@ void Compiler::fgLinkBasicBlocks()
                     {
                         BADCODE("Fall thru the end of a method");
                     }
-                }
-
-                // Fall through, the next block is also reachable
-                FALLTHROUGH;
 
-            case BBJ_NONE:
+                    // The fall-through block is also reachable
+                    assert(curBBdesc->KindIs(BBJ_COND));
                     fgAddRefPred(curBBdesc->Next(), curBBdesc, oldEdge);
                     break;
+                }
 
             case BBJ_EHFILTERRET:
                 // We can't set up the pred list for these just yet.
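The fgLinkBasicBlocks fast path above avoids a lookup when a branch's target IL offset equals the block's own end offset. A self-contained sketch of that check (ProtoBlock and the lookup callback are hypothetical names for illustration):

    #include <cassert>
    #include <cstdint>

    struct ProtoBlock
    {
        uint32_t    codeOffsEnd; // IL offset one past the block's last instruction
        uint32_t    jumpOffs;    // IL offset this block branches to
        ProtoBlock* next;
    };

    // If the branch lands exactly where the block ends, the target is simply the
    // next block in the list, so the offset-to-block lookup can be skipped.
    ProtoBlock* resolveTarget(ProtoBlock* b, ProtoBlock* (*lookup)(uint32_t))
    {
        const bool jumpsToNext = (b->jumpOffs == b->codeOffsEnd);
        assert(!(b->next == nullptr && jumpsToNext)); // can't branch past the last block
        return jumpsToNext ? b->next : lookup(b->jumpOffs);
    }

This matters because, with BBJ_NONE gone, every former fall-through block now carries an explicit jump offset, so the lookup would otherwise run far more often than before.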
*/ - if (jmpKind == BBJ_NONE) + if (jmpKind == BBJ_COUNT) { /* No jump; make sure we don't fall off the end of the function */ @@ -3471,8 +3475,15 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F { continue; } + + // Jump to the next block + jmpKind = BBJ_ALWAYS; + jmpAddr = nxtBBoffs; + bbFlags |= BBF_NONE_QUIRK; } + assert(jmpKind != BBJ_COUNT); + /* We need to create a new basic block */ switch (jmpKind) @@ -4181,8 +4192,8 @@ void Compiler::fgFixEntryFlowForOSR() // Now branch from method start to the OSR entry. // fgEnsureFirstBBisScratch(); - assert(fgFirstBB->KindIs(BBJ_NONE)); - fgRemoveRefPred(fgFirstBB->Next(), fgFirstBB); + assert(fgFirstBB->KindIs(BBJ_ALWAYS) && fgFirstBB->JumpsToNext()); + fgRemoveRefPred(fgFirstBB->GetJumpDest(), fgFirstBB); fgFirstBB->SetJumpKindAndTarget(BBJ_ALWAYS, fgOSREntryBB); FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB); edge->setLikelihood(1.0); @@ -4224,12 +4235,6 @@ void Compiler::fgCheckBasicBlockControlFlow() switch (blk->GetJumpKind()) { - case BBJ_NONE: // block flows into the next one (no jump) - - fgControlFlowPermitted(blk, blk->Next()); - - break; - case BBJ_ALWAYS: // block does unconditional jump to target fgControlFlowPermitted(blk, blk->GetJumpDest()); @@ -4786,8 +4791,10 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL); // Default to fallthru, and add the arc for that. - curr->SetJumpKindAndTarget(BBJ_NONE); + curr->bbFlags |= BBF_NONE_QUIRK; + curr->SetJumpKindAndTarget(BBJ_ALWAYS, newBlock); fgAddRefPred(newBlock, curr); + assert(curr->JumpsToNext()); return newBlock; } @@ -4877,6 +4884,10 @@ BasicBlock* Compiler::fgSplitBlockBeforeTree( block->bbFlags |= originalFlags & (BBF_SPLIT_GAINED | BBF_IMPORTED | BBF_GC_SAFE_POINT | BBF_LOOP_PREHEADER | BBF_RETLESS_CALL); + // prevBb should flow into block + assert(prevBb->KindIs(BBJ_ALWAYS) && prevBb->JumpsToNext() && prevBb->NextIs(block)); + prevBb->bbFlags |= BBF_NONE_QUIRK; + if (optLoopTableValid && prevBb->bbNatLoopNum != BasicBlock::NOT_IN_LOOP) { block->bbNatLoopNum = prevBb->bbNatLoopNum; @@ -5019,7 +5030,9 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) // an immediately following block of a BBJ_SWITCH (which has // no fall-through path). For this case, simply insert a new // fall-through block after 'curr'. - newBlock = fgNewBBafter(BBJ_NONE, curr, true /*extendRegion*/); + newBlock = fgNewBBafter(BBJ_ALWAYS, curr, true /* extendRegion */, /* jumpDest */ succ); + newBlock->bbFlags |= BBF_NONE_QUIRK; + assert(newBlock->JumpsToNext()); } else { @@ -5215,16 +5228,6 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // bPrev CALL becomes RETLESS as the BBJ_ALWAYS block is unreachable bPrev->bbFlags |= BBF_RETLESS_CALL; } - else if (bPrev->KindIs(BBJ_ALWAYS) && block->NextIs(bPrev->GetJumpDest()) && - !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && !block->IsFirstColdBlock(this) && - !block->IsLastHotBlock(this)) - { - // previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE. - // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), - // because that would violate our invariant that BBJ_CALLFINALLY blocks are followed by - // BBJ_ALWAYS blocks. 
@@ -5215,16 +5228,6 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
             // bPrev CALL becomes RETLESS as the BBJ_ALWAYS block is unreachable
             bPrev->bbFlags |= BBF_RETLESS_CALL;
         }
-        else if (bPrev->KindIs(BBJ_ALWAYS) && block->NextIs(bPrev->GetJumpDest()) &&
-                 !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && !block->IsFirstColdBlock(this) &&
-                 !block->IsLastHotBlock(this))
-        {
-            // previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE.
-            // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS),
-            // because that would violate our invariant that BBJ_CALLFINALLY blocks are followed by
-            // BBJ_ALWAYS blocks.
-            bPrev->SetJumpKindAndTarget(BBJ_NONE);
-        }
 
         // If this is the first Cold basic block update fgFirstColdBlock
         if (block->IsFirstColdBlock(this))
@@ -5281,44 +5284,15 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
         {
             printf("Removing empty " FMT_BB "\n", block->bbNum);
         }
-#endif // DEBUG
-#ifdef DEBUG
         /* Some extra checks for the empty case */
+        noway_assert(block->KindIs(BBJ_ALWAYS));
 
-        switch (block->GetJumpKind())
-        {
-            case BBJ_NONE:
-                break;
-
-            case BBJ_ALWAYS:
-                /* Do not remove a block that jumps to itself - used for while (true){} */
-                noway_assert(!block->HasJumpTo(block));
-
-                /* Empty GOTO can be removed iff bPrev is BBJ_NONE */
-                noway_assert(bPrev && bPrev->KindIs(BBJ_NONE));
-                break;
-
-            default:
-                noway_assert(!"Empty block of this type cannot be removed!");
-                break;
-        }
+        /* Do not remove a block that jumps to itself - used for while (true){} */
+        noway_assert(!block->HasJumpTo(block));
 #endif // DEBUG
 
-        noway_assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS));
-
-        /* Who is the "real" successor of this block? */
-
-        BasicBlock* succBlock;
-
-        if (block->KindIs(BBJ_ALWAYS))
-        {
-            succBlock = block->GetJumpDest();
-        }
-        else
-        {
-            succBlock = block->Next();
-        }
+        BasicBlock* succBlock = block->GetJumpDest();
 
         bool skipUnmarkLoop = false;
 
@@ -5362,10 +5336,6 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
 
             noway_assert(block == fgFirstBB);
 
-            /* Must be a fall through to next block */
-
-            noway_assert(block->KindIs(BBJ_NONE));
-
             /* old block no longer gets the extra ref count for being the first block */
             block->bbRefs--;
             succBlock->bbRefs++;
@@ -5409,18 +5379,6 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
                     noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()");
                     break;
 
-                case BBJ_NONE:
-                    noway_assert(predBlock == bPrev);
-                    PREFIX_ASSUME(bPrev != nullptr);
-
-                    /* In the case of BBJ_ALWAYS we have to change the type of its predecessor */
-                    if (block->KindIs(BBJ_ALWAYS))
-                    {
-                        /* bPrev now becomes a BBJ_ALWAYS */
-                        bPrev->SetJumpKindAndTarget(BBJ_ALWAYS, succBlock);
-                    }
-                    break;
-
                 case BBJ_COND:
                     /* The links for the direct predecessor case have already been updated above */
                     if (!predBlock->HasJumpTo(block))
                     {
@@ -5474,24 +5432,6 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable)
                 noway_assert(bPrev->bbFlags & BBF_RETLESS_CALL);
                 break;
 
-            case BBJ_ALWAYS:
-                // Check for branch to next block. Just make sure the BBJ_ALWAYS block is not
-                // part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. We do this here and don't rely on fgUpdateFlowGraph
-                // because we can be called by ComputeDominators and it expects it to remove this jump to
-                // the next block. This is the safest fix. We should remove all this BBJ_CALLFINALLY/BBJ_ALWAYS
-                // pairing.
-
-                if (bPrev->JumpsToNext() &&
-                    !fgInDifferentRegions(bPrev, bPrev->GetJumpDest())) // We don't remove a branch from Hot -> Cold
-                {
-                    if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail())
-                    {
-                        // It's safe to change the jump type
-                        bPrev->SetJumpKindAndTarget(BBJ_NONE);
-                    }
-                }
-                break;
-
             case BBJ_COND:
                 /* Check for branch to next block */
                 if (bPrev->JumpsToNext())
                 {
@@ -5534,12 +5474,6 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst)
     {
         switch (bSrc->GetJumpKind())
         {
-            case BBJ_NONE:
-                bSrc->SetJumpKindAndTarget(BBJ_ALWAYS, bDst);
-                JITDUMP("Block " FMT_BB " ended with a BBJ_NONE, Changed to an unconditional jump to " FMT_BB "\n",
-                        bSrc->bbNum, bSrc->GetJumpDest()->bbNum);
-                break;
-
             case BBJ_CALLFINALLY:
             case BBJ_COND:
 
@@ -5604,21 +5538,10 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst)
                     break;
                 }
             }
-            else
+            else if (bSrc->KindIs(BBJ_ALWAYS) && ((bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) == 0) &&
+                     bSrc->HasInitializedJumpDest() && bSrc->JumpsToNext())
             {
-                // If bSrc is an unconditional branch to the next block
-                // then change it to a BBJ_NONE block
-                // (It's possible for bSrc's jump destination to not be set yet,
-                // so check with BasicBlock::HasJump before calling BasicBlock::JumpsToNext)
-                //
-                if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && bSrc->HasInitializedJumpDest() &&
-                    bSrc->JumpsToNext())
-                {
-                    bSrc->SetJumpKindAndTarget(BBJ_NONE);
-                    JITDUMP("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB
-                            " into a BBJ_NONE block\n",
-                            bSrc->bbNum, bSrc->Next()->bbNum);
-                }
+                bSrc->bbFlags |= BBF_NONE_QUIRK;
             }
         }
 
@@ -6391,7 +6314,7 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt)
 {
     // bCur can't be NULL and must be a fall through bbJumpKind
     noway_assert(bCur != nullptr);
-    noway_assert(bCur->bbFallsThrough());
+    noway_assert(bCur->bbFallsThrough() || (bCur->KindIs(BBJ_ALWAYS) && bCur->JumpsToNext()));
     noway_assert(bAlt != nullptr);
 
     // We only handle the cases when bAlt is a BBJ_ALWAYS or a BBJ_COND
@@ -6406,6 +6329,12 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt)
         return false;
     }
 
+    // BBJ_CALLFINALLY shouldn't have a flow edge into the next (BBJ_ALWAYS) block
+    if (bCur->isBBCallAlwaysPair())
+    {
+        return false;
+    }
+
     // Currently bNext is the fall through for bCur
     BasicBlock* bNext = bCur->Next();
     noway_assert(bNext != nullptr);
@@ -6650,13 +6579,15 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex,
         // Look for an insert location:
         // 1. We want blocks that don't end with a fall through,
         // 2. Also, when blk equals nearBlk we may want to insert here.
-        if (!blk->bbFallsThrough() || (blk == nearBlk))
+        const bool blkJumpsToNext =
+            blk->KindIs(BBJ_ALWAYS) && ((blk->bbFlags & BBF_NONE_QUIRK) != 0) && blk->JumpsToNext();
+        if ((!blk->bbFallsThrough() && !blkJumpsToNext) || (blk == nearBlk))
         {
             bool updateBestBlk = true; // We will probably update the bestBlk
 
             // If blk falls through then we must decide whether to use the nearBlk
             // hint
-            if (blk->bbFallsThrough())
+            if (blk->bbFallsThrough() || blkJumpsToNext)
             {
                 noway_assert(blk == nearBlk);
                 if (jumpBlk != nullptr)
diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp
index 5465318ef4d3b..2cf8488f4f6f1 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -77,7 +77,7 @@ void Compiler::fgDebugCheckUpdate()
  *   no empty blocks        -> !block->isEmpty(), unless non-removable or multiple in-edges
  *   no un-imported blocks  -> no blocks have BBF_IMPORTED not set (this is
  *                             kind of redundand with the above, but to make sure)
- *   no un-compacted blocks -> BBJ_NONE followed by block with no jumps to it (countOfInEdges() = 1)
+ *   no un-compacted blocks -> BBJ_ALWAYS with jump to block with no other jumps to it (countOfInEdges() = 1)
  */
 
 BasicBlock* prev;
@@ -140,33 +140,9 @@ void Compiler::fgDebugCheckUpdate()
         if (block->KindIs(BBJ_COND))
         {
             // A conditional branch should never jump to the next block
-            // as it can be folded into a BBJ_NONE;
+            // as it can be folded into a BBJ_ALWAYS;
             doAssertOnJumpToNextBlock = true;
         }
-        else if (block->KindIs(BBJ_ALWAYS))
-        {
-            // Generally we will want to assert if a BBJ_ALWAYS branches to the next block
-            doAssertOnJumpToNextBlock = true;
-
-            // If the BBF_KEEP_BBJ_ALWAYS flag is set we allow it to jump to the next block
-            if (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)
-            {
-                doAssertOnJumpToNextBlock = false;
-            }
-
-            // A call/always pair is also allowed to jump to the next block
-            if (prevIsCallAlwaysPair)
-            {
-                doAssertOnJumpToNextBlock = false;
-            }
-
-            // We are allowed to have a branch from a hot 'block' to a cold 'bbNext'
-            //
-            if (!block->IsLast() && fgInDifferentRegions(block, block->Next()))
-            {
-                doAssertOnJumpToNextBlock = false;
-            }
-        }
 
         if (doAssertOnJumpToNextBlock)
         {
@@ -2777,12 +2753,8 @@ bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block)
             assert(blockPred->NextIs(block) || blockPred->HasJumpTo(block));
             return true;
 
-        case BBJ_NONE:
-            assert(blockPred->NextIs(block));
-            return true;
-
-        case BBJ_CALLFINALLY:
         case BBJ_ALWAYS:
+        case BBJ_CALLFINALLY:
         case BBJ_EHCATCHRET:
         case BBJ_EHFILTERRET:
             assert(blockPred->HasJumpTo(block));
@@ -5018,19 +4990,10 @@ void Compiler::fgDebugCheckLoopTable()
             BasicBlock* h = loop.lpHead;
             assert(h->bbFlags & BBF_LOOP_PREHEADER);
 
-            // The pre-header can only be BBJ_ALWAYS or BBJ_NONE and must enter the loop.
+            // The pre-header can only be BBJ_ALWAYS and must enter the loop.
             BasicBlock* e = loop.lpEntry;
-            if (h->KindIs(BBJ_ALWAYS))
-            {
-                assert(h->HasJumpTo(e));
-            }
-            else
-            {
-                assert(h->KindIs(BBJ_NONE));
-                assert(h->NextIs(e));
-                assert(loop.lpTop == e);
-                assert(loop.lpIsTopEntry());
-            }
+            assert(h->KindIs(BBJ_ALWAYS));
+            assert(h->HasJumpTo(e));
 
             // The entry block has a single non-loop predecessor, and it is the pre-header.
             for (BasicBlock* const predBlock : e->PredBlocks())
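The tightened loop-table check above is a good illustration of how invariants simplify once fall-through is explicit. A sketch of the post-change invariant, using stand-in types (not the JIT's LoopDsc):

    #include <cassert>

    enum Kind { ALWAYS, COND };
    struct Blk { Kind kind; Blk* next; Blk* target; };

    // With BBJ_NONE gone, a loop pre-header is always a BBJ_ALWAYS whose explicit
    // target is the loop entry; the old "falls into the entry" shape cannot occur.
    void checkPreheader(const Blk* head, const Blk* entry)
    {
        assert(head->kind == ALWAYS);
        assert(head->target == entry); // the target may also equal head->next, but it must be explicit
    }

    int main()
    {
        Blk entry{COND, nullptr, nullptr};
        Blk head{ALWAYS, &entry, &entry};
        checkPreheader(&head, &entry);
        return 0;
    }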
diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp
index f6638f4d4728f..684c26e6f588d 100644
--- a/src/coreclr/jit/fgehopt.cpp
+++ b/src/coreclr/jit/fgehopt.cpp
@@ -808,11 +808,7 @@ PhaseStatus Compiler::fgCloneFinally()
             // through to a callfinally.
             BasicBlock* jumpDest = nullptr;
 
-            if (block->KindIs(BBJ_NONE) && (block == lastTryBlock))
-            {
-                jumpDest = block->Next();
-            }
-            else if (block->KindIs(BBJ_ALWAYS))
+            if (block->KindIs(BBJ_ALWAYS))
             {
                 jumpDest = block->GetJumpDest();
             }
@@ -1031,7 +1027,7 @@ PhaseStatus Compiler::fgCloneFinally()
                 // callfinallys, depending on the EH implementation.
                 const unsigned    hndIndex = 0;
                 BasicBlock* const nearBlk  = cloneInsertAfter;
-                newBlock = fgNewBBinRegion(BBJ_NONE, finallyTryIndex, hndIndex, nearBlk);
+                newBlock = fgNewBBinRegion(BBJ_ALWAYS, finallyTryIndex, hndIndex, nearBlk);
 
                 // If the clone ends up just after the finally, adjust
                 // the stopping point for finally traversal.
@@ -1045,7 +1041,7 @@ PhaseStatus Compiler::fgCloneFinally()
             {
                 // Put subsequent blocks in the same region...
                 const bool extendRegion = true;
-                newBlock = fgNewBBafter(BBJ_NONE, insertAfter, extendRegion);
+                newBlock = fgNewBBafter(BBJ_ALWAYS, insertAfter, extendRegion);
             }
 
             cloneBBCount++;
@@ -1081,7 +1077,8 @@ PhaseStatus Compiler::fgCloneFinally()
                 newBlock->clearHndIndex();
 
                 // Jump dests are set in a post-pass; make sure CloneBlockState hasn't tried to set them.
-                assert(newBlock->KindIs(BBJ_NONE));
+                assert(newBlock->KindIs(BBJ_ALWAYS));
+                assert(!newBlock->HasInitializedJumpDest());
             }
 
             if (!clonedOk)
@@ -1104,7 +1101,8 @@ PhaseStatus Compiler::fgCloneFinally()
         {
             BasicBlock* newBlock = blockMap[block];
             // Jump kind/target should not be set yet
-            assert(newBlock->KindIs(BBJ_NONE));
+            assert(newBlock->KindIs(BBJ_ALWAYS));
+            assert(!newBlock->HasInitializedJumpDest());
 
             if (block->KindIs(BBJ_EHFINALLYRET))
             {
@@ -1435,19 +1433,7 @@ void Compiler::fgDebugCheckTryFinallyExits()
                     }
                 }
             }
-            else if (succBlock->KindIs(BBJ_NONE))
-            {
-                if (succBlock->isEmpty())
-                {
-                    BasicBlock* const succSuccBlock = succBlock->Next();
-
-                    // case (d)
-                    if (succSuccBlock->bbFlags & BBF_CLONED_FINALLY_BEGIN)
-                    {
-                        isJumpToClonedFinally = true;
-                    }
-                }
-            }
+            // ~~case (d) via a fallthrough to an empty block to (b)~~ [no longer possible]
 
             bool isReturnFromFinally = false;
 
@@ -2061,13 +2047,6 @@ PhaseStatus Compiler::fgTailMergeThrows()
 
         switch (predBlock->GetJumpKind())
         {
-            case BBJ_NONE:
-            {
-                fgTailMergeThrowsFallThroughHelper(predBlock, nonCanonicalBlock, canonicalBlock, predEdge);
-                updated = true;
-            }
-            break;
-
             case BBJ_ALWAYS:
             {
                 fgTailMergeThrowsJumpToHelper(predBlock, nonCanonicalBlock, canonicalBlock, predEdge);
diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp
index 12cb0e87dc709..fa4bc29feea9f 100644
--- a/src/coreclr/jit/fgflow.cpp
+++ b/src/coreclr/jit/fgflow.cpp
@@ -355,10 +355,6 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block)
             fgRemoveRefPred(block->GetJumpDest(), block);
             break;
 
-        case BBJ_NONE:
-            fgRemoveRefPred(block->Next(), block);
-            break;
-
         case BBJ_COND:
             fgRemoveRefPred(block->GetJumpDest(), block);
             fgRemoveRefPred(block->Next(), block);
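The fgflow.cpp hunk above shrinks pred-list maintenance to explicit edges only. A compilable model of the bookkeeping it performs (a plain ref-count map stands in for the JIT's FlowEdge lists):

    #include <map>

    enum Kind { ALWAYS, COND };
    struct Blk { Kind kind; Blk* next; Blk* target; };

    // BBJ_ALWAYS removes one incoming edge (its target); BBJ_COND removes two
    // (its target and its fall-through). No implicit BBJ_NONE edge remains.
    void removeBlockAsPred(Blk* b, std::map<Blk*, int>& refCounts)
    {
        switch (b->kind)
        {
            case ALWAYS:
                --refCounts[b->target];
                break;
            case COND:
                --refCounts[b->target];
                --refCounts[b->next];
                break;
        }
    }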
diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp
index a27bf4a17eabe..8910902b17361 100644
--- a/src/coreclr/jit/fginline.cpp
+++ b/src/coreclr/jit/fginline.cpp
@@ -673,15 +673,16 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitor<SubstitutePlaceholdersAndDevirtualizeWalker>
                 condTree->gtBashToNOP();
                 m_madeChanges = true;
 
-                if (!condTree->IsIntegralConst(0))
+                if (condTree->IsIntegralConst(0))
                 {
-                    block->SetJumpKind(BBJ_ALWAYS);
-                    m_compiler->fgRemoveRefPred(block->Next(), block);
+                    m_compiler->fgRemoveRefPred(block->GetJumpDest(), block);
+                    block->SetJumpKindAndTarget(BBJ_ALWAYS, block->Next());
+                    block->bbFlags |= BBF_NONE_QUIRK;
                 }
                 else
                 {
-                    m_compiler->fgRemoveRefPred(block->GetJumpDest(), block);
-                    block->SetJumpKindAndTarget(BBJ_NONE);
+                    block->SetJumpKind(BBJ_ALWAYS);
+                    m_compiler->fgRemoveRefPred(block->Next(), block);
                 }
             }
         }
@@ -1529,19 +1530,16 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
         if (block->KindIs(BBJ_RETURN))
         {
             noway_assert((block->bbFlags & BBF_HAS_JMP) == 0);
-            if (block->IsLast())
-            {
-                JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum);
-                block->SetJumpKindAndTarget(BBJ_NONE);
-            }
-            else
-            {
-                JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n",
-                        block->bbNum, bottomBlock->bbNum);
-                block->SetJumpKindAndTarget(BBJ_ALWAYS, bottomBlock);
-            }
+            JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum,
+                    bottomBlock->bbNum);
+            block->SetJumpKindAndTarget(BBJ_ALWAYS, bottomBlock);
             fgAddRefPred(bottomBlock, block);
+
+            if (block == InlineeCompiler->fgLastBB)
+            {
+                block->bbFlags |= BBF_NONE_QUIRK;
+            }
         }
     }
 
@@ -1550,7 +1548,11 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
     InlineeCompiler->fgFirstBB->bbRefs--;
 
     // Insert inlinee's blocks into inliner's block list.
+    assert(topBlock->KindIs(BBJ_ALWAYS));
+    assert(topBlock->HasJumpTo(bottomBlock));
     topBlock->SetNext(InlineeCompiler->fgFirstBB);
+    topBlock->SetJumpDest(topBlock->Next());
+    topBlock->bbFlags |= BBF_NONE_QUIRK;
     fgRemoveRefPred(bottomBlock, topBlock);
     fgAddRefPred(InlineeCompiler->fgFirstBB, topBlock);
     InlineeCompiler->fgLastBB->SetNext(bottomBlock);
diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp
index b5757ce3f291f..676456ab54b7b 100644
--- a/src/coreclr/jit/fgopt.cpp
+++ b/src/coreclr/jit/fgopt.cpp
@@ -132,19 +132,16 @@ bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2)
 
         if (b1->bbNum > fgDomBBcount)
         {
-            noway_assert(b1->KindIs(BBJ_NONE, BBJ_ALWAYS, BBJ_COND));
+            noway_assert(b1->KindIs(BBJ_ALWAYS, BBJ_COND));
 
-            if (b1->KindIs(BBJ_NONE, BBJ_COND) && fgReachable(b1->Next(), b2))
+            if (b1->KindIs(BBJ_COND))
             {
-                return true;
+                return fgReachable(b1->Next(), b2) || fgReachable(b1->GetJumpDest(), b2);
             }
-
-            if (b1->KindIs(BBJ_ALWAYS, BBJ_COND) && fgReachable(b1->GetJumpDest(), b2))
+            else
             {
-                return true;
+                return fgReachable(b1->GetJumpDest(), b2);
             }
-
-            return false;
         }
 
         /* Check if b1 can reach b2 */
@@ -473,7 +470,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock)
                 // change the kind to something else. Otherwise, we can hit asserts below in fgRemoveBlock that
                 // the leaveBlk BBJ_ALWAYS is not allowed to be a CallAlwaysPairTail.
                 assert(block->KindIs(BBJ_CALLFINALLY));
-                block->SetJumpKind(BBJ_NONE);
+                block->SetJumpKindAndTarget(BBJ_ALWAYS, block->Next());
             }
 
             leaveBlk->bbFlags &= ~BBF_DONT_REMOVE;
@@ -1605,8 +1602,8 @@ PhaseStatus Compiler::fgPostImportationCleanup()
                     // What follows is similar to fgNewBBInRegion, but we can't call that
                     // here as the oldTryEntry is no longer in the main bb list.
-                    newTryEntry = BasicBlock::New(this, BBJ_NONE);
-                    newTryEntry->bbFlags |= (BBF_IMPORTED | BBF_INTERNAL);
+                    newTryEntry = BasicBlock::New(this, BBJ_ALWAYS, tryEntryPrev->Next());
+                    newTryEntry->bbFlags |= (BBF_IMPORTED | BBF_INTERNAL | BBF_NONE_QUIRK);
                     newTryEntry->bbRefs = 0;
 
                     // Set the right EH region indices on this new block.
@@ -1890,9 +1887,9 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext)
         return false;
     }
 
-    noway_assert(block->NextIs(bNext));
+    assert(block->NextIs(bNext));
 
-    if (!block->KindIs(BBJ_NONE))
+    if (!block->KindIs(BBJ_ALWAYS) || !block->HasJumpTo(bNext) || ((block->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0))
     {
         return false;
     }
@@ -1996,11 +1993,15 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
     noway_assert(bNext != nullptr);
     noway_assert((block->bbFlags & BBF_REMOVED) == 0);
     noway_assert((bNext->bbFlags & BBF_REMOVED) == 0);
-    noway_assert(block->KindIs(BBJ_NONE));
     noway_assert(block->NextIs(bNext));
     noway_assert(bNext->countOfInEdges() == 1 || block->isEmpty());
     noway_assert(bNext->bbPreds != nullptr);
 
+    assert(block->KindIs(BBJ_ALWAYS));
+    assert(block->HasJumpTo(bNext));
+    assert(!block->isBBCallAlwaysPairTail());
+    assert(!fgInDifferentRegions(block, bNext));
+
     // Make sure the second block is not the start of a TRY block or an exception handler
 
     noway_assert(!bbIsTryBeg(bNext));
@@ -2299,8 +2300,13 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
 
             FALLTHROUGH;
 
-        case BBJ_COND:
         case BBJ_ALWAYS:
+            // Propagate BBF_NONE_QUIRK flag
+            block->bbFlags |= (bNext->bbFlags & BBF_NONE_QUIRK);
+
+            FALLTHROUGH;
+
+        case BBJ_COND:
         case BBJ_EHCATCHRET:
         case BBJ_EHFILTERRET:
             block->SetJumpKindAndTarget(bNext->GetJumpKind(), bNext->GetJumpDest());
@@ -2315,12 +2321,6 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext)
             }
             break;
 
-        case BBJ_NONE:
-            block->SetJumpKindAndTarget(BBJ_NONE);
-            /* Update the predecessor list for 'bNext->bbNext' */
-            fgReplacePred(bNext->Next(), bNext, block);
-            break;
-
         case BBJ_EHFINALLYRET:
             block->SetJumpKindAndTarget(bNext->GetJumpKind(), bNext->GetJumpEhf());
             fgChangeEhfBlock(bNext, block);
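The new fgCanCompactBlocks gate above re-spells "block falls into bNext" as "block is a BBJ_ALWAYS whose target is bNext". A minimal model of the predicate, under simplified stand-in types and with the EH and region checks collapsed into a flag:

    #include <cstdint>

    enum Kind { ALWAYS, COND };
    struct Blk
    {
        Kind     kind;
        Blk*     next;
        Blk*     target;
        uint64_t flags;
        int      refs; // number of incoming flow edges
    };
    constexpr uint64_t KEEP_BBJ_ALWAYS = 1u << 0; // stand-in flag

    // Compaction requires an explicit jump-to-next that is not pinned by EH
    // pairing, and a successor with no other predecessors.
    bool canCompact(const Blk* block, const Blk* bNext)
    {
        if ((block->kind != ALWAYS) || (block->target != bNext) ||
            ((block->flags & KEEP_BBJ_ALWAYS) != 0))
        {
            return false;
        }
        return bNext->refs == 1;
    }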
@@ -2571,15 +2571,16 @@ void Compiler::fgRemoveConditionalJump(BasicBlock* block)
     FlowEdge* flow = fgGetPredForBlock(block->Next(), block);
     noway_assert(flow->getDupCount() == 2);
 
-    // Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount.
-    block->SetJumpKindAndTarget(BBJ_NONE);
+    // Change the BBJ_COND to BBJ_ALWAYS, and adjust the refCount and dupCount.
+    block->SetJumpKind(BBJ_ALWAYS);
+    block->bbFlags |= BBF_NONE_QUIRK;
     --block->Next()->bbRefs;
     flow->decrementDupCount();
 
 #ifdef DEBUG
     if (verbose)
     {
-        printf("Block " FMT_BB " becoming a BBJ_NONE to " FMT_BB " (jump target is the same whether the condition"
+        printf("Block " FMT_BB " becoming a BBJ_ALWAYS to " FMT_BB " (jump target is the same whether the condition"
               " is true or false)\n",
               block->bbNum, block->Next()->bbNum);
     }
@@ -2836,26 +2837,34 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block)
 
         case BBJ_ALWAYS:
 
-            // A GOTO cannot be to the next block since that
-            // should have been fixed by the optimization above
-            // An exception is made for a jump from Hot to Cold
-            noway_assert(!block->JumpsToNext() || block->isBBCallAlwaysPairTail() ||
-                         fgInDifferentRegions(block, block->Next()));
-
-            /* Cannot remove the first BB */
+            /* Special case for first BB */
             if (!bPrev)
             {
-                break;
+                assert(block == fgFirstBB);
+                if (!block->JumpsToNext())
+                {
+                    break;
+                }
             }
-
-            /* Do not remove a block that jumps to itself - used for while (true){} */
-            if (block->HasJumpTo(block))
+            else
             {
-                break;
+                /* If this block follows a BBJ_CALLFINALLY do not remove it
+                 * (because we don't know who may jump to it) */
+                if (bPrev->KindIs(BBJ_CALLFINALLY))
+                {
+                    break;
+                }
+
+                // TODO: Once BBJ_COND blocks have pointers to their false branches,
+                // allow removing empty BBJ_ALWAYS and pointing bPrev's false branch to block->bbJumpDest.
+                if (bPrev->bbFallsThrough() && !block->JumpsToNext())
+                {
+                    break;
+                }
             }
 
-            /* Empty GOTO can be removed iff bPrev is BBJ_NONE */
-            if (!bPrev->KindIs(BBJ_NONE))
+            /* Do not remove a block that jumps to itself - used for while (true){} */
+            if (block->HasJumpTo(block))
             {
                 break;
             }
@@ -2866,26 +2875,10 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block)
                 break;
             }
 
-            /* Can fall through since this is similar with removing
-             * a BBJ_NONE block, only the successor is different */
-
-            FALLTHROUGH;
-
-        case BBJ_NONE:
-
-            /* special case if this is the first BB */
-            if (!bPrev)
-            {
-                assert(block == fgFirstBB);
-            }
-            else
+            // Don't remove fgEntryBB
+            if (block == fgEntryBB)
             {
-                /* If this block follows a BBJ_CALLFINALLY do not remove it
-                 * (because we don't know who may jump to it) */
-                if (bPrev->KindIs(BBJ_CALLFINALLY))
-                {
-                    break;
-                }
+                break;
             }
 
 #if defined(FEATURE_EH_FUNCLETS)
@@ -2897,16 +2890,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block)
              * to ensure we generate code for the block, if we keep it.
             */
            {
-                BasicBlock* succBlock;
-
-                if (block->KindIs(BBJ_ALWAYS))
-                {
-                    succBlock = block->GetJumpDest();
-                }
-                else
-                {
-                    succBlock = block->Next();
-                }
+                BasicBlock* succBlock = block->GetJumpDest();
 
                 if ((succBlock != nullptr) && !BasicBlock::sameEHRegion(block, succBlock))
                 {
@@ -3739,160 +3723,124 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock*
 //
 bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev)
 {
-    assert(block->KindIs(BBJ_COND, BBJ_ALWAYS));
+    assert(block->KindIs(BBJ_COND));
     assert(block->HasJumpTo(bNext));
     assert(block->NextIs(bNext));
     assert(block->PrevIs(bPrev));
 
-    if (block->KindIs(BBJ_ALWAYS))
-    {
-        // We can't remove it if it is a branch from hot => cold
-        if (!fgInDifferentRegions(block, bNext))
-        {
-            // We can't remove if it is marked as BBF_KEEP_BBJ_ALWAYS
-            if (!(block->bbFlags & BBF_KEEP_BBJ_ALWAYS))
-            {
-                // We can't remove if the BBJ_ALWAYS is part of a BBJ_CALLFINALLY pair
-                if (!block->isBBCallAlwaysPairTail())
-                {
-                    /* the unconditional jump is to the next BB */
-                    block->SetJumpKindAndTarget(BBJ_NONE);
 #ifdef DEBUG
-                    if (verbose)
-                    {
-                        printf("\nRemoving unconditional jump to next block (" FMT_BB " -> " FMT_BB
-                               ") (converted " FMT_BB " to "
-                               "fall-through)\n",
-                               block->bbNum, bNext->bbNum, block->bbNum);
-                    }
-#endif // DEBUG
-                    return true;
-                }
-            }
-        }
+    if (verbose)
+    {
+        printf("\nRemoving conditional jump to next block (" FMT_BB " -> " FMT_BB ")\n", block->bbNum, bNext->bbNum);
     }
-    else
+#endif // DEBUG
+
+    if (block->IsLIR())
     {
-        /* remove the conditional statement at the end of block */
-        noway_assert(block->KindIs(BBJ_COND));
-        noway_assert(block->isValid());
+        LIR::Range& blockRange = LIR::AsRange(block);
+        GenTree*    jmp        = blockRange.LastNode();
+        assert(jmp->OperIsConditionalJump());
 
-#ifdef DEBUG
-        if (verbose)
+        bool               isClosed;
+        unsigned           sideEffects;
+        LIR::ReadOnlyRange jmpRange;
+
+        if (jmp->OperIs(GT_JCC))
         {
-            printf("\nRemoving conditional jump to next block (" FMT_BB " -> " FMT_BB ")\n", block->bbNum,
-                   bNext->bbNum);
+            // For JCC we have an invariant until resolution that the
+            // previous node sets those CPU flags.
-            GenTree* prevNode = jmp->gtPrev;
-            assert((prevNode != nullptr) && ((prevNode->gtFlags & GTF_SET_FLAGS) != 0));
-            prevNode->gtFlags &= ~GTF_SET_FLAGS;
-            jmpRange = blockRange.GetTreeRange(prevNode, &isClosed, &sideEffects);
-            jmpRange = LIR::ReadOnlyRange(jmpRange.FirstNode(), jmp);
-        }
-        else
-        {
-            jmpRange = blockRange.GetTreeRange(jmp, &isClosed, &sideEffects);
-        }
+            GenTree* prevNode = jmp->gtPrev;
+            assert((prevNode != nullptr) && ((prevNode->gtFlags & GTF_SET_FLAGS) != 0));
+            prevNode->gtFlags &= ~GTF_SET_FLAGS;
+            jmpRange = blockRange.GetTreeRange(prevNode, &isClosed, &sideEffects);
+            jmpRange = LIR::ReadOnlyRange(jmpRange.FirstNode(), jmp);
+        }
+        else
+        {
+            jmpRange = blockRange.GetTreeRange(jmp, &isClosed, &sideEffects);
+        }
 
         if (isClosed && ((sideEffects & GTF_SIDE_EFFECT) == 0))
         {
-            // If the jump and its operands form a contiguous, side-effect-free range,
-            // remove them.
-            blockRange.Delete(this, block, std::move(jmpRange));
+            // If the jump and its operands form a contiguous, side-effect-free range,
+            // remove them.
+            blockRange.Delete(this, block, std::move(jmpRange));
         }
         else
         {
-            // Otherwise, just remove the jump node itself.
-            blockRange.Remove(jmp, true);
+            // Otherwise, just remove the jump node itself.
+            blockRange.Remove(jmp, true);
         }
     }
-    else
+    else
+    {
+        Statement* condStmt = block->lastStmt();
+        GenTree*   cond     = condStmt->GetRootNode();
+        noway_assert(cond->gtOper == GT_JTRUE);
+
+        /* check for SIDE_EFFECTS */
+        if (cond->gtFlags & GTF_SIDE_EFFECT)
+        {
+            /* Extract the side effects from the conditional */
+            GenTree* sideEffList = nullptr;
+
+            gtExtractSideEffList(cond, &sideEffList);
+
+            if (sideEffList == nullptr)
             {
-                Statement* condStmt = block->lastStmt();
-                GenTree*   cond     = condStmt->GetRootNode();
-                noway_assert(cond->gtOper == GT_JTRUE);
-
-                /* check for SIDE_EFFECTS */
-                if (cond->gtFlags & GTF_SIDE_EFFECT)
-                {
-                    /* Extract the side effects from the conditional */
-                    GenTree* sideEffList = nullptr;
-
-                    gtExtractSideEffList(cond, &sideEffList);
-
-                    if (sideEffList == nullptr)
+                compCurBB = block;
+                fgRemoveStmt(block, condStmt);
+            }
+            else
+            {
+                noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT);
+#ifdef DEBUG
+                if (verbose)
                 {
-                        compCurBB = block;
-                        fgRemoveStmt(block, condStmt);
+                    printf("\nConditional has side effects! Extracting side effects...\n");
+                    gtDispTree(cond);
+                    printf("\n");
+                    gtDispTree(sideEffList);
+                    printf("\n");
                 }
-                else
-                {
-                    noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT);
-#ifdef DEBUG
-                    if (verbose)
-                    {
-                        printf("\nConditional has side effects! Extracting side effects...\n");
-                        gtDispTree(cond);
-                        printf("\n");
-                        gtDispTree(sideEffList);
-                        printf("\n");
-                    }
 #endif // DEBUG
-                    /* Replace the conditional statement with the list of side effects */
-                    noway_assert(sideEffList->gtOper != GT_JTRUE);
+                /* Replace the conditional statement with the list of side effects */
+                noway_assert(sideEffList->gtOper != GT_JTRUE);
 
-                    condStmt->SetRootNode(sideEffList);
+                condStmt->SetRootNode(sideEffList);
 
-                    if (fgNodeThreading == NodeThreading::AllTrees)
-                    {
-                        compCurBB = block;
+                if (fgNodeThreading == NodeThreading::AllTrees)
+                {
+                    compCurBB = block;
 
-                        /* Update ordering, costs, FP levels, etc.
 */
-                        gtSetStmtInfo(condStmt);
+                    /* Update ordering, costs, FP levels, etc. */
+                    gtSetStmtInfo(condStmt);
 
-                        /* Re-link the nodes for this statement */
-                        fgSetStmtSeq(condStmt);
-                    }
+                    /* Re-link the nodes for this statement */
+                    fgSetStmtSeq(condStmt);
                 }
             }
-            else
-            {
-                compCurBB = block;
-                /* conditional has NO side effect - remove it */
-                fgRemoveStmt(block, condStmt);
-            }
         }
+        else
+        {
+            compCurBB = block;
+            /* conditional has NO side effect - remove it */
+            fgRemoveStmt(block, condStmt);
+        }
+    }
 
-        /* Conditional is gone - simply fall into the next block */
+    /* Conditional is gone - always jump to the next block */
 
-        block->SetJumpKindAndTarget(BBJ_NONE);
+    block->SetJumpKind(BBJ_ALWAYS);
 
-        /* Update bbRefs and bbNum - Conditional predecessors to the same
-         * block are counted twice so we have to remove one of them */
+    /* Update bbRefs and bbNum - Conditional predecessors to the same
+     * block are counted twice so we have to remove one of them */
 
-        noway_assert(bNext->countOfInEdges() > 1);
-        fgRemoveRefPred(bNext, block);
+    noway_assert(bNext->countOfInEdges() > 1);
+    fgRemoveRefPred(bNext, block);
 
-        return true;
-    }
-    return false;
+    return true;
 }
 
 //-------------------------------------------------------------
@@ -3922,13 +3870,18 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump)
         return false;
     }
 
+    // We might be able to compact blocks that always jump to the next block.
+    if (bJump->JumpsToNext())
+    {
+        return false;
+    }
+
     if (bJump->bbFlags & BBF_KEEP_BBJ_ALWAYS)
     {
         return false;
     }
 
-    // Don't hoist a conditional branch into the scratch block; we'd prefer it stay
-    // either BBJ_NONE or BBJ_ALWAYS.
+    // Don't hoist a conditional branch into the scratch block; we'd prefer it stay BBJ_ALWAYS.
     if (fgBBisScratch(bJump))
    {
        return false;
    }
@@ -4541,14 +4494,6 @@ bool Compiler::fgExpandRarelyRunBlocks()
                     }
                     break;
 
-                case BBJ_NONE:
-
-                    if (block->isRunRarely())
-                    {
-                        reason = "Falling into a rarely run block";
-                    }
-                    break;
-
                 case BBJ_COND:
 
                     if (block->isRunRarely() && bPrev->GetJumpDest()->isRunRarely())
                     {
@@ -4655,15 +4600,12 @@ bool Compiler::fgExpandRarelyRunBlocks()
             }
 
             /* COMPACT blocks if possible */
-            if (bPrev->KindIs(BBJ_NONE))
+            if (fgCanCompactBlocks(bPrev, block))
             {
-                if (fgCanCompactBlocks(bPrev, block))
-                {
-                    fgCompactBlocks(bPrev, block);
+                fgCompactBlocks(bPrev, block);
 
-                    block = bPrev;
-                    continue;
-                }
+                block = bPrev;
+                continue;
             }
             //
             // if bPrev->bbWeight is not based upon profile data we can adjust
@@ -4849,11 +4791,17 @@ bool Compiler::fgReorderBlocks(bool useProfile)
         {
             if (bPrev->KindIs(BBJ_ALWAYS))
             {
+                if (bPrev->JumpsToNext())
+                {
+                    bDest = nullptr;
+                    goto CHECK_FOR_RARE;
+                }
                 // We can pull up the blocks that the unconditional jump branches to
                 // if the weight of bDest is greater or equal to the weight of block
                 // also the weight of bDest can't be zero.
+                // Don't reorder if bPrev's jump destination is the next block.
                 //
-                if ((bDest->bbWeight < block->bbWeight) || (bDest->bbWeight == BB_ZERO_WEIGHT))
+                else if ((bDest->bbWeight < block->bbWeight) || (bDest->bbWeight == BB_ZERO_WEIGHT))
                 {
                     reorderBlock = false;
                 }
@@ -5072,7 +5020,8 @@ bool Compiler::fgReorderBlocks(bool useProfile)
                     // candidateBlock and have it fall into bTmp
                     //
                     if ((candidateBlock == nullptr) || !candidateBlock->KindIs(BBJ_COND, BBJ_ALWAYS) ||
-                        !candidateBlock->HasJumpTo(bTmp))
+                        !candidateBlock->HasJumpTo(bTmp) ||
+                        (candidateBlock->KindIs(BBJ_ALWAYS) && candidateBlock->JumpsToNext()))
                     {
                         // otherwise we have a new candidateBlock
                         //
@@ -5081,7 +5030,8 @@ bool Compiler::fgReorderBlocks(bool useProfile)
                     }
                 }
 
-                if ((bTmp->bbFallsThrough() == false) || (bTmp->bbWeight == BB_ZERO_WEIGHT))
+                const bool bTmpJumpsToNext = bTmp->KindIs(BBJ_ALWAYS) && bTmp->JumpsToNext();
+                if ((!bTmp->bbFallsThrough() && !bTmpJumpsToNext) || (bTmp->bbWeight == BB_ZERO_WEIGHT))
                 {
                     lastNonFallThroughBlock = bTmp;
                 }
@@ -5172,7 +5122,7 @@ bool Compiler::fgReorderBlocks(bool useProfile)
             //   later in the method and hopefully connecting the jump dest block
             //   so that it becomes the fall through block
             //
-            // And when bDest in not NULL, we also consider:
+            // And when bDest is not NULL, we also consider:
             //
             //  2. Moving the bDest block (or blocks) up to bPrev
             //     so that it could be used as a fall through block
@@ -5348,7 +5298,11 @@ bool Compiler::fgReorderBlocks(bool useProfile)
                         break;
                     }
 
-                    if (bEnd2->bbFallsThrough() == false)
+                    if (bEnd2->KindIs(BBJ_ALWAYS) && bEnd2->JumpsToNext())
+                    {
+                        // Treat jumps to next block as fall-through
+                    }
+                    else if (bEnd2->bbFallsThrough() == false)
                     {
                         break;
                     }
@@ -5514,8 +5468,19 @@ bool Compiler::fgReorderBlocks(bool useProfile)
             }
 
             /* Temporarily unlink [bStart..bEnd] from the flow graph */
+            const bool bStartPrevJumpsToNext = bStartPrev->KindIs(BBJ_ALWAYS) && bStartPrev->JumpsToNext();
             fgUnlinkRange(bStart, bEnd);
 
+            // If bStartPrev is a BBJ_ALWAYS to some block after bStart, unlinking bStart can move
+            // bStartPrev's jump destination up, making bStartPrev jump to the next block for now.
+            // This can lead us to make suboptimal decisions in Compiler::fgFindInsertPoint,
+            // so make sure the BBF_NONE_QUIRK flag is unset for bStartPrev beforehand.
+            // TODO: Remove quirk.
+            if (bStartPrev->KindIs(BBJ_ALWAYS) && (bStartPrevJumpsToNext != bStartPrev->JumpsToNext()))
+            {
+                bStartPrev->bbFlags &= ~BBF_NONE_QUIRK;
+            }
+
             if (insertAfterBlk == nullptr)
             {
                 // Find new location for the unlinked block(s)
@@ -5628,8 +5593,8 @@ bool Compiler::fgReorderBlocks(bool useProfile)
                 BasicBlock* nearBlk = nullptr;
                 BasicBlock* jumpBlk = nullptr;
 
-                if (bEnd->KindIs(BBJ_ALWAYS) && (!isRare || bEnd->GetJumpDest()->isRunRarely()) &&
-                    fgIsForwardBranch(bEnd, bPrev))
+                if (bEnd->KindIs(BBJ_ALWAYS) && !bEnd->JumpsToNext() &&
+                    (!isRare || bEnd->GetJumpDest()->isRunRarely()) && fgIsForwardBranch(bEnd, bPrev))
                 {
                     // Set nearBlk to be the block in [startBlk..endBlk]
                     // such that nearBlk->NextIs(bEnd->JumpDest)
@@ -6020,6 +5985,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
                 bDest = block->GetJumpDest();
                 if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, bDest))
                 {
+                    assert(block->KindIs(BBJ_COND));
                     change   = true;
                     modified = true;
                     bDest    = block->GetJumpDest();
                     bNext    = block->Next();
                 }
            }
 
-            if (block->KindIs(BBJ_NONE))
+            // Remove jumps to the following block
+            // and optimize any JUMPS to JUMPS
+
+            if (block->KindIs(BBJ_ALWAYS))
             {
-                bDest = nullptr;
-                if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->Next()))
+                bDest = block->GetJumpDest();
+                if (bDest == bNext)
                 {
-                    assert(block->KindIs(BBJ_COND));
-                    change   = true;
-                    modified = true;
-                    bDest    = block->GetJumpDest();
-                    bNext    = block->Next();
+                    // Skip jump optimizations, and try to compact block and bNext later
+                    block->bbFlags |= BBF_NONE_QUIRK;
+                    bDest = nullptr;
                 }
             }
-
-            // Remove JUMPS to the following block
-            // and optimize any JUMPS to JUMPS
-
-            if (block->KindIs(BBJ_COND, BBJ_ALWAYS))
+            else if (block->KindIs(BBJ_COND))
             {
                 bDest = block->GetJumpDest();
                 if (bDest == bNext)
                 {
@@ -6063,7 +6026,9 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
                 if (bDest->isEmpty() && bDest->KindIs(BBJ_ALWAYS) &&
                     !bDest->HasJumpTo(bDest)) // special case for self jumps
                 {
-                    if (fgOptimizeBranchToEmptyUnconditional(block, bDest))
+                    // TODO: Allow optimizing branches to blocks that jump to the next block
+                    const bool optimizeBranch = !bDest->JumpsToNext() || !(bDest->bbFlags & BBF_NONE_QUIRK);
+                    if (optimizeBranch && fgOptimizeBranchToEmptyUnconditional(block, bDest))
                     {
                         change   = true;
                         modified = true;
@@ -6083,6 +6048,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
                     (bNext != nullptr) &&        // block is not the last block
                     (bNext->bbRefs == 1) &&      // No other block jumps to bNext
                     bNext->KindIs(BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block
+                    !bNext->JumpsToNext() &&     // and it doesn't jump to the next block (we might compact them)
                    bNext->isEmpty() &&          // and it is an empty block
                    !bNext->HasJumpTo(bNext) &&  // special case for self jumps
                    !bDest->IsFirstColdBlock(this) &&
@@ -6182,7 +6148,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase)
 
                 // Add fall through fixup block, if needed.
                 //
-                if (bDest->KindIs(BBJ_NONE, BBJ_COND))
+                if (bDest->KindIs(BBJ_COND))
                 {
                     BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true, bDestNext);
                     bFixup->inheritWeight(bDestNext);
@@ -6453,9 +6419,6 @@ unsigned Compiler::fgGetCodeEstimate(BasicBlock* block)
 
     switch (block->GetJumpKind())
     {
-        case BBJ_NONE:
-            costSz = 0;
-            break;
         case BBJ_ALWAYS:
         case BBJ_EHCATCHRET:
         case BBJ_LEAVE:
@@ -6746,7 +6709,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early)
             }
 
             bool const isNoSplit     = stmt == predBlock->firstStmt();
-            bool const isFallThrough = (predBlock->KindIs(BBJ_NONE));
+            bool const isFallThrough = (predBlock->KindIs(BBJ_ALWAYS) && predBlock->JumpsToNext());
 
             // Is this block possibly better than what we have?
             //
diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp
index 17610e38fe05b..5a2579bc1e974 100644
--- a/src/coreclr/jit/fgprofile.cpp
+++ b/src/coreclr/jit/fgprofile.cpp
@@ -497,12 +497,6 @@ void BlockCountInstrumentor::RelocateProbes()
             }
             else
             {
-                // Ensure this pred is not a fall through.
-                //
-                if (pred->KindIs(BBJ_NONE))
-                {
-                    pred->SetJumpKindAndTarget(BBJ_ALWAYS, block);
-                }
                 assert(pred->KindIs(BBJ_ALWAYS));
             }
         }
@@ -513,8 +507,9 @@ void BlockCountInstrumentor::RelocateProbes()
     //
     if (criticalPreds.Height() > 0)
     {
-        BasicBlock* const intermediary = m_comp->fgNewBBbefore(BBJ_NONE, block, /* extendRegion*/ true);
-        intermediary->bbFlags |= BBF_IMPORTED | BBF_MARKED;
+        BasicBlock* const intermediary =
+            m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true, /* jumpDest */ block);
+        intermediary->bbFlags |= BBF_IMPORTED | BBF_MARKED | BBF_NONE_QUIRK;
         intermediary->inheritWeight(block);
         m_comp->fgAddRefPred(block, intermediary);
         SetModifiedFlow();
@@ -1231,7 +1226,7 @@ static int32_t EfficientEdgeCountBlockToKey(BasicBlock* block)
 {
     static const int IS_INTERNAL_BLOCK = (int32_t)0x80000000;
     int32_t          key               = (int32_t)block->bbCodeOffs;
-    // We may see empty BBJ_NONE BBF_INTERNAL blocks that were added
+    // We may see empty BBJ_ALWAYS BBF_INTERNAL blocks that were added
     // by fgNormalizeEH.
     //
     // We'll use their bbNum in place of IL offset, and set
@@ -1544,14 +1539,6 @@ void EfficientEdgeCountInstrumentor::SplitCriticalEdges()
 
                 if (found)
                 {
-                    // Importer folding may have changed the block jump kind
-                    // to BBJ_NONE. If so, warp it back to BBJ_ALWAYS.
-                    //
-                    if (block->KindIs(BBJ_NONE))
-                    {
-                        block->SetJumpKindAndTarget(BBJ_ALWAYS, target);
-                    }
-
                     instrumentedBlock = m_comp->fgSplitEdge(block, target);
                     instrumentedBlock->bbFlags |= BBF_IMPORTED;
                     edgesSplit++;
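With importer folding now leaving a BBJ_ALWAYS with an explicit target, SplitCriticalEdges above no longer needs the "warp it back" step before splitting. A rough, self-contained model of the edge split itself (the relay block and its lexical placement are simplified away; names are hypothetical):

    #include <cassert>

    enum Kind { ALWAYS, COND };
    struct Blk { Kind kind; Blk* next; Blk* target; };

    // Split the explicit edge (block -> target) by routing it through a relay
    // block; the relay is where an edge-count probe would be placed.
    Blk* splitEdge(Blk* block, Blk* target, Blk* relayStorage)
    {
        assert(block->kind == ALWAYS && block->target == target);
        Blk* relay = relayStorage;
        *relay = Blk{ALWAYS, target, target}; // relay jumps on to the original target
        block->target = relay;                // block -> relay -> target
        return relay;
    }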
@@ -1687,13 +1674,10 @@ void EfficientEdgeCountInstrumentor::RelocateProbes()
             //
             NewRelocatedProbe(pred, probe->source, probe->target, &leader);
 
-            // Ensure this pred is not a fall through.
+            // Ensure this pred always jumps to block
             //
-            if (pred->KindIs(BBJ_NONE))
-            {
-                pred->SetJumpKindAndTarget(BBJ_ALWAYS, block);
-            }
             assert(pred->KindIs(BBJ_ALWAYS));
+            assert(pred->HasJumpTo(block));
         }
     }
 
@@ -1702,8 +1686,9 @@ void EfficientEdgeCountInstrumentor::RelocateProbes()
     //
     if (criticalPreds.Height() > 0)
     {
-        BasicBlock* intermediary = m_comp->fgNewBBbefore(BBJ_NONE, block, /* extendRegion*/ true);
-        intermediary->bbFlags |= BBF_IMPORTED;
+        BasicBlock* intermediary =
+            m_comp->fgNewBBbefore(BBJ_ALWAYS, block, /* extendRegion */ true, /* jumpDest */ block);
+        intermediary->bbFlags |= BBF_IMPORTED | BBF_NONE_QUIRK;
         intermediary->inheritWeight(block);
         m_comp->fgAddRefPred(block, intermediary);
         NewRelocatedProbe(intermediary, probe->source, probe->target, &leader);
@@ -4422,11 +4407,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight)
                     bSrc = bDst->bbPreds->getSourceBlock();
 
                     // Does this block flow into only one other block
-                    if (bSrc->KindIs(BBJ_NONE))
-                    {
-                        bOnlyNext = bSrc->Next();
-                    }
-                    else if (bSrc->KindIs(BBJ_ALWAYS))
+                    if (bSrc->KindIs(BBJ_ALWAYS))
                     {
                         bOnlyNext = bSrc->GetJumpDest();
                     }
@@ -4443,11 +4424,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight)
                 }
 
                 // Does this block flow into only one other block
-                if (bDst->KindIs(BBJ_NONE))
-                {
-                    bOnlyNext = bDst->Next();
-                }
-                else if (bDst->KindIs(BBJ_ALWAYS))
+                if (bDst->KindIs(BBJ_ALWAYS))
                 {
                     bOnlyNext = bDst->GetJumpDest();
                 }
@@ -4684,7 +4661,6 @@ PhaseStatus Compiler::fgComputeEdgeWeights()
                 {
                     case BBJ_ALWAYS:
                     case BBJ_EHCATCHRET:
-                    case BBJ_NONE:
                    case BBJ_CALLFINALLY:
                        // We know the exact edge weight
                        assignOK &= edge->setEdgeWeightMinChecked(bSrc->bbWeight, bDst, slop, &usedSlop);
diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp
index c9204c1e971e6..b7b13e887317e 100644
--- a/src/coreclr/jit/fgprofilesynthesis.cpp
+++ b/src/coreclr/jit/fgprofilesynthesis.cpp
@@ -142,7 +142,6 @@ void ProfileSynthesis::AssignLikelihoods()
                 // (todo: finally ret may have succs)
                 break;
 
-            case BBJ_NONE:
             case BBJ_CALLFINALLY:
                 // Single successor next cases
                 //
@@ -509,7 +508,6 @@ void ProfileSynthesis::RepairLikelihoods()
                 // Nothing to do.
                 break;
 
-            case BBJ_NONE:
             case BBJ_CALLFINALLY:
                 // Single successor next cases.
                 // Just assign 1.0
@@ -601,7 +599,6 @@ void ProfileSynthesis::BlendLikelihoods()
                 // Nothing to do.
                 break;
 
-            case BBJ_NONE:
            case BBJ_CALLFINALLY:
                 // Single successor next cases.
                 // Just assign 1.0
newStmt = fgNewStmtAtEnd(block, call); } @@ -277,10 +277,12 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) lpIndexFallThrough = topFallThrough->bbNatLoopNum; } - BasicBlock* poll = fgNewBBafter(BBJ_NONE, top, true); + BasicBlock* poll = fgNewBBafter(BBJ_ALWAYS, top, true); bottom = fgNewBBafter(top->GetJumpKind(), poll, true, top->GetJumpDest()); BBjumpKinds oldJumpKind = top->GetJumpKind(); unsigned char lpIndex = top->bbNatLoopNum; + poll->SetJumpDest(bottom); + assert(poll->JumpsToNext()); // Update block flags const BasicBlockFlags originalFlags = top->bbFlags | BBF_GC_SAFE_POINT; @@ -294,7 +296,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) bottom->bbFlags |= originalFlags & (BBF_SPLIT_GAINED | BBF_IMPORTED | BBF_GC_SAFE_POINT | BBF_LOOP_PREHEADER | BBF_RETLESS_CALL); bottom->inheritWeight(top); - poll->bbFlags |= originalFlags & (BBF_SPLIT_GAINED | BBF_IMPORTED | BBF_GC_SAFE_POINT); + poll->bbFlags |= originalFlags & (BBF_SPLIT_GAINED | BBF_IMPORTED | BBF_GC_SAFE_POINT | BBF_NONE_QUIRK); // Mark Poll as rarely run. poll->bbSetRunRarely(); @@ -396,9 +398,6 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) // (1 for unconditional branches, 2 for conditional branches, N for switches). switch (oldJumpKind) { - case BBJ_NONE: - fgReplacePred(bottom->Next(), top, bottom); - break; case BBJ_RETURN: case BBJ_THROW: // no successors @@ -2985,14 +2984,15 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) /* Allocate a new basic block */ - BasicBlock* newHead = BasicBlock::New(this, BBJ_NONE); - newHead->bbFlags |= BBF_INTERNAL; + BasicBlock* newHead = BasicBlock::New(this, BBJ_ALWAYS, block); + newHead->bbFlags |= (BBF_INTERNAL | BBF_NONE_QUIRK); newHead->inheritWeight(block); newHead->bbRefs = 0; fgInsertBBbefore(block, newHead); // insert the new block in the block list - fgExtendEHRegionBefore(block); // Update the EH table to make the prolog block the first block in the block's EH - // block. + assert(newHead->JumpsToNext()); + fgExtendEHRegionBefore(block); // Update the EH table to make the prolog block the first block in the block's EH + // block. // Distribute the pred list between newHead and block. Incoming edges coming from outside // the handler go to the prolog. Edges coming from with the handler are back-edges, and @@ -3422,13 +3422,6 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() fgAddRefPred(transitionBlock, prevToFirstColdBlock); } break; - - case BBJ_NONE: - // If the block preceding the first cold block is BBJ_NONE, - // convert it to BBJ_ALWAYS to force an explicit jump. - - prevToFirstColdBlock->SetJumpKindAndTarget(BBJ_ALWAYS, firstColdBlock); - break; } } } @@ -3601,13 +3594,13 @@ PhaseStatus Compiler::fgCreateThrowHelperBlocks() assert(!fgRngChkThrowAdded); const static BBjumpKinds jumpKinds[] = { - BBJ_NONE, // SCK_NONE - BBJ_THROW, // SCK_RNGCHK_FAIL - BBJ_THROW, // SCK_DIV_BY_ZERO - BBJ_THROW, // SCK_ARITH_EXCP, SCK_OVERFLOW - BBJ_THROW, // SCK_ARG_EXCPN - BBJ_THROW, // SCK_ARG_RNG_EXCPN - BBJ_THROW, // SCK_FAIL_FAST + BBJ_ALWAYS, // SCK_NONE + BBJ_THROW, // SCK_RNGCHK_FAIL + BBJ_THROW, // SCK_DIV_BY_ZERO + BBJ_THROW, // SCK_ARITH_EXCP, SCK_OVERFLOW + BBJ_THROW, // SCK_ARG_EXCPN + BBJ_THROW, // SCK_ARG_RNG_EXCPN + BBJ_THROW, // SCK_FAIL_FAST }; noway_assert(sizeof(jumpKinds) == SCK_COUNT); // sanity check @@ -3622,6 +3615,7 @@ PhaseStatus Compiler::fgCreateThrowHelperBlocks() // regions since the time the descriptor was created. 
// assert((add->acdKind == SCK_FAIL_FAST) || (bbThrowIndex(srcBlk) == add->acdData)); + assert(add->acdKind != SCK_NONE); BasicBlock* const newBlk = fgNewBBinRegion(jumpKinds[add->acdKind], srcBlk, /* jumpDest */ nullptr, /* runRarely */ true, /* insertAtEnd */ true); @@ -3934,7 +3928,8 @@ PhaseStatus Compiler::fgSetBlockOrder() { case BBJ_COND: case BBJ_ALWAYS: - partiallyInterruptible = EDGE_IS_GC_SAFE(block, block->GetJumpDest()); + partiallyInterruptible = + ((block->bbFlags & BBF_NONE_QUIRK) != 0) || EDGE_IS_GC_SAFE(block, block->GetJumpDest()); break; case BBJ_SWITCH: diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp index 4b5809d069552..395574b89375f 100644 --- a/src/coreclr/jit/helperexpansion.cpp +++ b/src/coreclr/jit/helperexpansion.cpp @@ -252,7 +252,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm // Non-dynamic expansion case (no size check): // - // prevBb(BBJ_NONE): [weight: 1.0] + // prevBb(BBJ_ALWAYS): [weight: 1.0] // ... // // nullcheckBb(BBJ_COND): [weight: 1.0] @@ -263,7 +263,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm // rtLookupLcl = *fastPathValue; // goto block; // - // fallbackBb(BBJ_NONE): [weight: 0.2] + // fallbackBb(BBJ_ALWAYS): [weight: 0.2] // rtLookupLcl = HelperCall(); // // block(...): [weight: 1.0] @@ -285,7 +285,11 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm // Fallback basic block GenTree* fallbackValueDef = gtNewStoreLclVarNode(rtLookupLcl->GetLclNum(), call); - BasicBlock* fallbackBb = fgNewBBFromTreeAfter(BBJ_NONE, nullcheckBb, fallbackValueDef, debugInfo, nullptr, true); + BasicBlock* fallbackBb = + fgNewBBFromTreeAfter(BBJ_ALWAYS, nullcheckBb, fallbackValueDef, debugInfo, nullcheckBb->Next(), true); + + assert(fallbackBb->JumpsToNext()); + fallbackBb->bbFlags |= BBF_NONE_QUIRK; // Set nullcheckBb's real jump target nullcheckBb->SetJumpDest(fallbackBb); @@ -299,7 +303,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm { // Dynamic expansion case (sizeCheckBb is added and some preds are changed): // - // prevBb(BBJ_NONE): [weight: 1.0] + // prevBb(BBJ_ALWAYS): [weight: 1.0] // // sizeCheckBb(BBJ_COND): [weight: 1.0] // if (sizeValue <= offsetValue) @@ -314,7 +318,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm // rtLookupLcl = *fastPathValue; // goto block; // - // fallbackBb(BBJ_NONE): [weight: 0.36] + // fallbackBb(BBJ_ALWAYS): [weight: 0.36] // rtLookupLcl = HelperCall(); // // block(...): [weight: 1.0] @@ -343,10 +347,12 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm fgRemoveRefPred(block, prevBb); fgAddRefPred(block, fastPathBb); fgAddRefPred(block, fallbackBb); + assert(prevBb->KindIs(BBJ_ALWAYS)); if (needsSizeCheck) { // sizeCheckBb is the first block after prevBb + prevBb->SetJumpDest(sizeCheckBb); fgAddRefPred(sizeCheckBb, prevBb); // sizeCheckBb flows into nullcheckBb in case if the size check passes fgAddRefPred(nullcheckBb, sizeCheckBb); @@ -359,6 +365,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm else { // nullcheckBb is the first block after prevBb + prevBb->SetJumpDest(nullcheckBb); fgAddRefPred(nullcheckBb, prevBb); // No size check, nullcheckBb jumps to fast path fgAddRefPred(fastPathBb, nullcheckBb); @@ -715,7 +722,7 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement* gtNewOperNode(GT_NE, TYP_INT, 
threadStaticBlockBaseLclValueUse, gtNewIconNode(0, TYP_I_IMPL)); threadStaticBlockNullCond = gtNewOperNode(GT_JTRUE, TYP_VOID, threadStaticBlockNullCond); - // prevBb (BBJ_NONE): [weight: 1.0] + // prevBb (BBJ_ALWAYS): [weight: 1.0] // ... // // maxThreadStaticBlocksCondBB (BBJ_COND): [weight: 1.0] @@ -782,6 +789,8 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement* // // Update preds in all new blocks // + assert(prevBb->KindIs(BBJ_ALWAYS)); + prevBb->SetJumpDest(maxThreadStaticBlocksCondBB); fgRemoveRefPred(block, prevBb); fgAddRefPred(maxThreadStaticBlocksCondBB, prevBb); @@ -1086,7 +1095,9 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G // Fallback basic block // TODO-CQ: for JIT we can replace the original call with CORINFO_HELP_INITCLASS // that only accepts a single argument - BasicBlock* helperCallBb = fgNewBBFromTreeAfter(BBJ_NONE, isInitedBb, call, debugInfo, nullptr, true); + BasicBlock* helperCallBb = fgNewBBFromTreeAfter(BBJ_ALWAYS, isInitedBb, call, debugInfo, isInitedBb->Next(), true); + assert(helperCallBb->JumpsToNext()); + helperCallBb->bbFlags |= BBF_NONE_QUIRK; GenTree* replacementNode = nullptr; if (retValKind == SHRV_STATIC_BASE_PTR) @@ -1126,14 +1137,14 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G // Final block layout looks like this: // - // prevBb(BBJ_NONE): [weight: 1.0] + // prevBb(BBJ_ALWAYS): [weight: 1.0] // ... // // isInitedBb(BBJ_COND): [weight: 1.0] // if (isInited) // goto block; // - // helperCallBb(BBJ_NONE): [weight: 0.0] + // helperCallBb(BBJ_ALWAYS): [weight: 0.0] // helperCall(); // // block(...): [weight: 1.0] @@ -1152,7 +1163,11 @@ bool Compiler::fgExpandStaticInitForCall(BasicBlock** pBlock, Statement* stmt, G fgAddRefPred(block, isInitedBb); fgAddRefPred(block, helperCallBb); - // prevBb always flow into isInitedBb + // prevBb always flows into isInitedBb + assert(prevBb->KindIs(BBJ_ALWAYS)); + prevBb->SetJumpDest(isInitedBb); + prevBb->bbFlags |= BBF_NONE_QUIRK; + assert(prevBb->JumpsToNext()); fgAddRefPred(isInitedBb, prevBb); // Both fastPathBb and helperCallBb have a single common pred - isInitedBb @@ -1435,8 +1450,9 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, // In theory, we could just emit the const U8 data to the data section and use GT_BLK here // but that would be a bit less efficient since we would have to load the data from memory. 
// - BasicBlock* fastpathBb = fgNewBBafter(BBJ_NONE, lengthCheckBb, true); - fastpathBb->bbFlags |= BBF_INTERNAL; + BasicBlock* fastpathBb = fgNewBBafter(BBJ_ALWAYS, lengthCheckBb, true, lengthCheckBb->Next()); + assert(fastpathBb->JumpsToNext()); + fastpathBb->bbFlags |= (BBF_INTERNAL | BBF_NONE_QUIRK); // The widest type we can use for loads const var_types maxLoadType = roundDownMaxType(srcLenU8); @@ -1491,6 +1507,10 @@ bool Compiler::fgVNBasedIntrinsicExpansionForCall_ReadUtf8(BasicBlock** pBlock, // block is no longer a predecessor of prevBb fgRemoveRefPred(block, prevBb); // prevBb flows into lengthCheckBb + assert(prevBb->KindIs(BBJ_ALWAYS)); + prevBb->SetJumpDest(lengthCheckBb); + prevBb->bbFlags |= BBF_NONE_QUIRK; + assert(prevBb->JumpsToNext()); fgAddRefPred(lengthCheckBb, prevBb); // lengthCheckBb has two successors: block and fastpathBb fgAddRefPred(fastpathBb, lengthCheckBb); diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index c80856a6f4325..722afadcd9a21 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -1983,8 +1983,8 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H { // Create extra basic block for the spill // - BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true); - newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE; + BasicBlock* newBlk = fgNewBBbefore(BBJ_ALWAYS, hndBlk, /* extendRegion */ true, /* jumpDest */ hndBlk); + newBlk->bbFlags |= (BBF_IMPORTED | BBF_DONT_REMOVE | BBF_NONE_QUIRK); newBlk->inheritWeight(hndBlk); newBlk->bbCodeOffs = hndBlk->bbCodeOffs; @@ -4975,7 +4975,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // will be treated as pair and handled correctly. if (block->KindIs(BBJ_CALLFINALLY)) { - BasicBlock* dupBlock = BasicBlock::New(this, block->GetJumpKind(), block->GetJumpDest()); + BasicBlock* dupBlock = BasicBlock::New(this, BBJ_CALLFINALLY, block->GetJumpDest()); dupBlock->bbFlags = block->bbFlags; fgAddRefPred(dupBlock->GetJumpDest(), dupBlock); dupBlock->copyEHRegion(block); @@ -7282,20 +7282,21 @@ void Compiler::impImportBlockCode(BasicBlock* block) BADCODE("invalid type for brtrue/brfalse"); } - if (opts.OptimizationEnabled() && (block->KindIs(BBJ_NONE) || block->JumpsToNext())) + if (opts.OptimizationEnabled() && block->JumpsToNext()) { // We may have already modified `block`'s jump kind, if this is a re-importation. // if (block->KindIs(BBJ_COND)) { - JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", + JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_ALWAYS\n", block->bbNum, block->Next()->bbNum); fgRemoveRefPred(block->GetJumpDest(), block); - block->SetJumpKindAndTarget(BBJ_NONE); + block->SetJumpKind(BBJ_ALWAYS); + block->bbFlags |= BBF_NONE_QUIRK; } else { - assert(block->KindIs(BBJ_NONE)); + assert(block->KindIs(BBJ_ALWAYS)); } if (op1->gtFlags & GTF_GLOB_EFFECT) @@ -7346,27 +7347,26 @@ void Compiler::impImportBlockCode(BasicBlock* block) unreachable under compDbgCode */ assert(!opts.compDbgCode); - BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? 
BBJ_ALWAYS : BBJ_NONE); // BBJ_COND: normal case - // foldedJumpKind: this can happen if we are reimporting the block for the second time - assertImp(block->KindIs(BBJ_COND, foldedJumpKind)); // normal case + // BBJ_ALWAYS: this can happen if we are reimporting the block for the second time + assertImp(block->KindIs(BBJ_COND, BBJ_ALWAYS)); // normal case if (block->KindIs(BBJ_COND)) { - if (foldedJumpKind == BBJ_NONE) - { - JITDUMP("\nThe block falls through into the next " FMT_BB "\n", block->Next()->bbNum); - fgRemoveRefPred(block->GetJumpDest(), block); - block->SetJumpKindAndTarget(BBJ_NONE); - } - else + if (op1->AsIntCon()->gtIconVal) { JITDUMP("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->GetJumpDest()->bbNum); fgRemoveRefPred(block->Next(), block); - assert(foldedJumpKind == BBJ_ALWAYS); block->SetJumpKind(BBJ_ALWAYS); } + else + { + JITDUMP("\nThe block jumps to the next " FMT_BB "\n", block->Next()->bbNum); + fgRemoveRefPred(block->GetJumpDest(), block); + block->SetJumpKindAndTarget(BBJ_ALWAYS, block->Next()); + block->bbFlags |= BBF_NONE_QUIRK; + } } break; @@ -7530,20 +7530,22 @@ void Compiler::impImportBlockCode(BasicBlock* block) assertImp((genActualType(op1) == genActualType(op2)) || (varTypeIsI(op1) && varTypeIsI(op2)) || (varTypeIsFloating(op1) && varTypeIsFloating(op2))); - if (opts.OptimizationEnabled() && (block->KindIs(BBJ_NONE) || block->JumpsToNext())) + if (opts.OptimizationEnabled() && block->JumpsToNext()) { // We may have already modified `block`'s jump kind, if this is a re-importation. // if (block->KindIs(BBJ_COND)) { - JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", + JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_ALWAYS\n", block->bbNum, block->Next()->bbNum); fgRemoveRefPred(block->GetJumpDest(), block); - block->SetJumpKindAndTarget(BBJ_NONE); + block->SetJumpKind(BBJ_ALWAYS); + block->bbFlags |= BBF_NONE_QUIRK; } else { - assert(block->KindIs(BBJ_NONE)); + assert(block->KindIs(BBJ_ALWAYS)); + assert((block->bbFlags & BBF_NONE_QUIRK) != 0); } if (op1->gtFlags & GTF_GLOB_EFFECT) @@ -7616,16 +7618,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { - if (block->NextIs(curJump)) - { - // transform the basic block into a BBJ_NONE - block->SetJumpKindAndTarget(BBJ_NONE); - } - else - { - // transform the basic block into a BBJ_ALWAYS - block->SetJumpKindAndTarget(BBJ_ALWAYS, curJump); - } + // transform the basic block into a BBJ_ALWAYS + block->SetJumpKindAndTarget(BBJ_ALWAYS, curJump); foundVal = true; } else @@ -7636,17 +7630,17 @@ void Compiler::impImportBlockCode(BasicBlock* block) } assert(foundVal); + if (block->JumpsToNext()) + { + block->bbFlags |= BBF_NONE_QUIRK; + } #ifdef DEBUG if (verbose) { printf("\nSwitch folded at " FMT_BB "\n", block->bbNum); - printf(FMT_BB " becomes a %s", block->bbNum, - block->KindIs(BBJ_ALWAYS) ? 
"BBJ_ALWAYS" : "BBJ_NONE"); - if (block->KindIs(BBJ_ALWAYS)) - { - printf(" to " FMT_BB, block->GetJumpDest()->bbNum); - } + printf(FMT_BB " becomes a %s", block->bbNum, "BBJ_ALWAYS"); + printf(" to " FMT_BB, block->GetJumpDest()->bbNum); printf("\n"); } #endif @@ -11241,12 +11235,6 @@ void Compiler::impImportBlock(BasicBlock* block) tgtBlock = block->GetJumpDest(); break; - case BBJ_NONE: - multRef |= block->Next()->bbRefs; - baseTmp = block->Next()->bbStkTempsIn; - tgtBlock = block->Next(); - break; - case BBJ_SWITCH: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_SWITCH); @@ -12051,18 +12039,8 @@ void Compiler::impImport() JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; - if (entryBlock->KindIs(BBJ_NONE)) - { - entryBlock = entryBlock->Next(); - } - else if (opts.IsOSR() && entryBlock->KindIs(BBJ_ALWAYS)) - { - entryBlock = entryBlock->GetJumpDest(); - } - else - { - assert(!"unexpected bbJumpKind in entry sequence"); - } + assert(entryBlock->KindIs(BBJ_ALWAYS)); + entryBlock = entryBlock->GetJumpDest(); } // Note for OSR we'd like to be able to verify this block must be diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index 6aa3712c0fd86..3b4f41d6b8982 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -42,7 +42,7 @@ // current block // { // previous statements -// } BBJ_NONE check block +// } BBJ_ALWAYS check block // check block // { // jump to else if function ptr has the FAT_POINTER_MASK bit set. @@ -58,7 +58,7 @@ // load instantiation argument // create newArgList = (instantiation argument, original argList) // call (actual function pointer, newArgList) -// } BBJ_NONE remainder block +// } BBJ_ALWAYS remainder block // remainder block // { // subsequent statements @@ -273,11 +273,13 @@ class IndirectCallTransformer if (checkBlock != currBlock) { + assert(currBlock->KindIs(BBJ_ALWAYS)); + currBlock->SetJumpDest(checkBlock); compiler->fgAddRefPred(checkBlock, currBlock); } // checkBlock - assert(checkBlock->KindIs(BBJ_NONE)); + assert(checkBlock->KindIs(BBJ_ALWAYS)); checkBlock->SetJumpKindAndTarget(BBJ_COND, elseBlock); compiler->fgAddRefPred(elseBlock, checkBlock); compiler->fgAddRefPred(thenBlock, checkBlock); @@ -365,7 +367,8 @@ class IndirectCallTransformer { assert(checkIdx == 0); - checkBlock = CreateAndInsertBasicBlock(BBJ_NONE, currBlock); + checkBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, currBlock, currBlock->Next()); + checkBlock->bbFlags |= BBF_NONE_QUIRK; GenTree* fatPointerMask = new (compiler, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, FAT_POINTER_MASK); GenTree* fptrAddressCopy = compiler->gtCloneExpr(fptrAddress); GenTree* fatPointerAnd = compiler->gtNewOperNode(GT_AND, TYP_I_IMPL, fptrAddressCopy, fatPointerMask); @@ -393,7 +396,8 @@ class IndirectCallTransformer // virtual void CreateElse() { - elseBlock = CreateAndInsertBasicBlock(BBJ_NONE, thenBlock); + elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock, thenBlock->Next()); + elseBlock->bbFlags |= BBF_NONE_QUIRK; GenTree* fixedFptrAddress = GetFixedFptrAddress(); GenTree* actualCallAddress = compiler->gtNewIndir(pointerType, fixedFptrAddress); @@ -583,8 +587,9 @@ class IndirectCallTransformer else { // In case of multiple checks, append to the previous thenBlock block + // (Set jump target of new checkBlock in CreateThen()) BasicBlock* prevCheckBlock = checkBlock; - checkBlock = 
CreateAndInsertBasicBlock(BBJ_NONE, thenBlock); + checkBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock); checkFallsThrough = false; // prevCheckBlock is expected to jump to this new check (if its type check doesn't succeed) @@ -653,11 +658,11 @@ class IndirectCallTransformer // In case if GDV candidates are "exact" (e.g. we have the full list of classes implementing // the given interface in the app - NativeAOT only at this moment) we assume the last - // check will always be true, so we just simplify the block to BBJ_NONE + // check will always be true, so we just simplify the block to BBJ_ALWAYS const bool isLastCheck = (checkIdx == origCall->GetInlineCandidatesCount() - 1); if (isLastCheck && ((origCall->gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT_EXACT) != 0)) { - checkBlock->SetJumpKindAndTarget(BBJ_NONE); + assert(checkBlock->KindIs(BBJ_ALWAYS)); checkFallsThrough = true; return; } @@ -1005,6 +1010,10 @@ class IndirectCallTransformer thenBlock->inheritWeightPercentage(currBlock, origCall->GetGDVCandidateInfo(checkIdx)->likelihood); // Also, thenBlock has a single pred - last checkBlock + assert(checkBlock->KindIs(BBJ_ALWAYS)); + checkBlock->SetJumpDest(thenBlock); + checkBlock->bbFlags |= BBF_NONE_QUIRK; + assert(checkBlock->JumpsToNext()); compiler->fgAddRefPred(thenBlock, checkBlock); compiler->fgAddRefPred(remainderBlock, thenBlock); @@ -1016,8 +1025,8 @@ class IndirectCallTransformer // virtual void CreateElse() { - elseBlock = CreateAndInsertBasicBlock(BBJ_NONE, thenBlock); - elseBlock->bbFlags |= currBlock->bbFlags & BBF_SPLIT_GAINED; + elseBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, thenBlock, thenBlock->Next()); + elseBlock->bbFlags |= ((currBlock->bbFlags & BBF_SPLIT_GAINED) | BBF_NONE_QUIRK); // CheckBlock flows into elseBlock unless we deal with the case // where we know the last check is always true (in case of "exact" GDV) @@ -1076,7 +1085,7 @@ class IndirectCallTransformer // For chained gdv, we modify the expansion as follows: // - // We verify the check block has two BBJ_NONE/ALWAYS predecessors, one of + // We verify the check block has two BBJ_ALWAYS predecessors, one of // which (the "cold path") ends in a normal call, the other in an // inline candidate call. // @@ -1093,7 +1102,7 @@ class IndirectCallTransformer // BasicBlock* const coldBlock = checkBlock->Prev(); - if (!coldBlock->KindIs(BBJ_NONE)) + if (!coldBlock->KindIs(BBJ_ALWAYS) || !coldBlock->JumpsToNext()) { JITDUMP("Unexpected flow from cold path " FMT_BB "\n", coldBlock->bbNum); return; diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 2ba17de500c21..e9ce0430c1e95 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -1807,8 +1807,8 @@ void Compiler::fgSortEHTable() // // to this: // -// try3 ------------- BB08 // empty BBJ_NONE block -// | try2 ------ BB09 // empty BBJ_NONE block +// try3 ------------- BB08 // empty BBJ_ALWAYS block +// | try2 ------ BB09 // empty BBJ_ALWAYS block // | | try1 // | | |--- BB01 // | | | BB02 @@ -1866,12 +1866,12 @@ void Compiler::fgSortEHTable() // | | handler1 BB03 // | | | BB04 // | | |------- BB05 -// | |-------------- BB06 // empty BBJ_NONE block -// |--------------------- BB07 // empty BBJ_NONE block +// | |-------------- BB06 // empty BBJ_ALWAYS block +// |--------------------- BB07 // empty BBJ_ALWAYS block // // No branches need to change: if something branched to BB05, it will still branch to BB05. If BB05 is a -// BBJ_NONE block, then control flow will fall through the newly added blocks as well. 
If it is anything -// else, it will retain that block branch type and BB06 and BB07 will be unreachable. +// BBJ_ALWAYS block to the next block, then control flow will fall through the newly added blocks as well. +// If it is anything else, it will retain that block branch type and BB06 and BB07 will be unreachable. // // The benefit of this is, once again, to remove the need to consider every EH region when adding new blocks. // @@ -1984,7 +1984,7 @@ bool Compiler::fgNormalizeEHCase1() { // ...then we want to insert an empty, non-removable block outside the try to be the new first block of the // handler. - BasicBlock* newHndStart = BasicBlock::New(this, BBJ_NONE); + BasicBlock* newHndStart = BasicBlock::New(this, BBJ_ALWAYS, handlerStart); fgInsertBBbefore(handlerStart, newHndStart); fgAddRefPred(handlerStart, newHndStart); @@ -2024,7 +2024,7 @@ bool Compiler::fgNormalizeEHCase1() newHndStart->bbCodeOffs = handlerStart->bbCodeOffs; newHndStart->bbCodeOffsEnd = newHndStart->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead? newHndStart->inheritWeight(handlerStart); - newHndStart->bbFlags |= (BBF_DONT_REMOVE | BBF_INTERNAL); + newHndStart->bbFlags |= (BBF_DONT_REMOVE | BBF_INTERNAL | BBF_NONE_QUIRK); modified = true; #ifdef DEBUG @@ -2153,7 +2153,7 @@ bool Compiler::fgNormalizeEHCase2() // We've got multiple 'try' blocks starting at the same place! // Add a new first 'try' block for 'ehOuter' that will be outside 'eh'. - BasicBlock* newTryStart = BasicBlock::New(this, BBJ_NONE); + BasicBlock* newTryStart = BasicBlock::New(this, BBJ_ALWAYS, insertBeforeBlk); newTryStart->bbRefs = 0; fgInsertBBbefore(insertBeforeBlk, newTryStart); fgAddRefPred(insertBeforeBlk, newTryStart); @@ -2186,7 +2186,7 @@ bool Compiler::fgNormalizeEHCase2() // Note that we don't need to clear any flags on the old try start, since it is still a 'try' // start. - newTryStart->bbFlags |= (BBF_DONT_REMOVE | BBF_INTERNAL); + newTryStart->bbFlags |= (BBF_DONT_REMOVE | BBF_INTERNAL | BBF_NONE_QUIRK); if (insertBeforeBlk->bbFlags & BBF_BACKWARD_JUMP_TARGET) { @@ -2248,10 +2248,7 @@ bool Compiler::fgNormalizeEHCase2() // Change pred branches. // - if (!predBlock->KindIs(BBJ_NONE)) - { - fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk); - } + fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk); if (predBlock->NextIs(newTryStart) && predBlock->bbFallsThrough()) { @@ -2632,7 +2629,7 @@ bool Compiler::fgNormalizeEHCase3() // Add a new last block for 'ehOuter' that will be outside the EH region with which it encloses and // shares a 'last' pointer - BasicBlock* newLast = BasicBlock::New(this, BBJ_NONE); + BasicBlock* newLast = BasicBlock::New(this, BBJ_ALWAYS, insertAfterBlk->Next()); newLast->bbRefs = 0; assert(insertAfterBlk != nullptr); fgInsertBBafter(insertAfterBlk, newLast); @@ -2680,7 +2677,7 @@ bool Compiler::fgNormalizeEHCase3() newLast->bbCodeOffs = insertAfterBlk->bbCodeOffsEnd; newLast->bbCodeOffsEnd = newLast->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead? newLast->inheritWeight(insertAfterBlk); - newLast->bbFlags |= BBF_INTERNAL; + newLast->bbFlags |= (BBF_INTERNAL | BBF_NONE_QUIRK); fgAddRefPred(newLast, insertAfterBlk); // Move the insert pointer. More enclosing equivalent 'last' blocks will be inserted after this. 
diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index b348d001952f2..7cef1f2b5d4a4 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -888,11 +888,6 @@ void Compiler::fgExtendDbgLifetimes() switch (block->GetJumpKind()) { - case BBJ_NONE: - PREFIX_ASSUME(!block->IsLast()); - VarSetOps::UnionD(this, initVars, block->Next()->bbScope); - break; - case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index b7c0facd5065c..a3aa7fa3426e5 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -824,14 +824,15 @@ BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler* { noway_assert(conds.Size() > 0); assert(slowHead != nullptr); - assert(insertAfter->KindIs(BBJ_NONE, BBJ_COND)); + assert(insertAfter->KindIs(BBJ_ALWAYS, BBJ_COND)); // Choose how to generate the conditions const bool generateOneConditionPerBlock = true; if (generateOneConditionPerBlock) { - BasicBlock* newBlk = nullptr; + BasicBlock* newBlk = nullptr; + BasicBlock* firstInsertAfter = insertAfter; for (unsigned i = 0; i < conds.Size(); ++i) { @@ -862,6 +863,14 @@ BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler* insertAfter = newBlk; } + // The block preceding the newly-generated conditions could have been a BBJ_ALWAYS. + // Make sure it jumps to the new block chain inserted after it. + if (firstInsertAfter->KindIs(BBJ_ALWAYS)) + { + assert(!firstInsertAfter->IsLast()); + firstInsertAfter->SetJumpDest(firstInsertAfter->Next()); + } + return newBlk; } else @@ -874,6 +883,10 @@ BasicBlock* LoopCloneContext::CondToStmtInBlock(Compiler* comp->fgAddRefPred(newBlk->GetJumpDest(), newBlk); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", insertAfter->bbNum, newBlk->bbNum); + if (insertAfter->KindIs(BBJ_ALWAYS)) + { + insertAfter->SetJumpDest(newBlk); + } comp->fgAddRefPred(newBlk, insertAfter); JITDUMP("Adding conditions to " FMT_BB "\n", newBlk->bbNum); @@ -1944,7 +1957,8 @@ BasicBlock* Compiler::optInsertLoopChoiceConditions(LoopCloneContext* context, JITDUMP("Inserting loop " FMT_LP " loop choice conditions\n", loopNum); assert(context->HasBlockConditions(loopNum)); assert(slowHead != nullptr); - assert(insertAfter->KindIs(BBJ_NONE)); + assert(insertAfter->KindIs(BBJ_ALWAYS)); + assert(insertAfter->JumpsToNext()); if (context->HasBlockConditions(loopNum)) { @@ -2036,19 +2050,20 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Make a new pre-header block 'h2' for the loop. 'h' will fall through to 'h2'. JITDUMP("Create new header block for loop\n"); - BasicBlock* h2 = fgNewBBafter(BBJ_NONE, h, /*extendRegion*/ true); + BasicBlock* h2 = fgNewBBafter(BBJ_ALWAYS, h, /*extendRegion*/ true, /*jumpDest*/ loop.lpEntry); JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", h2->bbNum, h->bbNum); h2->bbWeight = h2->isRunRarely() ? 
BB_ZERO_WEIGHT : ambientWeight; h2->bbNatLoopNum = ambientLoop; h2->bbFlags |= BBF_LOOP_PREHEADER; - if (!h->KindIs(BBJ_NONE)) + if (h2->JumpsToNext()) { - assert(h->KindIs(BBJ_ALWAYS)); - assert(h->HasJumpTo(loop.lpEntry)); - h2->SetJumpKindAndTarget(BBJ_ALWAYS, loop.lpEntry); + h2->bbFlags |= BBF_NONE_QUIRK; } + assert(h->KindIs(BBJ_ALWAYS)); + assert(h->HasJumpTo(loop.lpEntry)); + fgReplacePred(loop.lpEntry, h, h2); JITDUMP("Replace " FMT_BB " -> " FMT_BB " with " FMT_BB " -> " FMT_BB "\n", h->bbNum, loop.lpEntry->bbNum, h2->bbNum, loop.lpEntry->bbNum); @@ -2060,7 +2075,9 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Make 'h' fall through to 'h2' (if it didn't already). // Don't add the h->h2 edge because we're going to insert the cloning conditions between 'h' and 'h2', and // optInsertLoopChoiceConditions() will add the edge. - h->SetJumpKindAndTarget(BBJ_NONE); + h->SetJumpDest(h2); + assert(h->JumpsToNext()); + h->bbFlags |= BBF_NONE_QUIRK; // Make X2 after B, if necessary. (Not necessary if B is a BBJ_ALWAYS.) // "newPred" will be the predecessor of the blocks of the cloned loop. @@ -2100,7 +2117,8 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // would end up in the loop table as a pre-header without creating any new pre-header block). This puts the // slow loop in the canonical loop form. JITDUMP("Create unique head block for slow path loop\n"); - BasicBlock* slowHead = fgNewBBafter(BBJ_NONE, newPred, /*extendRegion*/ true); + // Set the jump target later + BasicBlock* slowHead = fgNewBBafter(BBJ_ALWAYS, newPred, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " after " FMT_BB "\n", slowHead->bbNum, newPred->bbNum); slowHead->bbWeight = newPred->isRunRarely() ? BB_ZERO_WEIGHT : ambientWeight; slowHead->scaleBBWeight(slowPathWeightScaleFactor); @@ -2112,8 +2130,8 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone)); for (BasicBlock* const blk : loop.LoopBlocks()) { - // Initialize newBlk as BBJ_NONE, and fix up jump kind/target later with optCopyBlkDest() - BasicBlock* newBlk = fgNewBBafter(BBJ_NONE, newPred, /*extendRegion*/ true); + // Initialize newBlk as BBJ_ALWAYS without jump target, and fix up jump target later with optCopyBlkDest() + BasicBlock* newBlk = fgNewBBafter(BBJ_ALWAYS, newPred, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " (copy of " FMT_BB ") after " FMT_BB "\n", newBlk->bbNum, blk->bbNum, newPred->bbNum); // Call CloneBlockState to make a copy of the block's statements (and attributes), and assert that it @@ -2172,8 +2190,8 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) bool b = blockMap->Lookup(blk, &newblk); assert(b && newblk != nullptr); - // Jump kind/target should not be set yet - assert(newblk->KindIs(BBJ_NONE)); + // Jump target should not be set yet + assert(!newblk->HasInitializedJumpDest()); // First copy the jump destination(s) from "blk". optCopyBlkDest(blk, newblk); @@ -2184,10 +2202,6 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Add predecessor edges for the new successors, as well as the fall-through paths. 
switch (newblk->GetJumpKind()) { - case BBJ_NONE: - fgAddRefPred(newblk->Next(), newblk); - break; - case BBJ_ALWAYS: case BBJ_CALLFINALLY: fgAddRefPred(newblk->GetJumpDest(), newblk); @@ -2241,20 +2255,18 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // We should always have block conditions. assert(context->HasBlockConditions(loopInd)); - assert(h->KindIs(BBJ_NONE)); - assert(h->NextIs(h2)); + assert(h->KindIs(BBJ_ALWAYS)); + assert(h->HasJumpTo(h2)); // If any condition is false, go to slowHead (which branches or falls through to e2). BasicBlock* e2 = nullptr; bool foundIt = blockMap->Lookup(loop.lpEntry, &e2); assert(foundIt && e2 != nullptr); - if (!slowHead->NextIs(e2)) - { - // We can't just fall through to the slow path entry, so make it an unconditional branch. - assert(slowHead->KindIs(BBJ_NONE)); // This is how we created it above. - slowHead->SetJumpKindAndTarget(BBJ_ALWAYS, e2); - } + // We haven't set the jump target yet + assert(slowHead->KindIs(BBJ_ALWAYS)); + assert(!slowHead->HasInitializedJumpDest()); + slowHead->SetJumpDest(e2); fgAddRefPred(e2, slowHead); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", slowHead->bbNum, e2->bbNum); diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index e62b4575ff7ef..bf3923c48013d 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -867,14 +867,13 @@ GenTree* Lowering::LowerSwitch(GenTree* node) { JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum); noway_assert(comp->opts.OptimizationDisabled()); - if (originalSwitchBB->NextIs(jumpTab[0])) - { - originalSwitchBB->SetJumpKindAndTarget(BBJ_NONE); - } - else + originalSwitchBB->SetJumpKindAndTarget(BBJ_ALWAYS, jumpTab[0]); + + if (originalSwitchBB->JumpsToNext()) { - originalSwitchBB->SetJumpKindAndTarget(BBJ_ALWAYS, jumpTab[0]); + originalSwitchBB->bbFlags |= BBF_NONE_QUIRK; } + // Remove extra predecessor links if there was more than one case. for (unsigned i = 1; i < jumpCnt; ++i) { @@ -955,9 +954,9 @@ GenTree* Lowering::LowerSwitch(GenTree* node) BasicBlock* afterDefaultCondBlock = comp->fgSplitBlockAfterNode(originalSwitchBB, condRange.LastNode()); // afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor. - // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock + // originalSwitchBB is now a BBJ_ALWAYS, and there is a predecessor edge in afterDefaultCondBlock // representing the fall-through flow from originalSwitchBB. 
- assert(originalSwitchBB->KindIs(BBJ_NONE)); + assert(originalSwitchBB->KindIs(BBJ_ALWAYS)); assert(originalSwitchBB->NextIs(afterDefaultCondBlock)); assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH)); assert(afterDefaultCondBlock->GetJumpSwt()->bbsHasDefault); @@ -1020,13 +1019,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node) assert(jumpTab[i] == uniqueSucc); (void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock); } - if (afterDefaultCondBlock->NextIs(uniqueSucc)) - { - afterDefaultCondBlock->SetJumpKindAndTarget(BBJ_NONE); - } - else + + afterDefaultCondBlock->SetJumpKindAndTarget(BBJ_ALWAYS, uniqueSucc); + + if (afterDefaultCondBlock->JumpsToNext()) { - afterDefaultCondBlock->SetJumpKindAndTarget(BBJ_ALWAYS, uniqueSucc); + afterDefaultCondBlock->bbFlags |= BBF_NONE_QUIRK; } } // If the number of possible destinations is small enough, we proceed to expand the switch @@ -1074,7 +1072,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // If we haven't used the afterDefaultCondBlock yet, then use that. if (fUsedAfterDefaultCondBlock) { - BasicBlock* newBlock = comp->fgNewBBafter(BBJ_NONE, currentBlock, true); + BasicBlock* newBlock = comp->fgNewBBafter(BBJ_ALWAYS, currentBlock, true, currentBlock->Next()); + newBlock->bbFlags |= BBF_NONE_QUIRK; comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor. currentBlock = newBlock; currentBBRange = &LIR::AsRange(currentBlock); @@ -1129,12 +1128,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node) if (!fUsedAfterDefaultCondBlock) { // All the cases were fall-through! We don't need this block. - // Convert it from BBJ_SWITCH to BBJ_NONE and unset the BBF_DONT_REMOVE flag + // Convert it from BBJ_SWITCH to BBJ_ALWAYS and unset the BBF_DONT_REMOVE flag // so fgRemoveBlock() doesn't complain. JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); assert(currentBlock->KindIs(BBJ_SWITCH)); - currentBlock->SetJumpKindAndTarget(BBJ_NONE); + currentBlock->SetJumpKindAndTarget(BBJ_ALWAYS, currentBlock->Next()); currentBlock->bbFlags &= ~BBF_DONT_REMOVE; comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. 
} diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index 4403bd341a387..23307435025ee 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -6891,7 +6891,7 @@ void LinearScan::insertUpperVectorRestore(GenTree* tree, } else { - assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS)); + assert(block->KindIs(BBJ_ALWAYS)); blockRange.InsertAtEnd(LIR::SeqTree(compiler, simdUpperRestore)); } } @@ -7840,7 +7840,7 @@ void LinearScan::insertSwap( } else { - assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS)); + assert(block->KindIs(BBJ_ALWAYS)); blockRange.InsertAtEnd(std::move(swapRange)); } } diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 73a8474324d39..c41ba8a209f6f 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -13186,10 +13186,11 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) optUnmarkLoopBlocks(block->GetJumpDest(), block); } - /* JTRUE 0 - transform the basic block into a BBJ_NONE */ + /* JTRUE 0 - transform the basic block into a BBJ_ALWAYS */ bTaken = block->Next(); bNotTaken = block->GetJumpDest(); - block->SetJumpKindAndTarget(BBJ_NONE); + block->SetJumpKindAndTarget(BBJ_ALWAYS, bTaken); + block->bbFlags |= BBF_NONE_QUIRK; } if (fgHaveValidEdgeWeights) @@ -13244,13 +13245,6 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) // Now fix the weights of the edges out of 'bUpdated' switch (bUpdated->GetJumpKind()) { - case BBJ_NONE: - edge = fgGetPredForBlock(bUpdated->Next(), bUpdated); - newMaxWeight = bUpdated->bbWeight; - newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); - edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->Next()); - break; - case BBJ_COND: edge = fgGetPredForBlock(bUpdated->Next(), bUpdated); newMaxWeight = bUpdated->bbWeight; @@ -13281,11 +13275,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); - printf(FMT_BB " becomes a %s", block->bbNum, block->KindIs(BBJ_ALWAYS) ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->KindIs(BBJ_ALWAYS)) - { - printf(" to " FMT_BB, block->GetJumpDest()->bbNum); - } + printf(FMT_BB " becomes a %s", block->bbNum, "BBJ_ALWAYS"); + printf(" to " FMT_BB, block->GetJumpDest()->bbNum); printf("\n"); } #endif @@ -13413,16 +13404,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { - if (!block->NextIs(curJump)) - { - // transform the basic block into a BBJ_ALWAYS - block->SetJumpKindAndTarget(BBJ_ALWAYS, curJump); - } - else - { - // transform the basic block into a BBJ_NONE - block->SetJumpKindAndTarget(BBJ_NONE); - } + block->SetJumpKindAndTarget(BBJ_ALWAYS, curJump); foundVal = true; } else @@ -13433,16 +13415,17 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) } assert(foundVal); + if (block->JumpsToNext()) + { + block->bbFlags |= BBF_NONE_QUIRK; + } #ifdef DEBUG if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); - printf(FMT_BB " becomes a %s", block->bbNum, block->KindIs(BBJ_ALWAYS) ? 
"BBJ_ALWAYS" : "BBJ_NONE"); - if (block->KindIs(BBJ_ALWAYS)) - { - printf(" to " FMT_BB, block->GetJumpDest()->bbNum); - } + printf(FMT_BB " becomes a %s", block->bbNum, "BBJ_ALWAYS"); + printf(" to " FMT_BB, block->GetJumpDest()->bbNum); printf("\n"); } #endif @@ -13566,7 +13549,7 @@ bool Compiler::fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(cons // The rest of block has been removed and we will always throw an exception. // - // For compDbgCode, we prepend an empty BB as the firstBB, it is BBJ_NONE. + // For compDbgCode, we prepend an empty BB as the firstBB, it is BBJ_ALWAYS. // We should not convert it to a ThrowBB. if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0)) { @@ -14653,13 +14636,15 @@ bool Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt) BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt); fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock. - BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true); + BasicBlock* helperBlock = fgNewBBafter(BBJ_ALWAYS, block, true, block->Next()); BasicBlock* cond2Block = fgNewBBafter(BBJ_COND, block, true, remainderBlock); BasicBlock* cond1Block = fgNewBBafter(BBJ_COND, block, true, remainderBlock); - BasicBlock* asgBlock = fgNewBBafter(BBJ_NONE, block, true); + BasicBlock* asgBlock = fgNewBBafter(BBJ_ALWAYS, block, true, block->Next()); block->bbFlags &= ~BBF_NEEDS_GCPOLL; remainderBlock->bbFlags |= propagateFlags; + helperBlock->bbFlags |= BBF_NONE_QUIRK; + asgBlock->bbFlags |= BBF_NONE_QUIRK; // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter). // If they're not internal, mark them as imported to avoid asserts about un-imported blocks. @@ -14676,6 +14661,8 @@ bool Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt) } // Chain the flow correctly. + assert(block->KindIs(BBJ_ALWAYS)); + block->SetJumpDest(asgBlock); fgAddRefPred(asgBlock, block); fgAddRefPred(cond1Block, asgBlock); fgAddRefPred(cond2Block, cond1Block); @@ -14855,8 +14842,8 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt); fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock. - BasicBlock* condBlock = fgNewBBafter(BBJ_NONE, block, true); - BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true); + BasicBlock* condBlock = fgNewBBafter(BBJ_ALWAYS, block, true); + BasicBlock* elseBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true); // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter). // If they're not internal, mark them as imported to avoid asserts about un-imported blocks. 
@@ -14873,12 +14860,19 @@ bool Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) condBlock->inheritWeight(block); + assert(block->KindIs(BBJ_ALWAYS)); + block->SetJumpDest(condBlock); + condBlock->SetJumpDest(elseBlock); + elseBlock->SetJumpDest(remainderBlock); + assert(condBlock->JumpsToNext()); + assert(elseBlock->JumpsToNext()); + fgAddRefPred(condBlock, block); fgAddRefPred(elseBlock, condBlock); fgAddRefPred(remainderBlock, elseBlock); - condBlock->bbFlags |= propagateFlagsToAll; - elseBlock->bbFlags |= propagateFlagsToAll; + condBlock->bbFlags |= (propagateFlagsToAll | BBF_NONE_QUIRK); + elseBlock->bbFlags |= (propagateFlagsToAll | BBF_NONE_QUIRK); BasicBlock* thenBlock = nullptr; if (hasTrueExpr && hasFalseExpr) diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 76f01ae535277..98f18819fde91 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -1008,7 +1008,8 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock() // Update the flow. m_comp->fgRemoveRefPred(m_b1->GetJumpDest(), m_b1); - m_b1->SetJumpKindAndTarget(BBJ_NONE); + m_b1->SetJumpKindAndTarget(BBJ_ALWAYS, m_b1->Next()); + m_b1->bbFlags |= BBF_NONE_QUIRK; // Fixup flags. m_b2->bbFlags |= (m_b1->bbFlags & BBF_COPY_PROPAGATE); diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 01d95e01dd0e0..3b702368ef69a 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -742,7 +742,7 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT bool initBlockOk = (predBlock == initBlock); if (!initBlockOk) { - if (predBlock->KindIs(BBJ_NONE) && predBlock->NextIs(optLoopTable[loopInd].lpEntry) && + if (predBlock->KindIs(BBJ_ALWAYS) && predBlock->HasJumpTo(optLoopTable[loopInd].lpEntry) && (predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) && !predBlock->IsFirst() && predBlock->Prev()->bbFallsThrough()) { @@ -1151,7 +1151,7 @@ bool Compiler::optExtractInitTestIncr( // If we are rebuilding the loop table, we would already have the pre-header block introduced // the first time, which might be empty if no hoisting has yet occurred. In this case, look a // little harder for the possible loop initialization statement. - if (initBlock->KindIs(BBJ_NONE) && initBlock->NextIs(top) && (initBlock->countOfInEdges() == 1) && + if (initBlock->KindIs(BBJ_ALWAYS) && initBlock->HasJumpTo(top) && (initBlock->countOfInEdges() == 1) && !initBlock->IsFirst() && initBlock->Prev()->bbFallsThrough()) { initBlock = initBlock->Prev(); @@ -1393,8 +1393,6 @@ void Compiler::optCheckPreds() { break; } - FALLTHROUGH; - case BBJ_NONE: noway_assert(bb->NextIs(block)); break; case BBJ_EHFILTERRET: @@ -1802,7 +1800,7 @@ class LoopSearch } } // Can we fall through into the loop? - else if (head->KindIs(BBJ_NONE, BBJ_COND)) + else if (head->KindIs(BBJ_COND)) { // The ENTRY is at the TOP (a do-while loop) return top; @@ -2308,14 +2306,6 @@ class LoopSearch } else if (block->KindIs(BBJ_ALWAYS) && block->HasJumpTo(newNext)) { - // We've made `block`'s jump target its bbNext, so remove the jump. - if (!comp->fgOptimizeBranchToNext(block, newNext, block->Prev())) - { - // If optimizing away the goto-next failed for some reason, mark it KEEP_BBJ_ALWAYS to - // prevent assertions from complaining about it. - block->bbFlags |= BBF_KEEP_BBJ_ALWAYS; - } - // If block is newNext's only predecessor, move the IR from block to newNext, // but keep the now-empty block around. 
// @@ -2362,9 +2352,6 @@ class LoopSearch } } - // Make sure we don't leave around a goto-next unless it's marked KEEP_BBJ_ALWAYS. - assert(!block->KindIs(BBJ_COND, BBJ_ALWAYS) || !block->HasJumpTo(newNext) || - ((block->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0)); return newBlock; } @@ -2415,9 +2402,6 @@ class LoopSearch } break; - case BBJ_NONE: - break; - case BBJ_EHFINALLYRET: case BBJ_EHFAULTRET: case BBJ_EHFILTERRET: @@ -2725,7 +2709,6 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R switch (blk->GetJumpKind()) { - case BBJ_NONE: case BBJ_THROW: case BBJ_RETURN: case BBJ_EHFILTERRET: @@ -2735,6 +2718,13 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R break; case BBJ_ALWAYS: + // Fall-through successors are assumed correct and are not modified + if (blk->JumpsToNext() && ((blk->bbFlags & BBF_NONE_QUIRK) != 0)) + { + break; + } + + FALLTHROUGH; case BBJ_LEAVE: case BBJ_CALLFINALLY: case BBJ_COND: @@ -2745,11 +2735,11 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R { fgRemoveRefPred(blk->GetJumpDest(), blk); } + blk->SetJumpDest(newJumpDest); if (updatePreds || addPreds) { fgAddRefPred(newJumpDest, blk); } - blk->SetJumpDest(newJumpDest); } else if (addPreds) { @@ -2840,6 +2830,7 @@ void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to) break; default: to->SetJumpKindAndTarget(from->GetJumpKind(), from->GetJumpDest()); + to->bbFlags |= (from->bbFlags & BBF_NONE_QUIRK); break; } @@ -2942,11 +2933,12 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) // entry block. If the `head` branches to `top` because it is the BBJ_ALWAYS of a // BBJ_CALLFINALLY/BBJ_ALWAYS pair, we canonicalize by introducing a new fall-through // head block. See FindEntry() for the logic that allows this. - if (h->KindIs(BBJ_ALWAYS) && h->HasJumpTo(t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS)) + if (h->KindIs(BBJ_ALWAYS) && h->HasJumpTo(t) && ((h->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0)) { // Insert new head - BasicBlock* const newH = fgNewBBafter(BBJ_NONE, h, /*extendRegion*/ true); + BasicBlock* const newH = fgNewBBafter(BBJ_ALWAYS, h, /*extendRegion*/ true, /*jumpDest*/ h->Next()); + newH->bbFlags |= BBF_NONE_QUIRK; newH->inheritWeight(h); newH->bbNatLoopNum = h->bbNatLoopNum; h->SetJumpDest(newH); @@ -2959,7 +2951,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) JITDUMP("in optCanonicalizeLoop: " FMT_LP " head " FMT_BB " is BBJ_ALWAYS of BBJ_CALLFINALLY/BBJ_ALWAYS pair that targets top " FMT_BB - ". Replacing with new BBJ_NONE head " FMT_BB ".", + ". Replacing with new BBJ_ALWAYS head " FMT_BB ".", loopInd, h->bbNum, t->bbNum, newH->bbNum); h = newH; @@ -3041,7 +3033,8 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) JITDUMP(FMT_LP " head " FMT_BB " is also " FMT_LP " bottom\n", loopInd, h->bbNum, sibling); - BasicBlock* const newH = fgNewBBbefore(BBJ_NONE, t, /*extendRegion*/ false); + BasicBlock* const newH = fgNewBBbefore(BBJ_ALWAYS, t, /*extendRegion*/ false, /*jumpDest*/ t); + newH->bbFlags |= BBF_NONE_QUIRK; fgSetEHRegionForNewLoopHead(newH, t); @@ -3150,7 +3143,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati // this block. In this case, we need to insert a new block for the outer loop // in the same EH region as the branch from the "bottom": // - // BB30 BBJ_NONE + // BB30 BBJ_ALWAYS // BB08 try { } // ... 
// BB10 BBJ_ALWAYS => BB08 @@ -3211,20 +3204,26 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati // Because of this, introducing a block before t automatically gives us // the right flow out of h. // - assert(h->NextIs(t)); - assert(h->bbFallsThrough()); - assert(h->KindIs(BBJ_NONE, BBJ_COND)); - if (h->KindIs(BBJ_COND)) - { - BasicBlock* const hj = h->GetJumpDest(); - assert((hj->bbNum < t->bbNum) || (hj->bbNum > b->bbNum)); - } + assert(h->NextIs(t) || !h->KindIs(BBJ_COND)); + assert(h->HasJumpTo(t) || !h->KindIs(BBJ_ALWAYS)); + assert(h->KindIs(BBJ_ALWAYS, BBJ_COND)); // If the bottom block is in the same "try" region, then we extend the EH // region. Otherwise, we add the new block outside the "try" region. // const bool extendRegion = BasicBlock::sameTryRegion(t, b); - BasicBlock* const newT = fgNewBBbefore(BBJ_NONE, t, extendRegion); + BasicBlock* const newT = fgNewBBbefore(BBJ_ALWAYS, t, extendRegion, t); + newT->bbFlags |= BBF_NONE_QUIRK; + + if (h->KindIs(BBJ_COND)) + { + BasicBlock* const hj = h->GetJumpDest(); + assert((hj->bbNum < t->bbNum) || (hj->bbNum > b->bbNum)); + } + else + { + h->SetJumpDest(newT); + } fgRemoveRefPred(t, h); fgAddRefPred(t, newT); @@ -3365,8 +3364,9 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati childLoop != BasicBlock::NOT_IN_LOOP; // childLoop = optLoopTable[childLoop].lpSibling) { + assert(newT->KindIs(BBJ_ALWAYS)); if ((optLoopTable[childLoop].lpEntry == origE) && (optLoopTable[childLoop].lpHead == h) && - newT->KindIs(BBJ_NONE) && newT->NextIs(origE)) + newT->HasJumpTo(origE)) { optUpdateLoopHead(childLoop, h, newT); @@ -3437,16 +3437,8 @@ bool Compiler::optLoopContains(unsigned l1, unsigned l2) const BasicBlock* Compiler::optLoopEntry(BasicBlock* preHeader) { assert((preHeader->bbFlags & BBF_LOOP_PREHEADER) != 0); - - if (preHeader->KindIs(BBJ_NONE)) - { - return preHeader->Next(); - } - else - { - assert(preHeader->KindIs(BBJ_ALWAYS)); - return preHeader->GetJumpDest(); - } + assert(preHeader->KindIs(BBJ_ALWAYS)); + return preHeader->GetJumpDest(); } //----------------------------------------------------------------------------- @@ -4366,9 +4358,9 @@ PhaseStatus Compiler::optUnrollLoops() // every iteration. for (BasicBlock* block = loop.lpTop; !loop.lpBottom->NextIs(block); block = block->Next()) { - // Use BBJ_NONE to avoid setting a jump target for now. + // Don't set a jump target for now. // Compiler::optCopyBlkDest() will fix the jump kind/target in the loop below. - BasicBlock* newBlock = insertAfter = fgNewBBafter(BBJ_NONE, insertAfter, /*extendRegion*/ true); + BasicBlock* newBlock = insertAfter = fgNewBBafter(BBJ_ALWAYS, insertAfter, /*extendRegion*/ true); blockMap.Set(block, newBlock, BlockToBlockMap::Overwrite); if (!BasicBlock::CloneBlockState(this, newBlock, block, lvar, lval)) @@ -4402,7 +4394,8 @@ PhaseStatus Compiler::optUnrollLoops() newBlock->scaleBBWeight(1.0 / BB_LOOP_WEIGHT_SCALE); // Jump dests are set in a post-pass; make sure CloneBlockState hasn't tried to set them. 
- assert(newBlock->KindIs(BBJ_NONE)); + assert(newBlock->KindIs(BBJ_ALWAYS)); + assert(!newBlock->HasInitializedJumpDest()); if (block == bottom) { @@ -4431,7 +4424,7 @@ PhaseStatus Compiler::optUnrollLoops() { // Jump kind/target should not be set yet BasicBlock* newBlock = blockMap[block]; - assert(newBlock->KindIs(BBJ_NONE)); + assert(!newBlock->HasInitializedJumpDest()); // Now copy the jump kind/target optCopyBlkDest(block, newBlock); @@ -4442,7 +4435,16 @@ PhaseStatus Compiler::optUnrollLoops() // or from the previous unroll clone of the bottom block (subsequent iterations). // After doing this, all the newly cloned blocks now have proper flow and pred lists. // - BasicBlock* const clonedTop = blockMap[loop.lpTop]; + BasicBlock* const clonedTop = blockMap[loop.lpTop]; + BasicBlock* clonedTopPrev = clonedTop->Prev(); + assert(clonedTopPrev->KindIs(BBJ_ALWAYS, BBJ_COND)); + + if (clonedTopPrev->KindIs(BBJ_ALWAYS)) + { + assert(!clonedTopPrev->HasInitializedJumpDest()); + clonedTopPrev->SetJumpDest(clonedTop); + } + fgAddRefPred(clonedTop, clonedTop->Prev()); /* update the new value for the unrolled iterator */ @@ -4495,9 +4497,10 @@ PhaseStatus Compiler::optUnrollLoops() fgRemoveAllRefPreds(succ, block); } - block->SetJumpKindAndTarget(BBJ_NONE); + block->SetJumpKindAndTarget(BBJ_ALWAYS, block->Next()); block->bbStmtList = nullptr; block->bbNatLoopNum = newLoopNum; + block->bbFlags |= BBF_NONE_QUIRK; // Remove a few unnecessary flags (this list is not comprehensive). block->bbFlags &= ~(BBF_LOOP_HEAD | BBF_BACKWARD_JUMP_SOURCE | BBF_BACKWARD_JUMP_TARGET | @@ -4530,7 +4533,7 @@ PhaseStatus Compiler::optUnrollLoops() // the pre-header HEAD (if it exists), or the now empty TOP (if totalIter == 0), // or the first cloned top. // - // If the initBlock is a BBJ_COND drop the condition (and make initBlock a BBJ_NONE block). + // If the initBlock is a BBJ_COND drop the condition (and make initBlock a BBJ_ALWAYS block). // if (initBlock->KindIs(BBJ_COND)) { @@ -4539,14 +4542,15 @@ PhaseStatus Compiler::optUnrollLoops() noway_assert(initBlockBranchStmt->GetRootNode()->OperIs(GT_JTRUE)); fgRemoveStmt(initBlock, initBlockBranchStmt); fgRemoveRefPred(initBlock->GetJumpDest(), initBlock); - initBlock->SetJumpKindAndTarget(BBJ_NONE); + initBlock->SetJumpKindAndTarget(BBJ_ALWAYS, initBlock->Next()); + initBlock->bbFlags |= BBF_NONE_QUIRK; } else { /* the loop must execute */ assert(!dupCond); assert(totalIter > 0); - noway_assert(initBlock->KindIs(BBJ_NONE)); + noway_assert(initBlock->KindIs(BBJ_ALWAYS)); } // The loop will be removed, so no need to fix up the pre-header. @@ -4556,7 +4560,8 @@ PhaseStatus Compiler::optUnrollLoops() // For unrolled loops, all the unrolling preconditions require the pre-header block to fall // through into TOP. - assert(head->KindIs(BBJ_NONE)); + assert(head->KindIs(BBJ_ALWAYS)); + assert(head->JumpsToNext()); } // If we actually unrolled, tail is now reached @@ -4565,7 +4570,12 @@ PhaseStatus Compiler::optUnrollLoops() // if (totalIter > 0) { - fgAddRefPred(tail, blockMap[bottom]); + BasicBlock* clonedBottom = blockMap[bottom]; + assert(clonedBottom->KindIs(BBJ_ALWAYS)); + assert(!clonedBottom->HasInitializedJumpDest()); + + clonedBottom->SetJumpDest(tail); + fgAddRefPred(tail, clonedBottom); fgRemoveRefPred(tail, bottom); } @@ -4848,7 +4858,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) // Does the BB end with an unconditional jump? 
- if (!block->KindIs(BBJ_ALWAYS) || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) + if (!block->KindIs(BBJ_ALWAYS) || block->JumpsToNext() || ((block->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0)) { // It can't be one of the ones we use for our exception magic return false; @@ -5085,8 +5095,10 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) bool foundCondTree = false; // Create a new block after `block` to put the copied condition code. - block->SetJumpKindAndTarget(BBJ_NONE); BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true, bJoin); + block->SetJumpKindAndTarget(BBJ_ALWAYS, bNewCond); + block->bbFlags |= BBF_NONE_QUIRK; + assert(block->JumpsToNext()); // Clone each statement in bTest and append to bNewCond. for (Statement* const stmt : bTest->Statements()) @@ -8154,15 +8166,10 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) // Allocate a new basic block for the pre-header. const bool isTopEntryLoop = loop.lpIsTopEntry(); - BasicBlock* preHead; - + BasicBlock* preHead = BasicBlock::New(this, BBJ_ALWAYS, entry); if (isTopEntryLoop) { - preHead = BasicBlock::New(this, BBJ_NONE); - } - else - { - preHead = BasicBlock::New(this, BBJ_ALWAYS, entry); + preHead->bbFlags |= BBF_NONE_QUIRK; } preHead->bbFlags |= BBF_INTERNAL | BBF_LOOP_PREHEADER; @@ -8309,14 +8316,6 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) switch (predBlock->GetJumpKind()) { - case BBJ_NONE: - // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop, - // meaning it must be fall-through to 'entry', and we must have a top-entry loop. - noway_assert((entry == top) && (predBlock == head) && predBlock->NextIs(preHead)); - fgRemoveRefPred(entry, predBlock); - fgAddRefPred(preHead, predBlock); - break; - case BBJ_COND: if (predBlock->HasJumpTo(entry)) { diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index a7197c85ad413..8ce36ce2f9fb3 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -143,13 +143,13 @@ class PatchpointTransformer // Current block now becomes the test block BasicBlock* remainderBlock = compiler->fgSplitBlockAtBeginning(block); - BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_NONE, block); + BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_ALWAYS, block, block->Next()); // Update flow and flags block->SetJumpKindAndTarget(BBJ_COND, remainderBlock); block->bbFlags |= BBF_INTERNAL; - helperBlock->bbFlags |= BBF_BACKWARD_JUMP; + helperBlock->bbFlags |= (BBF_BACKWARD_JUMP | BBF_NONE_QUIRK); compiler->fgAddRefPred(helperBlock, block); compiler->fgAddRefPred(remainderBlock, helperBlock); diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index 071f8bdb5251c..72dcb172df6e7 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -1441,27 +1441,21 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) return false; } - bool modifiedFlow = false; - if ((jti.m_numAmbiguousPreds > 0) && (jti.m_fallThroughPred != nullptr)) { - // If fall through pred is BBJ_NONE, and we have only (ambiguous, true) or (ambiguous, false) preds, - // we can change the fall through to BBJ_ALWAYS. - // + // TODO: Simplify jti.m_fallThroughPred logic, now that implicit fallthrough is disallowed. 
const bool fallThroughIsTruePred = BlockSetOps::IsMember(this, jti.m_truePreds, jti.m_fallThroughPred->bbNum); + const bool predJumpsToNext = jti.m_fallThroughPred->KindIs(BBJ_ALWAYS) && jti.m_fallThroughPred->JumpsToNext(); - if (jti.m_fallThroughPred->KindIs(BBJ_NONE) && ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || - (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) + if (predJumpsToNext && ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || + (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) { JITDUMP(FMT_BB " has ambiguous preds and a (%s) fall through pred and no (%s) preds.\n" - "Converting fall through pred " FMT_BB " to BBJ_ALWAYS\n", + "Fall through pred " FMT_BB " is BBJ_ALWAYS\n", jti.m_block->bbNum, fallThroughIsTruePred ? "true" : "false", fallThroughIsTruePred ? "false" : "true", jti.m_fallThroughPred->bbNum); - // Possibly defer this until after early out below. - // - jti.m_fallThroughPred->SetJumpKindAndTarget(BBJ_ALWAYS, jti.m_block); - modifiedFlow = true; + assert(jti.m_fallThroughPred->HasJumpTo(jti.m_block)); } else { @@ -1496,7 +1490,6 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // This is possible, but also should be rare. // JITDUMP(FMT_BB " now only has ambiguous preds, not jump threading\n", jti.m_block->bbNum); - assert(!modifiedFlow); return false; } @@ -1540,7 +1533,9 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) JITDUMP(" repurposing " FMT_BB " to always fall through to " FMT_BB "\n", jti.m_block->bbNum, jti.m_falseTarget->bbNum); fgRemoveRefPred(jti.m_trueTarget, jti.m_block); - jti.m_block->SetJumpKindAndTarget(BBJ_NONE); + jti.m_block->SetJumpKindAndTarget(BBJ_ALWAYS, jti.m_falseTarget); + jti.m_block->bbFlags |= BBF_NONE_QUIRK; + assert(jti.m_block->JumpsToNext()); } // Now reroute the flow from the predecessors. diff --git a/src/coreclr/jit/ssabuilder.cpp b/src/coreclr/jit/ssabuilder.cpp index 9bedef7bc937e..b89edc83e9c48 100644 --- a/src/coreclr/jit/ssabuilder.cpp +++ b/src/coreclr/jit/ssabuilder.cpp @@ -1560,8 +1560,8 @@ void SsaBuilder::SetupBBRoot() return; } - BasicBlock* bbRoot = BasicBlock::New(m_pCompiler, BBJ_NONE); - bbRoot->bbFlags |= BBF_INTERNAL; + BasicBlock* bbRoot = BasicBlock::New(m_pCompiler, BBJ_ALWAYS, m_pCompiler->fgFirstBB); + bbRoot->bbFlags |= (BBF_INTERNAL | BBF_NONE_QUIRK); // May need to fix up preds list, so remember the old first block. BasicBlock* oldFirst = m_pCompiler->fgFirstBB;