diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp
index f032ce7748c75..eb4d5666fcf4e 100644
--- a/src/coreclr/jit/block.cpp
+++ b/src/coreclr/jit/block.cpp
@@ -507,6 +507,7 @@ void BasicBlock::dspFlags() const
         {BBF_HAS_IDX_LEN, "idxlen"},
         {BBF_HAS_MD_IDX_LEN, "mdidxlen"},
         {BBF_HAS_NEWOBJ, "newobj"},
+        {BBF_HAS_NEWARR, "newarr"},
         {BBF_HAS_NULLCHECK, "nullcheck"},
         {BBF_BACKWARD_JUMP, "bwd"},
         {BBF_BACKWARD_JUMP_TARGET, "bwd-target"},
diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h
index d0d09143b1eda..03770815f0125 100644
--- a/src/coreclr/jit/block.h
+++ b/src/coreclr/jit/block.h
@@ -461,12 +461,14 @@ enum BasicBlockFlags : uint64_t
     BBF_CAN_ADD_PRED          = MAKE_BBFLAG(38), // Ok to add pred edge to this block, even when "safe" edge creation disabled
     BBF_HAS_VALUE_PROFILE     = MAKE_BBFLAG(39), // Block has a node that needs a value probing
 
+    BBF_HAS_NEWARR            = MAKE_BBFLAG(40), // BB contains 'new' of an array type.
+
     // The following are sets of flags.
 
     // Flags to update when two blocks are compacted
 
     BBF_COMPACT_UPD = BBF_GC_SAFE_POINT | BBF_NEEDS_GCPOLL | BBF_HAS_JMP | BBF_HAS_IDX_LEN | BBF_HAS_MD_IDX_LEN | BBF_BACKWARD_JUMP | \
-                      BBF_HAS_NEWOBJ | BBF_HAS_NULLCHECK | BBF_HAS_MDARRAYREF | BBF_LOOP_HEAD,
+                      BBF_HAS_NEWOBJ | BBF_HAS_NEWARR | BBF_HAS_NULLCHECK | BBF_HAS_MDARRAYREF | BBF_LOOP_HEAD,
 
     // Flags a block should not have had before it is split.
 
@@ -484,7 +486,7 @@ enum BasicBlockFlags : uint64_t
     // For example, the bottom block might or might not have BBF_HAS_NULLCHECK, but we assume it has BBF_HAS_NULLCHECK.
     // TODO: Should BBF_RUN_RARELY be added to BBF_SPLIT_GAINED ?
 
-    BBF_SPLIT_GAINED = BBF_DONT_REMOVE | BBF_HAS_JMP | BBF_BACKWARD_JUMP | BBF_HAS_IDX_LEN | BBF_HAS_MD_IDX_LEN | BBF_PROF_WEIGHT | \
+    BBF_SPLIT_GAINED = BBF_DONT_REMOVE | BBF_HAS_JMP | BBF_BACKWARD_JUMP | BBF_HAS_IDX_LEN | BBF_HAS_MD_IDX_LEN | BBF_PROF_WEIGHT | BBF_HAS_NEWARR | \
                        BBF_HAS_NEWOBJ | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END | BBF_HAS_NULLCHECK | BBF_HAS_HISTOGRAM_PROFILE | BBF_HAS_VALUE_PROFILE | BBF_HAS_MDARRAYREF | BBF_NEEDS_GCPOLL,
 
     // Flags that must be propagated to a new block if code is copied from a block to a new block. These are flags that
@@ -492,7 +494,7 @@ enum BasicBlockFlags : uint64_t
     // have actually copied one of these type of tree nodes, but if we only copy a portion of the block's statements,
     // we don't know (unless we actually pay close attention during the copy).
 
-    BBF_COPY_PROPAGATE = BBF_HAS_NEWOBJ | BBF_HAS_NULLCHECK | BBF_HAS_IDX_LEN | BBF_HAS_MD_IDX_LEN | BBF_HAS_MDARRAYREF,
+    BBF_COPY_PROPAGATE = BBF_HAS_NEWOBJ | BBF_HAS_NEWARR | BBF_HAS_NULLCHECK | BBF_HAS_IDX_LEN | BBF_HAS_MD_IDX_LEN | BBF_HAS_MDARRAYREF,
 };
 
 FORCEINLINE
diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp
index cc65f6269fd31..9003430bb0bcc 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -959,6 +959,10 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos)
         {
             fprintf(fgxFile, "\n callsNew=\"true\"");
         }
+        if (block->HasFlag(BBF_HAS_NEWARR))
+        {
+            fprintf(fgxFile, "\n callsNewArr=\"true\"");
+        }
         if (block->HasFlag(BBF_LOOP_HEAD))
         {
             fprintf(fgxFile, "\n loopHead=\"true\"");
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 334cf5e411aab..576e8be6f367c 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -9679,10 +9679,32 @@ void Compiler::impImportBlockCode(BasicBlock* block)
 
                 // Remember that this function contains 'new' of an SD array.
                 optMethodFlags |= OMF_HAS_NEWARRAY;
+                block->SetFlags(BBF_HAS_NEWARR);
 
-                /* Push the result of the call on the stack */
+                if (opts.OptimizationEnabled())
+                {
+                    // We assign the newly allocated object (by a GT_CALL to newarr node)
+                    // to a temp. Note that the pattern "temp = allocArr" is required
+                    // by ObjectAllocator phase to be able to determine newarr nodes
+                    // without exhaustive walk over all expressions.
+                    lclNum = lvaGrabTemp(true DEBUGARG("NewArr temp"));
 
-                impPushOnStack(op1, tiRetVal);
+                    impStoreToTemp(lclNum, op1, CHECK_SPILL_ALL);
+
+                    assert(lvaTable[lclNum].lvSingleDef == 0);
+                    lvaTable[lclNum].lvSingleDef = 1;
+                    JITDUMP("Marked V%02u as a single def local\n", lclNum);
+                    lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
+
+                    /* Push the result of the call on the stack */
+
+                    impPushOnStack(gtNewLclvNode(lclNum, TYP_REF), tiRetVal);
+                }
+                else
+                {
+                    /* Push the result of the call on the stack */
+                    impPushOnStack(op1, tiRetVal);
+                }
 
                 callTyp = TYP_REF;
             }
diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp
index 08336a28d553a..37ba933a75a97 100644
--- a/src/coreclr/jit/importercalls.cpp
+++ b/src/coreclr/jit/importercalls.cpp
@@ -2606,12 +2606,33 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
     }
 
     //
-    // We start by looking at the last statement, making sure it's a store, and
-    // that the target of the store is the array passed to InitializeArray.
+    // We start by looking at the last statement, making sure it's a store.
     //
     GenTree* arrayLocalStore = impLastStmt->GetRootNode();
-    if (!arrayLocalStore->OperIs(GT_STORE_LCL_VAR) || !arrayLocalNode->OperIs(GT_LCL_VAR) ||
-        (arrayLocalStore->AsLclVar()->GetLclNum() != arrayLocalNode->AsLclVar()->GetLclNum()))
+    if (arrayLocalStore->OperIs(GT_STORE_LCL_VAR) && arrayLocalNode->OperIs(GT_LCL_VAR))
+    {
+        // Make sure the target of the store is the array passed to InitializeArray.
+        if (arrayLocalStore->AsLclVar()->GetLclNum() != arrayLocalNode->AsLclVar()->GetLclNum())
+        {
+            if (opts.OptimizationDisabled())
+            {
+                return nullptr;
+            }
+
+            // The array can be spilled to a temp for stack allocation.
+            // Try getting the actual store node from the previous statement.
+            if (arrayLocalStore->AsLclVar()->Data()->OperIs(GT_LCL_VAR) && impLastStmt->GetPrevStmt() != nullptr)
+            {
+                arrayLocalStore = impLastStmt->GetPrevStmt()->GetRootNode();
+                if (!arrayLocalStore->OperIs(GT_STORE_LCL_VAR) ||
+                    arrayLocalStore->AsLclVar()->GetLclNum() != arrayLocalNode->AsLclVar()->GetLclNum())
+                {
+                    return nullptr;
+                }
+            }
+        }
+    }
+    else
     {
         return nullptr;
     }
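
Note on how this shape is meant to be consumed (illustration only, not part of the patch): under optimization the importer now guarantees that every newarr result is first stored to a single-def temp and that the enclosing block carries BBF_HAS_NEWARR, so a later phase can find array allocations with a root-level pattern match instead of an exhaustive tree walk. Below is a minimal sketch of such a scan, assuming the JIT's internal types (BasicBlock, Statement, GenTree); it only compiles inside the JIT sources, and looksLikeNewArrCall is a hypothetical placeholder predicate, not the real ObjectAllocator classification.

// Hypothetical placeholder: classifies a call as a newarr helper call.
static bool looksLikeNewArrCall(GenTreeCall* call);

static void scanForArrayAllocations(BasicBlock* block)
{
    // Cheap filter: the importer sets BBF_HAS_NEWARR on any block in which
    // it imported a 'newarr', so unflagged blocks can be skipped outright.
    if (!block->HasFlag(BBF_HAS_NEWARR))
    {
        return;
    }

    for (Statement* const stmt : block->Statements())
    {
        GenTree* const root = stmt->GetRootNode();

        // Under optimization the importer canonicalizes "temp = <newarr call>",
        // so a root-level store whose value is a call is the entire pattern;
        // no walk over nested expressions is required.
        if (root->OperIs(GT_STORE_LCL_VAR) && root->AsLclVar()->Data()->OperIs(GT_CALL))
        {
            GenTreeCall* const call = root->AsLclVar()->Data()->AsCall();
            if (looksLikeNewArrCall(call))
            {
                // root->AsLclVar()->GetLclNum() is the single-def temp holding
                // the new array: a candidate for escape analysis and,
                // eventually, stack allocation.
            }
        }
    }
}

The flag check is the point of BBF_HAS_NEWARR (and of adding it to BBF_COMPACT_UPD, BBF_SPLIT_GAINED, and BBF_COPY_PROPAGATE above): it stays accurate as blocks are compacted, split, or copied, so the scan can trust it as a block-level filter.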