diff --git a/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/AsyncHelpers.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/AsyncHelpers.CoreCLR.cs index 291fd8802d3016..600a6722f0645d 100644 --- a/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/AsyncHelpers.CoreCLR.cs +++ b/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/AsyncHelpers.CoreCLR.cs @@ -78,11 +78,27 @@ internal enum ContinuationFlags ContinueOnCapturedTaskScheduler = 64, } + // Keep in sync with dataAsyncResumeInfo in the JIT + internal unsafe struct ResumeInfo + { + public delegate* Resume; + // IP to use for diagnostics. Points into the jitted suspension code. + // For debug codegen the IP resolves via an ASYNC native->IL mapping to + // the IL AsyncHelpers.Await (or other async function) call which + // caused the suspension. + // For optimized codegen the mapping into the root method may be more + // approximate (e.g. because of inlining). + // For all codegens the offset of DiagnosticsIP matches + // DiagnosticNativeOffset for the corresponding AsyncSuspensionPoint in + // the debug info. + public void* DiagnosticIP; + } + #pragma warning disable CA1852 // "Type can be sealed" -- no it cannot because the runtime constructs subtypes dynamically internal unsafe class Continuation { public Continuation? Next; - public delegate* Resume; + public ResumeInfo* ResumeInfo; public ContinuationFlags Flags; public int State; @@ -195,9 +211,8 @@ private interface IRuntimeAsyncTaskOps static abstract ref byte GetResultStorage(T task); } - /// - /// Represents a wrapped runtime async operation. - /// + // Represents execution of a chain of suspended and resuming runtime + // async functions. private sealed class RuntimeAsyncTask : Task, ITaskCompletionAction { public RuntimeAsyncTask() @@ -261,9 +276,8 @@ public static void PostToSyncContext(RuntimeAsyncTask task, SynchronizationCo } } - /// - /// Represents a wrapped runtime async operation. - /// + // Represents execution of a chain of suspended and resuming runtime + // async functions. private sealed class RuntimeAsyncTask : Task, ITaskCompletionAction { public RuntimeAsyncTask() @@ -329,35 +343,63 @@ public static void PostToSyncContext(RuntimeAsyncTask task, SynchronizationConte private static class RuntimeAsyncTaskCore { + [StructLayout(LayoutKind.Explicit)] + private unsafe ref struct DispatcherInfo + { + // Dispatcher info for next dispatcher present on stack, or + // null if none. + [FieldOffset(0)] + public DispatcherInfo* Next; + + // Next continuation the dispatcher will process. +#if TARGET_64BIT + [FieldOffset(8)] +#else + [FieldOffset(4)] +#endif + public Continuation? NextContinuation; + } + + // Information about current task dispatching, to be used for async + // stackwalking. + [ThreadStatic] + private static unsafe DispatcherInfo* t_dispatcherInfo; + public static unsafe void DispatchContinuations(T task) where T : Task, ITaskCompletionAction where TOps : IRuntimeAsyncTaskOps { ExecutionAndSyncBlockStore contexts = default; contexts.Push(); - Continuation? continuation = TOps.GetContinuationState(task); + + DispatcherInfo dispatcherInfo; + dispatcherInfo.Next = t_dispatcherInfo; + dispatcherInfo.NextContinuation = TOps.GetContinuationState(task); + t_dispatcherInfo = &dispatcherInfo; while (true) { - Debug.Assert(continuation != null); + Debug.Assert(dispatcherInfo.NextContinuation != null); try { - ref byte resultLoc = ref continuation.Next != null ? 
ref continuation.Next.GetResultStorageOrNull() : ref TOps.GetResultStorage(task); - Continuation? newContinuation = continuation.Resume(continuation, ref resultLoc); + Continuation curContinuation = dispatcherInfo.NextContinuation; + Continuation? nextContinuation = curContinuation.Next; + dispatcherInfo.NextContinuation = nextContinuation; + + ref byte resultLoc = ref nextContinuation != null ? ref nextContinuation.GetResultStorageOrNull() : ref TOps.GetResultStorage(task); + Continuation? newContinuation = curContinuation.ResumeInfo->Resume(curContinuation, ref resultLoc); if (newContinuation != null) { - newContinuation.Next = continuation.Next; + newContinuation.Next = nextContinuation; HandleSuspended(task); contexts.Pop(); + t_dispatcherInfo = dispatcherInfo.Next; return; } - - continuation = continuation.Next; } catch (Exception ex) { - Debug.Assert(continuation != null); - Continuation? nextContinuation = UnwindToPossibleHandler(continuation); - if (nextContinuation == null) + Continuation? handlerContinuation = UnwindToPossibleHandler(dispatcherInfo.NextContinuation); + if (handlerContinuation == null) { // Tail of AsyncTaskMethodBuilderT.SetException bool successfullySet = ex is OperationCanceledException oce ? @@ -366,6 +408,8 @@ public static unsafe void DispatchContinuations(T task) where T : Task, contexts.Pop(); + t_dispatcherInfo = dispatcherInfo.Next; + if (!successfullySet) { ThrowHelper.ThrowInvalidOperationException(ExceptionResource.TaskT_TransitionToFinal_AlreadyCompleted); @@ -374,17 +418,18 @@ public static unsafe void DispatchContinuations(T task) where T : Task, return; } - nextContinuation.SetException(ex); - - continuation = nextContinuation; + handlerContinuation.SetException(ex); + dispatcherInfo.NextContinuation = handlerContinuation; } - if (continuation == null) + if (dispatcherInfo.NextContinuation == null) { bool successfullySet = TOps.SetCompleted(task); contexts.Pop(); + t_dispatcherInfo = dispatcherInfo.Next; + if (!successfullySet) { ThrowHelper.ThrowInvalidOperationException(ExceptionResource.TaskT_TransitionToFinal_AlreadyCompleted); @@ -393,26 +438,23 @@ public static unsafe void DispatchContinuations(T task) where T : Task, return; } - if (QueueContinuationFollowUpActionIfNecessary(task, continuation)) + if (QueueContinuationFollowUpActionIfNecessary(task, dispatcherInfo.NextContinuation)) { contexts.Pop(); + t_dispatcherInfo = dispatcherInfo.Next; return; } } } - private static Continuation? UnwindToPossibleHandler(Continuation continuation) + private static Continuation? UnwindToPossibleHandler(Continuation? continuation) { while (true) { - Continuation? nextContinuation = continuation.Next; - if (nextContinuation == null) - return null; - - if ((nextContinuation.Flags & ContinuationFlags.HasException) != 0) - return nextContinuation; + if (continuation == null || (continuation.Flags & ContinuationFlags.HasException) != 0) + return continuation; - continuation = nextContinuation; + continuation = continuation.Next; } } diff --git a/src/coreclr/inc/cordebuginfo.h b/src/coreclr/inc/cordebuginfo.h index b0e813dffd3ab9..914cda4258e9e2 100644 --- a/src/coreclr/inc/cordebuginfo.h +++ b/src/coreclr/inc/cordebuginfo.h @@ -45,7 +45,8 @@ class ICorDebugInfo STACK_EMPTY = 0x02, // The stack is empty here CALL_SITE = 0x04, // This is a call site. NATIVE_END_OFFSET_UNKNOWN = 0x08, // Indicates a epilog endpoint - CALL_INSTRUCTION = 0x10 // The actual instruction of a call. + CALL_INSTRUCTION = 0x10, // The actual instruction of a call. 
+ ASYNC = 0x20, // Indicates suspension/resumption for an async call }; @@ -431,4 +432,30 @@ class ICorDebugInfo // Source information about the IL instruction in the inlinee SourceTypes Source; }; + + struct AsyncContinuationVarInfo + { + // IL number of variable (or one of the special IL numbers, like TYPECTXT_ILNUM) + uint32_t VarNumber; + // Offset in continuation object where this variable is stored + uint32_t Offset; + }; + + struct AsyncSuspensionPoint + { + // Offset of IP stored in ResumeInfo.DiagnosticIP. This offset maps to + // the IL call that resulted in the suspension point through an ASYNC + // mapping. Also used as a unique key for debug information about the + // suspension point. See ResumeInfo.DiagnosticIP in SPC for more info. + uint32_t DiagnosticNativeOffset; + // Count of AsyncContinuationVarInfo in array of locals starting where + // the previous suspension point's locals end. + uint32_t NumContinuationVars; + }; + + struct AsyncInfo + { + // Number of suspension points in the method. + uint32_t NumSuspensionPoints; + }; }; diff --git a/src/coreclr/inc/corinfo.h b/src/coreclr/inc/corinfo.h index bdedfa685b37e8..15523518a38657 100644 --- a/src/coreclr/inc/corinfo.h +++ b/src/coreclr/inc/corinfo.h @@ -1734,8 +1734,8 @@ struct CORINFO_ASYNC_INFO CORINFO_CLASS_HANDLE continuationClsHnd; // 'Next' field CORINFO_FIELD_HANDLE continuationNextFldHnd; - // 'Resume' field - CORINFO_FIELD_HANDLE continuationResumeFldHnd; + // 'ResumeInfo' field + CORINFO_FIELD_HANDLE continuationResumeInfoFldHnd; // 'State' field CORINFO_FIELD_HANDLE continuationStateFldHnd; // 'Flags' field @@ -2912,6 +2912,16 @@ class ICorStaticInfo uint32_t numMappings // [IN] Number of rich mappings ) = 0; + // Report async debug information to EE. + // The arrays are expected to be allocated with allocateArray + // and ownership is transferred to the EE with this call. + virtual void reportAsyncDebugInfo( + ICorDebugInfo::AsyncInfo* asyncInfo, // [IN] Async method information + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, // [IN] Array of async suspension points, indexed by state number + ICorDebugInfo::AsyncContinuationVarInfo* vars, // [IN] Array of async continuation variable info + uint32_t numVars // [IN] Number of entries in the async vars array + ) = 0; + // Report back some metadata about the compilation to the EE -- for // example, metrics about the compilation. 
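As a rough managed sketch (not part of this patch), the data handed to reportAsyncDebugInfo can be pictured like this: suspension points are indexed by state number, and each NumContinuationVars count consumes the next slice of the single flat vars array, starting where the previous suspension point's slice ended. The type and member names below mirror the C++ declarations above; the grouping loop is only an illustration of how a consumer might walk the arrays.

using System;

// Managed mirrors of the ICorDebugInfo records above (illustrative only).
struct AsyncInfoSketch { public uint NumSuspensionPoints; }
struct AsyncSuspensionPointSketch { public uint DiagnosticNativeOffset; public uint NumContinuationVars; }
struct AsyncContinuationVarInfoSketch { public uint VarNumber; public uint Offset; }

static class AsyncDebugInfoDumpSketch
{
    // Group the flat 'vars' array by suspension point, the way the
    // NumContinuationVars counts partition it.
    public static void Dump(AsyncSuspensionPointSketch[] points, AsyncContinuationVarInfoSketch[] vars)
    {
        int varIndex = 0;
        for (int state = 0; state < points.Length; state++)
        {
            Console.WriteLine($"state {state}: DiagnosticNativeOffset=0x{points[state].DiagnosticNativeOffset:x}");
            for (uint i = 0; i < points[state].NumContinuationVars; i++, varIndex++)
            {
                AsyncContinuationVarInfoSketch v = vars[varIndex];
                Console.WriteLine($"  IL var {v.VarNumber} stored at continuation offset 0x{v.Offset:x}");
            }
        }
    }
}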
virtual void reportMetadata( @@ -3340,7 +3350,7 @@ class ICorDynamicInfo : public ICorStaticInfo CORINFO_TAILCALL_HELPERS* pResult ) = 0; - virtual CORINFO_METHOD_HANDLE getAsyncResumptionStub() = 0; + virtual CORINFO_METHOD_HANDLE getAsyncResumptionStub(void** entryPoint) = 0; virtual CORINFO_CLASS_HANDLE getContinuationType( size_t dataSize, diff --git a/src/coreclr/inc/icorjitinfoimpl_generated.h b/src/coreclr/inc/icorjitinfoimpl_generated.h index 0dc4f5fa089732..8ab46ba76c5c30 100644 --- a/src/coreclr/inc/icorjitinfoimpl_generated.h +++ b/src/coreclr/inc/icorjitinfoimpl_generated.h @@ -453,6 +453,12 @@ void reportRichMappings( ICorDebugInfo::RichOffsetMapping* mappings, uint32_t numMappings) override; +void reportAsyncDebugInfo( + ICorDebugInfo::AsyncInfo* asyncInfo, + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, + ICorDebugInfo::AsyncContinuationVarInfo* vars, + uint32_t numVars) override; + void reportMetadata( const char* key, const void* value, @@ -660,7 +666,8 @@ CORINFO_CLASS_HANDLE getContinuationType( bool* objRefs, size_t objRefsSize) override; -CORINFO_METHOD_HANDLE getAsyncResumptionStub() override; +CORINFO_METHOD_HANDLE getAsyncResumptionStub( + void** entryPoint) override; bool convertPInvokeCalliToCall( CORINFO_RESOLVED_TOKEN* pResolvedToken, diff --git a/src/coreclr/inc/jiteeversionguid.h b/src/coreclr/inc/jiteeversionguid.h index 2b4ee632d71684..ab8004ce707608 100644 --- a/src/coreclr/inc/jiteeversionguid.h +++ b/src/coreclr/inc/jiteeversionguid.h @@ -37,11 +37,11 @@ #include -constexpr GUID JITEEVersionIdentifier = { /* 68e93e9d-dd28-49ca-9ebc-a01a54532bb3 */ - 0x68e93e9d, - 0xdd28, - 0x49ca, - {0x9e, 0xbc, 0xa0, 0x1a, 0x54, 0x53, 0x2b, 0xb3} +constexpr GUID JITEEVersionIdentifier = { /* a802fbbf-3e14-4b34-a348-5fba9fd756d4 */ + 0xa802fbbf, + 0x3e14, + 0x4b34, + {0xa3, 0x48, 0x5f, 0xba, 0x9f, 0xd7, 0x56, 0xd4} }; #endif // JIT_EE_VERSIONING_GUID_H diff --git a/src/coreclr/inc/readytorun.h b/src/coreclr/inc/readytorun.h index 134b685ff9fbe6..93826f5b8ca771 100644 --- a/src/coreclr/inc/readytorun.h +++ b/src/coreclr/inc/readytorun.h @@ -19,10 +19,10 @@ // src/coreclr/nativeaot/Runtime/inc/ModuleHeaders.h // If you update this, ensure you run `git grep MINIMUM_READYTORUN_MAJOR_VERSION` // and handle pending work. -#define READYTORUN_MAJOR_VERSION 16 +#define READYTORUN_MAJOR_VERSION 17 #define READYTORUN_MINOR_VERSION 0x0000 -#define MINIMUM_READYTORUN_MAJOR_VERSION 16 +#define MINIMUM_READYTORUN_MAJOR_VERSION 17 // R2R Version 2.1 adds the InliningInfo section // R2R Version 2.2 adds the ProfileDataInfo section @@ -47,6 +47,7 @@ // R2R Version 14 changed x86 code generation to use funclets // R2R Version 15 removes double to int/uint helper calls // R2R Version 16 replaces the compression format for debug boundaries with a new format that is smaller and more efficient to parse +// R2R Version 17 adds support for producing "fat" debug information (that e.g. 
can include async debug info) struct READYTORUN_CORE_HEADER { diff --git a/src/coreclr/jit/ICorJitInfo_names_generated.h b/src/coreclr/jit/ICorJitInfo_names_generated.h index e58987625b7d9e..ec4b01cf8899cc 100644 --- a/src/coreclr/jit/ICorJitInfo_names_generated.h +++ b/src/coreclr/jit/ICorJitInfo_names_generated.h @@ -112,6 +112,7 @@ DEF_CLR_API(setBoundaries) DEF_CLR_API(getVars) DEF_CLR_API(setVars) DEF_CLR_API(reportRichMappings) +DEF_CLR_API(reportAsyncDebugInfo) DEF_CLR_API(reportMetadata) DEF_CLR_API(allocateArray) DEF_CLR_API(freeArray) diff --git a/src/coreclr/jit/ICorJitInfo_wrapper_generated.hpp b/src/coreclr/jit/ICorJitInfo_wrapper_generated.hpp index b1014f476f739e..6ad0a136a09b47 100644 --- a/src/coreclr/jit/ICorJitInfo_wrapper_generated.hpp +++ b/src/coreclr/jit/ICorJitInfo_wrapper_generated.hpp @@ -1067,6 +1067,17 @@ void WrapICorJitInfo::reportRichMappings( API_LEAVE(reportRichMappings); } +void WrapICorJitInfo::reportAsyncDebugInfo( + ICorDebugInfo::AsyncInfo* asyncInfo, + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, + ICorDebugInfo::AsyncContinuationVarInfo* vars, + uint32_t numVars) +{ + API_ENTER(reportAsyncDebugInfo); + wrapHnd->reportAsyncDebugInfo(asyncInfo, suspensionPoints, vars, numVars); + API_LEAVE(reportAsyncDebugInfo); +} + void WrapICorJitInfo::reportMetadata( const char* key, const void* value, @@ -1547,10 +1558,11 @@ CORINFO_CLASS_HANDLE WrapICorJitInfo::getContinuationType( return temp; } -CORINFO_METHOD_HANDLE WrapICorJitInfo::getAsyncResumptionStub() +CORINFO_METHOD_HANDLE WrapICorJitInfo::getAsyncResumptionStub( + void** entryPoint) { API_ENTER(getAsyncResumptionStub); - CORINFO_METHOD_HANDLE temp = wrapHnd->getAsyncResumptionStub(); + CORINFO_METHOD_HANDLE temp = wrapHnd->getAsyncResumptionStub(entryPoint); API_LEAVE(getAsyncResumptionStub); return temp; } diff --git a/src/coreclr/jit/async.cpp b/src/coreclr/jit/async.cpp index a44b6d92c0832c..f1eb605e1e951c 100644 --- a/src/coreclr/jit/async.cpp +++ b/src/coreclr/jit/async.cpp @@ -647,11 +647,10 @@ PhaseStatus AsyncTransformation::Run() return PhaseStatus::MODIFIED_NOTHING; } - // Ask the VM to create a resumption stub for this specific version of the - // code. It is stored in the continuation as a function pointer, so we need - // the fixed entry point here. 
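The removal below drops the per-method resumption stub lookup: instead of storing a fixed stub entry point directly in the continuation, the continuation now holds a ResumeInfo* into a per-state table emitted in the method's data section (see the emitter changes further down), and the managed dispatcher calls through it. A minimal C# sketch of that indirection, assuming the Resume signature implied by the dispatcher loop above; the *Sketch names are illustrative stand-ins, not the real types.

using System;

// Illustrative stand-ins; the real ResumeInfo/Continuation live in AsyncHelpers.CoreCLR.cs.
internal unsafe struct ResumeInfoSketch
{
    // Signature inferred from the dispatcher loop: takes the continuation and a
    // ref to the result storage, returns the next continuation to run (or null).
    public delegate*<ContinuationSketch, ref byte, ContinuationSketch> Resume;
    public void* DiagnosticIP;
}

internal unsafe class ContinuationSketch
{
    public ContinuationSketch Next;
    public ResumeInfoSketch* ResumeInfo;
}

internal static unsafe class ResumeDispatchSketch
{
    // One resumption step, shaped like the call the dispatcher performs.
    public static ContinuationSketch Step(ContinuationSketch c, ref byte resultLoc)
        => c.ResumeInfo->Resume(c, ref resultLoc);
}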
- m_resumeStub = m_comp->info.compCompHnd->getAsyncResumptionStub(); - m_comp->info.compCompHnd->getFunctionFixedEntryPoint(m_resumeStub, false, &m_resumeStubLookup); + m_comp->compSuspensionPoints = + new (m_comp, CMK_Async) jitstd::vector(m_comp->getAllocator(CMK_Async)); + m_comp->compAsyncVars = new (m_comp, CMK_Async) + jitstd::vector(m_comp->getAllocator(CMK_Async)); m_returnedContinuationVar = m_comp->lvaGrabTemp(false DEBUGARG("returned continuation")); m_comp->lvaGetDesc(m_returnedContinuationVar)->lvType = TYP_REF; @@ -823,6 +822,8 @@ void AsyncTransformation::Transform( BasicBlock* resumeBB = CreateResumption(block, *remainder, call, callDefInfo, stateNum, layout); m_resumptionBBs.push_back(resumeBB); + + CreateDebugInfoForSuspensionPoint(layout); } //------------------------------------------------------------------------ @@ -1415,6 +1416,14 @@ BasicBlock* AsyncTransformation::CreateSuspension( JITDUMP(" Creating suspension " FMT_BB " for state %u\n", suspendBB->bbNum, stateNum); + GenTreeILOffset* ilOffsetNode = + m_comp->gtNewILOffsetNode(call->GetAsyncInfo().CallAsyncDebugInfo DEBUGARG(BAD_IL_OFFSET)); + + LIR::AsRange(suspendBB).InsertAtEnd(LIR::SeqTree(m_comp, ilOffsetNode)); + + GenTree* recordOffset = new (m_comp, GT_RECORD_ASYNC_RESUME) GenTreeVal(GT_RECORD_ASYNC_RESUME, TYP_VOID, stateNum); + LIR::AsRange(suspendBB).InsertAtEnd(recordOffset); + // Allocate continuation GenTree* returnedContinuation = m_comp->gtNewLclvNode(m_returnedContinuationVar, TYP_REF); @@ -1428,11 +1437,12 @@ BasicBlock* AsyncTransformation::CreateSuspension( GenTree* storeNewContinuation = m_comp->gtNewStoreLclVarNode(m_newContinuationVar, allocContinuation); LIR::AsRange(suspendBB).InsertAtEnd(storeNewContinuation); - // Fill in 'Resume' - GenTree* newContinuation = m_comp->gtNewLclvNode(m_newContinuationVar, TYP_REF); - unsigned resumeOffset = m_comp->info.compCompHnd->getFieldOffset(m_asyncInfo->continuationResumeFldHnd); - GenTree* resumeStubAddr = CreateResumptionStubAddrTree(); - GenTree* storeResume = StoreAtOffset(newContinuation, resumeOffset, resumeStubAddr, TYP_I_IMPL); + // Fill in 'ResumeInfo' + GenTree* newContinuation = m_comp->gtNewLclvNode(m_newContinuationVar, TYP_REF); + unsigned resumeInfoOffset = m_comp->info.compCompHnd->getFieldOffset(m_asyncInfo->continuationResumeInfoFldHnd); + GenTree* resumeInfoAddr = + new (m_comp, GT_ASYNC_RESUME_INFO) GenTreeVal(GT_ASYNC_RESUME_INFO, TYP_I_IMPL, (ssize_t)stateNum); + GenTree* storeResume = StoreAtOffset(newContinuation, resumeInfoOffset, resumeInfoAddr, TYP_I_IMPL); LIR::AsRange(suspendBB).InsertAtEnd(LIR::SeqTree(m_comp, storeResume)); // Fill in 'state' @@ -1703,6 +1713,20 @@ void AsyncTransformation::CreateCheckAndSuspendAfterCall(BasicBlock* *remainder = m_comp->fgSplitBlockAfterNode(block, jtrue); JITDUMP(" Remainder is " FMT_BB "\n", (*remainder)->bbNum); + // For non-inlined calls adjust offset for the split. We have the exact + // offset of the await call, so we can do better than + // fgSplitBlockAfterNode. The previous block contains the call so add 1 to + // include its start offset (the IL offsets are only used for range checks + // in the backend, so having the offset be inside an IL instruction is ok.) 
+ DebugInfo di = call->GetAsyncInfo().CallAsyncDebugInfo.GetRoot(); + DebugInfo par; + if (!di.GetParent(&par)) + { + IL_OFFSET awaitOffset = di.GetLocation().GetOffset(); + block->bbCodeOffsEnd = awaitOffset + 1; + (*remainder)->bbCodeOffs = awaitOffset + 1; + } + FlowEdge* retBBEdge = m_comp->fgAddRefPred(suspendBB, block); block->SetCond(retBBEdge, block->GetTargetEdge()); @@ -1751,6 +1775,11 @@ BasicBlock* AsyncTransformation::CreateResumption(BasicBlock* bloc JITDUMP(" Creating resumption " FMT_BB " for state %u\n", resumeBB->bbNum, stateNum); + GenTreeILOffset* ilOffsetNode = + m_comp->gtNewILOffsetNode(call->GetAsyncInfo().CallAsyncDebugInfo DEBUGARG(BAD_IL_OFFSET)); + + LIR::AsRange(resumeBB).InsertAtEnd(LIR::SeqTree(m_comp, ilOffsetNode)); + SetSuspendedIndicator(resumeBB, block, call); if (layout.Size > 0) @@ -1873,6 +1902,14 @@ BasicBlock* AsyncTransformation::RethrowExceptionOnResumption(BasicBlock* m_comp->fgNewBBinRegion(BBJ_THROW, block, /* runRarely */ true, /* insertAtEnd */ true); JITDUMP(" Created " FMT_BB " to rethrow exception on resumption\n", rethrowExceptionBB->bbNum); + // If this ends up placed after 'block' then ensure it does not break + // debugging. We split 'block' at the call, so a BBF_INTERNAL block after + // it would result in broken debug info. + if ((rethrowExceptionBB->Prev() == block) && !block->HasFlag(BBF_INTERNAL)) + { + rethrowExceptionBB->RemoveFlags(BBF_INTERNAL); + } + BasicBlock* storeResultBB = m_comp->fgNewBBafter(BBJ_ALWAYS, resumeBB, true); JITDUMP(" Created " FMT_BB " to store result when resuming with no exception\n", storeResultBB->bbNum); @@ -2079,6 +2116,36 @@ GenTreeStoreInd* AsyncTransformation::StoreAtOffset( } //------------------------------------------------------------------------ +// AsyncTransformation::CreateDebugInfoForSuspensionPoint: +// Create debug info for the specific suspension point we just created. +// +// Parameters: +// layout - Layout of continuation +// +void AsyncTransformation::CreateDebugInfoForSuspensionPoint(const ContinuationLayout& layout) +{ + uint32_t numLocals = 0; + for (const LiveLocalInfo& local : layout.Locals) + { + unsigned ilVarNum = m_comp->compMap2ILvarNum(local.LclNum); + if (ilVarNum == (unsigned)ICorDebugInfo::UNKNOWN_ILNUM) + { + continue; + } + + ICorDebugInfo::AsyncContinuationVarInfo varInf; + varInf.VarNumber = ilVarNum; + varInf.Offset = OFFSETOF__CORINFO_Continuation__data + local.Offset; + m_comp->compAsyncVars->push_back(varInf); + numLocals++; + } + + ICorDebugInfo::AsyncSuspensionPoint suspensionPoint; + suspensionPoint.DiagnosticNativeOffset = 0; + suspensionPoint.NumContinuationVars = numLocals; + m_comp->compSuspensionPoints->push_back(suspensionPoint); +} + // AsyncTransformation::GetResultBaseVar: // Create a new local to hold the base address of the incoming result from // the continuation. This local can be validly used for the entire suspension @@ -2118,65 +2185,6 @@ unsigned AsyncTransformation::GetExceptionVar() return m_exceptionVar; } -//------------------------------------------------------------------------ -// AsyncTransformation::CreateResumptionStubAddrTree: -// Create a tree that represents the address of the resumption stub entry -// point. -// -// Returns: -// IR node. 
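CreateDebugInfoForSuspensionPoint records, for each IL-visible live local, the offset at which it is stored inside the continuation object (OFFSETOF__CORINFO_Continuation__data plus its layout offset). In principle a diagnostics consumer could combine such a (VarNumber, Offset) pair with a raw view of the continuation to read a suspended local back; the sketch below only shows that arithmetic, and every name in it is hypothetical rather than an existing API.

using System;

// Hypothetical helper: 'continuationBytes' is assumed to be a raw byte view of a
// Continuation object, and 'offset' an AsyncContinuationVarInfo.Offset, which
// already includes the offset of the continuation's data area.
static class ContinuationVarReaderSketch
{
    public static int ReadInt32Var(ReadOnlySpan<byte> continuationBytes, uint offset)
        => BitConverter.ToInt32(continuationBytes.Slice((int)offset, sizeof(int)));
}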
-// -GenTree* AsyncTransformation::CreateResumptionStubAddrTree() -{ - switch (m_resumeStubLookup.accessType) - { - case IAT_VALUE: - { - return CreateFunctionTargetAddr(m_resumeStub, m_resumeStubLookup); - } - case IAT_PVALUE: - { - GenTree* tree = CreateFunctionTargetAddr(m_resumeStub, m_resumeStubLookup); - tree = m_comp->gtNewIndir(TYP_I_IMPL, tree, GTF_IND_NONFAULTING | GTF_IND_INVARIANT); - return tree; - } - case IAT_PPVALUE: - { - noway_assert(!"Unexpected IAT_PPVALUE"); - return nullptr; - } - case IAT_RELPVALUE: - { - GenTree* addr = CreateFunctionTargetAddr(m_resumeStub, m_resumeStubLookup); - GenTree* tree = CreateFunctionTargetAddr(m_resumeStub, m_resumeStubLookup); - tree = m_comp->gtNewIndir(TYP_I_IMPL, tree, GTF_IND_NONFAULTING | GTF_IND_INVARIANT); - tree = m_comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, tree, addr); - return tree; - } - default: - { - noway_assert(!"Bad accessType"); - return nullptr; - } - } -} - -//------------------------------------------------------------------------ -// AsyncTransformation::CreateFunctionTargetAddr: -// Create a tree that represents the address of the resumption stub entry -// point. -// -// Returns: -// IR node. -// -GenTree* AsyncTransformation::CreateFunctionTargetAddr(CORINFO_METHOD_HANDLE methHnd, - const CORINFO_CONST_LOOKUP& lookup) -{ - GenTree* con = m_comp->gtNewIconHandleNode((size_t)lookup.addr, GTF_ICON_FTN_ADDR); - INDEBUG(con->AsIntCon()->gtTargetHandle = (size_t)methHnd); - return con; -} - //------------------------------------------------------------------------ // AsyncTransformation::CreateResumptionSwitch: // Create the IR for the entry of the function that checks the continuation diff --git a/src/coreclr/jit/async.h b/src/coreclr/jit/async.h index 6546ba02b7afde..13621666a0163b 100644 --- a/src/coreclr/jit/async.h +++ b/src/coreclr/jit/async.h @@ -49,7 +49,8 @@ struct CallDefinitionInfo { GenTreeLclVarCommon* DefinitionNode = nullptr; - // Where to insert new IR for suspension checks. + // Where to insert new IR after the call in the original block, for + // suspension checks and for the async suspension for diagnostics purposes. 
GenTree* InsertAfter = nullptr; }; @@ -61,8 +62,6 @@ class AsyncTransformation jitstd::vector m_liveLocalsScratch; CORINFO_ASYNC_INFO* m_asyncInfo; jitstd::vector m_resumptionBBs; - CORINFO_METHOD_HANDLE m_resumeStub = NO_METHOD_HANDLE; - CORINFO_CONST_LOOKUP m_resumeStubLookup; unsigned m_returnedContinuationVar = BAD_VAR_NUM; unsigned m_newContinuationVar = BAD_VAR_NUM; unsigned m_dataArrayVar = BAD_VAR_NUM; @@ -137,12 +136,10 @@ class AsyncTransformation var_types storeType, GenTreeFlags indirFlags = GTF_IND_NONFAULTING); + void CreateDebugInfoForSuspensionPoint(const ContinuationLayout& layout); unsigned GetResultBaseVar(); unsigned GetExceptionVar(); - GenTree* CreateResumptionStubAddrTree(); - GenTree* CreateFunctionTargetAddr(CORINFO_METHOD_HANDLE methHnd, const CORINFO_CONST_LOOKUP& lookup); - void CreateResumptionSwitch(); public: diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h index 4eed197d95ab40..727b4688980020 100644 --- a/src/coreclr/jit/codegen.h +++ b/src/coreclr/jit/codegen.h @@ -186,6 +186,9 @@ class CodeGen final : public CodeGenInterface // the current (pending) label ref, a label which has been referenced but not yet seen BasicBlock* genPendingCallLabel; + emitter::dataSection* genAsyncResumeInfoTable = nullptr; + UNATIVE_OFFSET genAsyncResumeInfoTableOffset = UINT_MAX; + void** codePtr; void* codePtrRW; uint32_t* nativeSizeOfCode; @@ -221,6 +224,8 @@ class CodeGen final : public CodeGenInterface BasicBlock* genCreateTempLabel(); + void genRecordAsyncResume(GenTreeVal* asyncResume); + private: void genLogLabel(BasicBlock* bb); @@ -646,7 +651,6 @@ class CodeGen final : public CodeGenInterface void genAddRichIPMappingHere(const DebugInfo& di); void genReportRichDebugInfo(); - void genRecordRichDebugInfoInlineTree(InlineContext* context, ICorDebugInfo::InlineTreeNode* tree); #ifdef DEBUG @@ -654,6 +658,8 @@ class CodeGen final : public CodeGenInterface void genReportRichDebugInfoInlineTreeToFile(FILE* file, InlineContext* context, bool* first); #endif + void genReportAsyncDebugInfo(); + void genEnsureCodeEmitted(const DebugInfo& di); //------------------------------------------------------------------------- @@ -1198,13 +1204,16 @@ class CodeGen final : public CodeGenInterface void genStructPutArgPartialRepMovs(GenTreePutArgStk* putArgStkNode); #endif - void genCodeForStoreBlk(GenTreeBlk* storeBlkNode); - void genCodeForInitBlkLoop(GenTreeBlk* initBlkNode); - void genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode); - void genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode); - unsigned genEmitJumpTable(GenTree* treeNode, bool relativeAddr); - void genJumpTable(GenTree* tree); - void genTableBasedSwitch(GenTree* tree); + void genCodeForStoreBlk(GenTreeBlk* storeBlkNode); + void genCodeForInitBlkLoop(GenTreeBlk* initBlkNode); + void genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode); + void genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode); + unsigned genEmitJumpTable(GenTree* treeNode, bool relativeAddr); + void genJumpTable(GenTree* tree); + void genTableBasedSwitch(GenTree* tree); + void genAsyncResumeInfo(GenTreeVal* tree); + UNATIVE_OFFSET genEmitAsyncResumeInfoTable(emitter::dataSection** dataSec); + CORINFO_FIELD_HANDLE genEmitAsyncResumeInfo(unsigned stateNum); #if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) instruction genGetInsForOper(GenTree* treeNode); #else diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index 4af842a2d10c73..4b8a288408c501 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ 
b/src/coreclr/jit/codegenarm.cpp @@ -653,6 +653,21 @@ void CodeGen::genJumpTable(GenTree* treeNode) genProduceReg(treeNode); } +//------------------------------------------------------------------------ +// genAsyncResumeInfo: emits address of async resume info for a specific state +// +// Parameters: +// treeNode - the GT_ASYNC_RESUME_INFO node +// +void CodeGen::genAsyncResumeInfo(GenTreeVal* treeNode) +{ + CORINFO_FIELD_HANDLE fieldOffs = genEmitAsyncResumeInfo((unsigned)treeNode->gtVal1); + assert(compiler->eeIsJitDataOffs(fieldOffs)); + genMov32RelocatableDataLabel(compiler->eeGetJitDataOffs(fieldOffs), treeNode->GetRegNum()); + + genProduceReg(treeNode); +} + //------------------------------------------------------------------------ // genGetInsForOper: Return instruction encoding of the operation tree. // diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 4c3bf607017696..9e760801ca2412 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -3764,6 +3764,19 @@ void CodeGen::genJumpTable(GenTree* treeNode) genProduceReg(treeNode); } +//------------------------------------------------------------------------ +// genAsyncResumeInfo: emits address of async resume info for a specific state +// +// Parameters: +// treeNode - the GT_ASYNC_RESUME_INFO node +// +void CodeGen::genAsyncResumeInfo(GenTreeVal* treeNode) +{ + GetEmitter()->emitIns_R_C(INS_adr, emitActualTypeSize(TYP_I_IMPL), treeNode->GetRegNum(), REG_NA, + genEmitAsyncResumeInfo((unsigned)treeNode->gtVal1), 0); + genProduceReg(treeNode); +} + //------------------------------------------------------------------------ // genLockedInstructions: Generate code for a GT_XADD, GT_XAND, GT_XORR or GT_XCHG node. // diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index 8d90b0d3766f10..1849c5496126f9 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -514,6 +514,10 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) genCodeForAsyncContinuation(treeNode); break; + case GT_ASYNC_RESUME_INFO: + genAsyncResumeInfo(treeNode->AsVal()); + break; + case GT_PINVOKE_PROLOG: noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask(compiler->info.compCallConv)) == 0); @@ -553,7 +557,11 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) #endif // TARGET_ARM case GT_IL_OFFSET: - // Do nothing; these nodes are simply markers for debug info. + // Do nothing; this node is a marker for debug info. + break; + + case GT_RECORD_ASYNC_RESUME: + genRecordAsyncResume(treeNode->AsVal()); break; default: @@ -5126,4 +5134,5 @@ void CodeGen::genFnEpilog(BasicBlock* block) compiler->unwindEndEpilog(); } + #endif // TARGET_ARMARCH diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 1bb99f7c9be936..c4c9b18f783c0c 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -2198,6 +2198,8 @@ void CodeGen::genEmitUnwindDebugGCandEH() genReportRichDebugInfo(); + genReportAsyncDebugInfo(); + /* Finalize the Local Var info in terms of generated code */ genSetScopeInfo(); @@ -5641,6 +5643,53 @@ unsigned CodeGen::genEmitJumpTable(GenTree* treeNode, bool relativeAddr) return jmpTabBase; } +//---------------------------------------------------------------------------------- +// genEmitAsyncResumeInfoTable: +// Register the singleton async resumption info table if not registered +// before. Return information about it. 
+// +// Arguments: +// dataSection - [out] The information about the registered data section +// +// Return Value: +// Base offset of the async resumption info table +// +UNATIVE_OFFSET CodeGen::genEmitAsyncResumeInfoTable(emitter::dataSection** dataSection) +{ + assert(compiler->compSuspensionPoints != nullptr); + + if (genAsyncResumeInfoTable == nullptr) + { + GetEmitter()->emitAsyncResumeTable((unsigned)compiler->compSuspensionPoints->size(), + &genAsyncResumeInfoTableOffset, &genAsyncResumeInfoTable); + } + + *dataSection = genAsyncResumeInfoTable; + return genAsyncResumeInfoTableOffset; +} + +//---------------------------------------------------------------------------------- +// genEmitAsyncResumeInfo: +// Obtain a pseudo-CORINFO_FIELD_HANDLE describing how to access the async +// resume information for a specific state number. +// +// Arguments: +// stateNum - The state +// +// Return Value: +// CORINFO_FIELD_HANDLE encoding access of read-only data at a specific +// offset. +// +CORINFO_FIELD_HANDLE CodeGen::genEmitAsyncResumeInfo(unsigned stateNum) +{ + assert(compiler->compSuspensionPoints != nullptr); + assert(stateNum < compiler->compSuspensionPoints->size()); + + emitter::dataSection* dataSection; + UNATIVE_OFFSET baseOffs = genEmitAsyncResumeInfoTable(&dataSection); + return compiler->eeFindJitDataOffs(baseOffs + stateNum * sizeof(emitter::dataAsyncResumeInfo)); +} + //------------------------------------------------------------------------ // getCallTarget - Get the node that evaluates to the call target // @@ -6137,16 +6186,21 @@ void CodeGen::genIPmappingDisp(unsigned mappingNum, const IPmappingDsc* ipMappin case IPmappingDscKind::Normal: const ILLocation& loc = ipMapping->ipmdLoc; Compiler::eeDispILOffs(loc.GetOffset()); - if (loc.IsStackEmpty()) + if ((loc.GetSourceTypes() & ICorDebugInfo::STACK_EMPTY) != 0) { printf(" STACK_EMPTY"); } - if (loc.IsCall()) + if ((loc.GetSourceTypes() & ICorDebugInfo::CALL_INSTRUCTION) != 0) { printf(" CALL_INSTRUCTION"); } + if ((loc.GetSourceTypes() & ICorDebugInfo::ASYNC) != 0) + { + printf(" ASYNC"); + } + break; } @@ -6379,15 +6433,15 @@ void CodeGen::genIPmappingGen() // For managed return values we store all calls. Keep both in this case // too. - if (((prev->ipmdKind == IPmappingDscKind::Normal) && (prev->ipmdLoc.IsCall())) || - ((it->ipmdKind == IPmappingDscKind::Normal) && (it->ipmdLoc.IsCall()))) + if (((prev->ipmdKind == IPmappingDscKind::Normal) && prev->ipmdLoc.IsCallInstruction()) || + ((it->ipmdKind == IPmappingDscKind::Normal) && it->ipmdLoc.IsCallInstruction())) { ++it; continue; } // Otherwise report the higher offset unless the previous mapping is a - // label. + // label coming from IL. 
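genEmitAsyncResumeInfo above hands back read-only data at baseOffs + stateNum * sizeof(dataAsyncResumeInfo), where each entry is a pair of pointer-sized slots (Resume, then DiagnosticIP) kept in sync with the managed ResumeInfo struct. The indexing is simple enough to restate as a sketch; the names below are illustrative only.

using System;

static class ResumeTableSketch
{
    // Each entry is two pointer-sized slots: Resume, then DiagnosticIP.
    static readonly int EntrySize = 2 * IntPtr.Size;

    // Offset of the entry for a given suspension state, relative to the start
    // of the method's read-only data section (mirrors baseOffs + stateNum * sizeof(entry)).
    public static int EntryOffset(int tableBaseOffset, int stateNum)
        => tableBaseOffset + stateNum * EntrySize;
}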
if (prev->ipmdIsLabel) { it = compiler->genIPmappings.erase(it); @@ -6480,7 +6534,7 @@ void CodeGen::genReportRichDebugInfoInlineTreeToFile(FILE* file, InlineContext* fprintf(file, "{\"Ordinal\":%u,", context->GetOrdinal()); fprintf(file, "\"MethodID\":%lld,", (int64_t)context->GetCallee()); fprintf(file, "\"ILOffset\":%u,", context->GetLocation().GetOffset()); - fprintf(file, "\"LocationFlags\":%u,", (uint32_t)context->GetLocation().EncodeSourceTypes()); + fprintf(file, "\"LocationFlags\":%u,", (uint32_t)context->GetLocation().GetSourceTypes()); fprintf(file, "\"ExactILOffset\":%u,", context->GetActualCallOffset()); auto append = [&]() { char buffer[256]; @@ -6645,7 +6699,7 @@ void CodeGen::genReportRichDebugInfo() mapping->NativeOffset = richMapping.nativeLoc.CodeOffset(GetEmitter()); mapping->Inlinee = richMapping.debugInfo.GetInlineContext()->GetOrdinal(); mapping->ILOffset = richMapping.debugInfo.GetLocation().GetOffset(); - mapping->Source = richMapping.debugInfo.GetLocation().EncodeSourceTypes(); + mapping->Source = richMapping.debugInfo.GetLocation().GetSourceTypes(); mappingIndex++; } @@ -6691,6 +6745,65 @@ void CodeGen::genAddRichIPMappingHere(const DebugInfo& di) compiler->genRichIPmappings.push_back(mapping); } +//------------------------------------------------------------------------ +// genReportAsyncDebugInfo: +// Report async debug info back to EE. +// +void CodeGen::genReportAsyncDebugInfo() +{ + if (!compiler->opts.compDbgInfo) + { + return; + } + + jitstd::vector* suspPoints = compiler->compSuspensionPoints; + if (suspPoints == nullptr) + { + return; + } + + assert(genAsyncResumeInfoTable != nullptr); + for (size_t i = 0; i < suspPoints->size(); i++) + { + emitLocation& emitLoc = ((emitLocation*)genAsyncResumeInfoTable->dsCont)[i]; + (*suspPoints)[i].DiagnosticNativeOffset = emitLoc.CodeOffset(GetEmitter()); + } + + ICorDebugInfo::AsyncInfo asyncInfo; + asyncInfo.NumSuspensionPoints = static_cast(suspPoints->size()); + + ICorDebugInfo::AsyncSuspensionPoint* hostSuspensionPoints = static_cast( + compiler->info.compCompHnd->allocateArray(suspPoints->size() * sizeof(ICorDebugInfo::AsyncSuspensionPoint))); + for (size_t i = 0; i < suspPoints->size(); i++) + hostSuspensionPoints[i] = (*suspPoints)[i]; + + jitstd::vector* asyncVars = compiler->compAsyncVars; + ICorDebugInfo::AsyncContinuationVarInfo* hostVars = static_cast( + compiler->info.compCompHnd->allocateArray(asyncVars->size() * sizeof(ICorDebugInfo::AsyncContinuationVarInfo))); + for (size_t i = 0; i < asyncVars->size(); i++) + hostVars[i] = (*asyncVars)[i]; + + compiler->info.compCompHnd->reportAsyncDebugInfo(&asyncInfo, hostSuspensionPoints, hostVars, + static_cast(asyncVars->size())); + +#ifdef DEBUG + if (verbose) + { + printf("Reported async suspension points:\n"); + for (size_t i = 0; i < suspPoints->size(); i++) + { + printf(" [%zu] NumAsyncVars = %u\n", i, hostSuspensionPoints[i].NumContinuationVars); + } + + printf("Reported async vars:\n"); + for (size_t i = 0; i < asyncVars->size(); i++) + { + printf(" [%zu] VarNumber = %u, Offset = %x\n", i, hostVars[i].VarNumber, hostVars[i].Offset); + } + } +#endif +} + /*============================================================================ * * These are empty stubs to help the late dis-assembler to compile @@ -6899,7 +7012,7 @@ void CodeGen::genReturn(GenTree* treeNode) // Reason for not materializing Leave callback as a GT_PROF_HOOK node after GT_RETURN: // In flowgraph and other places assert that the last node of a block marked as - // BBJ_RETURN is either a 
GT_RETURN or GT_JMP or a tail call. It would be nice to
+    // BBJ_RETURN is either a GT_RETURN, GT_JMP or a tail call. It would be nice to
     // maintain such an invariant irrespective of whether profiler hook needed or not.
     // Also, there is not much to be gained by materializing it as an explicit node.
     //
diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp
index 339ac1eab1610d..b5b978cfd99fab 100644
--- a/src/coreclr/jit/codegenlinear.cpp
+++ b/src/coreclr/jit/codegenlinear.cpp
@@ -368,8 +368,6 @@ void CodeGen::genCodeForBBlist()
             genIPmappingAdd(IPmappingDscKind::NoMapping, DebugInfo(), true);
         }
 
-        bool firstMapping = true;
-
         if (compiler->bbIsFuncletBeg(block))
         {
             genUpdateCurrentFunclet(block);
@@ -422,7 +420,8 @@
         }
 #endif // DEBUG
 
-        bool addRichMappings = JitConfig.RichDebugInfo() != 0;
+        bool producedLabelMapping = false;
+        bool addRichMappings = JitConfig.RichDebugInfo() != 0;
 
         INDEBUG(addRichMappings |= JitConfig.JitDisasmWithDebugInfo() != 0);
         INDEBUG(addRichMappings |= JitConfig.WriteRichDebugInfoFile() != nullptr);
@@ -439,8 +438,15 @@
                 {
                     genEnsureCodeEmitted(currentDI);
                     currentDI = rootDI;
-                    genIPmappingAdd(IPmappingDscKind::Normal, currentDI, firstMapping);
-                    firstMapping = false;
+
+                    // We need a tie breaker when we have multiple IL offsets that map to the same native offset.
+                    // Normally we pick the latest, but for block joins we pick the earliest to ensure we end up with
+                    // a mapping to that IL offset. Async mappings should not participate in this -- they are
+                    // internally produced and never fall on the join point in the IL.
+                    // See genIPmappingGen for the tiebreaker.
+                    bool isLabel = !producedLabelMapping && !currentDI.GetLocation().IsAsync();
+                    genIPmappingAdd(IPmappingDscKind::Normal, currentDI, isLabel);
+                    producedLabelMapping |= isLabel;
                 }
 
                 if (addRichMappings && ilOffset->gtStmtDI.IsValid())
@@ -882,11 +888,11 @@ void CodeGen::genCodeForBBlist()
     // This call is for cleaning the GC refs
     genUpdateLife(VarSetOps::MakeEmpty(compiler));
 
-    /* Finalize the spill tracking logic */
+    // Finalize the spill tracking logic
 
     regSet.rsSpillEnd();
 
-    /* Finalize the temp tracking logic */
+    // Finalize the temp tracking logic
 
     regSet.tmpEnd();
 
@@ -901,6 +907,25 @@
 #endif
 }
 
+//------------------------------------------------------------------------
+// genRecordAsyncResume:
+//   Record information about an async resume point in the async resume info table.
+//
+// Arguments:
+//   asyncResume - GT_RECORD_ASYNC_RESUME node
+//
+void CodeGen::genRecordAsyncResume(GenTreeVal* asyncResume)
+{
+    size_t index = asyncResume->gtVal1;
+    assert(compiler->compSuspensionPoints != nullptr);
+    assert(index < compiler->compSuspensionPoints->size());
+
+    emitter::dataSection* asyncResumeInfo;
+    genEmitAsyncResumeInfoTable(&asyncResumeInfo);
+
+    ((emitLocation*)asyncResumeInfo->dsCont)[index] = emitLocation(GetEmitter());
+}
+
 /*
 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp
index 36bc8f78b6dc01..21efe701971365 100644
--- a/src/coreclr/jit/codegenloongarch64.cpp
+++ b/src/coreclr/jit/codegenloongarch64.cpp
@@ -2278,6 +2278,19 @@ void CodeGen::genJumpTable(GenTree* treeNode)
     genProduceReg(treeNode);
 }
 
+//------------------------------------------------------------------------
+// 
genAsyncResumeInfo: emits address of async resume info for a specific state +// +// Parameters: +// treeNode - the GT_ASYNC_RESUME_INFO node +// +void CodeGen::genAsyncResumeInfo(GenTreeVal* treeNode) +{ + GetEmitter()->emitIns_R_C(INS_bl, emitActualTypeSize(TYP_I_IMPL), treeNode->GetRegNum(), REG_NA, + genEmitAsyncResumeInfo((unsigned)treeNode->gtVal1), 0); + genProduceReg(treeNode); +} + //------------------------------------------------------------------------ // genLockedInstructions: Generate code for a GT_XADD or GT_XCHG node. // @@ -4367,6 +4380,10 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) emit->emitIns_R_L(INS_ld_d, EA_PTRSIZE, genPendingCallLabel, targetReg); break; + case GT_ASYNC_RESUME_INFO: + genAsyncResumeInfo(treeNode->AsVal()); + break; + case GT_STORE_BLK: genCodeForStoreBlk(treeNode->AsBlk()); break; @@ -4380,7 +4397,11 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) break; case GT_IL_OFFSET: - // Do nothing; these nodes are simply markers for debug info. + // Do nothing; this node is a marker for debug info. + break; + + case GT_RECORD_ASYNC_RESUME: + genRecordAsyncResume(treeNode->AsVal()); break; default: diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index eab44ce3687079..dc2f177b97cdf7 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -2221,6 +2221,19 @@ void CodeGen::genJumpTable(GenTree* treeNode) genProduceReg(treeNode); } +//------------------------------------------------------------------------ +// genAsyncResumeInfo: emits address of async resume info for a specific state +// +// Parameters: +// treeNode - the GT_ASYNC_RESUME_INFO node +// +void CodeGen::genAsyncResumeInfo(GenTreeVal* treeNode) +{ + GetEmitter()->emitIns_R_C(INS_addi, emitActualTypeSize(TYP_I_IMPL), treeNode->GetRegNum(), REG_NA, + genEmitAsyncResumeInfo((unsigned)treeNode->gtVal1)); + genProduceReg(treeNode); +} + //------------------------------------------------------------------------ // genLockedInstructions: Generate code for a GT_XADD, GT_XAND, GT_XORR or GT_XCHG node. // @@ -4148,6 +4161,10 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) emit->emitIns_R_L(INS_ld, EA_PTRSIZE, genPendingCallLabel, targetReg); break; + case GT_ASYNC_RESUME_INFO: + genAsyncResumeInfo(treeNode->AsVal()); + break; + case GT_STORE_BLK: genCodeForStoreBlk(treeNode->AsBlk()); break; @@ -4161,7 +4178,11 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) break; case GT_IL_OFFSET: - // Do nothing; these nodes are simply markers for debug info. + // Do nothing; this node is a marker for debug info. + break; + + case GT_RECORD_ASYNC_RESUME: + genRecordAsyncResume(treeNode->AsVal()); break; case GT_SH1ADD: diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index ea237262532ff3..cfe344c76b0f0d 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -2265,6 +2265,10 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) emit->emitIns_R_L(INS_lea, EA_PTR_DSP_RELOC, genPendingCallLabel, treeNode->GetRegNum()); break; + case GT_ASYNC_RESUME_INFO: + genAsyncResumeInfo(treeNode->AsVal()); + break; + case GT_STORE_BLK: genCodeForStoreBlk(treeNode->AsBlk()); break; @@ -2285,7 +2289,11 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode) #endif case GT_IL_OFFSET: - // Do nothing; these nodes are simply markers for debug info. + // Do nothing; this node is a marker for debug info. 
+ break; + + case GT_RECORD_ASYNC_RESUME: + genRecordAsyncResume(treeNode->AsVal()); break; #if defined(TARGET_AMD64) @@ -4415,6 +4423,19 @@ void CodeGen::genJumpTable(GenTree* treeNode) genProduceReg(treeNode); } +//------------------------------------------------------------------------ +// genAsyncResumeInfo: emits address of async resume info for a specific state +// +// Parameters: +// treeNode - the GT_ASYNC_RESUME_INFO node +// +void CodeGen::genAsyncResumeInfo(GenTreeVal* treeNode) +{ + GetEmitter()->emitIns_R_C(INS_lea, emitTypeSize(TYP_I_IMPL), treeNode->GetRegNum(), + genEmitAsyncResumeInfo((unsigned)treeNode->gtVal1), 0); + genProduceReg(treeNode); +} + //------------------------------------------------------------------------ // genCodeForLockAdd: Generate code for a GT_LOCKADD node // diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index e7f3dae76f4102..5f51bd68e138db 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -2996,6 +2996,8 @@ class Compiler GenTreeIntCon* gtNewTrue(); GenTreeIntCon* gtNewFalse(); + GenTreeILOffset* gtNewILOffsetNode(const DebugInfo& di DEBUGARG(IL_OFFSET lastOffset)); + GenTree* gtNewPhysRegNode(regNumber reg, var_types type); GenTree* gtNewJmpTableNode(); @@ -4601,7 +4603,7 @@ class Compiler CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset); - void impSetupAndSpillForAsyncCall(GenTreeCall* call, OPCODE opcode, unsigned prefixFlags); + void impSetupAndSpillForAsyncCall(GenTreeCall* call, OPCODE opcode, unsigned prefixFlags, const DebugInfo& callDI); void impInsertAsyncContinuationForLdvirtftnCall(GenTreeCall* call); @@ -4869,7 +4871,7 @@ class Compiler bool impMatchIsInstBooleanConversion(const BYTE* codeAddr, const BYTE* codeEndp, int* consumed); - const BYTE* impMatchTaskAwaitPattern(const BYTE * codeAddr, const BYTE * codeEndp, int* configVal); + const BYTE* impMatchTaskAwaitPattern(const BYTE* codeAddr, const BYTE* codeEndp, int* configVal, IL_OFFSET* awaitOffset); GenTree* impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, bool* booleanCheck, IL_OFFSET ilOffset); @@ -8631,6 +8633,9 @@ class Compiler jitstd::list genIPmappings; jitstd::list genRichIPmappings; + jitstd::vector* compSuspensionPoints = nullptr; + jitstd::vector* compAsyncVars = nullptr; + // Managed RetVal - A side hash table meant to record the mapping from a // GT_CALL node to its debug info. 
This info is used to emit sequence points // that can be used by debugger to determine the native offset at which the @@ -11696,6 +11701,7 @@ class GenTreeVisitor // Leaf nodes case GT_CATCH_ARG: case GT_ASYNC_CONTINUATION: + case GT_ASYNC_RESUME_INFO: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: @@ -11727,6 +11733,7 @@ class GenTreeVisitor case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: + case GT_RECORD_ASYNC_RESUME: case GT_NOP: case GT_SWIFT_ERROR: case GT_GCPOLL: diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 12280cde617228..36406e9fa8ef15 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -4491,6 +4491,7 @@ GenTree::VisitResult GenTree::VisitOperands(TVisitor visitor) case GT_LCL_ADDR: case GT_CATCH_ARG: case GT_ASYNC_CONTINUATION: + case GT_ASYNC_RESUME_INFO: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: @@ -4522,6 +4523,7 @@ GenTree::VisitResult GenTree::VisitOperands(TVisitor visitor) case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: + case GT_RECORD_ASYNC_RESUME: case GT_NOP: case GT_SWIFT_ERROR: case GT_GCPOLL: diff --git a/src/coreclr/jit/debuginfo.cpp b/src/coreclr/jit/debuginfo.cpp index 6b4e25635f2d38..15cb567bbdeb9b 100644 --- a/src/coreclr/jit/debuginfo.cpp +++ b/src/coreclr/jit/debuginfo.cpp @@ -4,32 +4,6 @@ #include "jitpch.h" #include "debuginfo.h" -//------------------------------------------------------------------------ -// EncodeSourceTypes: -// Encode the JIT-EE source type for an ILLocation. -// -// Returns: -// The JIT-EE interface source type. -// -// Remarks: -// We currently encode only calls and stack empty location. -// -ICorDebugInfo::SourceTypes ILLocation::EncodeSourceTypes() const -{ - int source = 0; - if (IsStackEmpty()) - { - source |= ICorDebugInfo::STACK_EMPTY; - } - - if (IsCall()) - { - source |= ICorDebugInfo::CALL_INSTRUCTION; - } - - return static_cast(source); -} - #ifdef DEBUG //------------------------------------------------------------------------ // Dump: Print a textual representation of this ILLocation. @@ -47,8 +21,9 @@ void ILLocation::Dump() const else { printf("0x%03X[", GetOffset()); - printf("%c", IsStackEmpty() ? 'E' : '-'); - printf("%c", IsCall() ? 'C' : '-'); + printf("%c", ((m_sourceTypes & ICorDebugInfo::STACK_EMPTY) != 0) ? 'E' : '-'); + printf("%c", ((m_sourceTypes & ICorDebugInfo::CALL_INSTRUCTION) != 0) ? 'C' : '-'); + printf("%c", ((m_sourceTypes & ICorDebugInfo::ASYNC) != 0) ? 'A' : '-'); printf("]"); } } diff --git a/src/coreclr/jit/debuginfo.h b/src/coreclr/jit/debuginfo.h index 72119b905c948a..39ab3e9a6bd07c 100644 --- a/src/coreclr/jit/debuginfo.h +++ b/src/coreclr/jit/debuginfo.h @@ -13,16 +13,12 @@ class ILLocation { public: ILLocation() - : m_offset(BAD_IL_OFFSET) - , m_isStackEmpty(false) - , m_isCall(false) { } - ILLocation(IL_OFFSET offset, bool isStackEmpty, bool isCall) + ILLocation(IL_OFFSET offset, ICorDebugInfo::SourceTypes sourceTypes) : m_offset(offset) - , m_isStackEmpty(isStackEmpty) - , m_isCall(isCall) + , m_sourceTypes(sourceTypes) { } @@ -31,18 +27,19 @@ class ILLocation return m_offset; } - // Is this source location at a stack empty point? We need to be able to - // report this information back to the debugger since we only allow EnC - // transitions at stack empty points. - bool IsStackEmpty() const + ICorDebugInfo::SourceTypes GetSourceTypes() const { - return m_isStackEmpty; + return m_sourceTypes; } - // Is this a call instruction? Used for managed return values. 
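With ILLocation now carrying the raw ICorDebugInfo::SourceTypes bits instead of two dedicated bools, the new ASYNC bit travels alongside STACK_EMPTY and CALL_INSTRUCTION all the way to the reported boundaries. A small managed model of that flag handling follows; the enum values mirror cordebuginfo.h above, and the filtering helper is purely illustrative.

using System;
using System.Linq;

[Flags]
enum SourceTypesSketch : uint
{
    SourceTypeInvalid = 0x00,
    StackEmpty        = 0x02,
    CallSite          = 0x04,
    CallInstruction   = 0x10,
    Async             = 0x20, // the bit added in this change
}

readonly record struct BoundarySketch(uint NativeOffset, uint ILOffset, SourceTypesSketch Source);

static class BoundaryFilterSketch
{
    // Pick out the boundaries produced for async suspension/resumption points.
    public static BoundarySketch[] AsyncBoundaries(BoundarySketch[] boundaries)
        => boundaries.Where(b => (b.Source & SourceTypesSketch.Async) != 0).ToArray();
}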
- bool IsCall() const + bool IsCallInstruction() const { - return m_isCall; + return (m_sourceTypes & ICorDebugInfo::CALL_INSTRUCTION) != 0; + } + + bool IsAsync() const + { + return (m_sourceTypes & ICorDebugInfo::ASYNC) != 0; } bool IsValid() const @@ -52,7 +49,7 @@ class ILLocation inline bool operator==(const ILLocation& other) const { - return (m_offset == other.m_offset) && (m_isStackEmpty == other.m_isStackEmpty) && (m_isCall == other.m_isCall); + return (m_offset == other.m_offset) && (m_sourceTypes == other.m_sourceTypes); } inline bool operator!=(const ILLocation& other) const @@ -60,17 +57,14 @@ class ILLocation return !(*this == other); } - ICorDebugInfo::SourceTypes EncodeSourceTypes() const; - #ifdef DEBUG // Dump textual representation of this ILLocation to jitstdout. void Dump() const; #endif private: - IL_OFFSET m_offset; - bool m_isStackEmpty : 1; - bool m_isCall : 1; + IL_OFFSET m_offset = BAD_IL_OFFSET; + ICorDebugInfo::SourceTypes m_sourceTypes = ICorDebugInfo::SOURCE_TYPE_INVALID; }; // Represents debug information about a statement. diff --git a/src/coreclr/jit/ee_il_dll.cpp b/src/coreclr/jit/ee_il_dll.cpp index b419d44cc3003f..b0959b9a9d95e5 100644 --- a/src/coreclr/jit/ee_il_dll.cpp +++ b/src/coreclr/jit/ee_il_dll.cpp @@ -1003,7 +1003,7 @@ void Compiler::eeSetLIinfo(unsigned which, UNATIVE_OFFSET nativeOffset, IPmappin { case IPmappingDscKind::Normal: eeBoundaries[which].ilOffset = loc.GetOffset(); - eeBoundaries[which].source = loc.EncodeSourceTypes(); + eeBoundaries[which].source = loc.GetSourceTypes(); break; case IPmappingDscKind::Prolog: eeBoundaries[which].ilOffset = ICorDebugInfo::PROLOG; @@ -1096,12 +1096,17 @@ void Compiler::eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line) { printf("CALL_SITE "); } + if ((line->source & ICorDebugInfo::ASYNC) != 0) + { + printf("ASYNC "); + } printf(")"); } printf("\n"); // We don't expect to see any other bits. - assert((line->source & ~(ICorDebugInfo::STACK_EMPTY | ICorDebugInfo::CALL_INSTRUCTION)) == 0); + assert((line->source & ~(ICorDebugInfo::STACK_EMPTY | ICorDebugInfo::CALL_INSTRUCTION | ICorDebugInfo::ASYNC)) == + 0); } void Compiler::eeDispLineInfos() diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp index 88cf1c67337c61..770e3b449460cc 100644 --- a/src/coreclr/jit/emit.cpp +++ b/src/coreclr/jit/emit.cpp @@ -7965,6 +7965,54 @@ UNATIVE_OFFSET emitter::emitBBTableDataGenBeg(unsigned numEntries, bool relative return secOffs; } +//--------------------------------------------------------------------------- +// emitAsyncResumeTable: +// Allocate space for an async resumption info table in the data sections. 
+// +// Arguments: +// numEntries - Number of entries in the table +// dataSecOffset - [out] Offset of the data section that was allocated +// dataSec - [out] Information about the data section that was allocated +// +void emitter::emitAsyncResumeTable(unsigned numEntries, UNATIVE_OFFSET* dataSecOffs, emitter::dataSection** dataSec) +{ + UNATIVE_OFFSET secOffs = emitConsDsc.dsdOffs; + unsigned emittedSize = sizeof(emitter::dataAsyncResumeInfo) * numEntries; + emitConsDsc.dsdOffs += emittedSize; + + dataSection* secDesc = (dataSection*)emitGetMem(roundUp(sizeof(dataSection) + numEntries * sizeof(emitLocation))); + + for (unsigned i = 0; i < numEntries; i++) + new (secDesc->dsCont + i * sizeof(emitLocation), jitstd::placement_t()) emitLocation(); + + secDesc->dsSize = emittedSize; + secDesc->dsType = dataSection::asyncResumeInfo; + secDesc->dsDataType = TYP_UNKNOWN; + secDesc->dsNext = nullptr; + + if (emitConsDsc.dsdLast) + { + emitConsDsc.dsdLast->dsNext = secDesc; + } + else + { + emitConsDsc.dsdList = secDesc; + } + + emitConsDsc.dsdLast = secDesc; + emitConsDsc.alignment = std::max(emitConsDsc.alignment, (UNATIVE_OFFSET)TARGET_POINTER_SIZE); + + *dataSecOffs = secOffs; + *dataSec = secDesc; + + // We will need the resume stub. Get it from the EE now so we can display + // it before we emit the actual table later. + if (emitAsyncResumeStub == NO_METHOD_HANDLE) + { + emitAsyncResumeStub = emitCmpHandle->getAsyncResumptionStub(&emitAsyncResumeStubEntryPoint); + } +} + /***************************************************************************** * * Emit the given block of bits into the current data section. @@ -8435,7 +8483,8 @@ void emitter::emitOutputDataSec(dataSecDsc* sec, BYTE* dst) bDstRW[i] = (target_size_t)(size_t)target; if (emitComp->opts.compReloc) { - emitRecordRelocation(&(bDstRW[i]), target, IMAGE_REL_BASED_HIGHLOW); + uint16_t relocType = TARGET_POINTER_SIZE == 8 ? IMAGE_REL_BASED_DIR64 : IMAGE_REL_BASED_HIGHLOW; + emitRecordRelocation(&(bDstRW[i]), target, relocType); } JITDUMP(" " FMT_BB ": 0x%p\n", block->bbNum, bDstRW[i]); @@ -8463,6 +8512,30 @@ void emitter::emitOutputDataSec(dataSecDsc* sec, BYTE* dst) JITDUMP(" " FMT_BB ": 0x%x\n", block->bbNum, uDstRW[i]); } } + else if (dsc->dsType == dataSection::asyncResumeInfo) + { + JITDUMP(" section %u, size %u, async resume info\n", secNum++, dscSize); + + size_t numElems = dscSize / sizeof(emitter::dataAsyncResumeInfo); + + emitter::dataAsyncResumeInfo* aDstRW = (emitter::dataAsyncResumeInfo*)dstRW; + for (size_t i = 0; i < numElems; i++) + { + emitLocation* emitLoc = &((emitLocation*)dsc->dsCont)[i]; + + BYTE* target = emitOffsetToPtr(emitLoc->CodeOffset(this)); + aDstRW[i].Resume = (target_size_t)(uintptr_t)emitAsyncResumeStubEntryPoint; + aDstRW[i].DiagnosticIP = (target_size_t)(uintptr_t)target; + if (emitComp->opts.compReloc) + { + uint16_t relocType = TARGET_POINTER_SIZE == 8 ? 
IMAGE_REL_BASED_DIR64 : IMAGE_REL_BASED_HIGHLOW; + emitRecordRelocation(&aDstRW[i].Resume, emitAsyncResumeStubEntryPoint, relocType); + emitRecordRelocation(&aDstRW[i].DiagnosticIP, target, relocType); + } + + JITDUMP(" Resume=%p, FinalResumeIP=%p\n", emitAsyncResumeStubEntryPoint, (void*)target); + } + } else { // Simple binary data: copy the bytes to the target @@ -8600,6 +8673,40 @@ void emitter::emitDispDataSec(dataSecDsc* section, BYTE* dst) } } } + else if (data->dsType == dataSection::asyncResumeInfo) + { + assert(emitAsyncResumeStub != NO_METHOD_HANDLE); + assert(emitAsyncResumeStubEntryPoint != nullptr); + + char nameBuffer[256]; + const char* resumeStubName = + emitComp->eeGetMethodFullName(emitAsyncResumeStub, true, true, nameBuffer, sizeof(nameBuffer)); + + size_t infoCount = data->dsSize / sizeof(emitLocation); + for (size_t i = 0; i < infoCount; i++) + { + if (i > 0) + { + sprintf_s(label, ArrLen(label), "RWD%02zu", i * sizeof(dataAsyncResumeInfo)); + printf(labelFormat, label); + } + + emitLocation* emitLoc = &((emitLocation*)data->dsCont)[i]; + + printf("\tdq\t%s\n", resumeStubName); + + UNATIVE_OFFSET codeOffset = emitLoc->CodeOffset(this); + if (codeOffset != emitLoc->GetIG()->igOffs) + { + printf("\tdq\t%s + %zu\n", emitLabelString(emitLoc->GetIG()), + static_cast(codeOffset - emitLoc->GetIG()->igOffs)); + } + else + { + printf("\tdq\t%s\n", emitLabelString(emitLoc->GetIG())); + } + } + } else { assert(data->dsType == dataSection::data); diff --git a/src/coreclr/jit/emit.h b/src/coreclr/jit/emit.h index 398671531bfaa7..876ae476425b4a 100644 --- a/src/coreclr/jit/emit.h +++ b/src/coreclr/jit/emit.h @@ -3304,6 +3304,9 @@ class emitter int emitSyncThisObjOffs; // what is the offset of "this" for synchronized methods? + CORINFO_METHOD_HANDLE emitAsyncResumeStub = NO_METHOD_HANDLE; + void* emitAsyncResumeStubEntryPoint = nullptr; + public: void emitSetFrameRangeGCRs(int offsLo, int offsHi); void emitSetFrameRangeLcls(int offsLo, int offsHi); @@ -3465,6 +3468,17 @@ class emitter /* The following logic keeps track of initialized data sections */ /************************************************************************/ + // Note: Keep synchronized with AsyncHelpers.ResumeInfo + struct dataAsyncResumeInfo + { + // delegate* + target_size_t Resume; + // Pointer in main code for diagnostics. See comments on + // ICorDebugInfo::AsyncSuspensionPoint::DiagnosticNativeOffset and + // ResumeInfo.DiagnosticIP in SPC. + target_size_t DiagnosticIP; + }; + /* One of these is allocated for every blob of initialized data */ struct dataSection @@ -3479,7 +3493,8 @@ class emitter { data, blockAbsoluteAddr, - blockRelative32 + blockRelative32, + asyncResumeInfo, }; dataSection* dsNext; @@ -3487,8 +3502,9 @@ class emitter sectionType dsType; var_types dsDataType; - // variable-sized array used to store the constant data - // or BasicBlock* array in the block cases. + // variable-sized array used to store the constant data, BasicBlock* + // array in the block cases, or emitLocation for the asyncResumeInfo + // case. BYTE dsCont[0]; }; @@ -3516,6 +3532,7 @@ class emitter void emitOutputDataSec(dataSecDsc* sec, BYTE* dst); void emitDispDataSec(dataSecDsc* section, BYTE* dst); + void emitAsyncResumeTable(unsigned numEntries, UNATIVE_OFFSET* dataOffset, dataSection** dataSection); /************************************************************************/ /* Handles to the current class and method. 
*/ diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 0178826d918017..c2790f129df370 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -2734,6 +2734,7 @@ bool GenTree::Compare(GenTree* op1, GenTree* op2, bool swapOK) case GT_NOP: case GT_LABEL: + case GT_ASYNC_RESUME_INFO: case GT_SWIFT_ERROR: case GT_GCPOLL: return true; @@ -6693,6 +6694,7 @@ bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse) case GT_LCL_ADDR: case GT_CATCH_ARG: case GT_ASYNC_CONTINUATION: + case GT_ASYNC_RESUME_INFO: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: @@ -6724,6 +6726,7 @@ bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse) case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: + case GT_RECORD_ASYNC_RESUME: case GT_NOP: case GT_SWIFT_ERROR: case GT_GCPOLL: @@ -7746,6 +7749,22 @@ GenTreeIntCon* Compiler::gtNewFalse() return gtNewIconNode(0, TYP_INT); } +//----------------------------------------------------------------------------------------- +// gtNewILOffsetNode: +// Create a GT_IL_OFFSET node with the specified debug information. +// +// Arguments: +// di - The debug information +// lastOffset - Offset corresponding to the IL instruction after this one +// +// Return Value: +// New node. +// +GenTreeILOffset* Compiler::gtNewILOffsetNode(const DebugInfo& di DEBUGARG(IL_OFFSET lastOffset)) +{ + return new (this, GT_IL_OFFSET) GenTreeILOffset(di DEBUGARG(lastOffset)); +} + // return a new node representing the value in a physical register GenTree* Compiler::gtNewPhysRegNode(regNumber reg, var_types type) { @@ -9580,6 +9599,8 @@ GenTree* Compiler::gtCloneExpr(GenTree* tree) case GT_END_LFIN: #endif // FEATURE_EH_WINDOWS_X86 case GT_JMP: + case GT_RECORD_ASYNC_RESUME: + case GT_ASYNC_RESUME_INFO: copy = new (this, oper) GenTreeVal(oper, tree->gtType, tree->AsVal()->gtVal1); goto DONE; @@ -10326,6 +10347,7 @@ GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node) case GT_LCL_ADDR: case GT_CATCH_ARG: case GT_ASYNC_CONTINUATION: + case GT_ASYNC_RESUME_INFO: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: @@ -10357,6 +10379,7 @@ GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node) case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: + case GT_RECORD_ASYNC_RESUME: case GT_NOP: case GT_SWIFT_ERROR: case GT_GCPOLL: @@ -12443,6 +12466,11 @@ void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack) tree->AsILOffset()->gtStmtDI.Dump(true); break; + case GT_RECORD_ASYNC_RESUME: + case GT_ASYNC_RESUME_INFO: + printf(" state=%zu", tree->AsVal()->gtVal1); + break; + case GT_JCC: case GT_SETCC: printf(" cond=%s", tree->AsCC()->gtCondition.Name()); diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index 100f17301ea267..794b6f66b09203 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -4366,6 +4366,9 @@ enum class ContinuationContextHandling // Additional async call info. struct AsyncCallInfo { + // DebugInfo with SOURCE_TYPE_ASYNC pointing at the await call IL instruction + DebugInfo CallAsyncDebugInfo; + // The following information is used to implement the proper observable handling of `ExecutionContext`, // `SynchronizationContext` and `TaskScheduler` in async methods. 
// diff --git a/src/coreclr/jit/gtlist.h b/src/coreclr/jit/gtlist.h index 93fb4dfd358b37..489239d879fb0d 100644 --- a/src/coreclr/jit/gtlist.h +++ b/src/coreclr/jit/gtlist.h @@ -39,6 +39,7 @@ GTNODE(JMP , GenTreeVal ,0,0,GTK_LEAF|GTK_NOVALUE) // Jump GTNODE(FTN_ADDR , GenTreeFptrVal ,0,0,GTK_LEAF) // Address of a function GTNODE(RET_EXPR , GenTreeRetExpr ,0,0,GTK_LEAF|DBK_NOTLIR) // Place holder for the return expression from an inline candidate GTNODE(GCPOLL , GenTree ,0,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTLIR) +GTNODE(ASYNC_RESUME_INFO, GenTreeVal ,0,0,GTK_LEAF) // Address of async resume info for a state //----------------------------------------------------------------------------- // Constant nodes: @@ -358,6 +359,7 @@ GTNODE(SWAP , GenTreeOp ,0,0,GTK_BINOP|GTK_NOVALUE|DBK_NOTH GTNODE(COPY , GenTreeCopyOrReload,0,0,GTK_UNOP|DBK_NOTHIR) // Copies a variable from its current location to a register that satisfies GTNODE(RELOAD , GenTreeCopyOrReload,0,0,GTK_UNOP|DBK_NOTHIR) // code generation constraints. The operand is the actual lclVar node. GTNODE(IL_OFFSET , GenTreeILOffset ,0,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // marks an IL offset for debugging purposes +GTNODE(RECORD_ASYNC_RESUME, GenTreeVal ,0,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // record native offset for async resumption info /*****************************************************************************/ #undef GTNODE diff --git a/src/coreclr/jit/gtstructs.h b/src/coreclr/jit/gtstructs.h index b785279801c778..b9cf9839f31f8d 100644 --- a/src/coreclr/jit/gtstructs.h +++ b/src/coreclr/jit/gtstructs.h @@ -51,9 +51,9 @@ GTSTRUCT_0(UnOp , GT_OP) GTSTRUCT_0(Op , GT_OP) #if defined(FEATURE_EH_WINDOWS_X86) -GTSTRUCT_2(Val , GT_END_LFIN, GT_JMP) +GTSTRUCT_N(Val , GT_END_LFIN, GT_JMP, GT_RECORD_ASYNC_RESUME, GT_ASYNC_RESUME_INFO) #else -GTSTRUCT_1(Val , GT_JMP) +GTSTRUCT_N(Val , GT_JMP, GT_RECORD_ASYNC_RESUME, GT_ASYNC_RESUME_INFO) #endif GTSTRUCT_2_SPECIAL(IntConCommon, GT_CNS_INT, GT_CNS_LNG) GTSTRUCT_1(IntCon , GT_CNS_INT) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 3bd72cb08e609a..c473b11a947508 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -2100,11 +2100,10 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H { // Report the debug info. impImportBlockCode won't treat the actual handler as exception block and thus // won't do it for us. - // TODO-DEBUGINFO: Previous code always set stack as non-empty - // here. Can we not just use impCurStmtOffsSet? Are we out of sync - // here with the stack? - impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false)); - argStmt = gtNewStmt(argStore, impCurStmtDI); + // TODO-Bug: Should be reported with ICorDebugInfo::CALL_SITE? 
+ impCurStmtDI = + DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, ICorDebugInfo::SOURCE_TYPE_INVALID)); + argStmt = gtNewStmt(argStore, impCurStmtDI); } else { @@ -2175,8 +2174,18 @@ DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall) { assert(offs != BAD_IL_OFFSET); - bool isStackEmpty = stackState.esStackDepth <= 0; - return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall)); + unsigned sourceTypes = 0; + + if (isCall) + { + sourceTypes |= ICorDebugInfo::CALL_INSTRUCTION; + } + if (stackState.esStackDepth <= 0) + { + sourceTypes |= ICorDebugInfo::STACK_EMPTY; + } + + return DebugInfo(compInlineContext, ILLocation(offs, (ICorDebugInfo::SourceTypes)sourceTypes)); } //------------------------------------------------------------------------ @@ -5980,15 +5989,19 @@ bool Compiler::impBlockIsInALoop(BasicBlock* block) // optimized for runtime async // // Arguments: -// codeAddr - IL after call[virt] NB: pointing at unconsumed token. -// codeEndp - End of IL code stream -// configVal - [out] set to 0 or 1, accordingly, if we saw ConfigureAwait(0|1) +// codeAddr - IL after call[virt] NB: pointing at unconsumed token. +// codeEndp - End of IL code stream +// configVal - [out] set to 0 or 1, accordingly, if we saw ConfigureAwait(0|1) +// awaitOffset - [out] IL offset of await call // // Returns: -// NULL if we did not recognise an Await pattern that we can optimize +// nullptr if we did not recognise an Await pattern that we can optimize // Otherwise returns position at the end of the Await pattern with one token left unconsumed. // -const BYTE* Compiler::impMatchTaskAwaitPattern(const BYTE* codeAddr, const BYTE* codeEndp, int* configVal) +const BYTE* Compiler::impMatchTaskAwaitPattern(const BYTE* codeAddr, + const BYTE* codeEndp, + int* configVal, + IL_OFFSET* awaitOffset) { // If we see the following code pattern in runtime async methods: // @@ -6134,6 +6147,7 @@ const BYTE* Compiler::impMatchTaskAwaitPattern(const BYTE* codeAddr, const BYTE* if (eeIsIntrinsic(nextCallTok.hMethod) && lookupNamedIntrinsic(nextCallTok.hMethod) == NI_System_Runtime_CompilerServices_AsyncHelpers_Await) { + *awaitOffset = (IL_OFFSET)(nextOpcode - info.compCode); // yes, this is an Await // Consume the call opcode, but not the token. // The call importer always consumes one token before moving to the next opcode. @@ -9178,15 +9192,16 @@ void Compiler::impImportBlockCode(BasicBlock* block) { bool isAwait = false; int configVal = -1; // -1 not configured, 0/1 configured to false/true - const BYTE* codeAddrAfterMatch = NULL; + const BYTE* codeAddrAfterMatch = nullptr; + IL_OFFSET awaitOffset = BAD_IL_OFFSET; #ifdef DEBUG if (compIsAsync() && JitConfig.JitOptimizeAwait()) #else if (compIsAsync()) #endif { - codeAddrAfterMatch = impMatchTaskAwaitPattern(codeAddr, codeEndp, &configVal); - if (codeAddrAfterMatch != NULL) + codeAddrAfterMatch = impMatchTaskAwaitPattern(codeAddr, codeEndp, &configVal, &awaitOffset); + if (codeAddrAfterMatch != nullptr) { isAwait = true; prefixFlags |= PREFIX_IS_TASK_AWAIT; @@ -9200,11 +9215,12 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (isAwait) { _impResolveToken(CORINFO_TOKENKIND_Await); - if (resolvedToken.hMethod != NULL) + if (resolvedToken.hMethod != nullptr) { // There is a runtime async variant that is implicitly awaitable, just call that. // skip the await pattern to the last token. 
- codeAddr = codeAddrAfterMatch; + codeAddr = codeAddrAfterMatch; + opcodeOffs = awaitOffset; } else { diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index beca2374c6713a..e5fd8229af4d4f 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -13,7 +13,7 @@ // newObjThis - tree for this pointer or uninitialized newobj temp (or nullptr) // prefixFlags - IL prefix flags for the call // callInfo - EE supplied info for the call -// rawILOffset - IL offset of the opcode, used for guarded devirtualization. +// rawILOffset - IL offset of the opcode // // Returns: // Type of the call's return value. @@ -386,7 +386,7 @@ var_types Compiler::impImportCall(OPCODE opcode, if (sig->isAsyncCall()) { - impSetupAndSpillForAsyncCall(call->AsCall(), opcode, prefixFlags); + impSetupAndSpillForAsyncCall(call->AsCall(), opcode, prefixFlags, di); } impPopCallArgs(sig, call->AsCall()); @@ -691,7 +691,7 @@ var_types Compiler::impImportCall(OPCODE opcode, if (sig->isAsyncCall()) { - impSetupAndSpillForAsyncCall(call->AsCall(), opcode, prefixFlags); + impSetupAndSpillForAsyncCall(call->AsCall(), opcode, prefixFlags, di); } // Now create the argument list. @@ -6814,11 +6814,20 @@ void Compiler::impCheckForPInvokeCall( // call - The call // opcode - The IL opcode for the call // prefixFlags - Flags containing context handling information from IL +// callDI - Debug info for the async call // -void Compiler::impSetupAndSpillForAsyncCall(GenTreeCall* call, OPCODE opcode, unsigned prefixFlags) +void Compiler::impSetupAndSpillForAsyncCall(GenTreeCall* call, + OPCODE opcode, + unsigned prefixFlags, + const DebugInfo& callDI) { AsyncCallInfo asyncInfo; + unsigned newSourceTypes = ICorDebugInfo::ASYNC; + newSourceTypes |= (unsigned)callDI.GetLocation().GetSourceTypes() & ~ICorDebugInfo::CALL_INSTRUCTION; + ILLocation newILLocation(callDI.GetLocation().GetOffset(), (ICorDebugInfo::SourceTypes)newSourceTypes); + asyncInfo.CallAsyncDebugInfo = DebugInfo(callDI.GetInlineContext(), newILLocation); + if ((prefixFlags & PREFIX_IS_TASK_AWAIT) != 0) { JITDUMP("Call is an async task await\n"); diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 4e6b559c989baf..109afd011d8190 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -1109,14 +1109,14 @@ unsigned Compiler::compMap2ILvarNum(unsigned varNum) const } // Is this a varargs function? - if (info.compIsVarArgs && varNum == lvaVarargsHandleArg) + if (info.compIsVarArgs && (varNum == lvaVarargsHandleArg)) { return (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM; } // We create an extra argument for the type context parameter // needed for shared generic code. - if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) && varNum == info.compTypeCtxtArg) + if (((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0) && (varNum == info.compTypeCtxtArg)) { return (unsigned)ICorDebugInfo::TYPECTXT_ILNUM; } @@ -1128,21 +1128,31 @@ unsigned Compiler::compMap2ILvarNum(unsigned varNum) const } #endif // FEATURE_FIXED_OUT_ARGS + if (varNum == lvaAsyncContinuationArg) + { + return (unsigned)ICorDebugInfo::UNKNOWN_ILNUM; + } + // Now mutate varNum to remove extra parameters from the count. 
- if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) && varNum > info.compTypeCtxtArg) + if (((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0) && (varNum > info.compTypeCtxtArg)) + { + varNum--; + } + + if (info.compIsVarArgs && (varNum > lvaVarargsHandleArg)) { varNum--; } - if (info.compIsVarArgs && varNum > lvaVarargsHandleArg) + if ((lvaAsyncContinuationArg != BAD_VAR_NUM) && (varNum > lvaAsyncContinuationArg)) { varNum--; } - /* Is there a hidden argument for the return buffer. - Note that this code works because if the RetBuffArg is not present, - compRetBuffArg will be BAD_VAR_NUM */ - if (info.compRetBuffArg != BAD_VAR_NUM && varNum > info.compRetBuffArg) + // Is there a hidden argument for the return buffer. Note that this code + // works because if the RetBuffArg is not present, compRetBuffArg will be + // BAD_VAR_NUM + if ((info.compRetBuffArg != BAD_VAR_NUM) && (varNum > info.compRetBuffArg)) { varNum--; } diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 05786eb1a9045e..6d91e69178a57f 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -1501,6 +1501,7 @@ void Compiler::fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALAR case GT_RETURNTRAP: case GT_PUTARG_STK: case GT_IL_OFFSET: + case GT_RECORD_ASYNC_RESUME: case GT_KEEPALIVE: case GT_SWIFT_ERROR_RET: case GT_GCPOLL: diff --git a/src/coreclr/jit/lsraarm.cpp b/src/coreclr/jit/lsraarm.cpp index 6fe1c76635dee3..1699ed07f2d397 100644 --- a/src/coreclr/jit/lsraarm.cpp +++ b/src/coreclr/jit/lsraarm.cpp @@ -689,6 +689,8 @@ int LinearScan::BuildNode(GenTree* tree) case GT_LCL_ADDR: case GT_PHYSREG: case GT_IL_OFFSET: + case GT_RECORD_ASYNC_RESUME: + case GT_ASYNC_RESUME_INFO: case GT_LABEL: case GT_PINVOKE_PROLOG: case GT_JCC: diff --git a/src/coreclr/jit/rationalize.cpp b/src/coreclr/jit/rationalize.cpp index 2ae7bef6c5c42a..dcec6f6c608bf3 100644 --- a/src/coreclr/jit/rationalize.cpp +++ b/src/coreclr/jit/rationalize.cpp @@ -1951,8 +1951,7 @@ PhaseStatus Rationalizer::DoPhase() DebugInfo di = statement->GetDebugInfo(); if (di.IsValid() || di.GetRoot().IsValid()) { - GenTreeILOffset* ilOffset = - new (comp, GT_IL_OFFSET) GenTreeILOffset(di DEBUGARG(statement->GetLastILOffset())); + GenTreeILOffset* ilOffset = comp->gtNewILOffsetNode(di DEBUGARG(statement->GetLastILOffset())); BlockRange().InsertBefore(statement->GetTreeList(), ilOffset); } diff --git a/src/coreclr/nativeaot/Runtime/inc/ModuleHeaders.h b/src/coreclr/nativeaot/Runtime/inc/ModuleHeaders.h index 59ea64bf324bb0..f634629cfbada3 100644 --- a/src/coreclr/nativeaot/Runtime/inc/ModuleHeaders.h +++ b/src/coreclr/nativeaot/Runtime/inc/ModuleHeaders.h @@ -11,7 +11,7 @@ struct ReadyToRunHeaderConstants { static const uint32_t Signature = 0x00525452; // 'RTR' - static const uint32_t CurrentMajorVersion = 16; + static const uint32_t CurrentMajorVersion = 17; static const uint32_t CurrentMinorVersion = 0; }; diff --git a/src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs b/src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs index 1ea99252052d90..090d62bad8cc17 100644 --- a/src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs +++ b/src/coreclr/tools/Common/Internal/Runtime/ModuleHeaders.cs @@ -15,7 +15,7 @@ internal struct ReadyToRunHeaderConstants { public const uint Signature = 0x00525452; // 'RTR' - public const ushort CurrentMajorVersion = 16; + public const ushort CurrentMajorVersion = 17; public const ushort CurrentMinorVersion = 0; } #if 
READYTORUN diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs b/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs index 4e574ed7641083..9ee93e31af0e27 100644 --- a/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs +++ b/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs @@ -3215,6 +3215,14 @@ private void reportRichMappings(InlineTreeNode* inlineTree, uint numInlineTree, NativeMemory.Free(mappings); } +#pragma warning disable CA1822 // Mark members as static + private void reportAsyncDebugInfo(AsyncInfo* asyncInfo, AsyncSuspensionPoint* suspensionPoints, AsyncContinuationVarInfo* vars, uint numVars) +#pragma warning restore CA1822 // Mark members as static + { + NativeMemory.Free(suspensionPoints); + NativeMemory.Free(vars); + } + #pragma warning disable CA1822 // Mark members as static private void reportMetadata(byte* key, void* value, nuint length) #pragma warning restore CA1822 // Mark members as static @@ -3361,7 +3369,7 @@ private void getAsyncInfo(ref CORINFO_ASYNC_INFO pAsyncInfoOut) DefType continuation = _compilation.TypeSystemContext.SystemModule.GetKnownType("System.Runtime.CompilerServices"u8, "Continuation"u8); pAsyncInfoOut.continuationClsHnd = ObjectToHandle(continuation); pAsyncInfoOut.continuationNextFldHnd = ObjectToHandle(continuation.GetKnownField("Next"u8)); - pAsyncInfoOut.continuationResumeFldHnd = ObjectToHandle(continuation.GetKnownField("Resume"u8)); + pAsyncInfoOut.continuationResumeInfoFldHnd = ObjectToHandle(continuation.GetKnownField("ResumeInfo"u8)); pAsyncInfoOut.continuationStateFldHnd = ObjectToHandle(continuation.GetKnownField("State"u8)); pAsyncInfoOut.continuationFlagsFldHnd = ObjectToHandle(continuation.GetKnownField("Flags"u8)); DefType asyncHelpers = _compilation.TypeSystemContext.SystemModule.GetKnownType("System.Runtime.CompilerServices"u8, "AsyncHelpers"u8); @@ -3728,7 +3736,7 @@ private bool getTailCallHelpers(ref CORINFO_RESOLVED_TOKEN callToken, CORINFO_SI } #pragma warning disable CA1822 // Mark members as static - private CORINFO_METHOD_STRUCT_* getAsyncResumptionStub() + private CORINFO_METHOD_STRUCT_* getAsyncResumptionStub(ref void* entryPoint) #pragma warning restore CA1822 // Mark members as static { throw new NotImplementedException("Crossgen2 does not support runtime-async yet"); diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoImpl_generated.cs b/src/coreclr/tools/Common/JitInterface/CorInfoImpl_generated.cs index b0a1781661517d..73a82ead1edd1d 100644 --- a/src/coreclr/tools/Common/JitInterface/CorInfoImpl_generated.cs +++ b/src/coreclr/tools/Common/JitInterface/CorInfoImpl_generated.cs @@ -1607,6 +1607,20 @@ private static void _reportRichMappings(IntPtr thisHandle, IntPtr* ppException, } } + [UnmanagedCallersOnly] + private static void _reportAsyncDebugInfo(IntPtr thisHandle, IntPtr* ppException, AsyncInfo* asyncInfo, AsyncSuspensionPoint* suspensionPoints, AsyncContinuationVarInfo* vars, uint numVars) + { + var _this = GetThis(thisHandle); + try + { + _this.reportAsyncDebugInfo(asyncInfo, suspensionPoints, vars, numVars); + } + catch (Exception ex) + { + *ppException = _this.AllocException(ex); + } + } + [UnmanagedCallersOnly] private static void _reportMetadata(IntPtr thisHandle, IntPtr* ppException, byte* key, void* value, nuint length) { @@ -2313,12 +2327,12 @@ private static byte _getTailCallHelpers(IntPtr thisHandle, IntPtr* ppException, } [UnmanagedCallersOnly] - private static CORINFO_METHOD_STRUCT_* _getAsyncResumptionStub(IntPtr thisHandle, IntPtr* ppException) + private static 
CORINFO_METHOD_STRUCT_* _getAsyncResumptionStub(IntPtr thisHandle, IntPtr* ppException, void** entryPoint) { var _this = GetThis(thisHandle); try { - return _this.getAsyncResumptionStub(); + return _this.getAsyncResumptionStub(ref *entryPoint); } catch (Exception ex) { @@ -2621,7 +2635,7 @@ private static uint _getJitFlags(IntPtr thisHandle, IntPtr* ppException, CORJIT_ private static IntPtr GetUnmanagedCallbacks() { - void** callbacks = (void**)NativeMemory.Alloc((nuint)(sizeof(void*) * 177)); + void** callbacks = (void**)NativeMemory.Alloc((nuint)(sizeof(void*) * 178)); callbacks[0] = (delegate* unmanaged)&_isIntrinsic; callbacks[1] = (delegate* unmanaged)&_notifyMethodInfoUsage; @@ -2731,75 +2745,76 @@ private static IntPtr GetUnmanagedCallbacks() callbacks[105] = (delegate* unmanaged)&_getVars; callbacks[106] = (delegate* unmanaged)&_setVars; callbacks[107] = (delegate* unmanaged)&_reportRichMappings; - callbacks[108] = (delegate* unmanaged)&_reportMetadata; - callbacks[109] = (delegate* unmanaged)&_allocateArray; - callbacks[110] = (delegate* unmanaged)&_freeArray; - callbacks[111] = (delegate* unmanaged)&_getArgNext; - callbacks[112] = (delegate* unmanaged)&_getArgType; - callbacks[113] = (delegate* unmanaged)&_getExactClasses; - callbacks[114] = (delegate* unmanaged)&_getArgClass; - callbacks[115] = (delegate* unmanaged)&_getHFAType; - callbacks[116] = (delegate* unmanaged)&_runWithErrorTrap; - callbacks[117] = (delegate* unmanaged)&_runWithSPMIErrorTrap; - callbacks[118] = (delegate* unmanaged)&_getEEInfo; - callbacks[119] = (delegate* unmanaged)&_getAsyncInfo; - callbacks[120] = (delegate* unmanaged)&_getMethodDefFromMethod; - callbacks[121] = (delegate* unmanaged)&_printMethodName; - callbacks[122] = (delegate* unmanaged)&_getMethodNameFromMetadata; - callbacks[123] = (delegate* unmanaged)&_getMethodHash; - callbacks[124] = (delegate* unmanaged)&_getSystemVAmd64PassStructInRegisterDescriptor; - callbacks[125] = (delegate* unmanaged)&_getSwiftLowering; - callbacks[126] = (delegate* unmanaged)&_getFpStructLowering; - callbacks[127] = (delegate* unmanaged)&_getThreadTLSIndex; - callbacks[128] = (delegate* unmanaged)&_getAddrOfCaptureThreadGlobal; - callbacks[129] = (delegate* unmanaged)&_getHelperFtn; - callbacks[130] = (delegate* unmanaged)&_getFunctionEntryPoint; - callbacks[131] = (delegate* unmanaged)&_getFunctionFixedEntryPoint; - callbacks[132] = (delegate* unmanaged)&_getLazyStringLiteralHelper; - callbacks[133] = (delegate* unmanaged)&_embedModuleHandle; - callbacks[134] = (delegate* unmanaged)&_embedClassHandle; - callbacks[135] = (delegate* unmanaged)&_embedMethodHandle; - callbacks[136] = (delegate* unmanaged)&_embedFieldHandle; - callbacks[137] = (delegate* unmanaged)&_embedGenericHandle; - callbacks[138] = (delegate* unmanaged)&_getLocationOfThisType; - callbacks[139] = (delegate* unmanaged)&_getAddressOfPInvokeTarget; - callbacks[140] = (delegate* unmanaged)&_GetCookieForPInvokeCalliSig; - callbacks[141] = (delegate* unmanaged)&_GetCookieForInterpreterCalliSig; - callbacks[142] = (delegate* unmanaged)&_getJustMyCodeHandle; - callbacks[143] = (delegate* unmanaged)&_GetProfilingHandle; - callbacks[144] = (delegate* unmanaged)&_getCallInfo; - callbacks[145] = (delegate* unmanaged)&_getStaticFieldContent; - callbacks[146] = (delegate* unmanaged)&_getObjectContent; - callbacks[147] = (delegate* unmanaged)&_getStaticFieldCurrentClass; - callbacks[148] = (delegate* unmanaged)&_getVarArgsHandle; - callbacks[149] = (delegate* unmanaged)&_constructStringLiteral; - callbacks[150] 
= (delegate* unmanaged)&_emptyStringLiteral; - callbacks[151] = (delegate* unmanaged)&_getFieldThreadLocalStoreID; - callbacks[152] = (delegate* unmanaged)&_GetDelegateCtor; - callbacks[153] = (delegate* unmanaged)&_MethodCompileComplete; - callbacks[154] = (delegate* unmanaged)&_getTailCallHelpers; - callbacks[155] = (delegate* unmanaged)&_getContinuationType; - callbacks[156] = (delegate* unmanaged)&_getAsyncResumptionStub; - callbacks[157] = (delegate* unmanaged)&_convertPInvokeCalliToCall; - callbacks[158] = (delegate* unmanaged)&_notifyInstructionSetUsage; - callbacks[159] = (delegate* unmanaged)&_updateEntryPointForTailCall; - callbacks[160] = (delegate* unmanaged)&_allocMem; - callbacks[161] = (delegate* unmanaged)&_reserveUnwindInfo; - callbacks[162] = (delegate* unmanaged)&_allocUnwindInfo; - callbacks[163] = (delegate* unmanaged)&_allocGCInfo; - callbacks[164] = (delegate* unmanaged)&_setEHcount; - callbacks[165] = (delegate* unmanaged)&_setEHinfo; - callbacks[166] = (delegate* unmanaged)&_logMsg; - callbacks[167] = (delegate* unmanaged)&_doAssert; - callbacks[168] = (delegate* unmanaged)&_reportFatalError; - callbacks[169] = (delegate* unmanaged)&_getPgoInstrumentationResults; - callbacks[170] = (delegate* unmanaged)&_allocPgoInstrumentationBySchema; - callbacks[171] = (delegate* unmanaged)&_recordCallSite; - callbacks[172] = (delegate* unmanaged)&_recordRelocation; - callbacks[173] = (delegate* unmanaged)&_getRelocTypeHint; - callbacks[174] = (delegate* unmanaged)&_getExpectedTargetArchitecture; - callbacks[175] = (delegate* unmanaged)&_getJitFlags; - callbacks[176] = (delegate* unmanaged)&_getSpecialCopyHelper; + callbacks[108] = (delegate* unmanaged)&_reportAsyncDebugInfo; + callbacks[109] = (delegate* unmanaged)&_reportMetadata; + callbacks[110] = (delegate* unmanaged)&_allocateArray; + callbacks[111] = (delegate* unmanaged)&_freeArray; + callbacks[112] = (delegate* unmanaged)&_getArgNext; + callbacks[113] = (delegate* unmanaged)&_getArgType; + callbacks[114] = (delegate* unmanaged)&_getExactClasses; + callbacks[115] = (delegate* unmanaged)&_getArgClass; + callbacks[116] = (delegate* unmanaged)&_getHFAType; + callbacks[117] = (delegate* unmanaged)&_runWithErrorTrap; + callbacks[118] = (delegate* unmanaged)&_runWithSPMIErrorTrap; + callbacks[119] = (delegate* unmanaged)&_getEEInfo; + callbacks[120] = (delegate* unmanaged)&_getAsyncInfo; + callbacks[121] = (delegate* unmanaged)&_getMethodDefFromMethod; + callbacks[122] = (delegate* unmanaged)&_printMethodName; + callbacks[123] = (delegate* unmanaged)&_getMethodNameFromMetadata; + callbacks[124] = (delegate* unmanaged)&_getMethodHash; + callbacks[125] = (delegate* unmanaged)&_getSystemVAmd64PassStructInRegisterDescriptor; + callbacks[126] = (delegate* unmanaged)&_getSwiftLowering; + callbacks[127] = (delegate* unmanaged)&_getFpStructLowering; + callbacks[128] = (delegate* unmanaged)&_getThreadTLSIndex; + callbacks[129] = (delegate* unmanaged)&_getAddrOfCaptureThreadGlobal; + callbacks[130] = (delegate* unmanaged)&_getHelperFtn; + callbacks[131] = (delegate* unmanaged)&_getFunctionEntryPoint; + callbacks[132] = (delegate* unmanaged)&_getFunctionFixedEntryPoint; + callbacks[133] = (delegate* unmanaged)&_getLazyStringLiteralHelper; + callbacks[134] = (delegate* unmanaged)&_embedModuleHandle; + callbacks[135] = (delegate* unmanaged)&_embedClassHandle; + callbacks[136] = (delegate* unmanaged)&_embedMethodHandle; + callbacks[137] = (delegate* unmanaged)&_embedFieldHandle; + callbacks[138] = (delegate* unmanaged)&_embedGenericHandle; 
+ callbacks[139] = (delegate* unmanaged)&_getLocationOfThisType; + callbacks[140] = (delegate* unmanaged)&_getAddressOfPInvokeTarget; + callbacks[141] = (delegate* unmanaged)&_GetCookieForPInvokeCalliSig; + callbacks[142] = (delegate* unmanaged)&_GetCookieForInterpreterCalliSig; + callbacks[143] = (delegate* unmanaged)&_getJustMyCodeHandle; + callbacks[144] = (delegate* unmanaged)&_GetProfilingHandle; + callbacks[145] = (delegate* unmanaged)&_getCallInfo; + callbacks[146] = (delegate* unmanaged)&_getStaticFieldContent; + callbacks[147] = (delegate* unmanaged)&_getObjectContent; + callbacks[148] = (delegate* unmanaged)&_getStaticFieldCurrentClass; + callbacks[149] = (delegate* unmanaged)&_getVarArgsHandle; + callbacks[150] = (delegate* unmanaged)&_constructStringLiteral; + callbacks[151] = (delegate* unmanaged)&_emptyStringLiteral; + callbacks[152] = (delegate* unmanaged)&_getFieldThreadLocalStoreID; + callbacks[153] = (delegate* unmanaged)&_GetDelegateCtor; + callbacks[154] = (delegate* unmanaged)&_MethodCompileComplete; + callbacks[155] = (delegate* unmanaged)&_getTailCallHelpers; + callbacks[156] = (delegate* unmanaged)&_getContinuationType; + callbacks[157] = (delegate* unmanaged)&_getAsyncResumptionStub; + callbacks[158] = (delegate* unmanaged)&_convertPInvokeCalliToCall; + callbacks[159] = (delegate* unmanaged)&_notifyInstructionSetUsage; + callbacks[160] = (delegate* unmanaged)&_updateEntryPointForTailCall; + callbacks[161] = (delegate* unmanaged)&_allocMem; + callbacks[162] = (delegate* unmanaged)&_reserveUnwindInfo; + callbacks[163] = (delegate* unmanaged)&_allocUnwindInfo; + callbacks[164] = (delegate* unmanaged)&_allocGCInfo; + callbacks[165] = (delegate* unmanaged)&_setEHcount; + callbacks[166] = (delegate* unmanaged)&_setEHinfo; + callbacks[167] = (delegate* unmanaged)&_logMsg; + callbacks[168] = (delegate* unmanaged)&_doAssert; + callbacks[169] = (delegate* unmanaged)&_reportFatalError; + callbacks[170] = (delegate* unmanaged)&_getPgoInstrumentationResults; + callbacks[171] = (delegate* unmanaged)&_allocPgoInstrumentationBySchema; + callbacks[172] = (delegate* unmanaged)&_recordCallSite; + callbacks[173] = (delegate* unmanaged)&_recordRelocation; + callbacks[174] = (delegate* unmanaged)&_getRelocTypeHint; + callbacks[175] = (delegate* unmanaged)&_getExpectedTargetArchitecture; + callbacks[176] = (delegate* unmanaged)&_getJitFlags; + callbacks[177] = (delegate* unmanaged)&_getSpecialCopyHelper; return (IntPtr)callbacks; } diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoTypes.cs b/src/coreclr/tools/Common/JitInterface/CorInfoTypes.cs index ff37fc2f9bc60b..29f4dd4c82b75b 100644 --- a/src/coreclr/tools/Common/JitInterface/CorInfoTypes.cs +++ b/src/coreclr/tools/Common/JitInterface/CorInfoTypes.cs @@ -878,8 +878,8 @@ public unsafe struct CORINFO_ASYNC_INFO public CORINFO_CLASS_STRUCT_* continuationClsHnd; // 'Next' field public CORINFO_FIELD_STRUCT_* continuationNextFldHnd; - // 'Resume' field - public CORINFO_FIELD_STRUCT_* continuationResumeFldHnd; + // 'ResumeInfo' field + public CORINFO_FIELD_STRUCT_* continuationResumeInfoFldHnd; // 'State' field public CORINFO_FIELD_STRUCT_* continuationStateFldHnd; // 'Flags' field @@ -1271,7 +1271,8 @@ public enum SourceTypes STACK_EMPTY = 0x02, // The stack is empty here CALL_SITE = 0x04, // This is a call site. NATIVE_END_OFFSET_UNKNOWN = 0x08, // Indicates a epilog endpoint - CALL_INSTRUCTION = 0x10 // The actual instruction of a call. + CALL_INSTRUCTION = 0x10, // The actual instruction of a call. 
+        ASYNC = 0x20, // async suspension/resumption code for a specific async call
     };
 
     public struct OffsetMapping
@@ -1328,6 +1329,32 @@ public struct RichOffsetMapping
         public SourceTypes Source;
     }
 
+    public struct AsyncContinuationVarInfo
+    {
+        // IL number of variable (or one of the special IL numbers, like TYPECTXT_ILNUM)
+        public uint VarNumber;
+        // Offset in continuation object where this variable is stored
+        public uint Offset;
+    }
+
+    public struct AsyncSuspensionPoint
+    {
+        // Offset of the IP stored in ResumeInfo.DiagnosticIP. An ASYNC mapping
+        // in the debug info relates this offset back to the IL call that caused
+        // the suspension point. Also used as a unique key for debug information
+        // about the suspension point. See ResumeInfo.DiagnosticIP in SPC for more info.
+        public uint DiagnosticNativeOffset;
+        // Number of AsyncContinuationVarInfo entries for this suspension point; they
+        // start in the vars array where the previous suspension point's entries end.
+        public uint NumContinuationVars;
+    }
+
+    public struct AsyncInfo
+    {
+        // Number of suspension points in the method.
+        public uint NumSuspensionPoints;
+    }
+
     // This enum is used for JIT to tell EE where this token comes from.
     // E.g. Depending on different opcodes, we might allow/disallow certain types of tokens or
     // return different types of handles (e.g. boxed vs. regular entrypoints)
diff --git a/src/coreclr/tools/Common/JitInterface/ThunkGenerator/ThunkInput.txt b/src/coreclr/tools/Common/JitInterface/ThunkGenerator/ThunkInput.txt
index 2a06ed81d01bd5..fff938a53009d5 100644
--- a/src/coreclr/tools/Common/JitInterface/ThunkGenerator/ThunkInput.txt
+++ b/src/coreclr/tools/Common/JitInterface/ThunkGenerator/ThunkInput.txt
@@ -161,6 +161,9 @@ ICorDebugInfo::NativeVarInfo*,NativeVarInfo*
 ICorDebugInfo::BoundaryTypes*,BoundaryTypes*
 ICorDebugInfo::InlineTreeNode*,InlineTreeNode*
 ICorDebugInfo::RichOffsetMapping*,RichOffsetMapping*
+ICorDebugInfo::AsyncInfo*,AsyncInfo*
+ICorDebugInfo::AsyncSuspensionPoint*,AsyncSuspensionPoint*
+ICorDebugInfo::AsyncContinuationVarInfo*,AsyncContinuationVarInfo*
 struct _EXCEPTION_POINTERS*,_EXCEPTION_POINTERS*
 ICorJitInfo::errorTrapFunction,void*
 
@@ -274,6 +277,7 @@ FUNCTIONS
     void getVars(CORINFO_METHOD_HANDLE ftn, uint32_t* cVars, ICorDebugInfo::ILVarInfo** vars, bool* extendOthers)
     void setVars(CORINFO_METHOD_HANDLE ftn, uint32_t cVars, ICorDebugInfo::NativeVarInfo* vars)
     void reportRichMappings(ICorDebugInfo::InlineTreeNode* inlineTreeNodes, uint32_t numInlineTreeNodes, ICorDebugInfo::RichOffsetMapping* mappings, uint32_t numMappings)
+    void reportAsyncDebugInfo(ICorDebugInfo::AsyncInfo* asyncInfo, ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, ICorDebugInfo::AsyncContinuationVarInfo* vars, uint32_t numVars)
     void reportMetadata(const char* key, const void* value, size_t length)
     void*allocateArray(size_t cBytes);
     void freeArray(void*array);
@@ -322,7 +326,7 @@ FUNCTIONS
     void MethodCompileComplete(CORINFO_METHOD_HANDLE methHnd);
     bool getTailCallHelpers(CORINFO_RESOLVED_TOKEN* callToken, CORINFO_SIG_INFO* sig, CORINFO_GET_TAILCALL_HELPERS_FLAGS flags, CORINFO_TAILCALL_HELPERS* pResult);
     CORINFO_CLASS_HANDLE getContinuationType(size_t dataSize, bool* objRefs, size_t objRefsSize);
-    CORINFO_METHOD_HANDLE getAsyncResumptionStub();
+    CORINFO_METHOD_HANDLE getAsyncResumptionStub(void **entryPoint);
     bool convertPInvokeCalliToCall(CORINFO_RESOLVED_TOKEN * pResolvedToken, bool mustConvert);
     bool notifyInstructionSetUsage(CORINFO_InstructionSet instructionSet,bool supportEnabled);
     void updateEntryPointForTailCall(REF_CORINFO_CONST_LOOKUP
entryPoint); diff --git a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DebugInfoTableNode.cs b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DebugInfoTableNode.cs index b809ef074b9af0..f2a1484bdeea91 100644 --- a/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DebugInfoTableNode.cs +++ b/src/coreclr/tools/aot/ILCompiler.ReadyToRun/Compiler/DependencyAnalysis/ReadyToRun/DebugInfoTableNode.cs @@ -90,8 +90,24 @@ public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) byte[] vars = method.DebugVarInfos; NibbleWriter nibbleWriter = new NibbleWriter(); - nibbleWriter.WriteUInt((uint)(bounds?.Length ?? 0)); - nibbleWriter.WriteUInt((uint)(vars?.Length ?? 0)); + uint boundsLength = (uint)(bounds?.Length ?? 0); + bool isFatHeader = boundsLength == DebugInfoFat; + + if (isFatHeader) + { + nibbleWriter.WriteUInt(DebugInfoFat); + nibbleWriter.WriteUInt(boundsLength); + nibbleWriter.WriteUInt((uint)(vars?.Length ?? 0)); + nibbleWriter.WriteUInt(0); // cbUninstrumentedBounds + nibbleWriter.WriteUInt(0); // cbPatchpointInfo + nibbleWriter.WriteUInt(0); // cbRichDebugInfo + nibbleWriter.WriteUInt(0); // cbAsyncInfo + } + else + { + nibbleWriter.WriteUInt(boundsLength); + nibbleWriter.WriteUInt((uint)(vars?.Length ?? 0)); + } byte[] header = nibbleWriter.ToArray(); methodDebugBlob.Write(header, 0, header.Length); @@ -151,9 +167,10 @@ public static byte[] CreateBoundsBlobForMethod(OffsetMapping[] offsetMapping) writer2.WriteUInt((uint)offsetMapping.Length); // We need the total count writer2.WriteUInt((uint)bitWidthReportForNativeDelta); // Number of bits needed for native deltas writer2.WriteUInt((uint)bitWidthReportForILOffset); // How many bits needed for IL offsets + const int BitsForSourceType = 3; int bitWidth = bitWidthForNativeDelta + bitWidthForILOffset + - 2; // for the source data + BitsForSourceType; int totalBits = bitWidth * offsetMapping.Length; int bytesNeededForArray = (totalBits + 7) / 8; @@ -174,28 +191,22 @@ public static byte[] CreateBoundsBlobForMethod(OffsetMapping[] offsetMapping) uint nativeOffsetDelta = bound.nativeOffset - prevNativeOffset; uint sourceBits = 0; - switch ((int)bound.source) - { - case (int)Internal.JitInterface.SourceTypes.SOURCE_TYPE_INVALID: - sourceBits = 0; - break; - case (int)Internal.JitInterface.SourceTypes.CALL_INSTRUCTION: - sourceBits = 1; - break; - case (int)Internal.JitInterface.SourceTypes.STACK_EMPTY: - sourceBits = 2; - break; - case (int)(Internal.JitInterface.SourceTypes.CALL_INSTRUCTION | Internal.JitInterface.SourceTypes.STACK_EMPTY): - sourceBits = 3; - break; - default: - throw new InternalCompilerErrorException("Unknown source type"); - } + if ((bound.source & SourceTypes.CALL_INSTRUCTION) != 0) + sourceBits |= 1; + if ((bound.source & SourceTypes.STACK_EMPTY) != 0) + sourceBits |= 2; + if ((bound.source & SourceTypes.ASYNC) != 0) + sourceBits |= 4; + + if ((bound.source & ~(SourceTypes.CALL_INSTRUCTION | SourceTypes.STACK_EMPTY | SourceTypes.ASYNC)) != 0) + throw new InternalCompilerErrorException("Unknown source type " + (uint)bound.source); + if ((sourceBits & ~((1u << BitsForSourceType) - 1)) != 0) + throw new InternalCompilerErrorException("Unencodable source type " + sourceBits + " (for " + (uint)bound.source + ")"); ulong mappingDataEncoded = (ulong)sourceBits | - ((ulong)nativeOffsetDelta << 2) | - ((ulong)((int)bound.ilOffset - (int)MappingTypes.EPILOG) << (2 + bitWidthForNativeDelta)); + 
((ulong)nativeOffsetDelta << BitsForSourceType) | + ((ulong)((int)bound.ilOffset - (int)MappingTypes.EPILOG) << (BitsForSourceType + bitWidthForNativeDelta)); for (byte bitsToWrite = (byte)bitWidth; bitsToWrite > 0;) { @@ -307,5 +318,7 @@ static void WriteEncodedStackOffset(NibbleWriter _writer, int offset, bool assum return writer.ToArray(); } + + private const int DebugInfoFat = 0; } } diff --git a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/DebugInfo.cs b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/DebugInfo.cs index 019b74c714b3a9..d2c0e7af16732f 100644 --- a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/DebugInfo.cs +++ b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/DebugInfo.cs @@ -116,8 +116,28 @@ private void EnsureInitialized() } NibbleReader reader = new NibbleReader(imageReader, (int)debugInfoOffset); - uint boundsByteCount = reader.ReadUInt(); - uint variablesByteCount = reader.ReadUInt(); + + uint boundsByteCountOrIndicator = reader.ReadUInt(); + + uint boundsByteCount = 0; + uint variablesByteCount = 0; + + const int DebugInfoFat = 0; + if (_runtimeFunction.ReadyToRunReader.ReadyToRunHeader.MajorVersion >= 17 && boundsByteCountOrIndicator == DebugInfoFat) + { + boundsByteCount = reader.ReadUInt(); + variablesByteCount = reader.ReadUInt(); + reader.ReadUInt(); // uninstrumented bounds + reader.ReadUInt(); // patchpoint info + reader.ReadUInt(); // rich debug info + reader.ReadUInt(); // async info + } + else + { + boundsByteCount = boundsByteCountOrIndicator; + variablesByteCount = reader.ReadUInt(); + } + int boundsOffset = reader.GetNextByteOffset(); int variablesOffset = (int)(boundsOffset + boundsByteCount); @@ -143,7 +163,8 @@ private void ParseBounds(NativeReader imageReader, int offset) // - IL offsets aren't sorted // They may also include a sentinel value from MappingTypes. // - flags is 3 independent bits. - if (_runtimeFunction.ReadyToRunReader.ReadyToRunHeader.MajorVersion >= 16) + int version = _runtimeFunction.ReadyToRunReader.ReadyToRunHeader.MajorVersion; + if (version >= 16) { NibbleReader reader = new NibbleReader(imageReader, offset); uint boundsEntryCount = reader.ReadUInt(); @@ -151,7 +172,8 @@ private void ParseBounds(NativeReader imageReader, int offset) uint bitsForNativeDelta = reader.ReadUInt() + 1; // Number of bits needed for native deltas uint bitsForILOffsets = reader.ReadUInt() + 1; // Number of bits needed for IL offsets - uint bitsPerEntry = bitsForNativeDelta + bitsForILOffsets + 2; // 2 bits for source type + uint bitsForSourceType = version >= 17 ? 
3u : 2u; + uint bitsPerEntry = bitsForNativeDelta + bitsForILOffsets + bitsForSourceType; ulong bitsMeaningfulMask = (1UL << ((int)bitsPerEntry)) - 1; int offsetOfActualBoundsData = reader.GetNextByteOffset(); @@ -172,22 +194,14 @@ private void ParseBounds(NativeReader imageReader, int offset) bitsCollected -= bitsPerEntry; var entry = new DebugInfoBoundsEntry(); - switch (mappingDataEncoded & 0x3) - { - case 0: - entry.SourceTypes = SourceTypes.SourceTypeInvalid; - break; - case 1: - entry.SourceTypes = SourceTypes.CallInstruction; - break; - case 2: - entry.SourceTypes = SourceTypes.StackEmpty; - break; - case 3: - entry.SourceTypes = SourceTypes.StackEmpty | SourceTypes.CallInstruction; - break; - } - mappingDataEncoded >>= 2; + if ((mappingDataEncoded & 0x1) != 0) + entry.SourceTypes |= SourceTypes.CallInstruction; + if ((mappingDataEncoded & 0x2) != 0) + entry.SourceTypes |= SourceTypes.StackEmpty; + if (version >= 17 && (mappingDataEncoded & 0x4) != 0) + entry.SourceTypes |= SourceTypes.Async; + + mappingDataEncoded >>= (int)bitsForSourceType; uint nativeOffsetDelta = (uint)(mappingDataEncoded & ((1UL << (int)bitsForNativeDelta) - 1)); previousNativeOffset += nativeOffsetDelta; entry.NativeOffset = previousNativeOffset; diff --git a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/DebugInfoTypes.cs b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/DebugInfoTypes.cs index eef0ab4673cbe1..9232c0000a69d0 100644 --- a/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/DebugInfoTypes.cs +++ b/src/coreclr/tools/aot/ILCompiler.Reflection.ReadyToRun/DebugInfoTypes.cs @@ -62,7 +62,11 @@ public enum SourceTypes /// /// The actual instruction of a call /// - CallInstruction = 0x10 + CallInstruction = 0x10, + /// + /// Suspension or resumption code for a call + /// + Async = 0x20, } public enum DebugInfoBoundsType : uint diff --git a/src/coreclr/tools/aot/crossgen2.slnx b/src/coreclr/tools/aot/crossgen2.slnx index bc39e9cb4ffee7..f404d3a841f868 100644 --- a/src/coreclr/tools/aot/crossgen2.slnx +++ b/src/coreclr/tools/aot/crossgen2.slnx @@ -7,6 +7,9 @@ + + + diff --git a/src/coreclr/tools/aot/jitinterface/jitinterface_generated.h b/src/coreclr/tools/aot/jitinterface/jitinterface_generated.h index cf8ea9deb5b2ff..ac78ed4c2a1144 100644 --- a/src/coreclr/tools/aot/jitinterface/jitinterface_generated.h +++ b/src/coreclr/tools/aot/jitinterface/jitinterface_generated.h @@ -119,6 +119,7 @@ struct JitInterfaceCallbacks void (* getVars)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_METHOD_HANDLE ftn, uint32_t* cVars, ICorDebugInfo::ILVarInfo** vars, bool* extendOthers); void (* setVars)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_METHOD_HANDLE ftn, uint32_t cVars, ICorDebugInfo::NativeVarInfo* vars); void (* reportRichMappings)(void * thisHandle, CorInfoExceptionClass** ppException, ICorDebugInfo::InlineTreeNode* inlineTreeNodes, uint32_t numInlineTreeNodes, ICorDebugInfo::RichOffsetMapping* mappings, uint32_t numMappings); + void (* reportAsyncDebugInfo)(void * thisHandle, CorInfoExceptionClass** ppException, ICorDebugInfo::AsyncInfo* asyncInfo, ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, ICorDebugInfo::AsyncContinuationVarInfo* vars, uint32_t numVars); void (* reportMetadata)(void * thisHandle, CorInfoExceptionClass** ppException, const char* key, const void* value, size_t length); void* (* allocateArray)(void * thisHandle, CorInfoExceptionClass** ppException, size_t cBytes); void (* freeArray)(void * thisHandle, CorInfoExceptionClass** 
ppException, void* array); @@ -167,7 +168,7 @@ struct JitInterfaceCallbacks void (* MethodCompileComplete)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_METHOD_HANDLE methHnd); bool (* getTailCallHelpers)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_RESOLVED_TOKEN* callToken, CORINFO_SIG_INFO* sig, CORINFO_GET_TAILCALL_HELPERS_FLAGS flags, CORINFO_TAILCALL_HELPERS* pResult); CORINFO_CLASS_HANDLE (* getContinuationType)(void * thisHandle, CorInfoExceptionClass** ppException, size_t dataSize, bool* objRefs, size_t objRefsSize); - CORINFO_METHOD_HANDLE (* getAsyncResumptionStub)(void * thisHandle, CorInfoExceptionClass** ppException); + CORINFO_METHOD_HANDLE (* getAsyncResumptionStub)(void * thisHandle, CorInfoExceptionClass** ppException, void** entryPoint); bool (* convertPInvokeCalliToCall)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool mustConvert); bool (* notifyInstructionSetUsage)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_InstructionSet instructionSet, bool supportEnabled); void (* updateEntryPointForTailCall)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_CONST_LOOKUP* entryPoint); @@ -1258,6 +1259,17 @@ class JitInterfaceWrapper : public ICorJitInfo if (pException != nullptr) throw pException; } + virtual void reportAsyncDebugInfo( + ICorDebugInfo::AsyncInfo* asyncInfo, + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, + ICorDebugInfo::AsyncContinuationVarInfo* vars, + uint32_t numVars) +{ + CorInfoExceptionClass* pException = nullptr; + _callbacks->reportAsyncDebugInfo(_thisHandle, &pException, asyncInfo, suspensionPoints, vars, numVars); + if (pException != nullptr) throw pException; +} + virtual void reportMetadata( const char* key, const void* value, @@ -1726,10 +1738,11 @@ class JitInterfaceWrapper : public ICorJitInfo return temp; } - virtual CORINFO_METHOD_HANDLE getAsyncResumptionStub() + virtual CORINFO_METHOD_HANDLE getAsyncResumptionStub( + void** entryPoint) { CorInfoExceptionClass* pException = nullptr; - CORINFO_METHOD_HANDLE temp = _callbacks->getAsyncResumptionStub(_thisHandle, &pException); + CORINFO_METHOD_HANDLE temp = _callbacks->getAsyncResumptionStub(_thisHandle, &pException, entryPoint); if (pException != nullptr) throw pException; return temp; } diff --git a/src/coreclr/tools/r2rdump/R2RDump.slnx b/src/coreclr/tools/r2rdump/R2RDump.slnx index 87cdcb67bd45c1..53952f61cdd334 100644 --- a/src/coreclr/tools/r2rdump/R2RDump.slnx +++ b/src/coreclr/tools/r2rdump/R2RDump.slnx @@ -7,6 +7,9 @@ + + + diff --git a/src/coreclr/tools/superpmi/superpmi-shared/agnostic.h b/src/coreclr/tools/superpmi/superpmi-shared/agnostic.h index d45676ab5a8784..a4250fdadce9c0 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/agnostic.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/agnostic.h @@ -197,7 +197,7 @@ struct Agnostic_CORINFO_ASYNC_INFO { DWORDLONG continuationClsHnd; DWORDLONG continuationNextFldHnd; - DWORDLONG continuationResumeFldHnd; + DWORDLONG continuationResumeInfoFldHnd; DWORDLONG continuationStateFldHnd; DWORDLONG continuationFlagsFldHnd; DWORDLONG captureExecutionContextMethHnd; diff --git a/src/coreclr/tools/superpmi/superpmi-shared/lwmlist.h b/src/coreclr/tools/superpmi/superpmi-shared/lwmlist.h index a52578a288bc15..12272fc9e0d63b 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/lwmlist.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/lwmlist.h @@ -129,7 +129,7 @@ LWM(GetSystemVAmd64PassStructInRegisterDescriptor, 
DWORDLONG, Agnostic_GetSystem LWM(GetSwiftLowering, DWORDLONG, Agnostic_GetSwiftLowering) LWM(GetFpStructLowering, DWORDLONG, Agnostic_GetFpStructLowering) LWM(GetTailCallHelpers, Agnostic_GetTailCallHelpers, Agnostic_CORINFO_TAILCALL_HELPERS) -LWM(GetAsyncResumptionStub, DWORD, DWORDLONG) +LWM(GetAsyncResumptionStub, DWORD, DLDL) LWM(GetContinuationType, Agnostic_GetContinuationTypeIn, DWORDLONG) LWM(UpdateEntryPointForTailCall, Agnostic_CORINFO_CONST_LOOKUP, Agnostic_CORINFO_CONST_LOOKUP) LWM(GetSpecialCopyHelper, DWORDLONG, DWORDLONG) diff --git a/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.cpp b/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.cpp index 7dad7bac1fc57b..f436ad7e13f83b 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.cpp +++ b/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.cpp @@ -4454,7 +4454,7 @@ void MethodContext::recGetAsyncInfo(const CORINFO_ASYNC_INFO* pAsyncInfo) value.continuationClsHnd = CastHandle(pAsyncInfo->continuationClsHnd); value.continuationNextFldHnd = CastHandle(pAsyncInfo->continuationNextFldHnd); - value.continuationResumeFldHnd = CastHandle(pAsyncInfo->continuationResumeFldHnd); + value.continuationResumeInfoFldHnd = CastHandle(pAsyncInfo->continuationResumeInfoFldHnd); value.continuationStateFldHnd = CastHandle(pAsyncInfo->continuationStateFldHnd); value.continuationFlagsFldHnd = CastHandle(pAsyncInfo->continuationFlagsFldHnd); value.captureExecutionContextMethHnd = CastHandle(pAsyncInfo->captureExecutionContextMethHnd); @@ -4468,9 +4468,9 @@ void MethodContext::recGetAsyncInfo(const CORINFO_ASYNC_INFO* pAsyncInfo) } void MethodContext::dmpGetAsyncInfo(DWORD key, const Agnostic_CORINFO_ASYNC_INFO& value) { - printf("GetAsyncInfo key %u value contClsHnd-%016" PRIX64 " contNextFldHnd-%016" PRIX64 " contResumeFldHnd-%016" PRIX64 + printf("GetAsyncInfo key %u value contClsHnd-%016" PRIX64 " contNextFldHnd-%016" PRIX64 " contResumeInfoFldHnd-%016" PRIX64 " contStateFldHnd-%016" PRIX64 " contFlagsFldHnd-%016" PRIX64, - key, value.continuationClsHnd, value.continuationNextFldHnd, value.continuationResumeFldHnd, + key, value.continuationClsHnd, value.continuationNextFldHnd, value.continuationResumeInfoFldHnd, value.continuationStateFldHnd, value.continuationFlagsFldHnd); } void MethodContext::repGetAsyncInfo(CORINFO_ASYNC_INFO* pAsyncInfoOut) @@ -4478,7 +4478,7 @@ void MethodContext::repGetAsyncInfo(CORINFO_ASYNC_INFO* pAsyncInfoOut) Agnostic_CORINFO_ASYNC_INFO value = LookupByKeyOrMissNoMessage(GetAsyncInfo, 0); pAsyncInfoOut->continuationClsHnd = (CORINFO_CLASS_HANDLE)value.continuationClsHnd; pAsyncInfoOut->continuationNextFldHnd = (CORINFO_FIELD_HANDLE)value.continuationNextFldHnd; - pAsyncInfoOut->continuationResumeFldHnd = (CORINFO_FIELD_HANDLE)value.continuationResumeFldHnd; + pAsyncInfoOut->continuationResumeInfoFldHnd = (CORINFO_FIELD_HANDLE)value.continuationResumeInfoFldHnd; pAsyncInfoOut->continuationStateFldHnd = (CORINFO_FIELD_HANDLE)value.continuationStateFldHnd; pAsyncInfoOut->continuationFlagsFldHnd = (CORINFO_FIELD_HANDLE)value.continuationFlagsFldHnd; pAsyncInfoOut->captureExecutionContextMethHnd = (CORINFO_METHOD_HANDLE)value.captureExecutionContextMethHnd; @@ -6921,22 +6921,26 @@ bool MethodContext::repGetTailCallHelpers( } -void MethodContext::recGetAsyncResumptionStub(CORINFO_METHOD_HANDLE hnd) +void MethodContext::recGetAsyncResumptionStub(CORINFO_METHOD_HANDLE hnd, void* entryPoint) { if (GetAsyncResumptionStub == nullptr) - GetAsyncResumptionStub = new LightWeightMap(); + 
GetAsyncResumptionStub = new LightWeightMap(); - GetAsyncResumptionStub->Add(0, CastHandle(hnd)); - DEBUG_REC(dmpGetAsyncResumptionStub(CastHandle(hnd))); + DLDL result; + result.A = CastHandle(hnd); + result.B = CastPointer(entryPoint); + GetAsyncResumptionStub->Add(0, result); + DEBUG_REC(dmpGetAsyncResumptionStub(CastHandle(hnd), CastPointer(entryPoint))); } -void MethodContext::dmpGetAsyncResumptionStub(DWORD key, DWORDLONG hnd) +void MethodContext::dmpGetAsyncResumptionStub(DWORD key, const DLDL& value) { - printf("GetAsyncResumptionStub key-%u, value-%016" PRIX64, key, hnd); + printf("GetAsyncResumptionStub key-%u, hnd-%016" PRIX64 ", entrypoint-%016" PRIX64, key, value.A, value.B); } -CORINFO_METHOD_HANDLE MethodContext::repGetAsyncResumptionStub() +CORINFO_METHOD_HANDLE MethodContext::repGetAsyncResumptionStub(void** entryPoint) { - DWORDLONG hnd = LookupByKeyOrMissNoMessage(GetAsyncResumptionStub, 0); - return (CORINFO_METHOD_HANDLE)hnd; + DLDL value = LookupByKeyOrMissNoMessage(GetAsyncResumptionStub, 0); + *entryPoint = (void*)value.B; + return (CORINFO_METHOD_HANDLE)value.A; } void MethodContext::recGetContinuationType(size_t dataSize, diff --git a/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.h b/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.h index c5d7f82923841e..bf853790fb9d8d 100644 --- a/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.h +++ b/src/coreclr/tools/superpmi/superpmi-shared/methodcontext.h @@ -863,9 +863,9 @@ class MethodContext CORINFO_GET_TAILCALL_HELPERS_FLAGS flags, CORINFO_TAILCALL_HELPERS* pResult); - void recGetAsyncResumptionStub(CORINFO_METHOD_HANDLE hnd); - void dmpGetAsyncResumptionStub(DWORD key, DWORDLONG handle); - CORINFO_METHOD_HANDLE repGetAsyncResumptionStub(); + void recGetAsyncResumptionStub(CORINFO_METHOD_HANDLE hnd, void* entryPoint); + void dmpGetAsyncResumptionStub(DWORD key, const DLDL& value); + CORINFO_METHOD_HANDLE repGetAsyncResumptionStub(void** entryPoint); void recGetContinuationType(size_t dataSize, bool* objRefs, size_t objRefsSize, CORINFO_CLASS_HANDLE result); void dmpGetContinuationType(const Agnostic_GetContinuationTypeIn& key, DWORDLONG value); diff --git a/src/coreclr/tools/superpmi/superpmi-shim-collector/icorjitinfo.cpp b/src/coreclr/tools/superpmi/superpmi-shim-collector/icorjitinfo.cpp index e8250d6a45c483..09544216cb913f 100644 --- a/src/coreclr/tools/superpmi/superpmi-shim-collector/icorjitinfo.cpp +++ b/src/coreclr/tools/superpmi/superpmi-shim-collector/icorjitinfo.cpp @@ -1229,10 +1229,20 @@ void interceptor_ICJI::reportRichMappings(ICorDebugInfo::InlineTreeNode* inli uint32_t numMappings) { mc->cr->AddCall("reportRichMappings"); - // TODO: record these mappings + // Compile output that we do not currently save original_ICorJitInfo->reportRichMappings(inlineTreeNodes, numInlineTreeNodes, mappings, numMappings); } +void interceptor_ICJI::reportAsyncDebugInfo(ICorDebugInfo::AsyncInfo* asyncInfo, + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, + ICorDebugInfo::AsyncContinuationVarInfo* vars, + uint32_t numVars) +{ + mc->cr->AddCall("reportAsyncDebugInfo"); + // Compile output that we do not currently save + original_ICorJitInfo->reportAsyncDebugInfo(asyncInfo, suspensionPoints, vars, numVars); +} + void interceptor_ICJI::reportMetadata(const char* key, const void* value, size_t length) { mc->cr->AddCall("reportMetadata"); @@ -1774,11 +1784,11 @@ bool interceptor_ICJI::getTailCallHelpers( return result; } -CORINFO_METHOD_HANDLE interceptor_ICJI::getAsyncResumptionStub() 
+CORINFO_METHOD_HANDLE interceptor_ICJI::getAsyncResumptionStub(void** entryPoint) { mc->cr->AddCall("getAsyncResumptionStub"); - CORINFO_METHOD_HANDLE stub = original_ICorJitInfo->getAsyncResumptionStub(); - mc->recGetAsyncResumptionStub(stub); + CORINFO_METHOD_HANDLE stub = original_ICorJitInfo->getAsyncResumptionStub(entryPoint); + mc->recGetAsyncResumptionStub(stub, *entryPoint); return stub; } diff --git a/src/coreclr/tools/superpmi/superpmi-shim-counter/icorjitinfo_generated.cpp b/src/coreclr/tools/superpmi/superpmi-shim-counter/icorjitinfo_generated.cpp index e78124fc9642fa..6e39de50c5d89e 100644 --- a/src/coreclr/tools/superpmi/superpmi-shim-counter/icorjitinfo_generated.cpp +++ b/src/coreclr/tools/superpmi/superpmi-shim-counter/icorjitinfo_generated.cpp @@ -876,6 +876,16 @@ void interceptor_ICJI::reportRichMappings( original_ICorJitInfo->reportRichMappings(inlineTreeNodes, numInlineTreeNodes, mappings, numMappings); } +void interceptor_ICJI::reportAsyncDebugInfo( + ICorDebugInfo::AsyncInfo* asyncInfo, + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, + ICorDebugInfo::AsyncContinuationVarInfo* vars, + uint32_t numVars) +{ + mcs->AddCall("reportAsyncDebugInfo"); + original_ICorJitInfo->reportAsyncDebugInfo(asyncInfo, suspensionPoints, vars, numVars); +} + void interceptor_ICJI::reportMetadata( const char* key, const void* value, @@ -1275,10 +1285,11 @@ CORINFO_CLASS_HANDLE interceptor_ICJI::getContinuationType( return original_ICorJitInfo->getContinuationType(dataSize, objRefs, objRefsSize); } -CORINFO_METHOD_HANDLE interceptor_ICJI::getAsyncResumptionStub() +CORINFO_METHOD_HANDLE interceptor_ICJI::getAsyncResumptionStub( + void** entryPoint) { mcs->AddCall("getAsyncResumptionStub"); - return original_ICorJitInfo->getAsyncResumptionStub(); + return original_ICorJitInfo->getAsyncResumptionStub(entryPoint); } bool interceptor_ICJI::convertPInvokeCalliToCall( diff --git a/src/coreclr/tools/superpmi/superpmi-shim-simple/icorjitinfo_generated.cpp b/src/coreclr/tools/superpmi/superpmi-shim-simple/icorjitinfo_generated.cpp index 7937d140a0d275..fcb77f747663d9 100644 --- a/src/coreclr/tools/superpmi/superpmi-shim-simple/icorjitinfo_generated.cpp +++ b/src/coreclr/tools/superpmi/superpmi-shim-simple/icorjitinfo_generated.cpp @@ -768,6 +768,15 @@ void interceptor_ICJI::reportRichMappings( original_ICorJitInfo->reportRichMappings(inlineTreeNodes, numInlineTreeNodes, mappings, numMappings); } +void interceptor_ICJI::reportAsyncDebugInfo( + ICorDebugInfo::AsyncInfo* asyncInfo, + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, + ICorDebugInfo::AsyncContinuationVarInfo* vars, + uint32_t numVars) +{ + original_ICorJitInfo->reportAsyncDebugInfo(asyncInfo, suspensionPoints, vars, numVars); +} + void interceptor_ICJI::reportMetadata( const char* key, const void* value, @@ -1119,9 +1128,10 @@ CORINFO_CLASS_HANDLE interceptor_ICJI::getContinuationType( return original_ICorJitInfo->getContinuationType(dataSize, objRefs, objRefsSize); } -CORINFO_METHOD_HANDLE interceptor_ICJI::getAsyncResumptionStub() +CORINFO_METHOD_HANDLE interceptor_ICJI::getAsyncResumptionStub( + void** entryPoint) { - return original_ICorJitInfo->getAsyncResumptionStub(); + return original_ICorJitInfo->getAsyncResumptionStub(entryPoint); } bool interceptor_ICJI::convertPInvokeCalliToCall( diff --git a/src/coreclr/tools/superpmi/superpmi/icorjitinfo.cpp b/src/coreclr/tools/superpmi/superpmi/icorjitinfo.cpp index b022efbc7c600b..38aaca767c9d36 100644 --- a/src/coreclr/tools/superpmi/superpmi/icorjitinfo.cpp +++ 
b/src/coreclr/tools/superpmi/superpmi/icorjitinfo.cpp @@ -1052,11 +1052,23 @@ void MyICJI::reportRichMappings( uint32_t numMappings) { jitInstance->mc->cr->AddCall("reportRichMappings"); - // TODO: record these mappings + // Compile output that we do not currently save freeArray(inlineTreeNodes); freeArray(mappings); } +void MyICJI::reportAsyncDebugInfo( + ICorDebugInfo::AsyncInfo* asyncInfo, + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, + ICorDebugInfo::AsyncContinuationVarInfo* vars, + uint32_t numVars) +{ + jitInstance->mc->cr->AddCall("reportAsyncDebugInfo"); + // Compile output that we do not currently save + freeArray(suspensionPoints); + freeArray(vars); +} + void MyICJI::reportMetadata(const char* key, const void* value, size_t length) { jitInstance->mc->cr->AddCall("reportMetadata"); @@ -1502,10 +1514,10 @@ bool MyICJI::getTailCallHelpers( return jitInstance->mc->repGetTailCallHelpers(callToken, sig, flags, pResult); } -CORINFO_METHOD_HANDLE MyICJI::getAsyncResumptionStub() +CORINFO_METHOD_HANDLE MyICJI::getAsyncResumptionStub(void** entryPoint) { jitInstance->mc->cr->AddCall("getAsyncResumptionStub"); - return jitInstance->mc->repGetAsyncResumptionStub();; + return jitInstance->mc->repGetAsyncResumptionStub(entryPoint); } CORINFO_CLASS_HANDLE MyICJI::getContinuationType(size_t dataSize, bool* objRefs, size_t objRefsSize) diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index e8273bf99cd236..8223718ed31c55 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -3979,17 +3979,6 @@ BOOL EECodeGenManager::GetBoundariesAndVarsWorker( if (pDebugInfo == NULL) return FALSE; -#ifdef FEATURE_ON_STACK_REPLACEMENT - BOOL hasFlagByte = TRUE; -#else - BOOL hasFlagByte = FALSE; -#endif - - if (m_storeRichDebugInfo) - { - hasFlagByte = TRUE; - } - // Uncompress. This allocates memory and may throw. CompressDebugInfo::RestoreBoundariesAndVars( fpNew, @@ -3997,8 +3986,7 @@ BOOL EECodeGenManager::GetBoundariesAndVarsWorker( boundsType, pDebugInfo, // input pcMap, ppMap, // output - pcVars, ppVars, // output - hasFlagByte + pcVars, ppVars // output ); return TRUE; @@ -4063,22 +4051,10 @@ size_t EECodeGenManager::WalkILOffsetsWorker(PTR_BYTE pDebugInfo, if (pDebugInfo == NULL) return 0; -#ifdef FEATURE_ON_STACK_REPLACEMENT - BOOL hasFlagByte = TRUE; -#else - BOOL hasFlagByte = FALSE; -#endif - - if (m_storeRichDebugInfo) - { - hasFlagByte = TRUE; - } - // Uncompress. This allocates memory and may throw. return CompressDebugInfo::WalkILOffsets( pDebugInfo, // input boundsType, - hasFlagByte, pContext, pfnWalkILOffsets ); @@ -4139,6 +4115,56 @@ BOOL EEJitManager::GetRichDebugInfo( return GetRichDebugInfoWorker(pDebugInfo, fpNew, pNewData, ppInlineTree, pNumInlineTree, ppRichMappings, pNumRichMappings); } +BOOL EECodeGenManager::GetAsyncDebugInfoWorker( + PTR_BYTE pDebugInfo, + IN FP_IDS_NEW fpNew, IN void* pNewData, + OUT ICorDebugInfo::AsyncInfo* pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pcAsyncVars) +{ + CONTRACTL { + THROWS; // on OOM. + GC_NOTRIGGER; // getting debug info shouldn't trigger + SUPPORTS_DAC; + } CONTRACTL_END; + + // No header created, which means no jit information is available. 
+ if (pDebugInfo == NULL) + return FALSE; + + CompressDebugInfo::RestoreAsyncDebugInfo( + fpNew, pNewData, + pDebugInfo, + pAsyncInfo, + ppSuspensionPoints, + ppAsyncVars, pcAsyncVars); + + return TRUE; +} + +BOOL EEJitManager::GetAsyncDebugInfo( + const DebugInfoRequest & request, + IN FP_IDS_NEW fpNew, IN void * pNewData, + OUT ICorDebugInfo::AsyncInfo* pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pcAsyncVars) +{ + CONTRACTL { + THROWS; // on OOM. + GC_NOTRIGGER; // getting debug info shouldn't trigger + SUPPORTS_DAC; + } CONTRACTL_END; + + CodeHeader * pHdr = GetCodeHeaderFromDebugInfoRequest(request); + _ASSERTE(pHdr != NULL); + + PTR_BYTE pDebugInfo = pHdr->GetDebugInfo(); + + return GetAsyncDebugInfoWorker(pDebugInfo, fpNew, pNewData, pAsyncInfo, ppSuspensionPoints, ppAsyncVars, pcAsyncVars); +} + #ifdef FEATURE_INTERPRETER BOOL InterpreterJitManager::GetBoundariesAndVars( const DebugInfoRequest & request, @@ -4222,6 +4248,29 @@ BOOL InterpreterJitManager::GetRichDebugInfo( return GetRichDebugInfoWorker(pDebugInfo, fpNew, pNewData, ppInlineTree, pNumInlineTree, ppRichMappings, pNumRichMappings); } + +BOOL InterpreterJitManager::GetAsyncDebugInfo( + const DebugInfoRequest & request, + IN FP_IDS_NEW fpNew, IN void * pNewData, + OUT ICorDebugInfo::AsyncInfo* pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pcAsyncVars) +{ + CONTRACTL { + THROWS; // on OOM. + GC_NOTRIGGER; // getting debug info shouldn't trigger + SUPPORTS_DAC; + } CONTRACTL_END; + + InterpreterCodeHeader * pHdr = GetCodeHeaderFromDebugInfoRequest(request); + _ASSERTE(pHdr != NULL); + + PTR_BYTE pDebugInfo = pHdr->GetDebugInfo(); + + return GetAsyncDebugInfoWorker(pDebugInfo, fpNew, pNewData, pAsyncInfo, ppSuspensionPoints, ppAsyncVars, pcAsyncVars); +} + #endif // FEATURE_INTERPRETER #ifdef DACCESS_COMPILE @@ -4247,15 +4296,9 @@ void CodeHeader::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJ } #endif // FEATURE_EH_FUNCLETS -#ifdef FEATURE_ON_STACK_REPLACEMENT - BOOL hasFlagByte = TRUE; -#else - BOOL hasFlagByte = FALSE; -#endif - if (this->GetDebugInfo() != NULL) { - CompressDebugInfo::EnumMemoryRegions(flags, this->GetDebugInfo(), hasFlagByte); + CompressDebugInfo::EnumMemoryRegions(flags, this->GetDebugInfo()); } } @@ -4311,7 +4354,7 @@ void InterpreterCodeHeader::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJit if (this->GetDebugInfo() != NULL) { - CompressDebugInfo::EnumMemoryRegions(flags, this->GetDebugInfo(), FALSE /* hasFlagByte */); + CompressDebugInfo::EnumMemoryRegions(flags, this->GetDebugInfo()); } } @@ -6446,8 +6489,7 @@ BOOL ReadyToRunJitManager::GetBoundariesAndVars( boundsType, pDebugInfo, // input pcMap, ppMap, // output - pcVars, ppVars, // output - FALSE); // no patchpoint info + pcVars, ppVars); // output return TRUE; } @@ -6479,7 +6521,6 @@ size_t ReadyToRunJitManager::WalkILOffsets( return CompressDebugInfo::WalkILOffsets( pDebugInfo, // input boundsType, - FALSE, // no patchpoint info pContext, pfnWalkILOffsets); } @@ -6491,7 +6532,65 @@ BOOL ReadyToRunJitManager::GetRichDebugInfo( OUT ICorDebugInfo::RichOffsetMapping** ppRichMappings, OUT ULONG32* pNumRichMappings) { - return FALSE; + CONTRACTL { + THROWS; // on OOM. 
+ GC_NOTRIGGER; // getting vars shouldn't trigger + SUPPORTS_DAC; + } CONTRACTL_END; + + EECodeInfo codeInfo(request.GetStartAddress()); + if (!codeInfo.IsValid()) + return FALSE; + + ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(codeInfo.GetMethodToken()); + PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(codeInfo.GetMethodToken()); + + PTR_BYTE pDebugInfo = pReadyToRunInfo->GetDebugInfo(pRuntimeFunction); + if (pDebugInfo == NULL) + return FALSE; + + CompressDebugInfo::RestoreRichDebugInfo( + fpNew, pNewData, + pDebugInfo, + ppInlineTree, pNumInlineTree, + ppRichMappings, pNumRichMappings); + + return TRUE; +} + +BOOL ReadyToRunJitManager::GetAsyncDebugInfo( + const DebugInfoRequest & request, + IN FP_IDS_NEW fpNew, IN void * pNewData, + OUT ICorDebugInfo::AsyncInfo* pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pcAsyncVars) +{ + CONTRACTL { + THROWS; // on OOM. + GC_NOTRIGGER; // getting async debug info shouldn't trigger + SUPPORTS_DAC; + } CONTRACTL_END; + + EECodeInfo codeInfo(request.GetStartAddress()); + if (!codeInfo.IsValid()) + return FALSE; + + ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(codeInfo.GetMethodToken()); + PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(codeInfo.GetMethodToken()); + + PTR_BYTE pDebugInfo = pReadyToRunInfo->GetDebugInfo(pRuntimeFunction); + if (pDebugInfo == NULL) + return FALSE; + + CompressDebugInfo::RestoreAsyncDebugInfo( + fpNew, pNewData, + pDebugInfo, + pAsyncInfo, + ppSuspensionPoints, + ppAsyncVars, pcAsyncVars); + + return TRUE; } #ifdef DACCESS_COMPILE @@ -6512,7 +6611,7 @@ void ReadyToRunJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemory if (pDebugInfo == NULL) return; - CompressDebugInfo::EnumMemoryRegions(flags, pDebugInfo, FALSE); + CompressDebugInfo::EnumMemoryRegions(flags, pDebugInfo); } #endif diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index 0862961e8f1b62..706bfb42343525 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -1716,6 +1716,14 @@ class IJitManager OUT ICorDebugInfo::RichOffsetMapping** ppRichMappings, OUT ULONG32* pNumRichMappings) = 0; + virtual BOOL GetAsyncDebugInfo( + const DebugInfoRequest & request, + IN FP_IDS_NEW fpNew, IN void * pNewData, + OUT ICorDebugInfo::AsyncInfo* pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pcAsyncVars) = 0; + virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset) = 0; virtual BOOL JitCodeToMethodInfo( @@ -1901,6 +1909,14 @@ class EECodeGenManager : public IJitManager OUT ICorDebugInfo::RichOffsetMapping** ppRichMappings, OUT ULONG32* pNumRichMappings); + BOOL GetAsyncDebugInfoWorker( + PTR_BYTE pDebugInfo, + IN FP_IDS_NEW fpNew, IN void* pNewData, + OUT ICorDebugInfo::AsyncInfo* pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pcAsyncVars); + template BOOL JitCodeToMethodInfoWorker( RangeSection * pRangeSection, @@ -2116,6 +2132,14 @@ class EEJitManager final : public EECodeGenManager OUT ICorDebugInfo::RichOffsetMapping** ppRichMappings, OUT ULONG32* pNumRichMappings); + virtual BOOL GetAsyncDebugInfo( + const DebugInfoRequest & request, + IN FP_IDS_NEW fpNew, IN void * pNewData, + OUT ICorDebugInfo::AsyncInfo* 
pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pcAsyncVars); + virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset); virtual BOOL JitCodeToMethodInfo(RangeSection * pRangeSection, @@ -2668,6 +2692,14 @@ class ReadyToRunJitManager final : public IJitManager OUT ICorDebugInfo::RichOffsetMapping** ppRichMappings, OUT ULONG32* pNumRichMappings); + virtual BOOL GetAsyncDebugInfo( + const DebugInfoRequest & request, + IN FP_IDS_NEW fpNew, IN void * pNewData, + OUT ICorDebugInfo::AsyncInfo* pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pcAsyncVars); + virtual BOOL JitCodeToMethodInfo(RangeSection * pRangeSection, PCODE currentPC, MethodDesc** ppMethodDesc, @@ -2795,6 +2827,14 @@ class InterpreterJitManager final : public EECodeGenManager OUT ICorDebugInfo::RichOffsetMapping** ppRichMappings, OUT ULONG32* pNumRichMappings); + virtual BOOL GetAsyncDebugInfo( + const DebugInfoRequest & request, + IN FP_IDS_NEW fpNew, IN void * pNewData, + OUT ICorDebugInfo::AsyncInfo* pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pcAsyncVars); + #ifndef DACCESS_COMPILE virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause, CrawlFrame *pCf); diff --git a/src/coreclr/vm/corelib.h b/src/coreclr/vm/corelib.h index 7227608a0dba17..544a5f90732086 100644 --- a/src/coreclr/vm/corelib.h +++ b/src/coreclr/vm/corelib.h @@ -856,7 +856,7 @@ DEFINE_FIELD_U(ArgBuffer, TailCallTls, m_argBuffer) DEFINE_CLASS(CONTINUATION, CompilerServices, Continuation) DEFINE_FIELD(CONTINUATION, NEXT, Next) -DEFINE_FIELD(CONTINUATION, RESUME, Resume) +DEFINE_FIELD(CONTINUATION, RESUME_INFO, ResumeInfo) DEFINE_FIELD(CONTINUATION, STATE, State) DEFINE_FIELD(CONTINUATION, FLAGS, Flags) diff --git a/src/coreclr/vm/datadescriptor/datadescriptor.inc b/src/coreclr/vm/datadescriptor/datadescriptor.inc index e84411a6fc338b..298fb468f0daf0 100644 --- a/src/coreclr/vm/datadescriptor/datadescriptor.inc +++ b/src/coreclr/vm/datadescriptor/datadescriptor.inc @@ -1072,7 +1072,7 @@ CDAC_GLOBAL_CONTRACT(CodeVersions, 1) CDAC_GLOBAL_CONTRACT(ComWrappers, 1) #endif // FEATURE_COMWRAPPERS CDAC_GLOBAL_CONTRACT(DacStreams, 1) -CDAC_GLOBAL_CONTRACT(DebugInfo, 1) +CDAC_GLOBAL_CONTRACT(DebugInfo, 2) CDAC_GLOBAL_CONTRACT(EcmaMetadata, 1) CDAC_GLOBAL_CONTRACT(Exception, 1) CDAC_GLOBAL_CONTRACT(ExecutionManager, 2) diff --git a/src/coreclr/vm/debuginfostore.cpp b/src/coreclr/vm/debuginfostore.cpp index 2687329aea9862..585bdabf8de52d 100644 --- a/src/coreclr/vm/debuginfostore.cpp +++ b/src/coreclr/vm/debuginfostore.cpp @@ -312,6 +312,9 @@ static int g_CDI_bVarsTotalCompress = 0; static int g_CDI_bRichDebugInfoTotalUncompress = 0; static int g_CDI_bRichDebugInfoTotalCompress = 0; + +static int g_CDI_bAsyncDebugInfoTotalUncompress = 0; +static int g_CDI_bAsyncDebugInfoTotalCompress = 0; #endif // Helper to write a compressed Native Var Info @@ -457,13 +460,40 @@ static void DoRichOffsetMappings( } } -enum EXTRA_DEBUG_INFO_FLAGS +template +static void DoAsyncSuspensionPoints( + T trans, + ULONG32 cSuspensionPoints, + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints) { - // Debug info contains patchpoint information - EXTRA_DEBUG_INFO_PATCHPOINT = 1, - // Debug info contains rich information - EXTRA_DEBUG_INFO_RICH 
= 2, -}; + unsigned lastDiagnosticNativeOffset = 0; + for (uint32_t i = 0; i < cSuspensionPoints; i++) + { + ICorDebugInfo::AsyncSuspensionPoint* sp = &suspensionPoints[i]; + trans.DoEncodedDeltaU32NonMonotonic(sp->DiagnosticNativeOffset, lastDiagnosticNativeOffset); + lastDiagnosticNativeOffset = sp->DiagnosticNativeOffset; + + trans.DoEncodedU32(sp->NumContinuationVars); + } +} + +template +static void DoAsyncVars( + T trans, + ULONG32 cSuspensionPoints, + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, + ULONG32 cVars, + ICorDebugInfo::AsyncContinuationVarInfo* vars) +{ + for (uint32_t i = 0; i < cVars; i++) + { + ICorDebugInfo::AsyncContinuationVarInfo* var = &vars[i]; + trans.DoEncodedAdjustedU32(var->VarNumber, (DWORD) ICorDebugInfo::MAX_ILNUM); + trans.DoEncodedU32(var->Offset); + } +} + +static constexpr int BITS_FOR_SOURCE_TYPE = 3; #ifndef DACCESS_COMPILE @@ -536,7 +566,7 @@ void CompressDebugInfo::CompressBoundaries( pWriter->WriteEncodedU32(nativeOffsetBits - 1); pWriter->WriteEncodedU32(ilOffsetBits - 1); - uint32_t bitWidth = 2 + nativeOffsetBits + ilOffsetBits; + uint32_t bitWidth = BITS_FOR_SOURCE_TYPE + nativeOffsetBits + ilOffsetBits; uint8_t bitsInProgress = 0; uint8_t bitsInProgressCount = 0; @@ -552,31 +582,23 @@ void CompressDebugInfo::CompressBoundaries( ICorDebugInfo::OffsetMapping * pBound = &pMap[i]; + // We only expect to see some source types + _ASSERTE((pBound->source & ~(ICorDebugInfo::CALL_INSTRUCTION | ICorDebugInfo::STACK_EMPTY | ICorDebugInfo::ASYNC)) == 0); + uint32_t sourceBits = 0; - switch ((int)pBound->source) - { - case (int)ICorDebugInfo::SOURCE_TYPE_INVALID: - sourceBits = 0; - break; - case (int)ICorDebugInfo::CALL_INSTRUCTION: - sourceBits = 1; - break; - case (int)ICorDebugInfo::STACK_EMPTY: - sourceBits = 2; - break; - case (int)(ICorDebugInfo::CALL_INSTRUCTION | ICorDebugInfo::STACK_EMPTY): - sourceBits = 3; - break; - default: - _ASSERTE(!"Unknown source type in CompressDebugInfo::CompressBoundaries"); - sourceBits = 0; // default to invalid - break; - } + if ((pBound->source & ICorDebugInfo::CALL_INSTRUCTION) != 0) + sourceBits |= 1; + if ((pBound->source & ICorDebugInfo::STACK_EMPTY) != 0) + sourceBits |= 2; + if ((pBound->source & ICorDebugInfo::ASYNC) != 0) + sourceBits |= 4; + // Should be encodable in BITS_FOR_SOURCE_TYPE bits + _ASSERTE((sourceBits & ~((1u << BITS_FOR_SOURCE_TYPE) - 1)) == 0); uint64_t mappingDataEncoded = sourceBits | - ((uint64_t)nativeOffsetDelta << 2) | - ((uint64_t)((int32_t)pBound->ilOffset - (int32_t)ICorDebugInfo::MAX_MAPPING_VALUE) << (2 + nativeOffsetBits)); + ((uint64_t)nativeOffsetDelta << BITS_FOR_SOURCE_TYPE) | + ((uint64_t)((int32_t)pBound->ilOffset - (int32_t)ICorDebugInfo::MAX_MAPPING_VALUE) << (BITS_FOR_SOURCE_TYPE + nativeOffsetBits)); for (uint8_t bitsToWrite = (uint8_t)bitWidth; bitsToWrite > 0;) { @@ -694,7 +716,42 @@ void CompressDebugInfo::CompressRichDebugInfo( PVOID pBlob = pWriter->GetBlob(&cbBlob); g_CDI_bRichDebugInfoTotalUncompress += 8 + cInlineTree * sizeof(ICorDebugInfo::InlineTreeNode) + cRichOffsetMappings * sizeof(ICorDebugInfo::RichOffsetMapping); - g_CDI_bRichDebugInfoTotalCompress += 4 + cbBlob; + g_CDI_bRichDebugInfoTotalCompress += cbBlob; +#endif +} + +void CompressDebugInfo::CompressAsyncDebugInfo( + IN ICorDebugInfo::AsyncInfo* asyncInfo, + IN ICorDebugInfo::AsyncSuspensionPoint* pSuspensionPoints, + IN ICorDebugInfo::AsyncContinuationVarInfo* pAsyncVars, + IN ULONG iAsyncVars, + IN OUT NibbleWriter* pWriter) +{ + CONTRACTL + { + THROWS; + GC_NOTRIGGER; + MODE_ANY; + } 
+ CONTRACTL_END; + + _ASSERTE(pWriter != NULL); + _ASSERTE((asyncInfo->NumSuspensionPoints > 0) && (pSuspensionPoints != NULL)); + pWriter->WriteEncodedU32(asyncInfo->NumSuspensionPoints); + pWriter->WriteEncodedU32(iAsyncVars); + + TransferWriter t(*pWriter); + DoAsyncSuspensionPoints(t, asyncInfo->NumSuspensionPoints, pSuspensionPoints); + DoAsyncVars(t, asyncInfo->NumSuspensionPoints, pSuspensionPoints, iAsyncVars, pAsyncVars); + + pWriter->Flush(); + +#ifdef _DEBUG + DWORD cbBlob; + PVOID pBlob = pWriter->GetBlob(&cbBlob); + + g_CDI_bAsyncDebugInfoTotalUncompress += 8 + asyncInfo->NumSuspensionPoints * sizeof(ICorDebugInfo::AsyncSuspensionPoint) + iAsyncVars * sizeof(ICorDebugInfo::AsyncContinuationVarInfo); + g_CDI_bAsyncDebugInfoTotalCompress += cbBlob; #endif } @@ -810,19 +867,22 @@ static void ComposeMapping(const InstrumentedILOffsetMapping * pProfilerILMap, I } } -PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars( - IN ICorDebugInfo::OffsetMapping* pOffsetMapping, - IN ULONG iOffsetMapping, - const InstrumentedILOffsetMapping * pInstrumentedILBounds, - IN ICorDebugInfo::NativeVarInfo* pNativeVarInfo, - IN ULONG iNativeVarInfo, - IN PatchpointInfo* patchpointInfo, - IN ICorDebugInfo::InlineTreeNode* pInlineTree, - IN ULONG iInlineTree, - IN ICorDebugInfo::RichOffsetMapping* pRichOffsetMappings, - IN ULONG iRichOffsetMappings, - IN BOOL writeFlagByte, - IN LoaderHeap* pLoaderHeap +PTR_BYTE CompressDebugInfo::Compress( + IN ICorDebugInfo::OffsetMapping* pOffsetMapping, + IN ULONG iOffsetMapping, + const InstrumentedILOffsetMapping * pInstrumentedILBounds, + IN ICorDebugInfo::NativeVarInfo* pNativeVarInfo, + IN ULONG iNativeVarInfo, + IN PatchpointInfo* patchpointInfo, + IN ICorDebugInfo::InlineTreeNode* pInlineTree, + IN ULONG iInlineTree, + IN ICorDebugInfo::RichOffsetMapping* pRichOffsetMappings, + IN ULONG iRichOffsetMappings, + IN ICorDebugInfo::AsyncInfo* asyncInfo, + IN ICorDebugInfo::AsyncSuspensionPoint* pSuspensionPoints, + IN ICorDebugInfo::AsyncContinuationVarInfo* pAsyncVars, + IN ULONG iAsyncVars, + IN LoaderHeap* pLoaderHeap ) { CONTRACTL { @@ -831,7 +891,8 @@ PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars( PRECONDITION((iNativeVarInfo == 0) == (pNativeVarInfo == NULL)); PRECONDITION((iInlineTree == 0) || (pInlineTree != NULL)); PRECONDITION((iRichOffsetMappings == 0) || (pRichOffsetMappings != NULL)); - PRECONDITION(writeFlagByte || ((patchpointInfo == NULL) && (iInlineTree == 0) && (iRichOffsetMappings == 0))); + PRECONDITION((asyncInfo->NumSuspensionPoints == 0) || (pSuspensionPoints != NULL)); + PRECONDITION((iAsyncVars == 0) || (pAsyncVars != NULL)); PRECONDITION(pLoaderHeap != NULL); } CONTRACTL_END; @@ -892,31 +953,54 @@ PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars( pRichDebugInfo = richDebugInfoBuffer.GetBlob(&cbRichDebugInfo); } + NibbleWriter asyncInfoBuffer; + DWORD cbAsyncInfo = 0; + PVOID pAsyncInfoBlob = NULL; + if (asyncInfo->NumSuspensionPoints > 0) + { + CompressDebugInfo::CompressAsyncDebugInfo(asyncInfo, pSuspensionPoints, pAsyncVars, iAsyncVars, &asyncInfoBuffer); + pAsyncInfoBlob = asyncInfoBuffer.GetBlob(&cbAsyncInfo); + } + // Now write it all out to the buffer in a compact fashion. 
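// Illustrative sketch, not part of the patch: the header-format decision made by
// the code below, pulled out into a hypothetical helper. The slim header encodes
// only (cbBounds, cbVars); the presence of any optional chunk forces the fat
// header, and so does a cbBounds equal to the DebugInfoFat marker (0), which
// could otherwise not be distinguished from the marker when decoding.
static bool NeedsFatDebugInfoHeader(DWORD cbBounds,
                                    DWORD cbUninstrumentedBounds,
                                    DWORD cbPatchpointInfo,
                                    DWORD cbRichDebugInfo,
                                    DWORD cbAsyncInfo)
{
    return (cbBounds == DebugInfoFat) ||
           (cbUninstrumentedBounds > 0) ||
           (cbPatchpointInfo > 0) ||
           (cbRichDebugInfo > 0) ||
           (cbAsyncInfo > 0);
}
// The fat header then stores, in order: DebugInfoFat, cbBounds, cbVars,
// cbUninstrumentedBounds, cbPatchpointInfo, cbRichDebugInfo, cbAsyncInfo,
// followed by the chunk payloads laid out in that same order.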
NibbleWriter w; - if (cbUninstrumentedBounds != 0) + + bool isFat = + (cbBounds == DebugInfoFat) || + (cbPatchpointInfo > 0) || + (cbRichDebugInfo > 0) || + (cbAsyncInfo > 0) || + (cbUninstrumentedBounds > 0); + + if (isFat) { - w.WriteEncodedU32(DebugInfoBoundsHasInstrumentedBounds); // 0xFFFFFFFF is used to indicate that the instrumented bounds are present. + w.WriteEncodedU32(DebugInfoFat); // Indicator that this is a fat header w.WriteEncodedU32(cbBounds); + w.WriteEncodedU32(cbVars); w.WriteEncodedU32(cbUninstrumentedBounds); + w.WriteEncodedU32(cbPatchpointInfo); + w.WriteEncodedU32(cbRichDebugInfo); + w.WriteEncodedU32(cbAsyncInfo); } else { w.WriteEncodedU32(cbBounds); + w.WriteEncodedU32(cbVars); } - w.WriteEncodedU32(cbVars); + w.Flush(); DWORD cbHeader; PVOID pHeader = w.GetBlob(&cbHeader); S_UINT32 cbFinalSize(0); - if (writeFlagByte) - cbFinalSize += 1; - + cbFinalSize += cbHeader; + cbFinalSize += cbBounds; + cbFinalSize += cbVars; + cbFinalSize += cbUninstrumentedBounds; cbFinalSize += cbPatchpointInfo; - cbFinalSize += S_UINT32(4) + S_UINT32(cbRichDebugInfo); - cbFinalSize += S_UINT32(cbHeader) + S_UINT32(cbBounds) + S_UINT32(cbUninstrumentedBounds) + S_UINT32(cbVars); + cbFinalSize += cbRichDebugInfo; + cbFinalSize += cbAsyncInfo; if (cbFinalSize.IsOverflow()) ThrowHR(COR_E_OVERFLOW); @@ -924,29 +1008,6 @@ PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars( BYTE *ptrStart = (BYTE *)(void *)pLoaderHeap->AllocMem(S_SIZE_T(cbFinalSize.Value())); BYTE *ptr = ptrStart; - if (writeFlagByte) - { - BYTE flagByte = 0; - if (cbPatchpointInfo > 0) - flagByte |= EXTRA_DEBUG_INFO_PATCHPOINT; - if (cbRichDebugInfo > 0) - flagByte |= EXTRA_DEBUG_INFO_RICH; - - *ptr++ = flagByte; - } - - if (cbPatchpointInfo > 0) - memcpy(ptr, (BYTE*) patchpointInfo, cbPatchpointInfo); - ptr += cbPatchpointInfo; - - if (cbRichDebugInfo > 0) - { - memcpy(ptr, &cbRichDebugInfo, 4); - ptr += 4; - memcpy(ptr, pRichDebugInfo, cbRichDebugInfo); - ptr += cbRichDebugInfo; - } - memcpy(ptr, pHeader, cbHeader); ptr += cbHeader; @@ -954,13 +1015,25 @@ PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars( memcpy(ptr, pBounds, cbBounds); ptr += cbBounds; + if (cbVars > 0) + memcpy(ptr, pVars, cbVars); + ptr += cbVars; + if (cbUninstrumentedBounds > 0) memcpy(ptr, pUninstrumentedBounds, cbUninstrumentedBounds); ptr += cbUninstrumentedBounds; - if (cbVars > 0) - memcpy(ptr, pVars, cbVars); - ptr += cbVars; + if (cbPatchpointInfo > 0) + memcpy(ptr, (BYTE*) patchpointInfo, cbPatchpointInfo); + ptr += cbPatchpointInfo; + + if (cbRichDebugInfo > 0) + memcpy(ptr, pRichDebugInfo, cbRichDebugInfo); + ptr += cbRichDebugInfo; + + if (cbAsyncInfo > 0) + memcpy(ptr, pAsyncInfoBlob, cbAsyncInfo); + ptr += cbAsyncInfo; #ifdef _DEBUG ULONG32 cNewBounds = 0; @@ -968,7 +1041,7 @@ PTR_BYTE CompressDebugInfo::CompressBoundariesAndVars( ICorDebugInfo::OffsetMapping *pNewMap = NULL; ICorDebugInfo::NativeVarInfo *pNewVars = NULL; RestoreBoundariesAndVars( - DecompressNew, NULL, BoundsType::Instrumented, ptrStart, &cNewBounds, &pNewMap, &cNewVars, &pNewVars, writeFlagByte); + DecompressNew, NULL, BoundsType::Instrumented, ptrStart, &cNewBounds, &pNewMap, &cNewVars, &pNewVars); _ASSERTE(cNewBounds == iOffsetMapping); _ASSERTE(cNewBounds == 0 || pNewMap != NULL); @@ -1013,7 +1086,7 @@ static void DoBounds(PTR_BYTE addrBounds, uint32_t cbBounds, TNumBounds countHan uint32_t bitsForNativeDelta = r.ReadEncodedU32_NoThrow() + 1; // Number of bits needed for native deltas uint32_t bitsForILOffsets = r.ReadEncodedU32_NoThrow() + 1; // How many 
bits needed for IL offsets - uint32_t bitsPerEntry = bitsForNativeDelta + bitsForILOffsets + 2; // 2 bits for source type + uint32_t bitsPerEntry = bitsForNativeDelta + bitsForILOffsets + BITS_FOR_SOURCE_TYPE; TADDR addrBoundsArray = dac_cast(addrBounds) + r.GetNextByteIndex(); TADDR addrBoundsArrayForReads = AlignDown(addrBoundsArray, sizeof(uint64_t)); uint32_t bitOffsetForReads = (uint32_t)((addrBoundsArray - addrBoundsArrayForReads) * 8); // We want to read using aligned 64bit reads, but we want to start at the right bit offset. @@ -1027,23 +1100,17 @@ static void DoBounds(PTR_BYTE addrBounds, uint32_t cbBounds, TNumBounds countHan for (uint32_t iEntry = 0; iEntry < cNumEntries; iEntry++, bitOffsetForReads += bitsPerEntry) { uint64_t mappingDataEncoded = ReadFromBitOffsets(dac_cast(addrBoundsArrayForReads), bitOffsetForReads, bitsPerEntry); - switch (mappingDataEncoded & 0x3) // Last 2 bits are source type - { - case 0: - bound.source = ICorDebugInfo::SOURCE_TYPE_INVALID; - break; - case 1: - bound.source = ICorDebugInfo::CALL_INSTRUCTION; - break; - case 2: - bound.source = ICorDebugInfo::STACK_EMPTY; - break; - case 3: - bound.source = (ICorDebugInfo::SourceTypes)(ICorDebugInfo::STACK_EMPTY | ICorDebugInfo::CALL_INSTRUCTION); - break; - } + uint32_t sourceTypes = 0; + if ((mappingDataEncoded & 1) != 0) + sourceTypes |= ICorDebugInfo::CALL_INSTRUCTION; + if ((mappingDataEncoded & 2) != 0) + sourceTypes |= ICorDebugInfo::STACK_EMPTY; + if ((mappingDataEncoded & 4) != 0) + sourceTypes |= ICorDebugInfo::ASYNC; + + bound.source = (ICorDebugInfo::SourceTypes)sourceTypes; - mappingDataEncoded = mappingDataEncoded >> 2; // Remove source type bits + mappingDataEncoded = mappingDataEncoded >> BITS_FOR_SOURCE_TYPE; // Remove source type bits uint32_t nativeOffsetDelta = (uint32_t)(mappingDataEncoded & ((1ULL << bitsForNativeDelta) - 1)); currentNativeOffset += nativeOffsetDelta; bound.nativeOffset = currentNativeOffset; @@ -1060,6 +1127,53 @@ static void DoBounds(PTR_BYTE addrBounds, uint32_t cbBounds, TNumBounds countHan // Uncompression (restore) routines //----------------------------------------------------------------------------- +DebugInfoChunks CompressDebugInfo::DecodeChunks(IN PTR_BYTE pDebugInfo) +{ + CONTRACTL + { + THROWS; // reading from nibble stream may throw on invalid data. 
+ GC_NOTRIGGER; + MODE_ANY; + SUPPORTS_DAC; + } + CONTRACTL_END; + + NibbleReader r(pDebugInfo, 42 /* maximum size of compressed 7 UINT32s */); + + ULONG cbBoundsOrFatMarker = r.ReadEncodedU32(); + + DebugInfoChunks chunks; + + if (cbBoundsOrFatMarker == DebugInfoFat) + { + // Fat header + chunks.cbBounds = r.ReadEncodedU32(); + chunks.cbVars = r.ReadEncodedU32(); + chunks.cbUninstrumentedBounds = r.ReadEncodedU32(); + chunks.cbPatchpointInfo = r.ReadEncodedU32(); + chunks.cbRichDebugInfo = r.ReadEncodedU32(); + chunks.cbAsyncInfo = r.ReadEncodedU32(); + } + else + { + chunks.cbBounds = cbBoundsOrFatMarker; + chunks.cbVars = r.ReadEncodedU32(); + chunks.cbUninstrumentedBounds = 0; + chunks.cbPatchpointInfo = 0; + chunks.cbRichDebugInfo = 0; + chunks.cbAsyncInfo = 0; + } + + chunks.pBounds = pDebugInfo + r.GetNextByteIndex(); + chunks.pVars = chunks.pBounds + chunks.cbBounds; + chunks.pUninstrumentedBounds = chunks.pVars + chunks.cbVars; + chunks.pPatchpointInfo = chunks.pUninstrumentedBounds + chunks.cbUninstrumentedBounds; + chunks.pRichDebugInfo = chunks.pPatchpointInfo + chunks.cbPatchpointInfo; + chunks.pAsyncInfo = chunks.pRichDebugInfo + chunks.cbRichDebugInfo; + chunks.pEnd = chunks.pAsyncInfo + chunks.cbAsyncInfo; + return chunks; +} + // Uncompress data supplied by Compress functions. void CompressDebugInfo::RestoreBoundariesAndVars( IN FP_IDS_NEW fpNew, @@ -1069,8 +1183,7 @@ void CompressDebugInfo::RestoreBoundariesAndVars( OUT ULONG32 * pcMap, // number of entries in ppMap OUT ICorDebugInfo::OffsetMapping **ppMap, // pointer to newly allocated array OUT ULONG32 *pcVars, - OUT ICorDebugInfo::NativeVarInfo **ppVars, - BOOL hasFlagByte + OUT ICorDebugInfo::NativeVarInfo **ppVars ) { CONTRACTL @@ -1087,50 +1200,16 @@ void CompressDebugInfo::RestoreBoundariesAndVars( if (pcVars != NULL) *pcVars = 0; if (ppVars != NULL) *ppVars = NULL; - if (hasFlagByte) - { - // Check flag byte and skip over any patchpoint info - BYTE flagByte = *pDebugInfo; - pDebugInfo++; - - if ((flagByte & EXTRA_DEBUG_INFO_PATCHPOINT) != 0) - { - PTR_PatchpointInfo patchpointInfo = dac_cast(pDebugInfo); - pDebugInfo += patchpointInfo->PatchpointInfoSize(); - flagByte &= ~EXTRA_DEBUG_INFO_PATCHPOINT; - } - - if ((flagByte & EXTRA_DEBUG_INFO_RICH) != 0) - { - UINT32 cbRichDebugInfo = *PTR_UINT32(pDebugInfo); - pDebugInfo += 4; - pDebugInfo += cbRichDebugInfo; - flagByte &= ~EXTRA_DEBUG_INFO_RICH; - } - - _ASSERTE(flagByte == 0); - } - - NibbleReader r(pDebugInfo, 24 /* maximum size of compressed 4 UINT32s */); - - ULONG cbBounds = r.ReadEncodedU32(); - ULONG cbUninstrumentedBounds = 0; - if (cbBounds == DebugInfoBoundsHasInstrumentedBounds) - { - // This means we have instrumented bounds. - cbBounds = r.ReadEncodedU32(); - cbUninstrumentedBounds = r.ReadEncodedU32(); - } - ULONG cbVars = r.ReadEncodedU32(); + DebugInfoChunks chunks = DecodeChunks(pDebugInfo); - PTR_BYTE addrBounds = pDebugInfo + r.GetNextByteIndex(); - PTR_BYTE addrVars = addrBounds + cbBounds + cbUninstrumentedBounds; + PTR_BYTE addrBounds = chunks.pBounds; + unsigned cbBounds = chunks.cbBounds; - if ((boundsType == BoundsType::Uninstrumented) && cbUninstrumentedBounds != 0) + if ((boundsType == BoundsType::Uninstrumented) && chunks.cbUninstrumentedBounds != 0) { // If we have uninstrumented bounds, we will use them instead of the regular bounds. 
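// Minimal sketch, not part of the patch: with DecodeChunks handing back a
// (pointer, size) pair per chunk, choosing between the instrumented and the
// uninstrumented bounds reduces to picking the matching pair, which the code
// below does inline. ChunkView and SelectBoundsChunk are hypothetical names
// used only for this illustration.
struct ChunkView
{
    PTR_BYTE pData;
    ULONG32  cbData;
};

static ChunkView SelectBoundsChunk(const DebugInfoChunks& chunks, BoundsType boundsType)
{
    if ((boundsType == BoundsType::Uninstrumented) && (chunks.cbUninstrumentedBounds != 0))
        return { chunks.pUninstrumentedBounds, chunks.cbUninstrumentedBounds };

    return { chunks.pBounds, chunks.cbBounds };
}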
- addrBounds = addrBounds + cbBounds; - cbBounds = cbUninstrumentedBounds; + addrBounds = chunks.pUninstrumentedBounds; + cbBounds = chunks.cbUninstrumentedBounds; } if ((pcMap != NULL || ppMap != NULL) && (cbBounds != 0)) @@ -1162,9 +1241,9 @@ void CompressDebugInfo::RestoreBoundariesAndVars( }); } - if ((pcVars != NULL || ppVars != NULL) && (cbVars != 0)) + if ((pcVars != NULL || ppVars != NULL) && (chunks.cbVars != 0)) { - NibbleReader r(addrVars, cbVars); + NibbleReader r(chunks.pVars, chunks.cbVars); TransferReader t(r); UINT32 cNumEntries = r.ReadEncodedU32(); @@ -1194,7 +1273,6 @@ void CompressDebugInfo::RestoreBoundariesAndVars( size_t CompressDebugInfo::WalkILOffsets( IN PTR_BYTE pDebugInfo, BoundsType boundsType, - BOOL hasFlagByte, void* pContext, size_t (* pfnWalkILOffsets)(ICorDebugInfo::OffsetMapping *pOffsetMapping, void *pContext) ) @@ -1208,50 +1286,16 @@ size_t CompressDebugInfo::WalkILOffsets( } CONTRACTL_END; - if (hasFlagByte) - { - // Check flag byte and skip over any patchpoint info - BYTE flagByte = *pDebugInfo; - pDebugInfo++; - - if ((flagByte & EXTRA_DEBUG_INFO_PATCHPOINT) != 0) - { - PTR_PatchpointInfo patchpointInfo = dac_cast(pDebugInfo); - pDebugInfo += patchpointInfo->PatchpointInfoSize(); - flagByte &= ~EXTRA_DEBUG_INFO_PATCHPOINT; - } - - if ((flagByte & EXTRA_DEBUG_INFO_RICH) != 0) - { - UINT32 cbRichDebugInfo = *PTR_UINT32(pDebugInfo); - pDebugInfo += 4; - pDebugInfo += cbRichDebugInfo; - flagByte &= ~EXTRA_DEBUG_INFO_RICH; - } - - _ASSERTE(flagByte == 0); - } - - NibbleReader r(pDebugInfo, 24 /* maximum size of compressed 4 UINT32s */); - - ULONG cbBounds = r.ReadEncodedU32_NoThrow(); - ULONG cbUninstrumentedBounds = 0; - if (cbBounds == DebugInfoBoundsHasInstrumentedBounds) - { - // This means we have instrumented bounds. - cbBounds = r.ReadEncodedU32(); - cbUninstrumentedBounds = r.ReadEncodedU32(); - } - ULONG cbVars = r.ReadEncodedU32_NoThrow(); + DebugInfoChunks chunks = DecodeChunks(pDebugInfo); - PTR_BYTE addrBounds = pDebugInfo + r.GetNextByteIndex(); - PTR_BYTE addrVars = addrBounds + cbBounds + cbUninstrumentedBounds; + PTR_BYTE addrBounds = chunks.pBounds; + unsigned cbBounds = chunks.cbBounds; - if ((boundsType == BoundsType::Uninstrumented) && cbUninstrumentedBounds != 0) + if ((boundsType == BoundsType::Uninstrumented) && chunks.cbUninstrumentedBounds != 0) { // If we have uninstrumented bounds, we will use them instead of the regular bounds. - addrBounds = addrBounds + cbBounds; - cbBounds = cbUninstrumentedBounds; + addrBounds = chunks.pUninstrumentedBounds; + cbBounds = chunks.cbUninstrumentedBounds; } if (cbBounds != 0) @@ -1291,21 +1335,19 @@ PatchpointInfo * CompressDebugInfo::RestorePatchpointInfo(IN PTR_BYTE pDebugInfo { CONTRACTL { - NOTHROW; + THROWS; GC_NOTRIGGER; MODE_ANY; SUPPORTS_DAC; } CONTRACTL_END; - // Check flag byte. 
- BYTE flagByte = *pDebugInfo; - pDebugInfo++; + DebugInfoChunks chunks = DecodeChunks(pDebugInfo); - if ((flagByte & EXTRA_DEBUG_INFO_PATCHPOINT) == 0) + if (chunks.cbPatchpointInfo == 0) return NULL; - return static_cast(PTR_READ(dac_cast(pDebugInfo), dac_cast(pDebugInfo)->PatchpointInfoSize())); + return static_cast(PTR_READ(dac_cast(chunks.pPatchpointInfo), chunks.cbPatchpointInfo)); } #endif @@ -1327,8 +1369,9 @@ void CompressDebugInfo::RestoreRichDebugInfo( } CONTRACTL_END; - BYTE flagByte = *pDebugInfo; - if ((flagByte & EXTRA_DEBUG_INFO_RICH) == 0) + DebugInfoChunks chunks = DecodeChunks(pDebugInfo); + + if (chunks.cbRichDebugInfo == 0) { *ppInlineTree = NULL; *pNumInlineTree = 0; @@ -1337,19 +1380,7 @@ void CompressDebugInfo::RestoreRichDebugInfo( return; } - pDebugInfo++; - -#ifdef FEATURE_ON_STACK_REPLACEMENT - if ((flagByte & EXTRA_DEBUG_INFO_PATCHPOINT) != 0) - { - PTR_PatchpointInfo patchpointInfo = dac_cast(pDebugInfo); - pDebugInfo += patchpointInfo->PatchpointInfoSize(); - } -#endif - - UINT32 cbRichDebugInfo = *PTR_UINT32(pDebugInfo); - pDebugInfo += 4; - NibbleReader r(pDebugInfo, cbRichDebugInfo); + NibbleReader r(chunks.pRichDebugInfo, chunks.cbRichDebugInfo); *pNumInlineTree = r.ReadEncodedU32(); *pNumRichMappings = r.ReadEncodedU32(); @@ -1369,62 +1400,73 @@ void CompressDebugInfo::RestoreRichDebugInfo( DoRichOffsetMappings(t, *pNumRichMappings, *ppRichMappings); } -#ifdef DACCESS_COMPILE -void CompressDebugInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo, BOOL hasFlagByte) +void CompressDebugInfo::RestoreAsyncDebugInfo( + IN FP_IDS_NEW fpNew, + IN void* pNewData, + IN PTR_BYTE pDebugInfo, + OUT ICorDebugInfo::AsyncInfo* pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pNumAsyncVars) { CONTRACTL { - NOTHROW; + THROWS; GC_NOTRIGGER; - SUPPORTS_DAC; + MODE_ANY; } CONTRACTL_END; - PTR_BYTE pStart = pDebugInfo; + DebugInfoChunks chunks = DecodeChunks(pDebugInfo); - if (hasFlagByte) + if (chunks.cbAsyncInfo == 0) { - // Check flag byte and skip over any patchpoint info - BYTE flagByte = *pDebugInfo; - pDebugInfo++; + *pAsyncInfo = {}; + *ppSuspensionPoints = NULL; + *ppAsyncVars = NULL; + *pNumAsyncVars = 0; + return; + } - if ((flagByte & EXTRA_DEBUG_INFO_PATCHPOINT) != 0) - { - PTR_PatchpointInfo patchpointInfo = dac_cast(pDebugInfo); - pDebugInfo += patchpointInfo->PatchpointInfoSize(); - flagByte &= ~EXTRA_DEBUG_INFO_PATCHPOINT; - } + NibbleReader r(chunks.pAsyncInfo, chunks.cbAsyncInfo); + pAsyncInfo->NumSuspensionPoints = r.ReadEncodedU32(); + *pNumAsyncVars = r.ReadEncodedU32(); - if ((flagByte & EXTRA_DEBUG_INFO_RICH) != 0) - { - UINT32 cbRichDebugInfo = *PTR_UINT32(pDebugInfo); - pDebugInfo += 4; - pDebugInfo += cbRichDebugInfo; - flagByte &= ~EXTRA_DEBUG_INFO_RICH; - } + UINT32 cbSuspPoints = pAsyncInfo->NumSuspensionPoints * sizeof(ICorDebugInfo::AsyncSuspensionPoint); + *ppSuspensionPoints = reinterpret_cast(fpNew(pNewData, cbSuspPoints)); + if (*ppSuspensionPoints == NULL) + ThrowOutOfMemory(); - _ASSERTE(flagByte == 0); - } + UINT32 cbAsyncVars = *pNumAsyncVars * sizeof(ICorDebugInfo::AsyncContinuationVarInfo); + *ppAsyncVars = reinterpret_cast(fpNew(pNewData, cbAsyncVars)); + if (*ppAsyncVars == NULL) + ThrowOutOfMemory(); - NibbleReader r(pDebugInfo, 24 /* maximum size of compressed 4 UINT32s */); + TransferReader t(r); + DoAsyncSuspensionPoints(t, pAsyncInfo->NumSuspensionPoints, *ppSuspensionPoints); + DoAsyncVars(t, 
pAsyncInfo->NumSuspensionPoints, *ppSuspensionPoints, *pNumAsyncVars, *ppAsyncVars); +} - ULONG cbBounds = r.ReadEncodedU32(); - ULONG cbUninstrumentedBounds = 0; - if (cbBounds == DebugInfoBoundsHasInstrumentedBounds) +#ifdef DACCESS_COMPILE +void CompressDebugInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo) +{ + CONTRACTL { - // This means we have instrumented bounds. - cbBounds = r.ReadEncodedU32(); - cbUninstrumentedBounds = r.ReadEncodedU32(); + NOTHROW; + GC_NOTRIGGER; + SUPPORTS_DAC; } - ULONG cbVars = r.ReadEncodedU32(); + CONTRACTL_END; + + PTR_BYTE pStart = pDebugInfo; - pDebugInfo += r.GetNextByteIndex() + cbBounds + cbUninstrumentedBounds + cbVars; + DebugInfoChunks chunks = DecodeChunks(pDebugInfo); // NibbleReader reads in units of sizeof(NibbleChunkType) // So we need to account for any partial chunk at the end. - pDebugInfo = AlignUp(dac_cast(pDebugInfo), sizeof(NibbleReader::NibbleChunkType)); + PTR_BYTE pEnd = AlignUp(dac_cast(chunks.pEnd), sizeof(NibbleReader::NibbleChunkType)); - DacEnumMemoryRegion(dac_cast(pStart), pDebugInfo - pStart); + DacEnumMemoryRegion(dac_cast(pStart), pEnd - pStart); } #endif // DACCESS_COMPILE @@ -1503,6 +1545,31 @@ BOOL DebugInfoManager::GetRichDebugInfo( return pJitMan->GetRichDebugInfo(request, fpNew, pNewData, ppInlineTree, pNumInlineTree, ppRichMappings, pNumRichMappings); } +BOOL DebugInfoManager::GetAsyncDebugInfo( + const DebugInfoRequest & request, + IN FP_IDS_NEW fpNew, IN void * pNewData, + OUT ICorDebugInfo::AsyncInfo* pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pcAsyncVars) +{ + CONTRACTL + { + THROWS; + WRAPPER(GC_TRIGGERS); // depends on fpNew + SUPPORTS_DAC; + } + CONTRACTL_END; + + IJitManager* pJitMan = ExecutionManager::FindJitMan(request.GetStartAddress()); + if (pJitMan == NULL) + { + return FALSE; // no info available. 
+ } + + return pJitMan->GetAsyncDebugInfo(request, fpNew, pNewData, pAsyncInfo, ppSuspensionPoints, ppAsyncVars, pcAsyncVars); +} + #ifdef DACCESS_COMPILE void DebugInfoManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo) { diff --git a/src/coreclr/vm/debuginfostore.h b/src/coreclr/vm/debuginfostore.h index b8ca0b27ad2a9c..abeac114249964 100644 --- a/src/coreclr/vm/debuginfostore.h +++ b/src/coreclr/vm/debuginfostore.h @@ -60,6 +60,23 @@ enum class BoundsType Uninstrumented, // Get the uninstrumented bounds }; +struct DebugInfoChunks +{ + PTR_BYTE pBounds; + ULONG32 cbBounds; + PTR_BYTE pVars; + ULONG32 cbVars; + PTR_BYTE pUninstrumentedBounds; + ULONG32 cbUninstrumentedBounds; + PTR_BYTE pPatchpointInfo; + ULONG32 cbPatchpointInfo; + PTR_BYTE pRichDebugInfo; + ULONG32 cbRichDebugInfo; + PTR_BYTE pAsyncInfo; + ULONG32 cbAsyncInfo; + PTR_BYTE pEnd; +}; + //----------------------------------------------------------------------------- // Utility routines used for compression // Note that the compression is just an implementation detail of the stores, @@ -87,20 +104,32 @@ class CompressDebugInfo IN ICorDebugInfo::RichOffsetMapping* pRichOffsetMappings, IN OUT NibbleWriter* pWriter); + static void CompressAsyncDebugInfo( + IN ICorDebugInfo::AsyncInfo* asyncInfo, + IN ICorDebugInfo::AsyncSuspensionPoint* pSuspensionPoints, + IN ICorDebugInfo::AsyncContinuationVarInfo* pAsyncVars, + IN ULONG iAsyncVars, + IN OUT NibbleWriter* pWriter); + + static DebugInfoChunks DecodeChunks(IN PTR_BYTE pDebugInfo); + public: // Stores the result in LoaderHeap - static PTR_BYTE CompressBoundariesAndVars( - IN ICorDebugInfo::OffsetMapping * pOffsetMapping, - IN ULONG iOffsetMapping, - const InstrumentedILOffsetMapping * pInstrumentedILBounds, - IN ICorDebugInfo::NativeVarInfo * pNativeVarInfo, - IN ULONG iNativeVarInfo, - IN PatchpointInfo * patchpointInfo, - IN ICorDebugInfo::InlineTreeNode * pInlineTree, - IN ULONG iInlineTree, - IN ICorDebugInfo::RichOffsetMapping * pRichOffsetMappings, - IN ULONG iRichOffsetMappings, - IN BOOL writeFlagByte, + static PTR_BYTE Compress( + IN ICorDebugInfo::OffsetMapping* pOffsetMapping, + IN ULONG iOffsetMapping, + const InstrumentedILOffsetMapping* pInstrumentedILBounds, + IN ICorDebugInfo::NativeVarInfo* pNativeVarInfo, + IN ULONG iNativeVarInfo, + IN PatchpointInfo* patchpointInfo, + IN ICorDebugInfo::InlineTreeNode* pInlineTree, + IN ULONG iInlineTree, + IN ICorDebugInfo::RichOffsetMapping* pRichOffsetMappings, + IN ULONG iRichOffsetMappings, + IN ICorDebugInfo::AsyncInfo* asyncInfo, + IN ICorDebugInfo::AsyncSuspensionPoint* pSuspensionPoints, + IN ICorDebugInfo::AsyncContinuationVarInfo* pAsyncVars, + IN ULONG iAsyncVars, IN LoaderHeap * pLoaderHeap ); @@ -113,15 +142,13 @@ class CompressDebugInfo OUT ULONG32 * pcMap, // number of entries in ppMap OUT ICorDebugInfo::OffsetMapping **ppMap, // pointer to newly allocated array OUT ULONG32 *pcVars, - OUT ICorDebugInfo::NativeVarInfo **ppVars, - BOOL hasFlagByte + OUT ICorDebugInfo::NativeVarInfo **ppVars ); // Walk the ILOffsets without needing to allocate a buffer static size_t WalkILOffsets( IN PTR_BYTE pDebugInfo, BoundsType boundsType, - BOOL hasFlagByte, void* pContext, size_t (* pfnWalkILOffsets)(ICorDebugInfo::OffsetMapping *pOffsetMapping, void *pContext) ); @@ -141,8 +168,17 @@ class CompressDebugInfo OUT ICorDebugInfo::RichOffsetMapping** ppRichMappings, OUT ULONG32* pNumRichMappings); + static void RestoreAsyncDebugInfo( + IN FP_IDS_NEW fpNew, + IN void* pNewData, + IN 
PTR_BYTE pDebugInfo, + OUT ICorDebugInfo::AsyncInfo* pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pNumAsyncVars); + #ifdef DACCESS_COMPILE - static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo, BOOL hasFlagByte); + static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags, PTR_BYTE pDebugInfo); #endif }; @@ -172,11 +208,19 @@ class DebugInfoManager OUT ICorDebugInfo::RichOffsetMapping** ppRichMappings, OUT ULONG32* pNumRichMappings); + static BOOL GetAsyncDebugInfo( + const DebugInfoRequest & request, + IN FP_IDS_NEW fpNew, IN void * pNewData, + OUT ICorDebugInfo::AsyncInfo* pAsyncInfo, + OUT ICorDebugInfo::AsyncSuspensionPoint** ppSuspensionPoints, + OUT ICorDebugInfo::AsyncContinuationVarInfo** ppAsyncVars, + OUT ULONG32* pcAsyncVars); + #ifdef DACCESS_COMPILE static void EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo); #endif }; -#define DebugInfoBoundsHasInstrumentedBounds 0xFFFFFFFF +#define DebugInfoFat 0 #endif // __DebugInfoStore_H_ diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index 1456dd80cb6c5f..9d2b1deb563efd 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -10261,7 +10261,7 @@ void CEEInfo::getAsyncInfo(CORINFO_ASYNC_INFO* pAsyncInfoOut) pAsyncInfoOut->continuationClsHnd = CORINFO_CLASS_HANDLE(CoreLibBinder::GetClass(CLASS__CONTINUATION)); pAsyncInfoOut->continuationNextFldHnd = CORINFO_FIELD_HANDLE(CoreLibBinder::GetField(FIELD__CONTINUATION__NEXT)); - pAsyncInfoOut->continuationResumeFldHnd = CORINFO_FIELD_HANDLE(CoreLibBinder::GetField(FIELD__CONTINUATION__RESUME)); + pAsyncInfoOut->continuationResumeInfoFldHnd = CORINFO_FIELD_HANDLE(CoreLibBinder::GetField(FIELD__CONTINUATION__RESUME_INFO)); pAsyncInfoOut->continuationStateFldHnd = CORINFO_FIELD_HANDLE(CoreLibBinder::GetField(FIELD__CONTINUATION__STATE)); pAsyncInfoOut->continuationFlagsFldHnd = CORINFO_FIELD_HANDLE(CoreLibBinder::GetField(FIELD__CONTINUATION__FLAGS)); pAsyncInfoOut->captureExecutionContextMethHnd = CORINFO_METHOD_HANDLE(CoreLibBinder::GetMethod(METHOD__ASYNC_HELPERS__CAPTURE_EXECUTION_CONTEXT)); @@ -10772,6 +10772,9 @@ CEECodeGenInfo::CEECodeGenInfo(PrepareCodeConfig* config, MethodDesc* fd, COR_IL , m_numInlineTreeNodes(0) , m_richOffsetMappings(NULL) , m_numRichOffsetMappings(0) + , m_dbgAsyncSuspensionPoints(NULL) + , m_dbgAsyncContinuationVars(NULL) + , m_numAsyncContinuationVars(0) , m_gphCache() { STANDARD_VM_CONTRACT; @@ -10784,6 +10787,7 @@ CEECodeGenInfo::CEECodeGenInfo(PrepareCodeConfig* config, MethodDesc* fd, COR_IL m_ILHeader = ilHeader; m_jitFlags = GetCompileFlags(config, m_pMethodBeingCompiled, &m_MethodInfo); + m_dbgAsyncInfo.NumSuspensionPoints = 0; } void CEECodeGenInfo::getHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ @@ -11217,6 +11221,28 @@ void CEECodeGenInfo::reportRichMappings( EE_TO_JIT_TRANSITION(); } +void CEECodeGenInfo::reportAsyncDebugInfo( + ICorDebugInfo::AsyncInfo* asyncInfo, + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, + ICorDebugInfo::AsyncContinuationVarInfo* vars, + uint32_t numVars) +{ + CONTRACTL { + NOTHROW; + GC_NOTRIGGER; + MODE_PREEMPTIVE; + } CONTRACTL_END; + + JIT_TO_EE_TRANSITION_LEAF(); + + m_dbgAsyncInfo = *asyncInfo; + m_dbgAsyncSuspensionPoints = suspensionPoints; + m_dbgAsyncContinuationVars = vars; + m_numAsyncContinuationVars = numVars; + + EE_TO_JIT_TRANSITION_LEAF(); +} + void 
CEECodeGenInfo::reportMetadata( const char* key, const void* value, @@ -11479,7 +11505,7 @@ void CEECodeGenInfo::CompressDebugInfo(PCODE nativeEntry, NativeCodeVersion nati return; } - if ((m_iOffsetMapping == 0) && (m_iNativeVarInfo == 0) && (patchpointInfo == NULL) && (m_numInlineTreeNodes == 0) && (m_numRichOffsetMappings == 0)) + if ((m_iOffsetMapping == 0) && (m_iNativeVarInfo == 0) && (patchpointInfo == NULL) && (m_numInlineTreeNodes == 0) && (m_numRichOffsetMappings == 0) && (m_dbgAsyncInfo.NumSuspensionPoints == 0)) return; if (patchpointInfo != NULL) @@ -11489,14 +11515,6 @@ void CEECodeGenInfo::CompressDebugInfo(PCODE nativeEntry, NativeCodeVersion nati EX_TRY { - BOOL writeFlagByte = FALSE; -#ifdef FEATURE_ON_STACK_REPLACEMENT - writeFlagByte = TRUE; -#endif - if (m_jitManager->IsStoringRichDebugInfo()) - writeFlagByte = TRUE; - - const InstrumentedILOffsetMapping *pILOffsetMapping = NULL; InstrumentedILOffsetMapping loadTimeMapping; #ifdef FEATURE_REJIT @@ -11524,13 +11542,13 @@ void CEECodeGenInfo::CompressDebugInfo(PCODE nativeEntry, NativeCodeVersion nati } #endif - PTR_BYTE pDebugInfo = CompressDebugInfo::CompressBoundariesAndVars( + PTR_BYTE pDebugInfo = CompressDebugInfo::Compress( m_pOffsetMapping, m_iOffsetMapping, pILOffsetMapping, m_pNativeVarInfo, m_iNativeVarInfo, patchpointInfo, m_inlineTreeNodes, m_numInlineTreeNodes, m_richOffsetMappings, m_numRichOffsetMappings, - writeFlagByte, + &m_dbgAsyncInfo, m_dbgAsyncSuspensionPoints, m_dbgAsyncContinuationVars, m_numAsyncContinuationVars, m_pMethodBeingCompiled->GetLoaderAllocator()->GetLowFrequencyHeap()); SetDebugInfo(pDebugInfo); @@ -11809,10 +11827,17 @@ void CEEJitInfo::recordRelocation(void * location, switch (fRelocType) { +#ifdef TARGET_64BIT case IMAGE_REL_BASED_DIR64: // Write 64-bits into location *((UINT64 *) locationRW) = (UINT64) target; break; +#else + case IMAGE_REL_BASED_HIGHLOW: + // Write 32-bits into location + *((UINT32 *) locationRW) = (UINT32) target; + break; +#endif #ifdef TARGET_AMD64 case IMAGE_REL_BASED_REL32: @@ -14618,7 +14643,7 @@ static Signature BuildResumptionStubCalliSignature(MetaSig& msig, MethodTable* m return AllocateSignature(alloc, sigBuilder, pamTracker); } -CORINFO_METHOD_HANDLE CEEJitInfo::getAsyncResumptionStub() +CORINFO_METHOD_HANDLE CEEJitInfo::getAsyncResumptionStub(void** entryPoint) { CONTRACTL{ THROWS; @@ -14769,6 +14794,9 @@ CORINFO_METHOD_HANDLE CEEJitInfo::getAsyncResumptionStub() amTracker.SuppressRelease(); + ILStubResolver *pResolver = result->AsDynamicMethodDesc()->GetILStubResolver(); + pResolver->SetStubTargetMethodDesc(m_pMethodBeingCompiled); + const char* optimizationTierName = "UnknownTier"; #ifdef FEATURE_TIERED_COMPILATION switch (ncv.GetOptimizationTier()) @@ -14800,6 +14828,7 @@ CORINFO_METHOD_HANDLE CEEJitInfo::getAsyncResumptionStub() sl.LogILStub(CORJIT_FLAGS()); #endif + *entryPoint = (void*)result->GetMultiCallableAddrOfCode(); return CORINFO_METHOD_HANDLE(result); } @@ -14971,6 +15000,16 @@ void CEEInfo::reportRichMappings( UNREACHABLE(); // only called on derived class. } +void CEEInfo::reportAsyncDebugInfo( + ICorDebugInfo::AsyncInfo* asyncInfo, + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, + ICorDebugInfo::AsyncContinuationVarInfo* vars, + uint32_t numVars) +{ + LIMITED_METHOD_CONTRACT; + UNREACHABLE(); // only called on derived class. 
+} + void CEEInfo::reportMetadata(const char* key, const void* value, size_t length) { LIMITED_METHOD_CONTRACT; @@ -14989,7 +15028,7 @@ PatchpointInfo* CEEInfo::getOSRInfo(unsigned* ilOffset) UNREACHABLE(); // only called on derived class. } -CORINFO_METHOD_HANDLE CEEInfo::getAsyncResumptionStub() +CORINFO_METHOD_HANDLE CEEInfo::getAsyncResumptionStub(void** entryPoint) { LIMITED_METHOD_CONTRACT; UNREACHABLE(); // only called on derived class. diff --git a/src/coreclr/vm/jitinterface.h b/src/coreclr/vm/jitinterface.h index c5011bcee2c46a..a158c2307a877f 100644 --- a/src/coreclr/vm/jitinterface.h +++ b/src/coreclr/vm/jitinterface.h @@ -533,11 +533,18 @@ class CEECodeGenInfo : public CEEInfo freeArrayInternal(m_inlineTreeNodes); if (m_richOffsetMappings != NULL) freeArrayInternal(m_richOffsetMappings); + if (m_dbgAsyncSuspensionPoints != NULL) + freeArrayInternal(m_dbgAsyncSuspensionPoints); + if (m_dbgAsyncContinuationVars != NULL) + freeArrayInternal(m_dbgAsyncContinuationVars); m_inlineTreeNodes = NULL; m_numInlineTreeNodes = 0; m_richOffsetMappings = NULL; m_numRichOffsetMappings = 0; + m_dbgAsyncSuspensionPoints = NULL; + m_dbgAsyncContinuationVars = NULL; + m_numAsyncContinuationVars = 0; } // ICorDebugInfo stuff. @@ -564,6 +571,12 @@ class CEECodeGenInfo : public CEEInfo ICorDebugInfo::RichOffsetMapping* mappings, uint32_t numMappings) override final; + void reportAsyncDebugInfo( + ICorDebugInfo::AsyncInfo* asyncInfo, + ICorDebugInfo::AsyncSuspensionPoint* suspensionPoints, + ICorDebugInfo::AsyncContinuationVarInfo* vars, + uint32_t numVars) override final; + void reportMetadata(const char* key, const void* value, size_t length) override final; virtual void WriteCode(EECodeGenManager * jitMgr) = 0; @@ -627,6 +640,11 @@ class CEECodeGenInfo : public CEEInfo ICorDebugInfo::RichOffsetMapping *m_richOffsetMappings; ULONG32 m_numRichOffsetMappings; + ICorDebugInfo::AsyncInfo m_dbgAsyncInfo; + ICorDebugInfo::AsyncSuspensionPoint *m_dbgAsyncSuspensionPoints; + ICorDebugInfo::AsyncContinuationVarInfo *m_dbgAsyncContinuationVars; + ULONG32 m_numAsyncContinuationVars; + // The first time a call is made to CEEJitInfo::GetProfilingHandle() from this thread // for this method, these values are filled in. Thereafter, these values are used // in lieu of calling into the base CEEInfo::GetProfilingHandle() again. This protects the @@ -887,7 +905,7 @@ class CEEJitInfo final : public CEECodeGenInfo void setPatchpointInfo(PatchpointInfo* patchpointInfo) override; PatchpointInfo* getOSRInfo(unsigned* ilOffset) override; - virtual CORINFO_METHOD_HANDLE getAsyncResumptionStub() override final; + virtual CORINFO_METHOD_HANDLE getAsyncResumptionStub(void** entryPoint) override final; protected :
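// Closing illustration (sketch, not part of the patch): the boundary encoding in
// debuginfostore.cpp now reserves BITS_FOR_SOURCE_TYPE (3) low bits per entry and
// treats the source type as independent flags instead of a 2-bit enumeration, so
// CALL_INSTRUCTION, STACK_EMPTY and the new ASYNC flag can be combined freely.
// PackSourceType and UnpackSourceType are hypothetical helpers; the patch performs
// the same packing inline in CompressBoundaries and the unpacking in DoBounds.
static uint32_t PackSourceType(ICorDebugInfo::SourceTypes source)
{
    uint32_t bits = 0;
    if ((source & ICorDebugInfo::CALL_INSTRUCTION) != 0)
        bits |= 1;
    if ((source & ICorDebugInfo::STACK_EMPTY) != 0)
        bits |= 2;
    if ((source & ICorDebugInfo::ASYNC) != 0)
        bits |= 4;
    return bits;
}

static ICorDebugInfo::SourceTypes UnpackSourceType(uint64_t mappingDataEncoded)
{
    uint32_t source = 0;
    if ((mappingDataEncoded & 1) != 0)
        source |= ICorDebugInfo::CALL_INSTRUCTION;
    if ((mappingDataEncoded & 2) != 0)
        source |= ICorDebugInfo::STACK_EMPTY;
    if ((mappingDataEncoded & 4) != 0)
        source |= ICorDebugInfo::ASYNC;
    return (ICorDebugInfo::SourceTypes)source;
}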