diff --git a/src/coreclr/nativeaot/Runtime/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/CMakeLists.txt index 3612b5384a5554..7cce3067c79936 100644 --- a/src/coreclr/nativeaot/Runtime/CMakeLists.txt +++ b/src/coreclr/nativeaot/Runtime/CMakeLists.txt @@ -111,8 +111,6 @@ if (WIN32) windows/PalRedhawkCommon.cpp windows/PalRedhawkMinWin.cpp ${GC_DIR}/windows/gcenv.windows.cpp - eventtrace.cpp - rheventtrace.cpp ) list(APPEND FULL_RUNTIME_SOURCES windows/CoffNativeCodeManager.cpp) @@ -222,6 +220,18 @@ endif() if(FEATURE_EVENT_TRACE) add_definitions(-DFEATURE_EVENT_TRACE) + list(APPEND COMMON_RUNTIME_SOURCES + eventtrace.cpp + ) + # These are carry-overs from .NET Native and only included on Windows currently + # bulktype : directly emits via ETW with EventWrite + # gcheap : GCHeapDump, GCHeapSurvivalAndMovement - not prioritizing for nativeaot yet + if (WIN32) + list(APPEND COMMON_RUNTIME_SOURCES + eventtrace_bulktype.cpp + eventtrace_gcheap.cpp + ) + endif() endif() add_definitions(-DFEATURE_BASICFREEZE) diff --git a/src/coreclr/nativeaot/Runtime/eventpipe/disableddotnetruntime.cpp b/src/coreclr/nativeaot/Runtime/eventpipe/disableddotnetruntime.cpp index f2ba91f3c4d2dd..eb0edc3bde14fa 100644 --- a/src/coreclr/nativeaot/Runtime/eventpipe/disableddotnetruntime.cpp +++ b/src/coreclr/nativeaot/Runtime/eventpipe/disableddotnetruntime.cpp @@ -632,3 +632,8 @@ ULONG EventPipeWriteEventGCFitBucketInfo( { return 0; } + +bool DotNETRuntimeProvider_IsEnabled(unsigned char level, unsigned long long keyword) +{ + return false; +} diff --git a/src/coreclr/nativeaot/Runtime/eventpipe/dotnetruntime.cpp b/src/coreclr/nativeaot/Runtime/eventpipe/dotnetruntime.cpp index 53c4574f9f2b67..1f6c1b41c19f0c 100644 --- a/src/coreclr/nativeaot/Runtime/eventpipe/dotnetruntime.cpp +++ b/src/coreclr/nativeaot/Runtime/eventpipe/dotnetruntime.cpp @@ -3147,3 +3147,18 @@ void InitDotNETRuntime(void) EventPipeEventGCLOHCompact = EventPipeAdapter::AddEvent(EventPipeProviderDotNETRuntime,208,1,0,EP_EVENT_LEVEL_INFORMATIONAL,true); EventPipeEventGCFitBucketInfo = EventPipeAdapter::AddEvent(EventPipeProviderDotNETRuntime,209,1,0,EP_EVENT_LEVEL_VERBOSE,true); } + +bool DotNETRuntimeProvider_IsEnabled(unsigned char level, unsigned long long keyword) +{ + if (!EventPipeAdapter::Enabled()) + return false; + + EVENTPIPE_TRACE_CONTEXT& context = MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context.EventPipeProvider; + if (!context.IsEnabled) + return false; + + if (level > context.Level) + return false; + + return (keyword == (ULONGLONG)0) || (keyword & context.EnabledKeywordsBitmask) != 0; +} diff --git a/src/coreclr/nativeaot/Runtime/eventtrace.cpp b/src/coreclr/nativeaot/Runtime/eventtrace.cpp index e1d9437e980ef3..4eb9c4e2ff8c59 100644 --- a/src/coreclr/nativeaot/Runtime/eventtrace.cpp +++ b/src/coreclr/nativeaot/Runtime/eventtrace.cpp @@ -13,437 +13,12 @@ #include "daccess.h" -#include "slist.h" -#include "varint.h" -#include "regdisplay.h" -#include "stackframeiterator.h" #include "thread.h" #include "threadstore.h" #include "threadstore.inl" -//#include "PalRedhawk.h" -#include "EventPipeInterface.h" - -#define Win32EventWrite PalEventWrite - -#include "eventtracepriv.h" volatile LONGLONG ETW::GCLog::s_l64LastClientSequenceNumber = 0; -/****************************************************************************/ -/* Methods that are called from the runtime */ -/****************************************************************************/ - - -// Simple helpers called by the GC to decide whether it needs to do a walk of heap -// 
objects and / or roots. - -BOOL ETW::GCLog::ShouldWalkHeapObjectsForEtw() -{ - LIMITED_METHOD_CONTRACT; - return ETW_TRACING_CATEGORY_ENABLED( - MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, - TRACE_LEVEL_INFORMATION, - CLR_GCHEAPDUMP_KEYWORD); -} - -BOOL ETW::GCLog::ShouldWalkHeapRootsForEtw() -{ - LIMITED_METHOD_CONTRACT; - return ETW_TRACING_CATEGORY_ENABLED( - MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, - TRACE_LEVEL_INFORMATION, - CLR_GCHEAPDUMP_KEYWORD); -} - -BOOL ETW::GCLog::ShouldTrackMovementForEtw() -{ - LIMITED_METHOD_CONTRACT; - return ETW_TRACING_CATEGORY_ENABLED( - MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, - TRACE_LEVEL_INFORMATION, - CLR_GCHEAPSURVIVALANDMOVEMENT_KEYWORD); -} - -BOOL ETW::GCLog::ShouldWalkStaticsAndCOMForEtw() -{ - // @TODO: - return FALSE; -} - -void ETW::GCLog::WalkStaticsAndCOMForETW() -{ - // @TODO: -} - - -// Batches the list of moved/surviving references for the GCBulkMovedObjectRanges / -// GCBulkSurvivingObjectRanges events -struct EtwGcMovementContext -{ -public: - // An instance of EtwGcMovementContext is dynamically allocated and stored - // inside of MovedReferenceContextForEtwAndProfapi, which in turn is dynamically - // allocated and pointed to by a profiling_context pointer created by the GC on the stack. - // This is used to batch and send GCBulkSurvivingObjectRanges events and - // GCBulkMovedObjectRanges events. This method is passed a pointer to - // MovedReferenceContextForEtwAndProfapi::pctxEtw; if non-NULL it gets returned; - // else, a new EtwGcMovementContext is allocated, stored in that pointer, and - // then returned. Callers should test for NULL, which can be returned if out of - // memory - static EtwGcMovementContext* GetOrCreateInGCContext(EtwGcMovementContext** ppContext) - { - LIMITED_METHOD_CONTRACT; - - _ASSERTE(ppContext != NULL); - - EtwGcMovementContext* pContext = *ppContext; - if (pContext == NULL) - { - pContext = new (nothrow) EtwGcMovementContext; - *ppContext = pContext; - } - return pContext; - } - - EtwGcMovementContext() : - iCurBulkSurvivingObjectRanges(0), - iCurBulkMovedObjectRanges(0) - { - LIMITED_METHOD_CONTRACT; - Clear(); - } - - // Resets structure for reuse on construction, and after each flush. - // (Intentionally leave iCurBulk* as is, since they persist across flushes within a GC.) - void Clear() - { - LIMITED_METHOD_CONTRACT; - cBulkSurvivingObjectRanges = 0; - cBulkMovedObjectRanges = 0; - ZeroMemory(rgGCBulkSurvivingObjectRanges, sizeof(rgGCBulkSurvivingObjectRanges)); - ZeroMemory(rgGCBulkMovedObjectRanges, sizeof(rgGCBulkMovedObjectRanges)); - } - - //--------------------------------------------------------------------------------------- - // GCBulkSurvivingObjectRanges - //--------------------------------------------------------------------------------------- - - // Sequence number for each GCBulkSurvivingObjectRanges event - UINT iCurBulkSurvivingObjectRanges; - - // Number of surviving object ranges currently filled out in rgGCBulkSurvivingObjectRanges array - UINT cBulkSurvivingObjectRanges; - - // Struct array containing the primary data for each GCBulkSurvivingObjectRanges - // event. 
Fix the size so the total event stays well below the 64K limit (leaving - // lots of room for non-struct fields that come before the values data) - EventStructGCBulkSurvivingObjectRangesValue rgGCBulkSurvivingObjectRanges[ - (cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkSurvivingObjectRangesValue)]; - - //--------------------------------------------------------------------------------------- - // GCBulkMovedObjectRanges - //--------------------------------------------------------------------------------------- - - // Sequence number for each GCBulkMovedObjectRanges event - UINT iCurBulkMovedObjectRanges; - - // Number of Moved object ranges currently filled out in rgGCBulkMovedObjectRanges array - UINT cBulkMovedObjectRanges; - - // Struct array containing the primary data for each GCBulkMovedObjectRanges - // event. Fix the size so the total event stays well below the 64K limit (leaving - // lots of room for non-struct fields that come before the values data) - EventStructGCBulkMovedObjectRangesValue rgGCBulkMovedObjectRanges[ - (cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkMovedObjectRangesValue)]; -}; - -// Contains above struct for ETW, plus extra info (opaque to us) used by the profiling -// API to track its own information. -struct MovedReferenceContextForEtwAndProfapi -{ - // An instance of MovedReferenceContextForEtwAndProfapi is dynamically allocated and - // pointed to by a profiling_context pointer created by the GC on the stack. This is used to - // batch and send GCBulkSurvivingObjectRanges events and GCBulkMovedObjectRanges - // events and the corresponding callbacks for profapi profilers. This method is - // passed a pointer to a MovedReferenceContextForEtwAndProfapi; if non-NULL it gets - // returned; else, a new MovedReferenceContextForEtwAndProfapi is allocated, stored - // in that pointer, and then returned. Callers should test for NULL, which can be - // returned if out of memory - static MovedReferenceContextForEtwAndProfapi* CreateInGCContext(LPVOID pvContext) - { - LIMITED_METHOD_CONTRACT; - - _ASSERTE(pvContext != NULL); - - MovedReferenceContextForEtwAndProfapi* pContext = *(MovedReferenceContextForEtwAndProfapi**)pvContext; - - // Shouldn't be called if the context was already created. Perhaps someone made - // one too many BeginMovedReferences calls, or didn't have an EndMovedReferences - // in between? - _ASSERTE(pContext == NULL); - - pContext = new (nothrow) MovedReferenceContextForEtwAndProfapi; - *(MovedReferenceContextForEtwAndProfapi**)pvContext = pContext; - - return pContext; - } - - - MovedReferenceContextForEtwAndProfapi() : - pctxProfAPI(NULL), - pctxEtw(NULL) - - { - LIMITED_METHOD_CONTRACT; - } - - LPVOID pctxProfAPI; - EtwGcMovementContext* pctxEtw; -}; - - -//--------------------------------------------------------------------------------------- -// -// Called by the GC for each moved or surviving reference that it encounters. This -// batches the info into our context's buffer, and flushes that buffer to ETW as it fills -// up. -// -// Arguments: -// * pbMemBlockStart - Start of moved/surviving block -// * pbMemBlockEnd - Next pointer after end of moved/surviving block -// * cbRelocDistance - How far did the block move? (0 for non-compacted / surviving -// references; negative if moved to earlier addresses) -// * profilingContext - Where our context is stored -// * fCompacting - Is this a compacting GC? 
Used to decide whether to send the moved -// or surviving event -// - -// static -void ETW::GCLog::MovedReference( - BYTE* pbMemBlockStart, - BYTE* pbMemBlockEnd, - ptrdiff_t cbRelocDistance, - size_t profilingContext, - BOOL fCompacting, - BOOL /*fAllowProfApiNotification*/) // @TODO: unused param from newer implementation -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - CAN_TAKE_LOCK; // EEToProfInterfaceImpl::AllocateMovedReferencesData takes lock - } - CONTRACTL_END; - - MovedReferenceContextForEtwAndProfapi* pCtxForEtwAndProfapi = - (MovedReferenceContextForEtwAndProfapi*)profilingContext; - if (pCtxForEtwAndProfapi == NULL) - { - _ASSERTE(!"MovedReference() encountered a NULL profilingContext"); - return; - } - -#ifdef PROFILING_SUPPORTED - // ProfAPI - { - BEGIN_PIN_PROFILER(CORProfilerTrackGC()); - g_profControlBlock.pProfInterface->MovedReference(pbMemBlockStart, - pbMemBlockEnd, - cbRelocDistance, - &(pCtxForEtwAndProfapi->pctxProfAPI), - fCompacting); - END_PIN_PROFILER(); - } -#endif // PROFILING_SUPPORTED - - // ETW - - if (!ShouldTrackMovementForEtw()) - return; - - EtwGcMovementContext* pContext = - EtwGcMovementContext::GetOrCreateInGCContext(&pCtxForEtwAndProfapi->pctxEtw); - if (pContext == NULL) - return; - - if (fCompacting) - { - // Moved references - - _ASSERTE(pContext->cBulkMovedObjectRanges < _countof(pContext->rgGCBulkMovedObjectRanges)); - EventStructGCBulkMovedObjectRangesValue* pValue = - &pContext->rgGCBulkMovedObjectRanges[pContext->cBulkMovedObjectRanges]; - pValue->OldRangeBase = pbMemBlockStart; - pValue->NewRangeBase = pbMemBlockStart + cbRelocDistance; - pValue->RangeLength = pbMemBlockEnd - pbMemBlockStart; - pContext->cBulkMovedObjectRanges++; - - // If buffer is now full, empty it into ETW - if (pContext->cBulkMovedObjectRanges == _countof(pContext->rgGCBulkMovedObjectRanges)) - { - FireEtwGCBulkMovedObjectRanges( - pContext->iCurBulkMovedObjectRanges, - pContext->cBulkMovedObjectRanges, - GetClrInstanceId(), - sizeof(pContext->rgGCBulkMovedObjectRanges[0]), - &pContext->rgGCBulkMovedObjectRanges[0]); - - pContext->iCurBulkMovedObjectRanges++; - pContext->Clear(); - } - } - else - { - // Surviving references - - _ASSERTE(pContext->cBulkSurvivingObjectRanges < _countof(pContext->rgGCBulkSurvivingObjectRanges)); - EventStructGCBulkSurvivingObjectRangesValue* pValue = - &pContext->rgGCBulkSurvivingObjectRanges[pContext->cBulkSurvivingObjectRanges]; - pValue->RangeBase = pbMemBlockStart; - pValue->RangeLength = pbMemBlockEnd - pbMemBlockStart; - pContext->cBulkSurvivingObjectRanges++; - - // If buffer is now full, empty it into ETW - if (pContext->cBulkSurvivingObjectRanges == _countof(pContext->rgGCBulkSurvivingObjectRanges)) - { - FireEtwGCBulkSurvivingObjectRanges( - pContext->iCurBulkSurvivingObjectRanges, - pContext->cBulkSurvivingObjectRanges, - GetClrInstanceId(), - sizeof(pContext->rgGCBulkSurvivingObjectRanges[0]), - &pContext->rgGCBulkSurvivingObjectRanges[0]); - - pContext->iCurBulkSurvivingObjectRanges++; - pContext->Clear(); - } - } -} - - -//--------------------------------------------------------------------------------------- -// -// Called by the GC just before it begins enumerating plugs. 
Gives us a chance to -// allocate our context structure, to allow us to batch plugs before firing events -// for them -// -// Arguments: -// * pProfilingContext - Points to location on stack (in GC function) where we can -// store a pointer to the context we allocate -// - -// static -void ETW::GCLog::BeginMovedReferences(size_t* pProfilingContext) -{ - LIMITED_METHOD_CONTRACT; - - MovedReferenceContextForEtwAndProfapi::CreateInGCContext(LPVOID(pProfilingContext)); -} - - -//--------------------------------------------------------------------------------------- -// -// Called by the GC at the end of a heap walk to give us a place to flush any remaining -// buffers of data to ETW or the profapi profiler -// -// Arguments: -// profilingContext - Our context we built up during the heap walk -// - -// static -void ETW::GCLog::EndMovedReferences(size_t profilingContext, - BOOL /*fAllowProfApiNotification*/) // @TODO: unused param from newer implementation -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - CAN_TAKE_LOCK; - } - CONTRACTL_END; - - MovedReferenceContextForEtwAndProfapi* pCtxForEtwAndProfapi = (MovedReferenceContextForEtwAndProfapi*)profilingContext; - if (pCtxForEtwAndProfapi == NULL) - { - _ASSERTE(!"EndMovedReferences() encountered a NULL profilingContext"); - return; - } - -#ifdef PROFILING_SUPPORTED - // ProfAPI - { - BEGIN_PIN_PROFILER(CORProfilerTrackGC()); - g_profControlBlock.pProfInterface->EndMovedReferences(&(pCtxForEtwAndProfapi->pctxProfAPI)); - END_PIN_PROFILER(); - } -#endif //PROFILING_SUPPORTED - - // ETW - - if (!ShouldTrackMovementForEtw()) - return; - - // If context isn't already set up for us, then we haven't been collecting any data - // for ETW events. - EtwGcMovementContext* pContext = pCtxForEtwAndProfapi->pctxEtw; - if (pContext == NULL) - return; - - // Flush any remaining moved or surviving range data - - if (pContext->cBulkMovedObjectRanges > 0) - { - FireEtwGCBulkMovedObjectRanges( - pContext->iCurBulkMovedObjectRanges, - pContext->cBulkMovedObjectRanges, - GetClrInstanceId(), - sizeof(pContext->rgGCBulkMovedObjectRanges[0]), - &pContext->rgGCBulkMovedObjectRanges[0]); - } - - if (pContext->cBulkSurvivingObjectRanges > 0) - { - FireEtwGCBulkSurvivingObjectRanges( - pContext->iCurBulkSurvivingObjectRanges, - pContext->cBulkSurvivingObjectRanges, - GetClrInstanceId(), - sizeof(pContext->rgGCBulkSurvivingObjectRanges[0]), - &pContext->rgGCBulkSurvivingObjectRanges[0]); - } - - pCtxForEtwAndProfapi->pctxEtw = NULL; - delete pContext; -} - -/***************************************************************************/ -/* This implements the public runtime provider's ManagedHeapCollectKeyword. It - performs a full, gen-2, blocking GC. -/***************************************************************************/ -void ETW::GCLog::ForceGC(LONGLONG l64ClientSequenceNumber) -{ - CONTRACTL - { - NOTHROW; - GC_TRIGGERS; - MODE_ANY; - } - CONTRACTL_END; - - if (!GCHeapUtilities::IsGCHeapInitialized()) - return; - - // No InterlockedExchange64 on Redhawk, even though there is one for - // InterlockedCompareExchange64. Technically, there's a race here by using - // InterlockedCompareExchange64, but it's not worth addressing. The race would be - // between two ETW controllers trying to trigger GCs simultaneously, in which case - // one will win and get its sequence number to appear in the GCStart event, while the - // other will lose. Rare, uninteresting, and low-impact. 
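    // [Illustrative sketch, not part of this change] A hypothetical helper
    // showing how a true 64-bit exchange could be synthesized from the
    // compare-exchange primitive alone (assuming the (dest, exchange, comparand)
    // shape used below, returning the prior value):
    static LONGLONG ExchangeViaCompareExchange64(LONGLONG volatile* pDest, LONGLONG value)
    {
        LONGLONG old;
        do { old = *pDest; }                 // re-read until our CAS wins
        while (PalInterlockedCompareExchange64(pDest, value, old) != old);
        return old;
    }
    // The single compare-exchange that follows deliberately skips that retry
    // loop and accepts the benign race described above.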
- PalInterlockedCompareExchange64(&s_l64LastClientSequenceNumber, l64ClientSequenceNumber, s_l64LastClientSequenceNumber); - - ForceGCForDiagnostics(); -} - //--------------------------------------------------------------------------------------- // // Helper to fire the GCStart event. Figures out which version of GCStart to fire, and @@ -458,14 +33,8 @@ void ETW::GCLog::FireGcStart(ETW_GC_INFO* pGcInfo) { LIMITED_METHOD_CONTRACT; -#if !defined(FEATURE_PAL) || defined(FEATURE_DTRACE) - - if (EventPipeAdapter_Enabled() || ETW_TRACING_CATEGORY_ENABLED( - MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, - TRACE_LEVEL_INFORMATION, - CLR_GC_KEYWORD)) + if (RUNTIME_PROVIDER_CATEGORY_ENABLED(TRACE_LEVEL_INFORMATION, CLR_GC_KEYWORD)) { -#if !defined(FEATURE_PAL) // If the controller specified a client sequence number for us to log with this // GCStart, then retrieve it LONGLONG l64ClientSequenceNumberToLog = 0; @@ -473,679 +42,11 @@ void ETW::GCLog::FireGcStart(ETW_GC_INFO* pGcInfo) (pGcInfo->GCStart.Depth == GCHeapUtilities::GetGCHeap()->GetMaxGeneration()) && (pGcInfo->GCStart.Reason == ETW_GC_INFO::GC_INDUCED)) { -#ifdef FEATURE_NATIVEAOT // No InterlockedExchange64 on Redhawk (presumably b/c there is no compiler // intrinsic for this on x86, even though there is one for InterlockedCompareExchange64) l64ClientSequenceNumberToLog = PalInterlockedCompareExchange64(&s_l64LastClientSequenceNumber, 0, s_l64LastClientSequenceNumber); -#else - l64ClientSequenceNumberToLog = InterlockedExchange64(&s_l64LastClientSequenceNumber, 0); -#endif } FireEtwGCStart_V2(pGcInfo->GCStart.Count, pGcInfo->GCStart.Depth, pGcInfo->GCStart.Reason, pGcInfo->GCStart.Type, GetClrInstanceId(), l64ClientSequenceNumberToLog); - -#elif defined(FEATURE_DTRACE) - FireEtwGCStart(pGcInfo->GCStart.Count, pGcInfo->GCStart.Reason); -#endif } - -#endif // defined(FEATURE_PAL) || defined(FEATURE_DTRACE) -} - -//--------------------------------------------------------------------------------------- -// -// Contains code common to profapi and ETW scenarios where the profiler wants to force -// the CLR to perform a GC. The important work here is to create a managed thread for -// the current thread BEFORE the GC begins. On both ETW and profapi threads, there may -// not yet be a managed thread object. But some scenarios require a managed thread -// object be present (notably if we need to call into Jupiter during the GC). -// -// Return Value: -// HRESULT indicating success or failure -// -// Assumptions: -// Caller should ensure that the EE has fully started up and that the GC heap is -// initialized enough to actually perform a GC -// - -// static -HRESULT ETW::GCLog::ForceGCForDiagnostics() -{ - CONTRACTL - { - NOTHROW; - GC_TRIGGERS; - MODE_ANY; - } - CONTRACTL_END; - - HRESULT hr = E_FAIL; - - _ASSERTE(GCHeapUtilities::IsGCHeapInitialized()); - - ThreadStore::AttachCurrentThread(); - Thread* pThread = ThreadStore::GetCurrentThread(); - - // While doing the GC, much code assumes & asserts the thread doing the GC is in - // cooperative mode. 
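    // [Illustrative sketch, not part of this change] The Disable/Enable pair
    // below is the usual bracket; a hypothetical scoped holder would make the
    // pairing explicit and exception-safe:
    struct CoopModeHolder
    {
        Thread* m_pThread;
        explicit CoopModeHolder(Thread* pThread) : m_pThread(pThread)
            { m_pThread->DisablePreemptiveMode(); }   // enter cooperative mode
        ~CoopModeHolder()
            { m_pThread->EnablePreemptiveMode(); }    // restore preemptive mode on scope exit
    };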
- pThread->DisablePreemptiveMode(); - - hr = GCHeapUtilities::GetGCHeap()->GarbageCollect( - -1, // all generations should be collected - FALSE, // low_memory_p - collection_blocking); - - // In case this thread (generated by the ETW OS APIs) hangs around a while, - // better stick it back into preemptive mode, so it doesn't block any other GCs - pThread->EnablePreemptiveMode(); - - return hr; -} - - -//--------------------------------------------------------------------------------------- -// BulkTypeValue / BulkTypeEventLogger: These take care of batching up types so they can -// be logged via ETW in bulk -//--------------------------------------------------------------------------------------- - -BulkTypeValue::BulkTypeValue() - : cTypeParameters(0) - , rgTypeParameters() - , ullSingleTypeParameter(0) -{ - LIMITED_METHOD_CONTRACT; - ZeroMemory(&fixedSizedData, sizeof(fixedSizedData)); -} - -//--------------------------------------------------------------------------------------- -// -// Clears a BulkTypeValue so it can be reused after the buffer is flushed to ETW -// - -void BulkTypeValue::Clear() -{ - CONTRACTL - { - THROWS; - GC_NOTRIGGER; - MODE_ANY; - } - CONTRACTL_END; - - ZeroMemory(&fixedSizedData, sizeof(fixedSizedData)); - cTypeParameters = 0; - ullSingleTypeParameter = 0; - rgTypeParameters.Release(); -} - -//--------------------------------------------------------------------------------------- -// -// Fire an ETW event for all the types we batched so far, and then reset our state -// so we can start batching new types at the beginning of the array. -// -// - -void BulkTypeEventLogger::FireBulkTypeEvent() -{ - LIMITED_METHOD_CONTRACT; - - if (m_nBulkTypeValueCount == 0) - { - // No types were batched up, so nothing to send - return; - } - - // Normally, we'd use the MC-generated FireEtwBulkType for all this gunk, but - // it's insufficient as the bulk type event is too complex (arrays of structs of - // varying size). So we directly log the event via EventDataDescCreate and - // EventWrite - - // We use one descriptor for the count + one for the ClrInstanceID + 4 - // per batched type (to include fixed-size data + name + param count + param - // array). But the system limit of 128 descriptors per event kicks in way - // before the 64K event size limit, and we already limit our batch size - // (m_nBulkTypeValueCount) to stay within the 128 descriptor limit. 
- EVENT_DATA_DESCRIPTOR EventData[128]; - UINT16 nClrInstanceID = GetClrInstanceId(); - - UINT iDesc = 0; - - _ASSERTE(iDesc < _countof(EventData)); - EventDataDescCreate(&EventData[iDesc++], &m_nBulkTypeValueCount, sizeof(m_nBulkTypeValueCount)); - - _ASSERTE(iDesc < _countof(EventData)); - EventDataDescCreate(&EventData[iDesc++], &nClrInstanceID, sizeof(nClrInstanceID)); - - for (int iTypeData = 0; iTypeData < m_nBulkTypeValueCount; iTypeData++) - { - // Do fixed-size data as one bulk copy - _ASSERTE(iDesc < _countof(EventData)); - EventDataDescCreate( - &EventData[iDesc++], - &(m_rgBulkTypeValues[iTypeData].fixedSizedData), - sizeof(m_rgBulkTypeValues[iTypeData].fixedSizedData)); - - // Do var-sized data individually per field - - // Type name (nonexistent and thus empty on nativeaot) - _ASSERTE(iDesc < _countof(EventData)); - EventDataDescCreate(&EventData[iDesc++], L"", sizeof(WCHAR)); - - // Type parameter count - _ASSERTE(iDesc < _countof(EventData)); - EventDataDescCreate( - &EventData[iDesc++], - &(m_rgBulkTypeValues[iTypeData].cTypeParameters), - sizeof(m_rgBulkTypeValues[iTypeData].cTypeParameters)); - - // Type parameter array - if (m_rgBulkTypeValues[iTypeData].cTypeParameters > 0) - { - _ASSERTE(iDesc < _countof(EventData)); - EventDataDescCreate( - &EventData[iDesc++], - ((m_rgBulkTypeValues[iTypeData].cTypeParameters == 1) ? - &(m_rgBulkTypeValues[iTypeData].ullSingleTypeParameter) : - (ULONGLONG*)(m_rgBulkTypeValues[iTypeData].rgTypeParameters)), - sizeof(ULONGLONG) * m_rgBulkTypeValues[iTypeData].cTypeParameters); - } - } - - Win32EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &BulkType, iDesc, EventData); - - // Reset state - m_nBulkTypeValueCount = 0; - m_nBulkTypeValueByteCount = 0; -} - -// Holds state that batches of roots, nodes, edges, and types as the GC walks the heap -// at the end of a collection. -class EtwGcHeapDumpContext -{ -public: - // An instance of EtwGcHeapDumpContext is dynamically allocated and stored inside of - // ProfilingScanContext and ProfilerWalkHeapContext, which are context structures - // that the GC heap walker sends back to the callbacks. This method is passed a - // pointer to ProfilingScanContext::pvEtwContext or - // ProfilerWalkHeapContext::pvEtwContext; if non-NULL it gets returned; else, a new - // EtwGcHeapDumpContext is allocated, stored in that pointer, and then returned. - // Callers should test for NULL, which can be returned if out of memory - static EtwGcHeapDumpContext* GetOrCreateInGCContext(LPVOID* ppvEtwContext) - { - LIMITED_METHOD_CONTRACT; - - _ASSERTE(ppvEtwContext != NULL); - - EtwGcHeapDumpContext* pContext = (EtwGcHeapDumpContext*)*ppvEtwContext; - if (pContext == NULL) - { - pContext = new (nothrow) EtwGcHeapDumpContext; - *ppvEtwContext = pContext; - } - return pContext; - } - - EtwGcHeapDumpContext() : - iCurBulkRootEdge(0), - iCurBulkRootConditionalWeakTableElementEdge(0), - iCurBulkNodeEvent(0), - iCurBulkEdgeEvent(0), - bulkTypeEventLogger() - { - LIMITED_METHOD_CONTRACT; - ClearRootEdges(); - ClearRootConditionalWeakTableElementEdges(); - ClearNodes(); - ClearEdges(); - } - - // These helpers clear the individual buffers, for use after a flush and on - // construction. 
They intentionally leave the indices (iCur*) alone, since they - // persist across flushes within a GC - - void ClearRootEdges() - { - LIMITED_METHOD_CONTRACT; - cGcBulkRootEdges = 0; - ZeroMemory(rgGcBulkRootEdges, sizeof(rgGcBulkRootEdges)); - } - - void ClearRootConditionalWeakTableElementEdges() - { - LIMITED_METHOD_CONTRACT; - cGCBulkRootConditionalWeakTableElementEdges = 0; - ZeroMemory(rgGCBulkRootConditionalWeakTableElementEdges, sizeof(rgGCBulkRootConditionalWeakTableElementEdges)); - } - - void ClearNodes() - { - LIMITED_METHOD_CONTRACT; - cGcBulkNodeValues = 0; - ZeroMemory(rgGcBulkNodeValues, sizeof(rgGcBulkNodeValues)); - } - - void ClearEdges() - { - LIMITED_METHOD_CONTRACT; - cGcBulkEdgeValues = 0; - ZeroMemory(rgGcBulkEdgeValues, sizeof(rgGcBulkEdgeValues)); - } - - //--------------------------------------------------------------------------------------- - // GCBulkRootEdge - // - // A "root edge" is the relationship between a source "GCRootID" (i.e., stack - // variable, handle, static, etc.) and the target "RootedNodeAddress" (the managed - // object that gets rooted). - // - //--------------------------------------------------------------------------------------- - - // Sequence number for each GCBulkRootEdge event - UINT iCurBulkRootEdge; - - // Number of root edges currently filled out in rgGcBulkRootEdges array - UINT cGcBulkRootEdges; - - // Struct array containing the primary data for each GCBulkRootEdge event. Fix the size so - // the total event stays well below the 64K - // limit (leaving lots of room for non-struct fields that come before the root edge data) - EventStructGCBulkRootEdgeValue rgGcBulkRootEdges[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkRootEdgeValue)]; - - - //--------------------------------------------------------------------------------------- - // GCBulkRootConditionalWeakTableElementEdge - // - // These describe dependent handles, which simulate an edge connecting a key NodeID - // to a value NodeID. - // - //--------------------------------------------------------------------------------------- - - // Sequence number for each GCBulkRootConditionalWeakTableElementEdge event - UINT iCurBulkRootConditionalWeakTableElementEdge; - - // Number of root edges currently filled out in rgGCBulkRootConditionalWeakTableElementEdges array - UINT cGCBulkRootConditionalWeakTableElementEdges; - - // Struct array containing the primary data for each GCBulkRootConditionalWeakTableElementEdge event. Fix the size so - // the total event stays well below the 64K - // limit (leaving lots of room for non-struct fields that come before the root edge data) - EventStructGCBulkRootConditionalWeakTableElementEdgeValue rgGCBulkRootConditionalWeakTableElementEdges - [(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkRootConditionalWeakTableElementEdgeValue)]; - - //--------------------------------------------------------------------------------------- - // GCBulkNode - // - // A "node" is ANY managed object sitting on the heap, including RootedNodeAddresses - // as well as leaf nodes. - // - //--------------------------------------------------------------------------------------- - - // Sequence number for each GCBulkNode event - UINT iCurBulkNodeEvent; - - // Number of nodes currently filled out in rgGcBulkNodeValues array - UINT cGcBulkNodeValues; - - // Struct array containing the primary data for each GCBulkNode event. 
Fix the size so - // the total event stays well below the 64K - // limit (leaving lots of room for non-struct fields that come before the node data) - EventStructGCBulkNodeValue rgGcBulkNodeValues[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkNodeValue)]; - - //--------------------------------------------------------------------------------------- - // GCBulkEdge - // - // An "edge" is the relationship between a source node and its referenced target - // node. Edges are reported in bulk, separately from Nodes, but it is expected that - // the consumer read the Node and Edge streams together. One takes the first node - // from the Node stream, and then reads EdgeCount entries in the Edge stream, telling - // you all of that Node's targets. Then, one takes the next node in the Node stream, - // and reads the next entries in the Edge stream (using this Node's EdgeCount to - // determine how many) to find all of its targets. This continues on until the Node - // and Edge streams have been fully read. - // - // GCBulkRootEdges are not duplicated in the GCBulkEdge events. GCBulkEdge events - // begin at the GCBulkRootEdge.RootedNodeAddress and move forward. - // - //--------------------------------------------------------------------------------------- - - // Sequence number for each GCBulkEdge event - UINT iCurBulkEdgeEvent; - - // Number of nodes currently filled out in rgGcBulkEdgeValues array - UINT cGcBulkEdgeValues; - - // Struct array containing the primary data for each GCBulkEdge event. Fix the size so - // the total event stays well below the 64K - // limit (leaving lots of room for non-struct fields that come before the edge data) - EventStructGCBulkEdgeValue rgGcBulkEdgeValues[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkEdgeValue)]; - - - //--------------------------------------------------------------------------------------- - // BulkType - // - // Types are a bit more complicated to batch up, since their data is of varying - // size. BulkTypeEventLogger takes care of the pesky details for us - //--------------------------------------------------------------------------------------- - - BulkTypeEventLogger bulkTypeEventLogger; -}; - - - -//--------------------------------------------------------------------------------------- -// -// Called during a heap walk for each root reference encountered. Batches up the root in -// the ETW context -// -// Arguments: -// * pvHandle - If the root is a handle, this points to the handle -// * pRootedNode - Points to object that is rooted -// * pSecondaryNodeForDependentHandle - For dependent handles, this is the -// secondary object -// * fDependentHandle - nonzero iff this is for a dependent handle -// * profilingScanContext - The shared profapi/etw context built up during the heap walk. 
-// * dwGCFlags - Bitmask of "GC_"-style flags set by GC -// * rootFlags - Bitmask of EtwGCRootFlags describing the root -// - -// static -void ETW::GCLog::RootReference( - LPVOID pvHandle, - Object* pRootedNode, - Object* pSecondaryNodeForDependentHandle, - BOOL fDependentHandle, - ProfilingScanContext* profilingScanContext, - DWORD dwGCFlags, - DWORD rootFlags) -{ - LIMITED_METHOD_CONTRACT; - - if (pRootedNode == NULL) - return; - - EtwGcHeapDumpContext* pContext = - EtwGcHeapDumpContext::GetOrCreateInGCContext(&profilingScanContext->pvEtwContext); - if (pContext == NULL) - return; - - // Determine root kind, root ID, and handle-specific flags - LPVOID pvRootID = NULL; - BYTE nRootKind = (BYTE)profilingScanContext->dwEtwRootKind; - switch (nRootKind) - { - case kEtwGCRootKindStack: - break; - - case kEtwGCRootKindHandle: - pvRootID = pvHandle; - break; - - case kEtwGCRootKindFinalizer: - _ASSERTE(pvRootID == NULL); - break; - - case kEtwGCRootKindOther: - default: - _ASSERTE(nRootKind == kEtwGCRootKindOther); - _ASSERTE(pvRootID == NULL); - break; - } - - // Convert GC root flags to ETW root flags - if (dwGCFlags & GC_CALL_INTERIOR) - rootFlags |= kEtwGCRootFlagsInterior; - if (dwGCFlags & GC_CALL_PINNED) - rootFlags |= kEtwGCRootFlagsPinning; - - // Add root edge to appropriate buffer - if (fDependentHandle) - { - _ASSERTE(pContext->cGCBulkRootConditionalWeakTableElementEdges < - _countof(pContext->rgGCBulkRootConditionalWeakTableElementEdges)); - EventStructGCBulkRootConditionalWeakTableElementEdgeValue* pRCWTEEdgeValue = - &pContext->rgGCBulkRootConditionalWeakTableElementEdges[pContext->cGCBulkRootConditionalWeakTableElementEdges]; - pRCWTEEdgeValue->GCKeyNodeID = pRootedNode; - pRCWTEEdgeValue->GCValueNodeID = pSecondaryNodeForDependentHandle; - pRCWTEEdgeValue->GCRootID = pvRootID; - pContext->cGCBulkRootConditionalWeakTableElementEdges++; - - // If RCWTE edge buffer is now full, empty it into ETW - if (pContext->cGCBulkRootConditionalWeakTableElementEdges == - _countof(pContext->rgGCBulkRootConditionalWeakTableElementEdges)) - { - FireEtwGCBulkRootConditionalWeakTableElementEdge( - pContext->iCurBulkRootConditionalWeakTableElementEdge, - pContext->cGCBulkRootConditionalWeakTableElementEdges, - GetClrInstanceId(), - sizeof(pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]), - &pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]); - - pContext->iCurBulkRootConditionalWeakTableElementEdge++; - pContext->ClearRootConditionalWeakTableElementEdges(); - } - } - else - { - _ASSERTE(pContext->cGcBulkRootEdges < _countof(pContext->rgGcBulkRootEdges)); - EventStructGCBulkRootEdgeValue* pBulkRootEdgeValue = &pContext->rgGcBulkRootEdges[pContext->cGcBulkRootEdges]; - pBulkRootEdgeValue->RootedNodeAddress = pRootedNode; - pBulkRootEdgeValue->GCRootKind = nRootKind; - pBulkRootEdgeValue->GCRootFlag = rootFlags; - pBulkRootEdgeValue->GCRootID = pvRootID; - pContext->cGcBulkRootEdges++; - - // If root edge buffer is now full, empty it into ETW - if (pContext->cGcBulkRootEdges == _countof(pContext->rgGcBulkRootEdges)) - { - FireEtwGCBulkRootEdge( - pContext->iCurBulkRootEdge, - pContext->cGcBulkRootEdges, - GetClrInstanceId(), - sizeof(pContext->rgGcBulkRootEdges[0]), - &pContext->rgGcBulkRootEdges[0]); - - pContext->iCurBulkRootEdge++; - pContext->ClearRootEdges(); - } - } -} - - -//--------------------------------------------------------------------------------------- -// -// Called during a heap walk for each object reference encountered. 
Batches up the -// corresponding node, edges, and type data for the ETW events. -// -// Arguments: -// * profilerWalkHeapContext - The shared profapi/etw context built up during the heap walk. -// * pObjReferenceSource - Object doing the pointing -// * typeID - Type of pObjReferenceSource -// * fDependentHandle - nonzero iff this is for a dependent handle -// * cRefs - Count of objects being pointed to -// * rgObjReferenceTargets - Array of objects being pointed to -// - -// static -void ETW::GCLog::ObjectReference( - ProfilerWalkHeapContext* profilerWalkHeapContext, - Object* pObjReferenceSource, - ULONGLONG typeID, - ULONGLONG cRefs, - Object** rgObjReferenceTargets) -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - - // LogTypeAndParametersIfNecessary can take a lock - CAN_TAKE_LOCK; - } - CONTRACTL_END; - - EtwGcHeapDumpContext* pContext = - EtwGcHeapDumpContext::GetOrCreateInGCContext(&profilerWalkHeapContext->pvEtwContext); - if (pContext == NULL) - return; - - //--------------------------------------------------------------------------------------- - // GCBulkNode events - //--------------------------------------------------------------------------------------- - - // Add Node (pObjReferenceSource) to buffer - _ASSERTE(pContext->cGcBulkNodeValues < _countof(pContext->rgGcBulkNodeValues)); - EventStructGCBulkNodeValue* pBulkNodeValue = &pContext->rgGcBulkNodeValues[pContext->cGcBulkNodeValues]; - pBulkNodeValue->Address = pObjReferenceSource; - pBulkNodeValue->Size = pObjReferenceSource->GetSize(); - pBulkNodeValue->TypeID = typeID; - pBulkNodeValue->EdgeCount = cRefs; - pContext->cGcBulkNodeValues++; - - // If Node buffer is now full, empty it into ETW - if (pContext->cGcBulkNodeValues == _countof(pContext->rgGcBulkNodeValues)) - { - FireEtwGCBulkNode( - pContext->iCurBulkNodeEvent, - pContext->cGcBulkNodeValues, - GetClrInstanceId(), - sizeof(pContext->rgGcBulkNodeValues[0]), - &pContext->rgGcBulkNodeValues[0]); - - pContext->iCurBulkNodeEvent++; - pContext->ClearNodes(); - } - - //--------------------------------------------------------------------------------------- - // BulkType events - //--------------------------------------------------------------------------------------- - - // We send type information as necessary--only for nodes, and only for nodes that we - // haven't already sent type info for - if (typeID != 0) - { - ETW::TypeSystemLog::LogTypeAndParametersIfNecessary( - &pContext->bulkTypeEventLogger, // Batch up this type with others to minimize events - typeID, - - // During heap walk, GC holds the lock for us, so we can directly enter the - // hash to see if the type has already been logged - ETW::TypeSystemLog::kTypeLogBehaviorAssumeLockAndLogIfFirstTime - ); - } - - //--------------------------------------------------------------------------------------- - // GCBulkEdge events - //--------------------------------------------------------------------------------------- - - // Add Edges (rgObjReferenceTargets) to buffer. Buffer could fill up before all edges - // are added (it could even fill up multiple times during this one call if there are - // a lot of edges), so empty Edge buffer into ETW as we go along, as many times as we - // need. 
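    // [Illustrative sketch, not part of this change] The batch-and-flush idiom
    // used throughout this file, in miniature (names hypothetical):
    template <typename T, size_t N, typename FlushFn>
    void AddToBatch(T (&rgBuffer)[N], UINT& cFilled, const T& value, FlushFn flush)
    {
        rgBuffer[cFilled++] = value;   // append to the fixed-size buffer
        if (cFilled == N)              // on reaching capacity, emit one bulk event...
        {
            flush(rgBuffer, cFilled);
            cFilled = 0;               // ...and start refilling from the top
        }
    }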
- - for (ULONGLONG i = 0; i < cRefs; i++) - { - _ASSERTE(pContext->cGcBulkEdgeValues < _countof(pContext->rgGcBulkEdgeValues)); - EventStructGCBulkEdgeValue* pBulkEdgeValue = &pContext->rgGcBulkEdgeValues[pContext->cGcBulkEdgeValues]; - pBulkEdgeValue->Value = rgObjReferenceTargets[i]; - // FUTURE: ReferencingFieldID - pBulkEdgeValue->ReferencingFieldID = 0; - pContext->cGcBulkEdgeValues++; - - // If Edge buffer is now full, empty it into ETW - if (pContext->cGcBulkEdgeValues == _countof(pContext->rgGcBulkEdgeValues)) - { - FireEtwGCBulkEdge( - pContext->iCurBulkEdgeEvent, - pContext->cGcBulkEdgeValues, - GetClrInstanceId(), - sizeof(pContext->rgGcBulkEdgeValues[0]), - &pContext->rgGcBulkEdgeValues[0]); - - pContext->iCurBulkEdgeEvent++; - pContext->ClearEdges(); - } - } -} - -//--------------------------------------------------------------------------------------- -// -// Called by GC at end of heap dump to give us a convenient time to flush any remaining -// buffers of data to ETW -// -// Arguments: -// profilerWalkHeapContext - Context containing data we've batched up -// - -// static -void ETW::GCLog::EndHeapDump(ProfilerWalkHeapContext* profilerWalkHeapContext) -{ - LIMITED_METHOD_CONTRACT; - - // If context isn't already set up for us, then we haven't been collecting any data - // for ETW events. - EtwGcHeapDumpContext* pContext = (EtwGcHeapDumpContext*)profilerWalkHeapContext->pvEtwContext; - if (pContext == NULL) - return; - - // If the GC events are enabled, flush any remaining root, node, and / or edge data - if (ETW_TRACING_CATEGORY_ENABLED( - MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, - TRACE_LEVEL_INFORMATION, - CLR_GCHEAPDUMP_KEYWORD)) - { - if (pContext->cGcBulkRootEdges > 0) - { - FireEtwGCBulkRootEdge( - pContext->iCurBulkRootEdge, - pContext->cGcBulkRootEdges, - GetClrInstanceId(), - sizeof(pContext->rgGcBulkRootEdges[0]), - &pContext->rgGcBulkRootEdges[0]); - } - - if (pContext->cGCBulkRootConditionalWeakTableElementEdges > 0) - { - FireEtwGCBulkRootConditionalWeakTableElementEdge( - pContext->iCurBulkRootConditionalWeakTableElementEdge, - pContext->cGCBulkRootConditionalWeakTableElementEdges, - GetClrInstanceId(), - sizeof(pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]), - &pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]); - } - - if (pContext->cGcBulkNodeValues > 0) - { - FireEtwGCBulkNode( - pContext->iCurBulkNodeEvent, - pContext->cGcBulkNodeValues, - GetClrInstanceId(), - sizeof(pContext->rgGcBulkNodeValues[0]), - &pContext->rgGcBulkNodeValues[0]); - } - - if (pContext->cGcBulkEdgeValues > 0) - { - FireEtwGCBulkEdge( - pContext->iCurBulkEdgeEvent, - pContext->cGcBulkEdgeValues, - GetClrInstanceId(), - sizeof(pContext->rgGcBulkEdgeValues[0]), - &pContext->rgGcBulkEdgeValues[0]); - } - } - - // Ditto for type events - if (ETW_TRACING_CATEGORY_ENABLED( - MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, - TRACE_LEVEL_INFORMATION, - CLR_TYPE_KEYWORD)) - { - pContext->bulkTypeEventLogger.FireBulkTypeEvent(); - pContext->bulkTypeEventLogger.Cleanup(); - } - - // Delete any GC state built up in the context - profilerWalkHeapContext->pvEtwContext = NULL; - delete pContext; } diff --git a/src/coreclr/nativeaot/Runtime/eventtrace.h b/src/coreclr/nativeaot/Runtime/eventtrace.h index e4124c50568879..65118de893c2ba 100644 --- a/src/coreclr/nativeaot/Runtime/eventtrace.h +++ b/src/coreclr/nativeaot/Runtime/eventtrace.h @@ -185,18 +185,18 @@ namespace ETW static void MovedReference(BYTE * pbMemBlockStart, BYTE * pbMemBlockEnd, ptrdiff_t cbRelocDistance, 
size_t profilingContext, BOOL fCompacting, BOOL fAllowProfApiNotification = TRUE); static void EndMovedReferences(size_t profilingContext, BOOL fAllowProfApiNotification = TRUE); static void WalkStaticsAndCOMForETW(); -#ifndef FEATURE_NATIVEAOT - static void SendFinalizeObjectEvent(MethodTable * pMT, Object * pObj); -#endif // FEATURE_NATIVEAOT }; }; +#ifndef FEATURE_EVENT_TRACE +inline void ETW::GCLog::FireGcStart(ETW_GC_INFO * pGcInfo) { } +#endif + #ifndef FEATURE_ETW inline BOOL ETW::GCLog::ShouldWalkHeapObjectsForEtw() { return FALSE; } inline BOOL ETW::GCLog::ShouldWalkHeapRootsForEtw() { return FALSE; } inline BOOL ETW::GCLog::ShouldTrackMovementForEtw() { return FALSE; } inline BOOL ETW::GCLog::ShouldWalkStaticsAndCOMForEtw() { return FALSE; } -inline void ETW::GCLog::FireGcStart(ETW_GC_INFO * pGcInfo) { } inline void ETW::GCLog::EndHeapDump(ProfilerWalkHeapContext * profilerWalkHeapContext) { } inline void ETW::GCLog::BeginMovedReferences(size_t * pProfilingContext) { } inline void ETW::GCLog::MovedReference(BYTE * pbMemBlockStart, BYTE * pbMemBlockEnd, ptrdiff_t cbRelocDistance, size_t profilingContext, BOOL fCompacting, BOOL fAllowProfApiNotification) { } diff --git a/src/coreclr/nativeaot/Runtime/rheventtrace.cpp b/src/coreclr/nativeaot/Runtime/eventtrace_bulktype.cpp similarity index 71% rename from src/coreclr/nativeaot/Runtime/rheventtrace.cpp rename to src/coreclr/nativeaot/Runtime/eventtrace_bulktype.cpp index 32c01f26cae264..75c9ed9dd60832 100644 --- a/src/coreclr/nativeaot/Runtime/rheventtrace.cpp +++ b/src/coreclr/nativeaot/Runtime/eventtrace_bulktype.cpp @@ -1,16 +1,10 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -// -// Redhawk-specific ETW helper code. -// -// When Redhawk does stuff substantially different from desktop CLR, the -// Redhawk-specific implementations should go here. -// #include "common.h" #include "gcenv.h" -#include "rheventtrace.h" #include "eventtrace.h" +#include "eventtrace_etw.h" #include "rhbinder.h" #include "slist.h" #include "runtimeinstance.h" @@ -20,71 +14,124 @@ #if defined(FEATURE_EVENT_TRACE) +#define Win32EventWrite PalEventWrite + +//--------------------------------------------------------------------------------------- +// BulkTypeValue / BulkTypeEventLogger: These take care of batching up types so they can +// be logged via ETW in bulk //--------------------------------------------------------------------------------------- -// BulkTypeEventLogger is a helper class to batch up type information and then flush to -// ETW once the event reaches its max # descriptors +BulkTypeValue::BulkTypeValue() + : cTypeParameters(0) + , rgTypeParameters() + , ullSingleTypeParameter(0) +{ + LIMITED_METHOD_CONTRACT; + ZeroMemory(&fixedSizedData, sizeof(fixedSizedData)); +} //--------------------------------------------------------------------------------------- // -// Batches up ETW information for a type and pops out to recursively call -// ETW::TypeSystemLog::LogTypeAndParametersIfNecessary for any -// "type parameters". Generics info is not reliably available, so "type parameter" -// really just refers to the type of array elements if thAsAddr is an array. 
-// -// Arguments: -// * thAsAddr - MethodTable to log -// * typeLogBehavior - Ignored in Redhawk builds +// Clears a BulkTypeValue so it can be reused after the buffer is flushed to ETW // -void BulkTypeEventLogger::LogTypeAndParameters(uint64_t thAsAddr, ETW::TypeSystemLog::TypeLogBehavior typeLogBehavior) +void BulkTypeValue::Clear() { - if (!ETW_TRACING_CATEGORY_ENABLED( - MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, - TRACE_LEVEL_INFORMATION, - CLR_TYPE_KEYWORD)) + CONTRACTL { - return; + THROWS; + GC_NOTRIGGER; + MODE_ANY; } + CONTRACTL_END; - MethodTable * pEEType = (MethodTable *) thAsAddr; + ZeroMemory(&fixedSizedData, sizeof(fixedSizedData)); + cTypeParameters = 0; + ullSingleTypeParameter = 0; + rgTypeParameters.Release(); +} - // Batch up this type. This grabs useful info about the type, including any - // type parameters it may have, and sticks it in m_rgBulkTypeValues - int iBulkTypeEventData = LogSingleType(pEEType); - if (iBulkTypeEventData == -1) +//--------------------------------------------------------------------------------------- +// BulkTypeEventLogger is a helper class to batch up type information and then flush to +// ETW once the event reaches its max # descriptors + +//--------------------------------------------------------------------------------------- +// +// Fire an ETW event for all the types we batched so far, and then reset our state +// so we can start batching new types at the beginning of the array. +// + +void BulkTypeEventLogger::FireBulkTypeEvent() +{ + LIMITED_METHOD_CONTRACT; + + if (m_nBulkTypeValueCount == 0) { - // There was a failure trying to log the type, so don't bother with its type - // parameters + // No types were batched up, so nothing to send return; } - // Look at the type info we just batched, so we can get the type parameters - BulkTypeValue * pVal = &m_rgBulkTypeValues[iBulkTypeEventData]; + // Normally, we'd use the MC-generated FireEtwBulkType for all this gunk, but + // it's insufficient as the bulk type event is too complex (arrays of structs of + // varying size). So we directly log the event via EventDataDescCreate and + // EventWrite - // We're about to recursively call ourselves for the type parameters, so make a - // local copy of their type handles first (else, as we log them we could flush - // and clear out m_rgBulkTypeValues, thus trashing pVal) - NewArrayHolder rgTypeParameters; - DWORD cTypeParams = pVal->cTypeParameters; - if (cTypeParams == 1) - { - ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(this, pVal->ullSingleTypeParameter, typeLogBehavior); - } - else if (cTypeParams > 1) - { - rgTypeParameters = new (nothrow) ULONGLONG[cTypeParams]; - for (DWORD i=0; i < cTypeParams; i++) - { - rgTypeParameters[i] = pVal->rgTypeParameters[i]; - } + // We use one descriptor for the count + one for the ClrInstanceID + 4 + // per batched type (to include fixed-size data + name + param count + param + // array). But the system limit of 128 descriptors per event kicks in way + // before the 64K event size limit, and we already limit our batch size + // (m_nBulkTypeValueCount) to stay within the 128 descriptor limit. 
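    // [Worked out, for illustration] The descriptor budget: 2 fixed descriptors
    // (count + ClrInstanceID) plus up to 4 per batched type means at most
    // (128 - 2) / 4 = 31 types fit in a single event, so the batch is flushed
    // long before the 64K payload limit is approached.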
+ EVENT_DATA_DESCRIPTOR EventData[128]; + UINT16 nClrInstanceID = GetClrInstanceId(); - // Recursively log any referenced parameter types - for (DWORD i=0; i < cTypeParams; i++) + UINT iDesc = 0; + + _ASSERTE(iDesc < _countof(EventData)); + EventDataDescCreate(&EventData[iDesc++], &m_nBulkTypeValueCount, sizeof(m_nBulkTypeValueCount)); + + _ASSERTE(iDesc < _countof(EventData)); + EventDataDescCreate(&EventData[iDesc++], &nClrInstanceID, sizeof(nClrInstanceID)); + + for (int iTypeData = 0; iTypeData < m_nBulkTypeValueCount; iTypeData++) + { + // Do fixed-size data as one bulk copy + _ASSERTE(iDesc < _countof(EventData)); + EventDataDescCreate( + &EventData[iDesc++], + &(m_rgBulkTypeValues[iTypeData].fixedSizedData), + sizeof(m_rgBulkTypeValues[iTypeData].fixedSizedData)); + + // Do var-sized data individually per field + + // Type name (nonexistent and thus empty on nativeaot) + _ASSERTE(iDesc < _countof(EventData)); + EventDataDescCreate(&EventData[iDesc++], L"", sizeof(WCHAR)); + + // Type parameter count + _ASSERTE(iDesc < _countof(EventData)); + EventDataDescCreate( + &EventData[iDesc++], + &(m_rgBulkTypeValues[iTypeData].cTypeParameters), + sizeof(m_rgBulkTypeValues[iTypeData].cTypeParameters)); + + // Type parameter array + if (m_rgBulkTypeValues[iTypeData].cTypeParameters > 0) { - ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(this, rgTypeParameters[i], typeLogBehavior); + _ASSERTE(iDesc < _countof(EventData)); + EventDataDescCreate( + &EventData[iDesc++], + ((m_rgBulkTypeValues[iTypeData].cTypeParameters == 1) ? + &(m_rgBulkTypeValues[iTypeData].ullSingleTypeParameter) : + (ULONGLONG*)(m_rgBulkTypeValues[iTypeData].rgTypeParameters)), + sizeof(ULONGLONG) * m_rgBulkTypeValues[iTypeData].cTypeParameters); } } + + Win32EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &BulkType, iDesc, EventData); + + // Reset state + m_nBulkTypeValueCount = 0; + m_nBulkTypeValueByteCount = 0; } // We keep a hash of these to keep track of: @@ -311,37 +358,21 @@ int BulkTypeEventLogger::LogSingleType(MethodTable * pEEType) return m_nBulkTypeValueCount - 1; // Index of type we just added } - -void BulkTypeEventLogger::Cleanup() -{ - if (s_loggedTypesHash != NULL) - { - delete s_loggedTypesHash; - s_loggedTypesHash = NULL; - } -} - -#endif // defined(FEATURE_EVENT_TRACE) - - //--------------------------------------------------------------------------------------- // -// Outermost level of ETW-type-logging. Clients outside (rh)eventtrace.cpp call this to log -// an EETypes and (recursively) its type parameters when present. This guy then calls -// into the appropriate BulkTypeEventLogger to do the batching and logging +// Batches up ETW information for a type and pops out to recursively call +// ETW::TypeSystemLog::LogTypeAndParametersIfNecessary for any +// "type parameters". Generics info is not reliably available, so "type parameter" +// really just refers to the type of array elements if thAsAddr is an array. // // Arguments: -// * pBulkTypeEventLogger - If our caller is keeping track of batched types, it -// passes this to us so we can use it to batch the current type (GC heap walk -// does this). In Redhawk builds this should not be NULL. 
-// * thAsAddr - MethodTable to batch -// * typeLogBehavior - Unused in Redhawk builds +// * thAsAddr - MethodTable to log +// * typeLogBehavior - Ignored in Redhawk builds // -void ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(BulkTypeEventLogger * pLogger, uint64_t thAsAddr, ETW::TypeSystemLog::TypeLogBehavior typeLogBehavior) +void BulkTypeEventLogger::LogTypeAndParameters(uint64_t thAsAddr) { -#if defined(FEATURE_EVENT_TRACE) - + // BulkTypeEventLogger currently fires ETW events only if (!ETW_TRACING_CATEGORY_ENABLED( MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, @@ -350,8 +381,53 @@ void ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(BulkTypeEventLogger * p return; } - _ASSERTE(pLogger != NULL); - pLogger->LogTypeAndParameters(thAsAddr, typeLogBehavior); + MethodTable * pEEType = (MethodTable *) thAsAddr; -#endif // defined(FEATURE_EVENT_TRACE) + // Batch up this type. This grabs useful info about the type, including any + // type parameters it may have, and sticks it in m_rgBulkTypeValues + int iBulkTypeEventData = LogSingleType(pEEType); + if (iBulkTypeEventData == -1) + { + // There was a failure trying to log the type, so don't bother with its type + // parameters + return; + } + + // Look at the type info we just batched, so we can get the type parameters + BulkTypeValue * pVal = &m_rgBulkTypeValues[iBulkTypeEventData]; + + // We're about to recursively call ourselves for the type parameters, so make a + // local copy of their type handles first (else, as we log them we could flush + // and clear out m_rgBulkTypeValues, thus trashing pVal) + NewArrayHolder rgTypeParameters; + DWORD cTypeParams = pVal->cTypeParameters; + if (cTypeParams == 1) + { + LogTypeAndParameters(pVal->ullSingleTypeParameter); + } + else if (cTypeParams > 1) + { + rgTypeParameters = new (nothrow) ULONGLONG[cTypeParams]; + for (DWORD i=0; i < cTypeParams; i++) + { + rgTypeParameters[i] = pVal->rgTypeParameters[i]; + } + + // Recursively log any referenced parameter types + for (DWORD i=0; i < cTypeParams; i++) + { + LogTypeAndParameters(rgTypeParameters[i]); + } + } +} + +void BulkTypeEventLogger::Cleanup() +{ + if (s_loggedTypesHash != NULL) + { + delete s_loggedTypesHash; + s_loggedTypesHash = NULL; + } } + +#endif // defined(FEATURE_EVENT_TRACE) diff --git a/src/coreclr/nativeaot/Runtime/eventtrace_etw.h b/src/coreclr/nativeaot/Runtime/eventtrace_etw.h new file mode 100644 index 00000000000000..9dd7e2c20202c2 --- /dev/null +++ b/src/coreclr/nativeaot/Runtime/eventtrace_etw.h @@ -0,0 +1,41 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +// +// This header provides Redhawk-specific ETW code and macros, to allow sharing of common +// ETW code between Redhawk and desktop CLR. +// +#ifndef EVENTTRACE_ETW_H +#define EVENTTRACE_ETW_H + +#ifdef FEATURE_ETW + +// Map the CLR private provider to our version so we can avoid inserting more #ifdef's in the code. 
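// [Illustrative, assumed usage — not part of this header] With these aliases in
// place, shared code written against the desktop provider names compiles
// unchanged, e.g.:
//
//     if (ETW_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
//                              TRACE_LEVEL_INFORMATION, CLR_GC_KEYWORD))
//     {
//         // resolves against MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_Context here
//     }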
+#define MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_Context +#define MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_Context +#define Microsoft_Windows_DotNETRuntimeHandle Microsoft_Windows_Redhawk_GC_PublicHandle + +#undef ETW_TRACING_INITIALIZED +#define ETW_TRACING_INITIALIZED(RegHandle) (RegHandle != NULL) + +#undef ETW_CATEGORY_ENABLED +#define ETW_CATEGORY_ENABLED(Context, LevelParam, Keyword) \ + (Context.IsEnabled && \ + ( \ + (LevelParam <= ((Context).Level)) || \ + ((Context.Level) == 0) \ + ) && \ + ( \ + (Keyword == (ULONGLONG)0) || \ + ( \ + (Keyword & (Context.MatchAnyKeyword)) && \ + ( \ + (Keyword & (Context.MatchAllKeyword)) == (Context.MatchAllKeyword) \ + ) \ + ) \ + ) \ + ) + +#endif // FEATURE_ETW + +#endif // EVENTTRACE_ETW_H diff --git a/src/coreclr/nativeaot/Runtime/eventtrace_gcheap.cpp b/src/coreclr/nativeaot/Runtime/eventtrace_gcheap.cpp new file mode 100644 index 00000000000000..f01f7b0948f03d --- /dev/null +++ b/src/coreclr/nativeaot/Runtime/eventtrace_gcheap.cpp @@ -0,0 +1,965 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// +// File: eventtrace_gcheap.cpp +// Event Tracing support for GC heap dump and movement tracking +// + +#include "common.h" + +#include "gcenv.h" +#include "gcheaputilities.h" + +#include "daccess.h" + +#include "slist.h" +#include "varint.h" +#include "regdisplay.h" +#include "StackFrameIterator.h" +#include "thread.h" +#include "threadstore.h" +#include "threadstore.inl" + +#include "eventtracepriv.h" + +/****************************************************************************/ +/* Methods that are called from the runtime */ +/****************************************************************************/ + + +// Simple helpers called by the GC to decide whether it needs to do a walk of heap +// objects and / or roots. + +BOOL ETW::GCLog::ShouldWalkHeapObjectsForEtw() +{ + LIMITED_METHOD_CONTRACT; + return RUNTIME_PROVIDER_CATEGORY_ENABLED( + TRACE_LEVEL_INFORMATION, + CLR_GCHEAPDUMP_KEYWORD); +} + +BOOL ETW::GCLog::ShouldWalkHeapRootsForEtw() +{ + LIMITED_METHOD_CONTRACT; + return RUNTIME_PROVIDER_CATEGORY_ENABLED( + TRACE_LEVEL_INFORMATION, + CLR_GCHEAPDUMP_KEYWORD); +} + +BOOL ETW::GCLog::ShouldTrackMovementForEtw() +{ + LIMITED_METHOD_CONTRACT; + return RUNTIME_PROVIDER_CATEGORY_ENABLED( + TRACE_LEVEL_INFORMATION, + CLR_GCHEAPSURVIVALANDMOVEMENT_KEYWORD); +} + +BOOL ETW::GCLog::ShouldWalkStaticsAndCOMForEtw() +{ + // @TODO: + return FALSE; +} + +void ETW::GCLog::WalkStaticsAndCOMForETW() +{ + // @TODO: +} + +// Batches the list of moved/surviving references for the GCBulkMovedObjectRanges / +// GCBulkSurvivingObjectRanges events +struct EtwGcMovementContext +{ +public: + // An instance of EtwGcMovementContext is dynamically allocated and stored + // inside of MovedReferenceContextForEtwAndProfapi, which in turn is dynamically + // allocated and pointed to by a profiling_context pointer created by the GC on the stack. + // This is used to batch and send GCBulkSurvivingObjectRanges events and + // GCBulkMovedObjectRanges events. This method is passed a pointer to + // MovedReferenceContextForEtwAndProfapi::pctxEtw; if non-NULL it gets returned; + // else, a new EtwGcMovementContext is allocated, stored in that pointer, and + // then returned. 
Callers should test for NULL, which can be returned if out of + // memory + static EtwGcMovementContext* GetOrCreateInGCContext(EtwGcMovementContext** ppContext) + { + LIMITED_METHOD_CONTRACT; + + _ASSERTE(ppContext != NULL); + + EtwGcMovementContext* pContext = *ppContext; + if (pContext == NULL) + { + pContext = new (nothrow) EtwGcMovementContext; + *ppContext = pContext; + } + return pContext; + } + + EtwGcMovementContext() : + iCurBulkSurvivingObjectRanges(0), + iCurBulkMovedObjectRanges(0) + { + LIMITED_METHOD_CONTRACT; + Clear(); + } + + // Resets structure for reuse on construction, and after each flush. + // (Intentionally leave iCurBulk* as is, since they persist across flushes within a GC.) + void Clear() + { + LIMITED_METHOD_CONTRACT; + cBulkSurvivingObjectRanges = 0; + cBulkMovedObjectRanges = 0; + ZeroMemory(rgGCBulkSurvivingObjectRanges, sizeof(rgGCBulkSurvivingObjectRanges)); + ZeroMemory(rgGCBulkMovedObjectRanges, sizeof(rgGCBulkMovedObjectRanges)); + } + + //--------------------------------------------------------------------------------------- + // GCBulkSurvivingObjectRanges + //--------------------------------------------------------------------------------------- + + // Sequence number for each GCBulkSurvivingObjectRanges event + UINT iCurBulkSurvivingObjectRanges; + + // Number of surviving object ranges currently filled out in rgGCBulkSurvivingObjectRanges array + UINT cBulkSurvivingObjectRanges; + + // Struct array containing the primary data for each GCBulkSurvivingObjectRanges + // event. Fix the size so the total event stays well below the 64K limit (leaving + // lots of room for non-struct fields that come before the values data) + EventStructGCBulkSurvivingObjectRangesValue rgGCBulkSurvivingObjectRanges[ + (cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkSurvivingObjectRangesValue)]; + + //--------------------------------------------------------------------------------------- + // GCBulkMovedObjectRanges + //--------------------------------------------------------------------------------------- + + // Sequence number for each GCBulkMovedObjectRanges event + UINT iCurBulkMovedObjectRanges; + + // Number of Moved object ranges currently filled out in rgGCBulkMovedObjectRanges array + UINT cBulkMovedObjectRanges; + + // Struct array containing the primary data for each GCBulkMovedObjectRanges + // event. Fix the size so the total event stays well below the 64K limit (leaving + // lots of room for non-struct fields that come before the values data) + EventStructGCBulkMovedObjectRangesValue rgGCBulkMovedObjectRanges[ + (cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkMovedObjectRangesValue)]; +}; + +// Contains above struct for ETW, plus extra info (opaque to us) used by the profiling +// API to track its own information. +struct MovedReferenceContextForEtwAndProfapi +{ + // An instance of MovedReferenceContextForEtwAndProfapi is dynamically allocated and + // pointed to by a profiling_context pointer created by the GC on the stack. This is used to + // batch and send GCBulkSurvivingObjectRanges events and GCBulkMovedObjectRanges + // events and the corresponding callbacks for profapi profilers. This method is + // passed a pointer to a MovedReferenceContextForEtwAndProfapi; if non-NULL it gets + // returned; else, a new MovedReferenceContextForEtwAndProfapi is allocated, stored + // in that pointer, and then returned. 
Callers should test for NULL, which can be + // returned if out of memory + static MovedReferenceContextForEtwAndProfapi* CreateInGCContext(LPVOID pvContext) + { + LIMITED_METHOD_CONTRACT; + + _ASSERTE(pvContext != NULL); + + MovedReferenceContextForEtwAndProfapi* pContext = *(MovedReferenceContextForEtwAndProfapi**)pvContext; + + // Shouldn't be called if the context was already created. Perhaps someone made + // one too many BeginMovedReferences calls, or didn't have an EndMovedReferences + // in between? + _ASSERTE(pContext == NULL); + + pContext = new (nothrow) MovedReferenceContextForEtwAndProfapi; + *(MovedReferenceContextForEtwAndProfapi**)pvContext = pContext; + + return pContext; + } + + + MovedReferenceContextForEtwAndProfapi() : + pctxProfAPI(NULL), + pctxEtw(NULL) + + { + LIMITED_METHOD_CONTRACT; + } + + LPVOID pctxProfAPI; + EtwGcMovementContext* pctxEtw; +}; + + +//--------------------------------------------------------------------------------------- +// +// Called by the GC for each moved or surviving reference that it encounters. This +// batches the info into our context's buffer, and flushes that buffer to ETW as it fills +// up. +// +// Arguments: +// * pbMemBlockStart - Start of moved/surviving block +// * pbMemBlockEnd - Next pointer after end of moved/surviving block +// * cbRelocDistance - How far did the block move? (0 for non-compacted / surviving +// references; negative if moved to earlier addresses) +// * profilingContext - Where our context is stored +// * fCompacting - Is this a compacting GC? Used to decide whether to send the moved +// or surviving event +// + +// static +void ETW::GCLog::MovedReference( + BYTE* pbMemBlockStart, + BYTE* pbMemBlockEnd, + ptrdiff_t cbRelocDistance, + size_t profilingContext, + BOOL fCompacting, + BOOL /*fAllowProfApiNotification*/) // @TODO: unused param from newer implementation +{ + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + MODE_ANY; + CAN_TAKE_LOCK; // EEToProfInterfaceImpl::AllocateMovedReferencesData takes lock + } + CONTRACTL_END; + + MovedReferenceContextForEtwAndProfapi* pCtxForEtwAndProfapi = + (MovedReferenceContextForEtwAndProfapi*)profilingContext; + if (pCtxForEtwAndProfapi == NULL) + { + _ASSERTE(!"MovedReference() encountered a NULL profilingContext"); + return; + } + +#ifdef PROFILING_SUPPORTED + // ProfAPI + { + BEGIN_PIN_PROFILER(CORProfilerTrackGC()); + g_profControlBlock.pProfInterface->MovedReference(pbMemBlockStart, + pbMemBlockEnd, + cbRelocDistance, + &(pCtxForEtwAndProfapi->pctxProfAPI), + fCompacting); + END_PIN_PROFILER(); + } +#endif // PROFILING_SUPPORTED + + // ETW + + if (!ShouldTrackMovementForEtw()) + return; + + EtwGcMovementContext* pContext = + EtwGcMovementContext::GetOrCreateInGCContext(&pCtxForEtwAndProfapi->pctxEtw); + if (pContext == NULL) + return; + + if (fCompacting) + { + // Moved references + + _ASSERTE(pContext->cBulkMovedObjectRanges < _countof(pContext->rgGCBulkMovedObjectRanges)); + EventStructGCBulkMovedObjectRangesValue* pValue = + &pContext->rgGCBulkMovedObjectRanges[pContext->cBulkMovedObjectRanges]; + pValue->OldRangeBase = pbMemBlockStart; + pValue->NewRangeBase = pbMemBlockStart + cbRelocDistance; + pValue->RangeLength = pbMemBlockEnd - pbMemBlockStart; + pContext->cBulkMovedObjectRanges++; + + // If buffer is now full, empty it into ETW + if (pContext->cBulkMovedObjectRanges == _countof(pContext->rgGCBulkMovedObjectRanges)) + { + FireEtwGCBulkMovedObjectRanges( + pContext->iCurBulkMovedObjectRanges, + pContext->cBulkMovedObjectRanges, + GetClrInstanceId(), + 
sizeof(pContext->rgGCBulkMovedObjectRanges[0]), + &pContext->rgGCBulkMovedObjectRanges[0]); + + pContext->iCurBulkMovedObjectRanges++; + pContext->Clear(); + } + } + else + { + // Surviving references + + _ASSERTE(pContext->cBulkSurvivingObjectRanges < _countof(pContext->rgGCBulkSurvivingObjectRanges)); + EventStructGCBulkSurvivingObjectRangesValue* pValue = + &pContext->rgGCBulkSurvivingObjectRanges[pContext->cBulkSurvivingObjectRanges]; + pValue->RangeBase = pbMemBlockStart; + pValue->RangeLength = pbMemBlockEnd - pbMemBlockStart; + pContext->cBulkSurvivingObjectRanges++; + + // If buffer is now full, empty it into ETW + if (pContext->cBulkSurvivingObjectRanges == _countof(pContext->rgGCBulkSurvivingObjectRanges)) + { + FireEtwGCBulkSurvivingObjectRanges( + pContext->iCurBulkSurvivingObjectRanges, + pContext->cBulkSurvivingObjectRanges, + GetClrInstanceId(), + sizeof(pContext->rgGCBulkSurvivingObjectRanges[0]), + &pContext->rgGCBulkSurvivingObjectRanges[0]); + + pContext->iCurBulkSurvivingObjectRanges++; + pContext->Clear(); + } + } +} + + +//--------------------------------------------------------------------------------------- +// +// Called by the GC just before it begins enumerating plugs. Gives us a chance to +// allocate our context structure, to allow us to batch plugs before firing events +// for them +// +// Arguments: +// * pProfilingContext - Points to location on stack (in GC function) where we can +// store a pointer to the context we allocate +// + +// static +void ETW::GCLog::BeginMovedReferences(size_t* pProfilingContext) +{ + LIMITED_METHOD_CONTRACT; + + MovedReferenceContextForEtwAndProfapi::CreateInGCContext(LPVOID(pProfilingContext)); +} + + +//--------------------------------------------------------------------------------------- +// +// Called by the GC at the end of a heap walk to give us a place to flush any remaining +// buffers of data to ETW or the profapi profiler +// +// Arguments: +// profilingContext - Our context we built up during the heap walk +// + +// static +void ETW::GCLog::EndMovedReferences(size_t profilingContext, + BOOL /*fAllowProfApiNotification*/) // @TODO: unused param from newer implementation +{ + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + MODE_ANY; + CAN_TAKE_LOCK; + } + CONTRACTL_END; + + MovedReferenceContextForEtwAndProfapi* pCtxForEtwAndProfapi = (MovedReferenceContextForEtwAndProfapi*)profilingContext; + if (pCtxForEtwAndProfapi == NULL) + { + _ASSERTE(!"EndMovedReferences() encountered a NULL profilingContext"); + return; + } + +#ifdef PROFILING_SUPPORTED + // ProfAPI + { + BEGIN_PIN_PROFILER(CORProfilerTrackGC()); + g_profControlBlock.pProfInterface->EndMovedReferences(&(pCtxForEtwAndProfapi->pctxProfAPI)); + END_PIN_PROFILER(); + } +#endif //PROFILING_SUPPORTED + + // ETW + + if (!ShouldTrackMovementForEtw()) + return; + + // If context isn't already set up for us, then we haven't been collecting any data + // for ETW events. 
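// (BeginMovedReferences allocates only the shared outer context; the ETW-side
// EtwGcMovementContext is created lazily by the first MovedReference call, so a
// NULL pctxEtw here just means no movement data was batched during this GC.)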
+    EtwGcMovementContext* pContext = pCtxForEtwAndProfapi->pctxEtw;
+    if (pContext == NULL)
+        return;
+
+    // Flush any remaining moved or surviving range data
+
+    if (pContext->cBulkMovedObjectRanges > 0)
+    {
+        FireEtwGCBulkMovedObjectRanges(
+            pContext->iCurBulkMovedObjectRanges,
+            pContext->cBulkMovedObjectRanges,
+            GetClrInstanceId(),
+            sizeof(pContext->rgGCBulkMovedObjectRanges[0]),
+            &pContext->rgGCBulkMovedObjectRanges[0]);
+    }
+
+    if (pContext->cBulkSurvivingObjectRanges > 0)
+    {
+        FireEtwGCBulkSurvivingObjectRanges(
+            pContext->iCurBulkSurvivingObjectRanges,
+            pContext->cBulkSurvivingObjectRanges,
+            GetClrInstanceId(),
+            sizeof(pContext->rgGCBulkSurvivingObjectRanges[0]),
+            &pContext->rgGCBulkSurvivingObjectRanges[0]);
+    }
+
+    pCtxForEtwAndProfapi->pctxEtw = NULL;
+    delete pContext;
+}
+
+// This implements the public runtime provider's GCHeapCollectKeyword. It
+// performs a full, gen-2, blocking GC.
+void ETW::GCLog::ForceGC(LONGLONG l64ClientSequenceNumber)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_TRIGGERS;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+    if (!GCHeapUtilities::IsGCHeapInitialized())
+        return;
+
+    // No InterlockedExchange64 on Redhawk, even though there is one for
+    // InterlockedCompareExchange64. Technically, there's a race here by using
+    // InterlockedCompareExchange64, but it's not worth addressing. The race would be
+    // between two ETW controllers trying to trigger GCs simultaneously, in which case
+    // one will win and get its sequence number to appear in the GCStart event, while the
+    // other will lose. Rare, uninteresting, and low-impact.
+    PalInterlockedCompareExchange64(&s_l64LastClientSequenceNumber, l64ClientSequenceNumber, s_l64LastClientSequenceNumber);
+
+    ForceGCForDiagnostics();
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Contains code common to profapi and ETW scenarios where the profiler wants to force
+// the CLR to perform a GC. The important work here is to create a managed thread for
+// the current thread BEFORE the GC begins. On both ETW and profapi threads, there may
+// not yet be a managed thread object. But some scenarios require a managed thread
+// object be present.
+//
+// Return Value:
+//      HRESULT indicating success or failure
+//
+// Assumptions:
+//      Caller should ensure that the EE has fully started up and that the GC heap is
+//      initialized enough to actually perform a GC
+//
+
+// static
+HRESULT ETW::GCLog::ForceGCForDiagnostics()
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_TRIGGERS;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+    HRESULT hr = E_FAIL;
+
+    _ASSERTE(GCHeapUtilities::IsGCHeapInitialized());
+
+    ThreadStore::AttachCurrentThread();
+    Thread* pThread = ThreadStore::GetCurrentThread();
+
+    // While doing the GC, much code assumes & asserts the thread doing the GC is in
+    // cooperative mode.
+    pThread->DisablePreemptiveMode();
+
+    hr = GCHeapUtilities::GetGCHeap()->GarbageCollect(
+        -1,     // all generations should be collected
+        FALSE,  // low_memory_p
+        collection_blocking);
+
+    // In case this thread (generated by the ETW OS APIs) hangs around a while,
+    // better stick it back into preemptive mode, so it doesn't block any other GCs
+    pThread->EnablePreemptiveMode();
+
+    return hr;
+}
+
+// Holds state that batches roots, nodes, edges, and types as the GC walks the heap
+// at the end of a collection.
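// To make the batching concrete: a heap-dump consumer pairs the Node and Edge
// streams by walking them in lockstep, consuming exactly EdgeCount edge records
// for each node record. An informal sketch (NodeRecord, EdgeRecord, and Process
// are invented names for illustration, not runtime types):
//
//     struct NodeRecord { void* Address; ULONGLONG Size; ULONGLONG TypeID; ULONGLONG EdgeCount; };
//     struct EdgeRecord { void* Value; ULONG ReferencingFieldID; };
//
//     size_t iEdge = 0;
//     for (size_t iNode = 0; iNode < cNodes; iNode++)
//         for (ULONGLONG i = 0; i < nodes[iNode].EdgeCount; i++)
//             Process(nodes[iNode], edges[iEdge++]);
//
// Each rg* buffer below is sized as (cbMaxEtwEvent - 0x100) / sizeof(entry),
// i.e. the 64K event ceiling minus a 0x100-byte allowance for the event's scalar
// fields, so a full batch always fits in a single ETW event payload.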
+class EtwGcHeapDumpContext +{ +public: + // An instance of EtwGcHeapDumpContext is dynamically allocated and stored inside of + // ProfilingScanContext and ProfilerWalkHeapContext, which are context structures + // that the GC heap walker sends back to the callbacks. This method is passed a + // pointer to ProfilingScanContext::pvEtwContext or + // ProfilerWalkHeapContext::pvEtwContext; if non-NULL it gets returned; else, a new + // EtwGcHeapDumpContext is allocated, stored in that pointer, and then returned. + // Callers should test for NULL, which can be returned if out of memory + static EtwGcHeapDumpContext* GetOrCreateInGCContext(LPVOID* ppvEtwContext) + { + LIMITED_METHOD_CONTRACT; + + _ASSERTE(ppvEtwContext != NULL); + + EtwGcHeapDumpContext* pContext = (EtwGcHeapDumpContext*)*ppvEtwContext; + if (pContext == NULL) + { + pContext = new (nothrow) EtwGcHeapDumpContext; + *ppvEtwContext = pContext; + } + return pContext; + } + + EtwGcHeapDumpContext() : + iCurBulkRootEdge(0), + iCurBulkRootConditionalWeakTableElementEdge(0), + iCurBulkNodeEvent(0), + iCurBulkEdgeEvent(0), + bulkTypeEventLogger() + { + LIMITED_METHOD_CONTRACT; + ClearRootEdges(); + ClearRootConditionalWeakTableElementEdges(); + ClearNodes(); + ClearEdges(); + } + + // These helpers clear the individual buffers, for use after a flush and on + // construction. They intentionally leave the indices (iCur*) alone, since they + // persist across flushes within a GC + + void ClearRootEdges() + { + LIMITED_METHOD_CONTRACT; + cGcBulkRootEdges = 0; + ZeroMemory(rgGcBulkRootEdges, sizeof(rgGcBulkRootEdges)); + } + + void ClearRootConditionalWeakTableElementEdges() + { + LIMITED_METHOD_CONTRACT; + cGCBulkRootConditionalWeakTableElementEdges = 0; + ZeroMemory(rgGCBulkRootConditionalWeakTableElementEdges, sizeof(rgGCBulkRootConditionalWeakTableElementEdges)); + } + + void ClearNodes() + { + LIMITED_METHOD_CONTRACT; + cGcBulkNodeValues = 0; + ZeroMemory(rgGcBulkNodeValues, sizeof(rgGcBulkNodeValues)); + } + + void ClearEdges() + { + LIMITED_METHOD_CONTRACT; + cGcBulkEdgeValues = 0; + ZeroMemory(rgGcBulkEdgeValues, sizeof(rgGcBulkEdgeValues)); + } + + //--------------------------------------------------------------------------------------- + // GCBulkRootEdge + // + // A "root edge" is the relationship between a source "GCRootID" (i.e., stack + // variable, handle, static, etc.) and the target "RootedNodeAddress" (the managed + // object that gets rooted). + // + //--------------------------------------------------------------------------------------- + + // Sequence number for each GCBulkRootEdge event + UINT iCurBulkRootEdge; + + // Number of root edges currently filled out in rgGcBulkRootEdges array + UINT cGcBulkRootEdges; + + // Struct array containing the primary data for each GCBulkRootEdge event. Fix the size so + // the total event stays well below the 64K + // limit (leaving lots of room for non-struct fields that come before the root edge data) + EventStructGCBulkRootEdgeValue rgGcBulkRootEdges[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkRootEdgeValue)]; + + + //--------------------------------------------------------------------------------------- + // GCBulkRootConditionalWeakTableElementEdge + // + // These describe dependent handles, which simulate an edge connecting a key NodeID + // to a value NodeID. 
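// (A dependent handle keeps its value alive only as long as its key is alive,
// which is why it is reported as a key -> value edge rather than as a plain root.)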
+    //
+    //---------------------------------------------------------------------------------------
+
+    // Sequence number for each GCBulkRootConditionalWeakTableElementEdge event
+    UINT iCurBulkRootConditionalWeakTableElementEdge;
+
+    // Number of root edges currently filled out in rgGCBulkRootConditionalWeakTableElementEdges array
+    UINT cGCBulkRootConditionalWeakTableElementEdges;
+
+    // Struct array containing the primary data for each GCBulkRootConditionalWeakTableElementEdge event. Fix the size so
+    // the total event stays well below the 64K
+    // limit (leaving lots of room for non-struct fields that come before the root edge data)
+    EventStructGCBulkRootConditionalWeakTableElementEdgeValue rgGCBulkRootConditionalWeakTableElementEdges
+        [(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkRootConditionalWeakTableElementEdgeValue)];
+
+    //---------------------------------------------------------------------------------------
+    // GCBulkNode
+    //
+    // A "node" is ANY managed object sitting on the heap, including RootedNodeAddresses
+    // as well as leaf nodes.
+    //
+    //---------------------------------------------------------------------------------------
+
+    // Sequence number for each GCBulkNode event
+    UINT iCurBulkNodeEvent;
+
+    // Number of nodes currently filled out in rgGcBulkNodeValues array
+    UINT cGcBulkNodeValues;
+
+    // Struct array containing the primary data for each GCBulkNode event. Fix the size so
+    // the total event stays well below the 64K
+    // limit (leaving lots of room for non-struct fields that come before the node data)
+    EventStructGCBulkNodeValue rgGcBulkNodeValues[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkNodeValue)];
+
+    //---------------------------------------------------------------------------------------
+    // GCBulkEdge
+    //
+    // An "edge" is the relationship between a source node and its referenced target
+    // node. Edges are reported in bulk, separately from Nodes, but it is expected that
+    // the consumer read the Node and Edge streams together. One takes the first node
+    // from the Node stream, and then reads EdgeCount entries in the Edge stream, telling
+    // you all of that Node's targets. Then, one takes the next node in the Node stream,
+    // and reads the next entries in the Edge stream (using this Node's EdgeCount to
+    // determine how many) to find all of its targets. This continues on until the Node
+    // and Edge streams have been fully read.
+    //
+    // GCBulkRootEdges are not duplicated in the GCBulkEdge events. GCBulkEdge events
+    // begin at the GCBulkRootEdge.RootedNodeAddress and move forward.
+    //
+    //---------------------------------------------------------------------------------------
+
+    // Sequence number for each GCBulkEdge event
+    UINT iCurBulkEdgeEvent;
+
+    // Number of edges currently filled out in rgGcBulkEdgeValues array
+    UINT cGcBulkEdgeValues;
+
+    // Struct array containing the primary data for each GCBulkEdge event. Fix the size so
+    // the total event stays well below the 64K
+    // limit (leaving lots of room for non-struct fields that come before the edge data)
+    EventStructGCBulkEdgeValue rgGcBulkEdgeValues[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkEdgeValue)];
+
+
+    //---------------------------------------------------------------------------------------
+    // BulkType
+    //
+    // Types are a bit more complicated to batch up, since their data is of varying
+    // size.
BulkTypeEventLogger takes care of the pesky details for us + //--------------------------------------------------------------------------------------- + + BulkTypeEventLogger bulkTypeEventLogger; +}; + + + +//--------------------------------------------------------------------------------------- +// +// Called during a heap walk for each root reference encountered. Batches up the root in +// the ETW context +// +// Arguments: +// * pvHandle - If the root is a handle, this points to the handle +// * pRootedNode - Points to object that is rooted +// * pSecondaryNodeForDependentHandle - For dependent handles, this is the +// secondary object +// * fDependentHandle - nonzero iff this is for a dependent handle +// * profilingScanContext - The shared profapi/etw context built up during the heap walk. +// * dwGCFlags - Bitmask of "GC_"-style flags set by GC +// * rootFlags - Bitmask of EtwGCRootFlags describing the root +// + +// static +void ETW::GCLog::RootReference( + LPVOID pvHandle, + Object* pRootedNode, + Object* pSecondaryNodeForDependentHandle, + BOOL fDependentHandle, + ProfilingScanContext* profilingScanContext, + DWORD dwGCFlags, + DWORD rootFlags) +{ + LIMITED_METHOD_CONTRACT; + + if (pRootedNode == NULL) + return; + + EtwGcHeapDumpContext* pContext = + EtwGcHeapDumpContext::GetOrCreateInGCContext(&profilingScanContext->pvEtwContext); + if (pContext == NULL) + return; + + // Determine root kind, root ID, and handle-specific flags + LPVOID pvRootID = NULL; + BYTE nRootKind = (BYTE)profilingScanContext->dwEtwRootKind; + switch (nRootKind) + { + case kEtwGCRootKindStack: + break; + + case kEtwGCRootKindHandle: + pvRootID = pvHandle; + break; + + case kEtwGCRootKindFinalizer: + _ASSERTE(pvRootID == NULL); + break; + + case kEtwGCRootKindOther: + default: + _ASSERTE(nRootKind == kEtwGCRootKindOther); + _ASSERTE(pvRootID == NULL); + break; + } + + // Convert GC root flags to ETW root flags + if (dwGCFlags & GC_CALL_INTERIOR) + rootFlags |= kEtwGCRootFlagsInterior; + if (dwGCFlags & GC_CALL_PINNED) + rootFlags |= kEtwGCRootFlagsPinning; + + // Add root edge to appropriate buffer + if (fDependentHandle) + { + _ASSERTE(pContext->cGCBulkRootConditionalWeakTableElementEdges < + _countof(pContext->rgGCBulkRootConditionalWeakTableElementEdges)); + EventStructGCBulkRootConditionalWeakTableElementEdgeValue* pRCWTEEdgeValue = + &pContext->rgGCBulkRootConditionalWeakTableElementEdges[pContext->cGCBulkRootConditionalWeakTableElementEdges]; + pRCWTEEdgeValue->GCKeyNodeID = pRootedNode; + pRCWTEEdgeValue->GCValueNodeID = pSecondaryNodeForDependentHandle; + pRCWTEEdgeValue->GCRootID = pvRootID; + pContext->cGCBulkRootConditionalWeakTableElementEdges++; + + // If RCWTE edge buffer is now full, empty it into ETW + if (pContext->cGCBulkRootConditionalWeakTableElementEdges == + _countof(pContext->rgGCBulkRootConditionalWeakTableElementEdges)) + { + FireEtwGCBulkRootConditionalWeakTableElementEdge( + pContext->iCurBulkRootConditionalWeakTableElementEdge, + pContext->cGCBulkRootConditionalWeakTableElementEdges, + GetClrInstanceId(), + sizeof(pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]), + &pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]); + + pContext->iCurBulkRootConditionalWeakTableElementEdge++; + pContext->ClearRootConditionalWeakTableElementEdges(); + } + } + else + { + _ASSERTE(pContext->cGcBulkRootEdges < _countof(pContext->rgGcBulkRootEdges)); + EventStructGCBulkRootEdgeValue* pBulkRootEdgeValue = &pContext->rgGcBulkRootEdges[pContext->cGcBulkRootEdges]; + 
pBulkRootEdgeValue->RootedNodeAddress = pRootedNode;
+        pBulkRootEdgeValue->GCRootKind = nRootKind;
+        pBulkRootEdgeValue->GCRootFlag = rootFlags;
+        pBulkRootEdgeValue->GCRootID = pvRootID;
+        pContext->cGcBulkRootEdges++;
+
+        // If root edge buffer is now full, empty it into ETW
+        if (pContext->cGcBulkRootEdges == _countof(pContext->rgGcBulkRootEdges))
+        {
+            FireEtwGCBulkRootEdge(
+                pContext->iCurBulkRootEdge,
+                pContext->cGcBulkRootEdges,
+                GetClrInstanceId(),
+                sizeof(pContext->rgGcBulkRootEdges[0]),
+                &pContext->rgGcBulkRootEdges[0]);
+
+            pContext->iCurBulkRootEdge++;
+            pContext->ClearRootEdges();
+        }
+    }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Called during a heap walk for each object reference encountered. Batches up the
+// corresponding node, edges, and type data for the ETW events.
+//
+// Arguments:
+//      * profilerWalkHeapContext - The shared profapi/etw context built up during
+//            the heap walk.
+//      * pObjReferenceSource - Object doing the pointing
+//      * typeID - Type of pObjReferenceSource
+//      * cRefs - Count of objects being pointed to
+//      * rgObjReferenceTargets - Array of objects being pointed to
+//
+
+// static
+void ETW::GCLog::ObjectReference(
+    ProfilerWalkHeapContext* profilerWalkHeapContext,
+    Object* pObjReferenceSource,
+    ULONGLONG typeID,
+    ULONGLONG cRefs,
+    Object** rgObjReferenceTargets)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY;
+
+        // LogTypeAndParameters can take a lock
+        CAN_TAKE_LOCK;
+    }
+    CONTRACTL_END;
+
+    EtwGcHeapDumpContext* pContext =
+        EtwGcHeapDumpContext::GetOrCreateInGCContext(&profilerWalkHeapContext->pvEtwContext);
+    if (pContext == NULL)
+        return;
+
+    //---------------------------------------------------------------------------------------
+    //    GCBulkNode events
+    //---------------------------------------------------------------------------------------
+
+    // Add Node (pObjReferenceSource) to buffer
+    _ASSERTE(pContext->cGcBulkNodeValues < _countof(pContext->rgGcBulkNodeValues));
+    EventStructGCBulkNodeValue* pBulkNodeValue = &pContext->rgGcBulkNodeValues[pContext->cGcBulkNodeValues];
+    pBulkNodeValue->Address = pObjReferenceSource;
+    pBulkNodeValue->Size = pObjReferenceSource->GetSize();
+    pBulkNodeValue->TypeID = typeID;
+    pBulkNodeValue->EdgeCount = cRefs;
+    pContext->cGcBulkNodeValues++;
+
+    // If Node buffer is now full, empty it into ETW
+    if (pContext->cGcBulkNodeValues == _countof(pContext->rgGcBulkNodeValues))
+    {
+        FireEtwGCBulkNode(
+            pContext->iCurBulkNodeEvent,
+            pContext->cGcBulkNodeValues,
+            GetClrInstanceId(),
+            sizeof(pContext->rgGcBulkNodeValues[0]),
+            &pContext->rgGcBulkNodeValues[0]);
+
+        pContext->iCurBulkNodeEvent++;
+        pContext->ClearNodes();
+    }
+
+    //---------------------------------------------------------------------------------------
+    //    BulkType events
+    //---------------------------------------------------------------------------------------
+
+    // We send type information as necessary--only for nodes, and only for nodes that we
+    // haven't already sent type info for
+    if (typeID != 0)
+    {
+        // Batch up this type with others to minimize events
+        pContext->bulkTypeEventLogger.LogTypeAndParameters(typeID);
+    }
+
+    //---------------------------------------------------------------------------------------
+    //    GCBulkEdge events
+    //---------------------------------------------------------------------------------------
+
+    // Add Edges (rgObjReferenceTargets) to buffer.
Buffer could fill up before all edges + // are added (it could even fill up multiple times during this one call if there are + // a lot of edges), so empty Edge buffer into ETW as we go along, as many times as we + // need. + + for (ULONGLONG i = 0; i < cRefs; i++) + { + _ASSERTE(pContext->cGcBulkEdgeValues < _countof(pContext->rgGcBulkEdgeValues)); + EventStructGCBulkEdgeValue* pBulkEdgeValue = &pContext->rgGcBulkEdgeValues[pContext->cGcBulkEdgeValues]; + pBulkEdgeValue->Value = rgObjReferenceTargets[i]; + // FUTURE: ReferencingFieldID + pBulkEdgeValue->ReferencingFieldID = 0; + pContext->cGcBulkEdgeValues++; + + // If Edge buffer is now full, empty it into ETW + if (pContext->cGcBulkEdgeValues == _countof(pContext->rgGcBulkEdgeValues)) + { + FireEtwGCBulkEdge( + pContext->iCurBulkEdgeEvent, + pContext->cGcBulkEdgeValues, + GetClrInstanceId(), + sizeof(pContext->rgGcBulkEdgeValues[0]), + &pContext->rgGcBulkEdgeValues[0]); + + pContext->iCurBulkEdgeEvent++; + pContext->ClearEdges(); + } + } +} + +//--------------------------------------------------------------------------------------- +// +// Called by GC at end of heap dump to give us a convenient time to flush any remaining +// buffers of data to ETW +// +// Arguments: +// profilerWalkHeapContext - Context containing data we've batched up +// + +// static +void ETW::GCLog::EndHeapDump(ProfilerWalkHeapContext* profilerWalkHeapContext) +{ + LIMITED_METHOD_CONTRACT; + + // If context isn't already set up for us, then we haven't been collecting any data + // for ETW events. + EtwGcHeapDumpContext* pContext = (EtwGcHeapDumpContext*)profilerWalkHeapContext->pvEtwContext; + if (pContext == NULL) + return; + + // If the GC events are enabled, flush any remaining root, node, and / or edge data + if (RUNTIME_PROVIDER_CATEGORY_ENABLED( + TRACE_LEVEL_INFORMATION, + CLR_GCHEAPDUMP_KEYWORD)) + { + if (pContext->cGcBulkRootEdges > 0) + { + FireEtwGCBulkRootEdge( + pContext->iCurBulkRootEdge, + pContext->cGcBulkRootEdges, + GetClrInstanceId(), + sizeof(pContext->rgGcBulkRootEdges[0]), + &pContext->rgGcBulkRootEdges[0]); + } + + if (pContext->cGCBulkRootConditionalWeakTableElementEdges > 0) + { + FireEtwGCBulkRootConditionalWeakTableElementEdge( + pContext->iCurBulkRootConditionalWeakTableElementEdge, + pContext->cGCBulkRootConditionalWeakTableElementEdges, + GetClrInstanceId(), + sizeof(pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]), + &pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]); + } + + if (pContext->cGcBulkNodeValues > 0) + { + FireEtwGCBulkNode( + pContext->iCurBulkNodeEvent, + pContext->cGcBulkNodeValues, + GetClrInstanceId(), + sizeof(pContext->rgGcBulkNodeValues[0]), + &pContext->rgGcBulkNodeValues[0]); + } + + if (pContext->cGcBulkEdgeValues > 0) + { + FireEtwGCBulkEdge( + pContext->iCurBulkEdgeEvent, + pContext->cGcBulkEdgeValues, + GetClrInstanceId(), + sizeof(pContext->rgGcBulkEdgeValues[0]), + &pContext->rgGcBulkEdgeValues[0]); + } + } + + // Ditto for type events + if (RUNTIME_PROVIDER_CATEGORY_ENABLED( + TRACE_LEVEL_INFORMATION, + CLR_TYPE_KEYWORD)) + { + pContext->bulkTypeEventLogger.FireBulkTypeEvent(); + pContext->bulkTypeEventLogger.Cleanup(); + } + + // Delete any GC state built up in the context + profilerWalkHeapContext->pvEtwContext = NULL; + delete pContext; +} diff --git a/src/coreclr/nativeaot/Runtime/eventtracebase.h b/src/coreclr/nativeaot/Runtime/eventtracebase.h index 5de84b9270dfbf..241c795c0d02fc 100644 --- a/src/coreclr/nativeaot/Runtime/eventtracebase.h +++ 
b/src/coreclr/nativeaot/Runtime/eventtracebase.h @@ -22,8 +22,8 @@ // For convenience, it is called ETM (Event Tracing for Mac) and exists only on the Mac Leopard OS // ============================================================================ -#ifndef _ETWTRACER_HXX_ -#define _ETWTRACER_HXX_ +#ifndef EVENTTRACEBASE_H +#define EVENTTRACEBASE_H struct EventStructTypeData; void InitializeEventTracing(); @@ -95,6 +95,14 @@ struct ProfilingScanContext; #define ETW_TRACING_ENABLED(Context, EventDescriptor) \ (Context.IsEnabled && ETW_TRACING_INITIALIZED(Context.RegistrationHandle) && ETW_EVENT_ENABLED(Context, EventDescriptor)) +#define CLR_GC_KEYWORD 0x1 +#define CLR_OVERRIDEANDSUPPRESSNGENEVENTS_KEYWORD 0x40000 +#define CLR_TYPE_KEYWORD 0x80000 +#define CLR_GCHEAPDUMP_KEYWORD 0x100000 +#define CLR_GCHEAPSURVIVALANDMOVEMENT_KEYWORD 0x400000 +#define CLR_MANAGEDHEAPCOLLECT_KEYWORD 0x800000 +#define CLR_GCHEAPANDTYPENAMES_KEYWORD 0x1000000 + // // Using KEYWORDZERO means when checking the events category ignore the keyword // @@ -106,31 +114,19 @@ struct ProfilingScanContext; #define ETW_TRACING_CATEGORY_ENABLED(Context, Level, Keyword) \ (ETW_TRACING_INITIALIZED(Context.RegistrationHandle) && ETW_CATEGORY_ENABLED(Context, Level, Keyword)) +bool DotNETRuntimeProvider_IsEnabled(unsigned char level, unsigned long long keyword); + +#ifdef FEATURE_ETW +#define RUNTIME_PROVIDER_CATEGORY_ENABLED(Level, Keyword) \ + (DotNETRuntimeProvider_IsEnabled(Level, Keyword) || ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, Level, Keyword)) +#else +#define RUNTIME_PROVIDER_CATEGORY_ENABLED(Level, Keyword) \ + DotNETRuntimeProvider_IsEnabled(Level, Keyword) +#endif // FEATURE_ETW + #else // FEATURE_EVENT_TRACE #include "etmdummy.h" #endif // FEATURE_EVENT_TRACE -// These parts of the ETW namespace are common for both FEATURE_NATIVEAOT and -// !FEATURE_NATIVEAOT builds. 
- - -struct ProfilingScanContext; -struct ProfilerWalkHeapContext; -class Object; - -namespace ETW -{ - // Class to wrap the logging of threads (runtime and rundown providers) - class ThreadLog - { - private: - static DWORD GetEtwThreadFlags(Thread * pThread); - - public: - static void FireThreadCreated(Thread * pThread); - static void FireThreadDC(Thread * pThread); - }; -}; - -#endif //_ETWTRACER_HXX_ +#endif // EVENTTRACEBASE_H diff --git a/src/coreclr/nativeaot/Runtime/eventtracepriv.h b/src/coreclr/nativeaot/Runtime/eventtracepriv.h index 87ad0e9b5bc86d..26a9ca1350df02 100644 --- a/src/coreclr/nativeaot/Runtime/eventtracepriv.h +++ b/src/coreclr/nativeaot/Runtime/eventtracepriv.h @@ -188,7 +188,7 @@ class BulkTypeEventLogger LIMITED_METHOD_CONTRACT; } - void LogTypeAndParameters(ULONGLONG thAsAddr, ETW::TypeSystemLog::TypeLogBehavior typeLogBehavior); + void LogTypeAndParameters(ULONGLONG thAsAddr); void FireBulkTypeEvent(); void Cleanup(); }; diff --git a/src/coreclr/nativeaot/Runtime/gcenv.h b/src/coreclr/nativeaot/Runtime/gcenv.h index 808204a678dbf6..3caed96f8ffa4c 100644 --- a/src/coreclr/nativeaot/Runtime/gcenv.h +++ b/src/coreclr/nativeaot/Runtime/gcenv.h @@ -28,7 +28,6 @@ #include "TargetPtrs.h" #include "MethodTable.h" #include "ObjectLayout.h" -#include "rheventtrace.h" #include "PalRedhawkCommon.h" #include "PalRedhawk.h" #include "gcrhinterface.h" @@ -58,6 +57,7 @@ #include "clretwallmain.h" #include "eventtrace.h" + #include "eventtrace_etw.h" #else // FEATURE_EVENT_TRACE diff --git a/src/coreclr/nativeaot/Runtime/rheventtrace.h b/src/coreclr/nativeaot/Runtime/rheventtrace.h deleted file mode 100644 index 253a037175295c..00000000000000 --- a/src/coreclr/nativeaot/Runtime/rheventtrace.h +++ /dev/null @@ -1,98 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// -// This header provides Redhawk-specific ETW code and macros, to allow sharing of common -// ETW code between Redhawk and desktop CLR. -// -#ifndef __RHEVENTTRACE_INCLUDED -#define __RHEVENTTRACE_INCLUDED - - -#ifdef FEATURE_ETW - -// FireEtwGCPerHeapHistorySpecial() has to be defined manually rather than via the manifest because it does -// not have a standard signature. -#define FireEtwGCPerHeapHistorySpecial(DataPerHeap, DataSize, ClrId) (MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_Redhawk_GC_PrivateHandle, &GCPerHeapHistory)) ? Template_GCPerHeapHistorySpecial(Microsoft_Windows_Redhawk_GC_PrivateHandle, &GCPerHeapHistory, DataPerHeap, DataSize, ClrId) : 0 - -// Map the CLR private provider to our version so we can avoid inserting more #ifdef's in the code. 
-#define MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_Context -#define MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_Context -#define Microsoft_Windows_DotNETRuntimeHandle Microsoft_Windows_Redhawk_GC_PublicHandle - -#define CLR_GC_KEYWORD 0x1 -#define CLR_FUSION_KEYWORD 0x4 -#define CLR_LOADER_KEYWORD 0x8 -#define CLR_JIT_KEYWORD 0x10 -#define CLR_NGEN_KEYWORD 0x20 -#define CLR_STARTENUMERATION_KEYWORD 0x40 -#define CLR_ENDENUMERATION_KEYWORD 0x80 -#define CLR_SECURITY_KEYWORD 0x400 -#define CLR_APPDOMAINRESOURCEMANAGEMENT_KEYWORD 0x800 -#define CLR_JITTRACING_KEYWORD 0x1000 -#define CLR_INTEROP_KEYWORD 0x2000 -#define CLR_CONTENTION_KEYWORD 0x4000 -#define CLR_EXCEPTION_KEYWORD 0x8000 -#define CLR_THREADING_KEYWORD 0x10000 -#define CLR_JITTEDMETHODILTONATIVEMAP_KEYWORD 0x20000 -#define CLR_OVERRIDEANDSUPPRESSNGENEVENTS_KEYWORD 0x40000 -#define CLR_TYPE_KEYWORD 0x80000 -#define CLR_GCHEAPDUMP_KEYWORD 0x100000 -#define CLR_GCHEAPALLOC_KEYWORD 0x200000 -#define CLR_GCHEAPSURVIVALANDMOVEMENT_KEYWORD 0x400000 -#define CLR_MANAGEDHEAPCOLLECT_KEYWORD 0x800000 -#define CLR_GCHEAPANDTYPENAMES_KEYWORD 0x1000000 -#define CLR_PERFTRACK_KEYWORD 0x20000000 -#define CLR_STACK_KEYWORD 0x40000000 -#ifndef ERROR_SUCCESS -#define ERROR_SUCCESS 0 -#endif - -#undef ETW_TRACING_INITIALIZED -#define ETW_TRACING_INITIALIZED(RegHandle) (RegHandle != NULL) - -#undef ETW_CATEGORY_ENABLED -#define ETW_CATEGORY_ENABLED(Context, LevelParam, Keyword) \ - (Context.IsEnabled && \ - ( \ - (LevelParam <= ((Context).Level)) || \ - ((Context.Level) == 0) \ - ) && \ - ( \ - (Keyword == (ULONGLONG)0) || \ - ( \ - (Keyword & (Context.MatchAnyKeyword)) && \ - ( \ - (Keyword & (Context.MatchAllKeyword)) == (Context.MatchAllKeyword) \ - ) \ - ) \ - ) \ - ) - -class MethodTable; -class BulkTypeEventLogger; - -namespace ETW -{ - // Class to wrap all type system logic for ETW - class TypeSystemLog - { - public: - // This enum is unused on Redhawk, but remains here to keep Redhawk / desktop CLR - // code shareable. 
- enum TypeLogBehavior - { - kTypeLogBehaviorTakeLockAndLogIfFirstTime, - kTypeLogBehaviorAssumeLockAndLogIfFirstTime, - kTypeLogBehaviorAlwaysLog, - }; - - static void LogTypeAndParametersIfNecessary(BulkTypeEventLogger * pLogger, uint64_t thAsAddr, TypeLogBehavior typeLogBehavior); - }; -}; - -#else -#define FireEtwGCPerHeapHistorySpecial(DataPerHeap, DataSize, ClrId) -#endif - -#endif //__RHEVENTTRACE_INCLUDED diff --git a/src/coreclr/vm/CMakeLists.txt b/src/coreclr/vm/CMakeLists.txt index bf6b91acbf4bd4..1756d35a86b219 100644 --- a/src/coreclr/vm/CMakeLists.txt +++ b/src/coreclr/vm/CMakeLists.txt @@ -515,6 +515,8 @@ set(GC_HEADERS_WKS if(FEATURE_EVENT_TRACE) list(APPEND VM_SOURCES_WKS eventtrace.cpp + eventtrace_bulktype.cpp + eventtrace_gcheap.cpp ) list(APPEND VM_HEADERS_WKS eventtracepriv.h diff --git a/src/coreclr/vm/eventtrace.cpp b/src/coreclr/vm/eventtrace.cpp index 7c7a24f2cf4a12..77281f9d6474ee 100644 --- a/src/coreclr/vm/eventtrace.cpp +++ b/src/coreclr/vm/eventtrace.cpp @@ -456,423 +456,6 @@ VOID ETW::GCLog::GCSettingsEvent() #endif // !FEATURE_NATIVEAOT -//--------------------------------------------------------------------------------------- -// Code for sending GC heap object events is generally the same for both FEATURE_NATIVEAOT -// and !FEATURE_NATIVEAOT builds -//--------------------------------------------------------------------------------------- - -bool s_forcedGCInProgress = false; -class ForcedGCHolder -{ -public: - ForcedGCHolder() { LIMITED_METHOD_CONTRACT; s_forcedGCInProgress = true; } - ~ForcedGCHolder() { LIMITED_METHOD_CONTRACT; s_forcedGCInProgress = false; } -}; - -BOOL ETW::GCLog::ShouldWalkStaticsAndCOMForEtw() -{ - LIMITED_METHOD_CONTRACT; - - return s_forcedGCInProgress && - ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, - TRACE_LEVEL_INFORMATION, - CLR_GCHEAPDUMP_KEYWORD); -} - -// Simple helpers called by the GC to decide whether it needs to do a walk of heap -// objects and / or roots. - -BOOL ETW::GCLog::ShouldWalkHeapObjectsForEtw() -{ - LIMITED_METHOD_CONTRACT; - return s_forcedGCInProgress && - ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, - TRACE_LEVEL_INFORMATION, - CLR_GCHEAPDUMP_KEYWORD); -} - -BOOL ETW::GCLog::ShouldWalkHeapRootsForEtw() -{ - LIMITED_METHOD_CONTRACT; - return s_forcedGCInProgress && - ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, - TRACE_LEVEL_INFORMATION, - CLR_GCHEAPDUMP_KEYWORD); -} - -BOOL ETW::GCLog::ShouldTrackMovementForEtw() -{ - LIMITED_METHOD_CONTRACT; - return ETW_TRACING_CATEGORY_ENABLED( - MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, - TRACE_LEVEL_INFORMATION, - CLR_GCHEAPSURVIVALANDMOVEMENT_KEYWORD); -} - -// Batches the list of moved/surviving references for the GCBulkMovedObjectRanges / -// GCBulkSurvivingObjectRanges events -struct EtwGcMovementContext -{ -public: - // An instance of EtwGcMovementContext is dynamically allocated and stored - // inside of MovedReferenceContextForEtwAndProfapi, which in turn is dynamically - // allocated and pointed to by a profiling_context pointer created by the GC on the stack. - // This is used to batch and send GCBulkSurvivingObjectRanges events and - // GCBulkMovedObjectRanges events. This method is passed a pointer to - // MovedReferenceContextForEtwAndProfapi::pctxEtw; if non-NULL it gets returned; - // else, a new EtwGcMovementContext is allocated, stored in that pointer, and - // then returned. 
Callers should test for NULL, which can be returned if out of - // memory - static EtwGcMovementContext * GetOrCreateInGCContext(EtwGcMovementContext ** ppContext) - { - LIMITED_METHOD_CONTRACT; - - _ASSERTE(ppContext != NULL); - - EtwGcMovementContext * pContext = *ppContext; - if (pContext == NULL) - { - pContext = new (nothrow) EtwGcMovementContext; - *ppContext = pContext; - } - return pContext; - } - - EtwGcMovementContext() : - iCurBulkSurvivingObjectRanges(0), - iCurBulkMovedObjectRanges(0) - { - LIMITED_METHOD_CONTRACT; - Clear(); - } - - // Resets structure for reuse on construction, and after each flush. - // (Intentionally leave iCurBulk* as is, since they persist across flushes within a GC.) - void Clear() - { - LIMITED_METHOD_CONTRACT; - cBulkSurvivingObjectRanges = 0; - cBulkMovedObjectRanges = 0; - ZeroMemory(rgGCBulkSurvivingObjectRanges, sizeof(rgGCBulkSurvivingObjectRanges)); - ZeroMemory(rgGCBulkMovedObjectRanges, sizeof(rgGCBulkMovedObjectRanges)); - } - - //--------------------------------------------------------------------------------------- - // GCBulkSurvivingObjectRanges - //--------------------------------------------------------------------------------------- - - // Sequence number for each GCBulkSurvivingObjectRanges event - UINT iCurBulkSurvivingObjectRanges; - - // Number of surviving object ranges currently filled out in rgGCBulkSurvivingObjectRanges array - UINT cBulkSurvivingObjectRanges; - - // Struct array containing the primary data for each GCBulkSurvivingObjectRanges - // event. Fix the size so the total event stays well below the 64K limit (leaving - // lots of room for non-struct fields that come before the values data) - EventStructGCBulkSurvivingObjectRangesValue rgGCBulkSurvivingObjectRanges[ - (cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkSurvivingObjectRangesValue)]; - - //--------------------------------------------------------------------------------------- - // GCBulkMovedObjectRanges - //--------------------------------------------------------------------------------------- - - // Sequence number for each GCBulkMovedObjectRanges event - UINT iCurBulkMovedObjectRanges; - - // Number of Moved object ranges currently filled out in rgGCBulkMovedObjectRanges array - UINT cBulkMovedObjectRanges; - - // Struct array containing the primary data for each GCBulkMovedObjectRanges - // event. Fix the size so the total event stays well below the 64K limit (leaving - // lots of room for non-struct fields that come before the values data) - EventStructGCBulkMovedObjectRangesValue rgGCBulkMovedObjectRanges[ - (cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkMovedObjectRangesValue)]; -}; - -// Contains above struct for ETW, plus extra info (opaque to us) used by the profiling -// API to track its own information. -struct MovedReferenceContextForEtwAndProfapi -{ - // An instance of MovedReferenceContextForEtwAndProfapi is dynamically allocated and - // pointed to by a profiling_context pointer created by the GC on the stack. This is used to - // batch and send GCBulkSurvivingObjectRanges events and GCBulkMovedObjectRanges - // events and the corresponding callbacks for profapi profilers. This method is - // passed a pointer to a MovedReferenceContextForEtwAndProfapi; if non-NULL it gets - // returned; else, a new MovedReferenceContextForEtwAndProfapi is allocated, stored - // in that pointer, and then returned. 
Callers should test for NULL, which can be - // returned if out of memory - static MovedReferenceContextForEtwAndProfapi * CreateInGCContext(LPVOID pvContext) - { - LIMITED_METHOD_CONTRACT; - - _ASSERTE(pvContext != NULL); - - MovedReferenceContextForEtwAndProfapi * pContext = *(MovedReferenceContextForEtwAndProfapi **) pvContext; - - // Shouldn't be called if the context was already created. Perhaps someone made - // one too many BeginMovedReferences calls, or didn't have an EndMovedReferences - // in between? - _ASSERTE(pContext == NULL); - - pContext = new (nothrow) MovedReferenceContextForEtwAndProfapi; - *(MovedReferenceContextForEtwAndProfapi **) pvContext = pContext; - - return pContext; - } - - - MovedReferenceContextForEtwAndProfapi() : - pctxProfAPI(NULL), - pctxEtw(NULL) - - { - LIMITED_METHOD_CONTRACT; - } - - LPVOID pctxProfAPI; - EtwGcMovementContext * pctxEtw; -}; - - -//--------------------------------------------------------------------------------------- -// -// Called by the GC for each moved or surviving reference that it encounters. This -// batches the info into our context's buffer, and flushes that buffer to ETW as it fills -// up. -// -// Arguments: -// * pbMemBlockStart - Start of moved/surviving block -// * pbMemBlockEnd - Next pointer after end of moved/surviving block -// * cbRelocDistance - How far did the block move? (0 for non-compacted / surviving -// references; negative if moved to earlier addresses) -// * profilingContext - Where our context is stored -// * fCompacting - Is this a compacting GC? Used to decide whether to send the moved -// or surviving event -// - -// static -void ETW::GCLog::MovedReference( - BYTE * pbMemBlockStart, - BYTE * pbMemBlockEnd, - ptrdiff_t cbRelocDistance, - size_t profilingContext, - BOOL fCompacting, - BOOL fAllowProfApiNotification /* = TRUE */) -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - CAN_TAKE_LOCK; // EEToProfInterfaceImpl::AllocateMovedReferencesData takes lock - } - CONTRACTL_END; - - MovedReferenceContextForEtwAndProfapi * pCtxForEtwAndProfapi = - (MovedReferenceContextForEtwAndProfapi *) profilingContext; - if (pCtxForEtwAndProfapi == NULL) - { - _ASSERTE(!"MovedReference() encountered a NULL profilingContext"); - return; - } - -#ifdef PROFILING_SUPPORTED - // ProfAPI - if (fAllowProfApiNotification) - { - BEGIN_PROFILER_CALLBACK(CORProfilerTrackGC() || CORProfilerTrackGCMovedObjects()); - (&g_profControlBlock)->MovedReference(pbMemBlockStart, - pbMemBlockEnd, - cbRelocDistance, - &(pCtxForEtwAndProfapi->pctxProfAPI), - fCompacting); - END_PROFILER_CALLBACK(); - } -#endif // PROFILING_SUPPORTED - - // ETW - - if (!ShouldTrackMovementForEtw()) - return; - - EtwGcMovementContext * pContext = - EtwGcMovementContext::GetOrCreateInGCContext(&pCtxForEtwAndProfapi->pctxEtw); - if (pContext == NULL) - return; - - if (fCompacting) - { - // Moved references - - _ASSERTE(pContext->cBulkMovedObjectRanges < ARRAY_SIZE(pContext->rgGCBulkMovedObjectRanges)); - EventStructGCBulkMovedObjectRangesValue * pValue = - &pContext->rgGCBulkMovedObjectRanges[pContext->cBulkMovedObjectRanges]; - pValue->OldRangeBase = pbMemBlockStart; - pValue->NewRangeBase = pbMemBlockStart + cbRelocDistance; - pValue->RangeLength = pbMemBlockEnd - pbMemBlockStart; - pContext->cBulkMovedObjectRanges++; - - // If buffer is now full, empty it into ETW - if (pContext->cBulkMovedObjectRanges == ARRAY_SIZE(pContext->rgGCBulkMovedObjectRanges)) - { - FireEtwGCBulkMovedObjectRanges( - pContext->iCurBulkMovedObjectRanges, - 
pContext->cBulkMovedObjectRanges, - GetClrInstanceId(), - sizeof(pContext->rgGCBulkMovedObjectRanges[0]), - &pContext->rgGCBulkMovedObjectRanges[0]); - - pContext->iCurBulkMovedObjectRanges++; - pContext->Clear(); - } - } - else - { - // Surviving references - - _ASSERTE(pContext->cBulkSurvivingObjectRanges < ARRAY_SIZE(pContext->rgGCBulkSurvivingObjectRanges)); - EventStructGCBulkSurvivingObjectRangesValue * pValue = - &pContext->rgGCBulkSurvivingObjectRanges[pContext->cBulkSurvivingObjectRanges]; - pValue->RangeBase = pbMemBlockStart; - pValue->RangeLength = pbMemBlockEnd - pbMemBlockStart; - pContext->cBulkSurvivingObjectRanges++; - - // If buffer is now full, empty it into ETW - if (pContext->cBulkSurvivingObjectRanges == ARRAY_SIZE(pContext->rgGCBulkSurvivingObjectRanges)) - { - FireEtwGCBulkSurvivingObjectRanges( - pContext->iCurBulkSurvivingObjectRanges, - pContext->cBulkSurvivingObjectRanges, - GetClrInstanceId(), - sizeof(pContext->rgGCBulkSurvivingObjectRanges[0]), - &pContext->rgGCBulkSurvivingObjectRanges[0]); - - pContext->iCurBulkSurvivingObjectRanges++; - pContext->Clear(); - } - } -} - - -//--------------------------------------------------------------------------------------- -// -// Called by the GC just before it begins enumerating plugs. Gives us a chance to -// allocate our context structure, to allow us to batch plugs before firing events -// for them -// -// Arguments: -// * pProfilingContext - Points to location on stack (in GC function) where we can -// store a pointer to the context we allocate -// - -// static -VOID ETW::GCLog::BeginMovedReferences(size_t * pProfilingContext) -{ - LIMITED_METHOD_CONTRACT; - - MovedReferenceContextForEtwAndProfapi::CreateInGCContext(LPVOID(pProfilingContext)); -} - - -//--------------------------------------------------------------------------------------- -// -// Called by the GC at the end of a heap walk to give us a place to flush any remaining -// buffers of data to ETW or the profapi profiler -// -// Arguments: -// profilingContext - Our context we built up during the heap walk -// - -// static -VOID ETW::GCLog::EndMovedReferences(size_t profilingContext, BOOL fAllowProfApiNotification /* = TRUE */) -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - CAN_TAKE_LOCK; - } - CONTRACTL_END; - - MovedReferenceContextForEtwAndProfapi * pCtxForEtwAndProfapi = (MovedReferenceContextForEtwAndProfapi *) profilingContext; - if (pCtxForEtwAndProfapi == NULL) - { - _ASSERTE(!"EndMovedReferences() encountered a NULL profilingContext"); - return; - } - -#ifdef PROFILING_SUPPORTED - // ProfAPI - if (fAllowProfApiNotification) - { - BEGIN_PROFILER_CALLBACK(CORProfilerTrackGC() || CORProfilerTrackGCMovedObjects()); - (&g_profControlBlock)->EndMovedReferences(&(pCtxForEtwAndProfapi->pctxProfAPI)); - END_PROFILER_CALLBACK(); - } -#endif //PROFILING_SUPPORTED - - // ETW - - if (!ShouldTrackMovementForEtw()) - return; - - // If context isn't already set up for us, then we haven't been collecting any data - // for ETW events. 
- EtwGcMovementContext * pContext = pCtxForEtwAndProfapi->pctxEtw; - if (pContext == NULL) - return; - - // Flush any remaining moved or surviving range data - - if (pContext->cBulkMovedObjectRanges > 0) - { - FireEtwGCBulkMovedObjectRanges( - pContext->iCurBulkMovedObjectRanges, - pContext->cBulkMovedObjectRanges, - GetClrInstanceId(), - sizeof(pContext->rgGCBulkMovedObjectRanges[0]), - &pContext->rgGCBulkMovedObjectRanges[0]); - } - - if (pContext->cBulkSurvivingObjectRanges > 0) - { - FireEtwGCBulkSurvivingObjectRanges( - pContext->iCurBulkSurvivingObjectRanges, - pContext->cBulkSurvivingObjectRanges, - GetClrInstanceId(), - sizeof(pContext->rgGCBulkSurvivingObjectRanges[0]), - &pContext->rgGCBulkSurvivingObjectRanges[0]); - } - - pCtxForEtwAndProfapi->pctxEtw = NULL; - delete pContext; -} - -/***************************************************************************/ -/* This implements the public runtime provider's ManagedHeapCollectKeyword. It - performs a full, gen-2, blocking GC. */ -/***************************************************************************/ -VOID ETW::GCLog::ForceGC(LONGLONG l64ClientSequenceNumber) -{ - CONTRACTL - { - NOTHROW; - GC_TRIGGERS; - MODE_ANY; - } - CONTRACTL_END; - -#ifndef FEATURE_NATIVEAOT - if (!IsGarbageCollectorFullyInitialized()) - return; -#endif // FEATURE_NATIVEAOT - - InterlockedExchange64(&s_l64LastClientSequenceNumber, l64ClientSequenceNumber); - - ForceGCForDiagnostics(); -} //--------------------------------------------------------------------------------------- // @@ -907,1598 +490,7 @@ VOID ETW::GCLog::FireGcStart(ETW_GC_INFO * pGcInfo) } } -//--------------------------------------------------------------------------------------- -// -// Contains code common to profapi and ETW scenarios where the profiler wants to force -// the CLR to perform a GC. The important work here is to create a managed thread for -// the current thread BEFORE the GC begins. On both ETW and profapi threads, there may -// not yet be a managed thread object. But some scenarios require a managed thread -// object be present.. -// -// Return Value: -// HRESULT indicating success or failure -// -// Assumptions: -// Caller should ensure that the EE has fully started up and that the GC heap is -// initialized enough to actually perform a GC -// - -// static -HRESULT ETW::GCLog::ForceGCForDiagnostics() -{ - CONTRACTL - { - NOTHROW; - GC_TRIGGERS; - MODE_ANY; - } - CONTRACTL_END; - - HRESULT hr = E_FAIL; - -#ifndef FEATURE_NATIVEAOT - // Caller should ensure we're past startup. - _ASSERTE(IsGarbageCollectorFullyInitialized()); - - // In immersive apps the GarbageCollect() call below will call into the WinUI reference tracker, - // which will call back into the runtime to track references. This call - // chain would cause a Thread object to be created for this thread while code - // higher on the stack owns the ThreadStoreLock. This will lead to asserts - // since the ThreadStoreLock is non-reentrant. To avoid this we'll create - // the Thread object here instead. - if (GetThreadNULLOk() == NULL) - { - HRESULT hr = E_FAIL; - SetupThreadNoThrow(&hr); - if (FAILED(hr)) - return hr; - } - - ASSERT_NO_EE_LOCKS_HELD(); - - EX_TRY - { - // Need to switch to cooperative mode as the thread will access managed - // references (through reference tracker callbacks). 
- GCX_COOP(); -#endif // FEATURE_NATIVEAOT - - ForcedGCHolder forcedGCHolder; - - hr = GCHeapUtilities::GetGCHeap()->GarbageCollect( - -1, // all generations should be collected - false, // low_memory_p - collection_blocking); - -#ifndef FEATURE_NATIVEAOT - } - EX_CATCH { } - EX_END_CATCH(RethrowTerminalExceptions); -#endif // FEATURE_NATIVEAOT - - return hr; -} - - - - - - -//--------------------------------------------------------------------------------------- -// WalkStaticsAndCOMForETW walks both CCW/RCW objects and static variables. -//--------------------------------------------------------------------------------------- - -VOID ETW::GCLog::WalkStaticsAndCOMForETW() -{ - CONTRACTL - { - NOTHROW; - GC_TRIGGERS; - } - CONTRACTL_END; - - EX_TRY - { - BulkTypeEventLogger typeLogger; - - // Walk RCWs/CCWs - BulkComLogger comLogger(&typeLogger); - comLogger.LogAllComObjects(); - - // Walk static variables - BulkStaticsLogger staticLogger(&typeLogger); - staticLogger.LogAllStatics(); - // Ensure all loggers have written all events, fire type logger last to batch events - // (FireBulkComEvent or FireBulkStaticsEvent may queue up additional types). - comLogger.FireBulkComEvent(); - staticLogger.FireBulkStaticsEvent(); - typeLogger.FireBulkTypeEvent(); - } - EX_CATCH - { - } - EX_END_CATCH(SwallowAllExceptions); -} - - -//--------------------------------------------------------------------------------------- -// BulkStaticsLogger: Batches up and logs static variable roots -//--------------------------------------------------------------------------------------- - -BulkComLogger::BulkComLogger(BulkTypeEventLogger *typeLogger) - : m_currRcw(0), m_currCcw(0), m_typeLogger(typeLogger), m_etwRcwData(0), m_etwCcwData(0), m_enumResult(0) -{ - CONTRACTL - { - THROWS; - GC_NOTRIGGER; - MODE_ANY; - } - CONTRACTL_END; - - m_etwRcwData = new EventRCWEntry[kMaxRcwCount]; - m_etwCcwData = new EventCCWEntry[kMaxCcwCount]; -} - -BulkComLogger::~BulkComLogger() -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - } - CONTRACTL_END; - - FireBulkComEvent(); - - if (m_etwRcwData) - delete [] m_etwRcwData; - - if (m_etwCcwData) - delete [] m_etwCcwData; - - if (m_enumResult) - { - CCWEnumerationEntry *curr = m_enumResult; - while (curr) - { - CCWEnumerationEntry *next = curr->Next; - delete curr; - curr = next; - } - } -} - -void BulkComLogger::FireBulkComEvent() -{ - WRAPPER_NO_CONTRACT; - - FlushRcw(); - FlushCcw(); -} - -void BulkComLogger::WriteRcw(RCW *pRcw, Object *obj) -{ - CONTRACTL - { - THROWS; - GC_TRIGGERS; - MODE_ANY; - PRECONDITION(pRcw != NULL); - PRECONDITION(obj != NULL); - } - CONTRACTL_END; - - _ASSERTE(m_currRcw < kMaxRcwCount); - -#ifdef FEATURE_COMINTEROP - TypeHandle typeHandle = obj->GetGCSafeTypeHandleIfPossible(); - if (typeHandle == NULL) - { - return; - } - EventRCWEntry &rcw = m_etwRcwData[m_currRcw]; - rcw.ObjectID = (ULONGLONG)obj; - rcw.TypeID = (ULONGLONG)typeHandle.AsTAddr(); - rcw.IUnk = (ULONGLONG)pRcw->GetIUnknown_NoAddRef(); - rcw.VTable = (ULONGLONG)pRcw->GetVTablePtr(); - rcw.RefCount = pRcw->GetRefCount(); - rcw.Flags = 0; - - if (++m_currRcw >= kMaxRcwCount) - FlushRcw(); -#endif -} - -void BulkComLogger::FlushRcw() -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - } - CONTRACTL_END; - - _ASSERTE(m_currRcw <= kMaxRcwCount); - - if (m_currRcw == 0) - return; - - if (m_typeLogger) - { - for (int i = 0; i < m_currRcw; ++i) - ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(m_typeLogger, m_etwRcwData[i].TypeID, 
ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime); - } - - unsigned short instance = GetClrInstanceId(); - -#if !defined(HOST_UNIX) - EVENT_DATA_DESCRIPTOR eventData[3]; - EventDataDescCreate(&eventData[0], &m_currRcw, sizeof(const unsigned int)); - EventDataDescCreate(&eventData[1], &instance, sizeof(const unsigned short)); - EventDataDescCreate(&eventData[2], m_etwRcwData, sizeof(EventRCWEntry) * m_currRcw); - - ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRCW, ARRAY_SIZE(eventData), eventData); -#else - ULONG result = FireEtXplatGCBulkRCW(m_currRcw, instance, sizeof(EventRCWEntry) * m_currRcw, m_etwRcwData); -#endif // !defined(HOST_UNIX) - result |= EventPipeWriteEventGCBulkRCW(m_currRcw, instance, sizeof(EventRCWEntry) * m_currRcw, m_etwRcwData); - - _ASSERTE(result == ERROR_SUCCESS); - - m_currRcw = 0; -} - -void BulkComLogger::WriteCcw(ComCallWrapper *pCcw, Object **handle, Object *obj) -{ - CONTRACTL - { - THROWS; - GC_TRIGGERS; - MODE_ANY; - PRECONDITION(handle != NULL); - PRECONDITION(obj != NULL); - } - CONTRACTL_END; - - _ASSERTE(m_currCcw < kMaxCcwCount); - -#ifdef FEATURE_COMINTEROP - IUnknown *iUnk = NULL; - int refCount = 0; - ULONG flags = 0; - - if (pCcw) - { - iUnk = pCcw->GetOuter(); - if (iUnk == NULL) - iUnk = pCcw->GetBasicIP(true); - - refCount = pCcw->GetRefCount(); - - if (pCcw->IsWrapperActive()) - flags |= EventCCWEntry::Strong; - } - - TypeHandle typeHandle = obj->GetGCSafeTypeHandleIfPossible(); - if (typeHandle == NULL) - { - return; - } - - EventCCWEntry &ccw = m_etwCcwData[m_currCcw++]; - ccw.RootID = (ULONGLONG)handle; - ccw.ObjectID = (ULONGLONG)obj; - ccw.TypeID = (ULONGLONG)typeHandle.AsTAddr(); - ccw.IUnk = (ULONGLONG)iUnk; - ccw.RefCount = refCount; - ccw.JupiterRefCount = 0; - ccw.Flags = flags; - - if (m_currCcw >= kMaxCcwCount) - FlushCcw(); -#endif -} - -void BulkComLogger::FlushCcw() -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - } - CONTRACTL_END; - - _ASSERTE(m_currCcw <= kMaxCcwCount); - - if (m_currCcw == 0) - return; - - if (m_typeLogger) - { - for (int i = 0; i < m_currCcw; ++i) - ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(m_typeLogger, m_etwCcwData[i].TypeID, ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime); - } - - unsigned short instance = GetClrInstanceId(); - -#if !defined(HOST_UNIX) - EVENT_DATA_DESCRIPTOR eventData[3]; - EventDataDescCreate(&eventData[0], &m_currCcw, sizeof(const unsigned int)); - EventDataDescCreate(&eventData[1], &instance, sizeof(const unsigned short)); - EventDataDescCreate(&eventData[2], m_etwCcwData, sizeof(EventCCWEntry) * m_currCcw); - - ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootCCW, ARRAY_SIZE(eventData), eventData); -#else - ULONG result = FireEtXplatGCBulkRootCCW(m_currCcw, instance, sizeof(EventCCWEntry) * m_currCcw, m_etwCcwData); -#endif //!defined(HOST_UNIX) - result |= EventPipeWriteEventGCBulkRootCCW(m_currCcw, instance, sizeof(EventCCWEntry) * m_currCcw, m_etwCcwData); - - _ASSERTE(result == ERROR_SUCCESS); - - m_currCcw = 0; -} - -void BulkComLogger::LogAllComObjects() -{ - CONTRACTL - { - THROWS; - GC_TRIGGERS; - MODE_ANY; - } - CONTRACTL_END; - -#ifdef FEATURE_COMINTEROP - SyncBlockCache *cache = SyncBlockCache::GetSyncBlockCache(); - if (cache == NULL) - return; - - int count = cache->GetTableEntryCount(); - SyncTableEntry *table = SyncTableEntry::GetSyncTableEntry(); - - for (int i = 0; i < count; ++i) - { - SyncTableEntry &entry = table[i]; - Object *obj = entry.m_Object.Load(); - if 
(obj && entry.m_SyncBlock) - { - InteropSyncBlockInfo *interop = entry.m_SyncBlock->GetInteropInfoNoCreate(); - if (interop) - { - RCW *rcw = interop->GetRawRCW(); - if (rcw) - WriteRcw(rcw, obj); - } - } - } - - // We need to do work in HandleWalkCallback which may trigger a GC. We cannot do this while - // enumerating the handle table. Instead, we will build a list of RefCount handles we found - // during the handle table enumeration first (m_enumResult) during this enumeration: - GCHandleUtilities::GetGCHandleManager()->TraceRefCountedHandles(BulkComLogger::HandleWalkCallback, uintptr_t(this), 0); - - // Now that we have all of the object handles, we will walk all of the handles and write the - // etw events. - for (CCWEnumerationEntry *curr = m_enumResult; curr; curr = curr->Next) - { - for (int i = 0; i < curr->Count; ++i) - { - Object **handle = curr->Handles[i]; - - Object *obj = NULL; - if (handle == NULL || (obj = *handle) == 0) - return; - - ObjHeader *header = obj->GetHeader(); - _ASSERTE(header != NULL); - - // We can catch the refcount handle too early where we don't have a CCW, WriteCCW - // handles this case. We still report the refcount handle without the CCW data. - ComCallWrapper *ccw = NULL; - - // Checking the index ensures that the syncblock is already created. The - // PassiveGetSyncBlock function does not check bounds, so we have to be sure - // the SyncBlock was already created. - int index = header->GetHeaderSyncBlockIndex(); - if (index > 0) - { - SyncBlock *syncBlk = header->PassiveGetSyncBlock(); - InteropSyncBlockInfo *interop = syncBlk->GetInteropInfoNoCreate(); - if (interop) - ccw = interop->GetCCW(); - } - - WriteCcw(ccw, handle, obj); - } - } - -#endif - -} - -void BulkComLogger::HandleWalkCallback(Object **handle, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2) -{ - CONTRACTL - { - THROWS; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(param1 != NULL); // Should be the "this" pointer for BulkComLogger. - PRECONDITION(param2 == 0); // This is set by Ref_TraceRefCountHandles. - } - CONTRACTL_END; - - // Simple sanity check to ensure the parameters are what we expect them to be. - _ASSERTE(param2 == 0); - - if (handle != NULL) - ((BulkComLogger*)param1)->AddCcwHandle(handle); -} - - - -// Used during CCW enumeration to keep track of all object handles which point to a CCW. 
-void BulkComLogger::AddCcwHandle(Object **handle) -{ - CONTRACTL - { - THROWS; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(handle != NULL); - } - CONTRACTL_END; - - if (m_enumResult == NULL) - m_enumResult = new CCWEnumerationEntry; - - CCWEnumerationEntry *curr = m_enumResult; - while (curr->Next) - curr = curr->Next; - - if (curr->Count == ARRAY_SIZE(curr->Handles)) - { - curr->Next = new CCWEnumerationEntry; - curr = curr->Next; - } - - curr->Handles[curr->Count++] = handle; -} - - - - -//--------------------------------------------------------------------------------------- -// BulkStaticsLogger: Batches up and logs static variable roots -//--------------------------------------------------------------------------------------- - - - -#include "domainassembly.h" - -BulkStaticsLogger::BulkStaticsLogger(BulkTypeEventLogger *typeLogger) - : m_buffer(0), m_used(0), m_count(0), m_domain(0), m_typeLogger(typeLogger) -{ - CONTRACTL - { - THROWS; - GC_NOTRIGGER; - MODE_ANY; - } - CONTRACTL_END; - - m_buffer = new BYTE[kMaxBytesValues]; -} - -BulkStaticsLogger::~BulkStaticsLogger() -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - } - CONTRACTL_END; - - if (m_used > 0) - FireBulkStaticsEvent(); - - if (m_buffer) - delete[] m_buffer; -} - -void BulkStaticsLogger::FireBulkStaticsEvent() -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - } - CONTRACTL_END; - - if (m_used <= 0 || m_count <= 0) - return; - - _ASSERTE(m_domain != NULL); - - unsigned short instance = GetClrInstanceId(); - unsigned __int64 appDomain = (unsigned __int64)m_domain; - -#if !defined(HOST_UNIX) - EVENT_DATA_DESCRIPTOR eventData[4]; - EventDataDescCreate(&eventData[0], &m_count, sizeof(const unsigned int) ); - EventDataDescCreate(&eventData[1], &appDomain, sizeof(unsigned __int64) ); - EventDataDescCreate(&eventData[2], &instance, sizeof(const unsigned short) ); - EventDataDescCreate(&eventData[3], m_buffer, m_used); - - ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootStaticVar, ARRAY_SIZE(eventData), eventData); -#else - ULONG result = FireEtXplatGCBulkRootStaticVar(m_count, appDomain, instance, m_used, m_buffer); -#endif //!defined(HOST_UNIX) - result |= EventPipeWriteEventGCBulkRootStaticVar(m_count, appDomain, instance, m_used, m_buffer); - - _ASSERTE(result == ERROR_SUCCESS); - - m_used = 0; - m_count = 0; -} - -void BulkStaticsLogger::WriteEntry(AppDomain *domain, Object **address, Object *obj, FieldDesc *fieldDesc) -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(domain != NULL); - PRECONDITION(address != NULL); - PRECONDITION(obj != NULL); - PRECONDITION(fieldDesc != NULL); - } - CONTRACTL_END; - - // Each bulk statics event is for one AppDomain. If we are now inspecting a new domain, - // we need to flush the built up events now. - if (m_domain != domain) - { - if (m_domain != NULL) - FireBulkStaticsEvent(); - - m_domain = domain; - } - - TypeHandle typeHandle = obj->GetGCSafeTypeHandleIfPossible(); - if (typeHandle == NULL) - { - return; - } - ULONGLONG th = (ULONGLONG)typeHandle.AsTAddr(); - ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(m_typeLogger, th, ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime); - - // We should have at least 512 characters remaining in the buffer here. 
- int remaining = kMaxBytesValues - m_used; - _ASSERTE(kMaxBytesValues - m_used > 512); - - int len = EventStaticEntry::WriteEntry(m_buffer + m_used, remaining, (ULONGLONG)address, - (ULONGLONG)obj, th, 0, fieldDesc); - - // 512 bytes was not enough buffer? This shouldn't happen, so we'll skip emitting the - // event on error. - if (len > 0) - { - m_used += len; - m_count++; - } - - // When we are close to running out of buffer, emit the event. - if (kMaxBytesValues - m_used < 512) - FireBulkStaticsEvent(); -} - -void BulkStaticsLogger::LogAllStatics() -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - } - CONTRACTL_END; - - { - AppDomain *domain = ::GetAppDomain(); // There is only 1 AppDomain, so no iterator here. - - AppDomain::AssemblyIterator assemblyIter = domain->IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded|kIncludeExecution)); - CollectibleAssemblyHolder pDomainAssembly; - while (assemblyIter.Next(pDomainAssembly.This())) - { - // Make sure the assembly is loaded. - if (!pDomainAssembly->IsLoaded()) - continue; - - CollectibleAssemblyHolder pAssembly = pDomainAssembly->GetAssembly(); - // Get the domain module from the module/appdomain pair. - Module *module = pDomainAssembly->GetModule(); - if (module == NULL) - continue; - - DomainAssembly *domainAssembly = module->GetDomainAssembly(); - if (domainAssembly == NULL) - continue; - - // Ensure the module has fully loaded. - if (!domainAssembly->IsActive()) - continue; - - DomainLocalModule *domainModule = module->GetDomainLocalModule(); - if (domainModule == NULL) - continue; - - // Now iterate all types with - LookupMap::Iterator mtIter = module->EnumerateTypeDefs(); - while (mtIter.Next()) - { - // I don't think mt can be null here, but the dac does a null check... - // IsFullyLoaded should be equivalent to 'GetLoadLevel() == CLASS_LOADED' - MethodTable *mt = mtIter.GetElement(); - if (mt == NULL || !mt->IsFullyLoaded()) - continue; - - EEClass *cls = mt->GetClass(); - _ASSERTE(cls != NULL); - - if (cls->GetNumStaticFields() <= 0) - continue; - - ApproxFieldDescIterator fieldIter(mt, ApproxFieldDescIterator::STATIC_FIELDS); - for (FieldDesc *field = fieldIter.Next(); field != NULL; field = fieldIter.Next()) - { - // Don't want thread local - _ASSERTE(field->IsStatic()); - if (field->IsSpecialStatic() || field->IsEnCNew()) - continue; - - // Static valuetype values are boxed. 
- CorElementType fieldType = field->GetFieldType(); - if (fieldType != ELEMENT_TYPE_CLASS && fieldType != ELEMENT_TYPE_VALUETYPE) - continue; - - BYTE *base = field->GetBaseInDomainLocalModule(domainModule); - if (base == NULL) - continue; - - Object **address = (Object**)field->GetStaticAddressHandle(base); - Object *obj = NULL; - if (address == NULL || ((obj = *address) == NULL)) - continue; - - WriteEntry(domain, address, *address, field); - } // foreach static field - } - } // foreach domain assembly - } // foreach AppDomain -} // BulkStaticsLogger::LogAllStatics - - - -//--------------------------------------------------------------------------------------- -// BulkTypeValue / BulkTypeEventLogger: These take care of batching up types so they can -// be logged via ETW in bulk -//--------------------------------------------------------------------------------------- - -BulkTypeValue::BulkTypeValue() : cTypeParameters(0) -#ifdef FEATURE_NATIVEAOT -, ullSingleTypeParameter(0) -#else // FEATURE_NATIVEAOT -, sName() -#endif // FEATURE_NATIVEAOT -, rgTypeParameters() -{ - LIMITED_METHOD_CONTRACT; - ZeroMemory(&fixedSizedData, sizeof(fixedSizedData)); -} - -//--------------------------------------------------------------------------------------- -// -// Clears a BulkTypeValue so it can be reused after the buffer is flushed to ETW -// - -void BulkTypeValue::Clear() -{ - CONTRACTL - { - THROWS; - GC_NOTRIGGER; - MODE_ANY; - } - CONTRACTL_END; - - ZeroMemory(&fixedSizedData, sizeof(fixedSizedData)); - cTypeParameters = 0; -#ifdef FEATURE_NATIVEAOT - ullSingleTypeParameter = 0; - rgTypeParameters.Release(); -#else // FEATURE_NATIVEAOT - sName.Clear(); - rgTypeParameters.Clear(); -#endif // FEATURE_NATIVEAOT -} - -//--------------------------------------------------------------------------------------- -// -// Fire an ETW event for all the types we batched so far, and then reset our state -// so we can start batching new types at the beginning of the array. -// -// - -void BulkTypeEventLogger::FireBulkTypeEvent() -{ - LIMITED_METHOD_CONTRACT; - - if (m_nBulkTypeValueCount == 0) - { - // No types were batched up, so nothing to send - return; - } - UINT16 nClrInstanceID = GetClrInstanceId(); - - if(m_pBulkTypeEventBuffer == NULL) - { - // The buffer could not be allocated when this object was created, so bail. 
- return; - } - - UINT iSize = 0; - - for (int iTypeData = 0; iTypeData < m_nBulkTypeValueCount; iTypeData++) - { - BulkTypeValue& target = m_rgBulkTypeValues[iTypeData]; - - // Do fixed-size data as one bulk copy - memcpy( - m_pBulkTypeEventBuffer + iSize, - &(target.fixedSizedData), - sizeof(target.fixedSizedData)); - iSize += sizeof(target.fixedSizedData); - - // Do var-sized data individually per field - - LPCWSTR wszName = target.sName.GetUnicode(); - if (wszName == NULL) - { - m_pBulkTypeEventBuffer[iSize++] = 0; - m_pBulkTypeEventBuffer[iSize++] = 0; - } - else - { - UINT nameSize = (target.sName.GetCount() + 1) * sizeof(WCHAR); - memcpy(m_pBulkTypeEventBuffer + iSize, wszName, nameSize); - iSize += nameSize; - } - - // Type parameter count - ULONG params = target.rgTypeParameters.GetCount(); - - ULONG *ptrInt = (ULONG*)(m_pBulkTypeEventBuffer + iSize); - *ptrInt = params; - iSize += 4; - - target.cTypeParameters = params; - - // Type parameter array - if (target.cTypeParameters > 0) - { - memcpy(m_pBulkTypeEventBuffer + iSize, target.rgTypeParameters.GetElements(), sizeof(ULONGLONG) * target.cTypeParameters); - iSize += sizeof(ULONGLONG) * target.cTypeParameters; - } - } - - FireEtwBulkType(m_nBulkTypeValueCount, GetClrInstanceId(), iSize, m_pBulkTypeEventBuffer); - - // Reset state - m_nBulkTypeValueCount = 0; - m_nBulkTypeValueByteCount = 0; -} - -#ifndef FEATURE_NATIVEAOT - -//--------------------------------------------------------------------------------------- -// -// Batches a single type into the array, flushing the array to ETW if it fills up. Most -// interaction with the type system (to analyze the type) is done here. This does not -// recursively batch up any parameter types (for arrays or generics), but does add their -// TypeHandles to the rgTypeParameters array. LogTypeAndParameters is responsible for -// initiating any recursive calls to deal with type parameters. -// -// Arguments: -// th - TypeHandle to batch -// -// Return Value: -// Index into array of where this type got batched. -1 if there was a failure. -// - -int BulkTypeEventLogger::LogSingleType(TypeHandle th) -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - CAN_TAKE_LOCK; // some of the type system stuff can take locks - } - CONTRACTL_END; - - // If there's no room for another type, flush what we've got - if (m_nBulkTypeValueCount == ARRAY_SIZE(m_rgBulkTypeValues)) - { - FireBulkTypeEvent(); - } - - _ASSERTE(m_nBulkTypeValueCount < (int)ARRAY_SIZE(m_rgBulkTypeValues)); - - BulkTypeValue * pVal = &m_rgBulkTypeValues[m_nBulkTypeValueCount]; - - // Clear out pVal before filling it out (array elements can get reused if there - // are enough types that we need to flush to multiple events). Clearing the - // contained SBuffer can throw, so deal with exceptions - BOOL fSucceeded = FALSE; - EX_TRY - { - pVal->Clear(); - fSucceeded = TRUE; - } - EX_CATCH - { - fSucceeded = FALSE; - } - EX_END_CATCH(RethrowTerminalExceptions); - if (!fSucceeded) - return -1; - - pVal->fixedSizedData.TypeID = (ULONGLONG) th.AsTAddr(); - pVal->fixedSizedData.ModuleID = (ULONGLONG) (TADDR) th.GetModule(); - pVal->fixedSizedData.TypeNameID = (th.GetMethodTable() == NULL) ? 
0 : th.GetCl(); - pVal->fixedSizedData.Flags = 0; - pVal->fixedSizedData.CorElementType = (BYTE) th.GetInternalCorElementType(); - - if (th.IsArray()) - { - // Normal typedesc array - pVal->fixedSizedData.Flags |= kEtwTypeFlagsArray; - if (pVal->fixedSizedData.CorElementType == ELEMENT_TYPE_ARRAY) - { - // Multidimensional arrays set the rank bits, SzArrays do not set the rank bits - unsigned rank = th.GetRank(); - if (rank < kEtwTypeFlagsArrayRankMax) - { - // Only ranks less than kEtwTypeFlagsArrayRankMax are supported. - // Fortunately kEtwTypeFlagsArrayRankMax should be greater than the - // number of ranks the type loader will support - rank <<= kEtwTypeFlagsArrayRankShift; - _ASSERTE((rank & kEtwTypeFlagsArrayRankMask) == rank); - pVal->fixedSizedData.Flags |= rank; - } - } - // Fetch TypeHandle of array elements - fSucceeded = FALSE; - EX_TRY - { - pVal->rgTypeParameters.Append((ULONGLONG) th.GetArrayElementTypeHandle().AsTAddr()); - fSucceeded = TRUE; - } - EX_CATCH - { - fSucceeded = FALSE; - } - EX_END_CATCH(RethrowTerminalExceptions); - if (!fSucceeded) - return -1; - } - else if (th.IsTypeDesc()) - { - // Non-array Typedescs - PTR_TypeDesc pTypeDesc = th.AsTypeDesc(); - if (pTypeDesc->HasTypeParam()) - { - fSucceeded = FALSE; - EX_TRY - { - pVal->rgTypeParameters.Append((ULONGLONG) pTypeDesc->GetTypeParam().AsTAddr()); - fSucceeded = TRUE; - } - EX_CATCH - { - fSucceeded = FALSE; - } - EX_END_CATCH(RethrowTerminalExceptions); - if (!fSucceeded) - return -1; - } - } - else - { - // Non-array MethodTable - - PTR_MethodTable pMT = th.AsMethodTable(); - - // Make CorElementType more specific if this is a string MT - if (pMT->IsString()) - { - pVal->fixedSizedData.CorElementType = ELEMENT_TYPE_STRING; - } - else if (pMT->IsObjectClass()) - { - pVal->fixedSizedData.CorElementType = ELEMENT_TYPE_OBJECT; - } - - // Generic arguments - DWORD cTypeParameters = pMT->GetNumGenericArgs(); - if (cTypeParameters > 0) - { - Instantiation inst = pMT->GetInstantiation(); - fSucceeded = FALSE; - EX_TRY - { - for (DWORD i=0; i < cTypeParameters; i++) - { - pVal->rgTypeParameters.Append((ULONGLONG) inst[i].AsTAddr()); - } - fSucceeded = TRUE; - } - EX_CATCH - { - fSucceeded = FALSE; - } - EX_END_CATCH(RethrowTerminalExceptions); - if (!fSucceeded) - return -1; - } - - if (pMT->HasFinalizer()) - { - pVal->fixedSizedData.Flags |= kEtwTypeFlagsFinalizable; - } - if (pMT->IsDelegate()) - { - pVal->fixedSizedData.Flags |= kEtwTypeFlagsDelegate; - } - if (pMT->IsComObjectType()) - { - pVal->fixedSizedData.Flags |= kEtwTypeFlagsExternallyImplementedCOMObject; - } - } - - // If the profiler wants it, construct a name. Always normalize the string (even if - // type names are not requested) so that calls to sName.GetCount() can't throw - EX_TRY - { - if (ETW_TRACING_CATEGORY_ENABLED( - MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, - TRACE_LEVEL_INFORMATION, - CLR_GCHEAPANDTYPENAMES_KEYWORD)) - { - th.GetName(pVal->sName); - } - pVal->sName.Normalize(); - } - EX_CATCH - { - // If this failed, the name remains empty, which is ok; the event just - // won't have a name in it. 
- pVal->sName.Clear(); - } - EX_END_CATCH(RethrowTerminalExceptions); - - // Now that we know the full size of this type's data, see if it fits in our - // batch or whether we need to flush - - int cbVal = pVal->GetByteCountInEvent(); - if (cbVal > kMaxBytesTypeValues) - { - pVal->sName.Clear(); - cbVal = pVal->GetByteCountInEvent(); - - if (cbVal > kMaxBytesTypeValues) - { - // This type is apparently so huge, it's too big to squeeze into an event, even - // if it were the only type batched in the whole event. Bail - _ASSERTE(!"Type too big to log via ETW"); - return -1; - } - } - - if (m_nBulkTypeValueByteCount + cbVal > kMaxBytesTypeValues) - { - // Although this type fits into the array, its size is so big that the entire - // array can't be logged via ETW. So flush the array, and start over by - // calling ourselves--this refetches the type info and puts it at the - // beginning of the array. Since we know this type is small enough to be - // batched into an event on its own, this recursive call will not try to - // call itself again. - FireBulkTypeEvent(); - return LogSingleType(th); - } - - // The type fits into the batch, so update our state - m_nBulkTypeValueCount++; - m_nBulkTypeValueByteCount += cbVal; - return m_nBulkTypeValueCount - 1; // Index of type we just added -} - -//--------------------------------------------------------------------------------------- -// -// High-level method to batch a type and (recursively) its type parameters, flushing to -// ETW as needed. This is called by (static) -// ETW::TypeSystemLog::LogTypeAndParametersIfNecessary, which is what clients use to log -// type events -// -// Arguments: -// * thAsAddr - Type to batch -// * typeLogBehavior - Reminder of whether the type system log lock is held -// (useful if we need to recursively call back into TypeSystemLog), and whether -// we even care to check if the type was already logged -// - -void BulkTypeEventLogger::LogTypeAndParameters(ULONGLONG thAsAddr, ETW::TypeSystemLog::TypeLogBehavior typeLogBehavior) -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - CAN_TAKE_LOCK; // LogSingleType can take locks - } - CONTRACTL_END; - - TypeHandle th = TypeHandle::FromTAddr((TADDR) thAsAddr); - - // Batch up this type. 
This grabs useful info about the type, including any - // type parameters it may have, and sticks it in m_rgBulkTypeValues - int iBulkTypeEventData = LogSingleType(th); - if (iBulkTypeEventData == -1) - { - // There was a failure trying to log the type, so don't bother with its type - // parameters - return; - } - - // Look at the type info we just batched, so we can get the type parameters - BulkTypeValue * pVal = &m_rgBulkTypeValues[iBulkTypeEventData]; - - // We're about to recursively call ourselves for the type parameters, so make a - // local copy of their type handles first (else, as we log them we could flush - // and clear out m_rgBulkTypeValues, thus trashing pVal) - - StackSArray rgTypeParameters; - DWORD cParams = pVal->rgTypeParameters.GetCount(); - - BOOL fSucceeded = FALSE; - EX_TRY - { - for (COUNT_T i = 0; i < cParams; i++) - { - rgTypeParameters.Append(pVal->rgTypeParameters[i]); - } - fSucceeded = TRUE; - } - EX_CATCH - { - fSucceeded = FALSE; - } - EX_END_CATCH(RethrowTerminalExceptions); - if (!fSucceeded) - return; - - // Before we recurse, adjust the special-cased type-log behavior that allows a - // top-level type to be logged without lookup, but still requires lookups to avoid - // dupes of type parameters - if (typeLogBehavior == ETW::TypeSystemLog::kTypeLogBehaviorAlwaysLogTopLevelType) - typeLogBehavior = ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime; - - // Recursively log any referenced parameter types - for (COUNT_T i=0; i < cParams; i++) - { - ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(this, rgTypeParameters[i], typeLogBehavior); - } -} - -#endif // FEATURE_NATIVEAOT - -// Holds state that batches of roots, nodes, edges, and types as the GC walks the heap -// at the end of a collection. -class EtwGcHeapDumpContext -{ -public: - // An instance of EtwGcHeapDumpContext is dynamically allocated and stored inside of - // ProfilingScanContext and ProfilerWalkHeapContext, which are context structures - // that the GC heap walker sends back to the callbacks. This method is passed a - // pointer to ProfilingScanContext::pvEtwContext or - // ProfilerWalkHeapContext::pvEtwContext; if non-NULL it gets returned; else, a new - // EtwGcHeapDumpContext is allocated, stored in that pointer, and then returned. - // Callers should test for NULL, which can be returned if out of memory - static EtwGcHeapDumpContext * GetOrCreateInGCContext(LPVOID * ppvEtwContext) - { - LIMITED_METHOD_CONTRACT; - - _ASSERTE(ppvEtwContext != NULL); - - EtwGcHeapDumpContext * pContext = (EtwGcHeapDumpContext *) *ppvEtwContext; - if (pContext == NULL) - { - pContext = new (nothrow) EtwGcHeapDumpContext; - *ppvEtwContext = pContext; - } - return pContext; - } - - EtwGcHeapDumpContext() : - iCurBulkRootEdge(0), - iCurBulkRootConditionalWeakTableElementEdge(0), - iCurBulkNodeEvent(0), - iCurBulkEdgeEvent(0), - bulkTypeEventLogger() - { - LIMITED_METHOD_CONTRACT; - ClearRootEdges(); - ClearRootConditionalWeakTableElementEdges(); - ClearNodes(); - ClearEdges(); - } - - // These helpers clear the individual buffers, for use after a flush and on - // construction. 
They intentionally leave the indices (iCur*) alone, since they - // persist across flushes within a GC - - void ClearRootEdges() - { - LIMITED_METHOD_CONTRACT; - cGcBulkRootEdges = 0; - ZeroMemory(rgGcBulkRootEdges, sizeof(rgGcBulkRootEdges)); - } - - void ClearRootConditionalWeakTableElementEdges() - { - LIMITED_METHOD_CONTRACT; - cGCBulkRootConditionalWeakTableElementEdges = 0; - ZeroMemory(rgGCBulkRootConditionalWeakTableElementEdges, sizeof(rgGCBulkRootConditionalWeakTableElementEdges)); - } - - void ClearNodes() - { - LIMITED_METHOD_CONTRACT; - cGcBulkNodeValues = 0; - ZeroMemory(rgGcBulkNodeValues, sizeof(rgGcBulkNodeValues)); - } - - void ClearEdges() - { - LIMITED_METHOD_CONTRACT; - cGcBulkEdgeValues = 0; - ZeroMemory(rgGcBulkEdgeValues, sizeof(rgGcBulkEdgeValues)); - } - - //--------------------------------------------------------------------------------------- - // GCBulkRootEdge - // - // A "root edge" is the relationship between a source "GCRootID" (i.e., stack - // variable, handle, static, etc.) and the target "RootedNodeAddress" (the managed - // object that gets rooted). - // - //--------------------------------------------------------------------------------------- - - // Sequence number for each GCBulkRootEdge event - UINT iCurBulkRootEdge; - - // Number of root edges currently filled out in rgGcBulkRootEdges array - UINT cGcBulkRootEdges; - - // Struct array containing the primary data for each GCBulkRootEdge event. Fix the size so - // the total event stays well below the 64K - // limit (leaving lots of room for non-struct fields that come before the root edge data) - EventStructGCBulkRootEdgeValue rgGcBulkRootEdges[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkRootEdgeValue)]; - - - //--------------------------------------------------------------------------------------- - // GCBulkRootConditionalWeakTableElementEdge - // - // These describe dependent handles, which simulate an edge connecting a key NodeID - // to a value NodeID. - // - //--------------------------------------------------------------------------------------- - - // Sequence number for each GCBulkRootConditionalWeakTableElementEdge event - UINT iCurBulkRootConditionalWeakTableElementEdge; - - // Number of root edges currently filled out in rgGCBulkRootConditionalWeakTableElementEdges array - UINT cGCBulkRootConditionalWeakTableElementEdges; - - // Struct array containing the primary data for each GCBulkRootConditionalWeakTableElementEdge event. Fix the size so - // the total event stays well below the 64K - // limit (leaving lots of room for non-struct fields that come before the root edge data) - EventStructGCBulkRootConditionalWeakTableElementEdgeValue rgGCBulkRootConditionalWeakTableElementEdges - [(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkRootConditionalWeakTableElementEdgeValue)]; - - //--------------------------------------------------------------------------------------- - // GCBulkNode - // - // A "node" is ANY managed object sitting on the heap, including RootedNodeAddresses - // as well as leaf nodes. - // - //--------------------------------------------------------------------------------------- - - // Sequence number for each GCBulkNode event - UINT iCurBulkNodeEvent; - - // Number of nodes currently filled out in rgGcBulkNodeValues array - UINT cGcBulkNodeValues; - - // Struct array containing the primary data for each GCBulkNode event. 
Fix the size so - // the total event stays well below the 64K - // limit (leaving lots of room for non-struct fields that come before the node data) - EventStructGCBulkNodeValue rgGcBulkNodeValues[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkNodeValue)]; - - //--------------------------------------------------------------------------------------- - // GCBulkEdge - // - // An "edge" is the relationship between a source node and its referenced target - // node. Edges are reported in bulk, separately from Nodes, but it is expected that - // the consumer read the Node and Edge streams together. One takes the first node - // from the Node stream, and then reads EdgeCount entries in the Edge stream, telling - // you all of that Node's targets. Then, one takes the next node in the Node stream, - // and reads the next entries in the Edge stream (using this Node's EdgeCount to - // determine how many) to find all of its targets. This continues on until the Node - // and Edge streams have been fully read. - // - // GCBulkRootEdges are not duplicated in the GCBulkEdge events. GCBulkEdge events - // begin at the GCBulkRootEdge.RootedNodeAddress and move forward. - // - //--------------------------------------------------------------------------------------- - - // Sequence number for each GCBulkEdge event - UINT iCurBulkEdgeEvent; - - // Number of nodes currently filled out in rgGcBulkEdgeValues array - UINT cGcBulkEdgeValues; - - // Struct array containing the primary data for each GCBulkEdge event. Fix the size so - // the total event stays well below the 64K - // limit (leaving lots of room for non-struct fields that come before the edge data) - EventStructGCBulkEdgeValue rgGcBulkEdgeValues[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkEdgeValue)]; - - - //--------------------------------------------------------------------------------------- - // BulkType - // - // Types are a bit more complicated to batch up, since their data is of varying - // size. BulkTypeEventLogger takes care of the pesky details for us - //--------------------------------------------------------------------------------------- - - BulkTypeEventLogger bulkTypeEventLogger; -}; - - - -//--------------------------------------------------------------------------------------- -// -// Called during a heap walk for each root reference encountered. Batches up the root in -// the ETW context -// -// Arguments: -// * pvHandle - If the root is a handle, this points to the handle -// * pRootedNode - Points to object that is rooted -// * pSecondaryNodeForDependentHandle - For dependent handles, this is the -// secondary object -// * fDependentHandle - nonzero iff this is for a dependent handle -// * profilingScanContext - The shared profapi/etw context built up during the heap walk. 
-// * dwGCFlags - Bitmask of "GC_"-style flags set by GC -// * rootFlags - Bitmask of EtwGCRootFlags describing the root -// - -// static -VOID ETW::GCLog::RootReference( - LPVOID pvHandle, - Object * pRootedNode, - Object * pSecondaryNodeForDependentHandle, - BOOL fDependentHandle, - ProfilingScanContext * profilingScanContext, - DWORD dwGCFlags, - DWORD rootFlags) -{ - LIMITED_METHOD_CONTRACT; - - EtwGcHeapDumpContext * pContext = - EtwGcHeapDumpContext::GetOrCreateInGCContext(&profilingScanContext->pvEtwContext); - if (pContext == NULL) - return; - - // Determine root kind, root ID, and handle-specific flags - LPVOID pvRootID = NULL; - BYTE nRootKind = (BYTE) profilingScanContext->dwEtwRootKind; - switch (nRootKind) - { - case kEtwGCRootKindStack: -#if !defined (FEATURE_NATIVEAOT) && (defined(GC_PROFILING) || defined (DACCESS_COMPILE)) - pvRootID = profilingScanContext->pMD; -#endif // !defined (FEATURE_NATIVEAOT) && (defined(GC_PROFILING) || defined (DACCESS_COMPILE)) - break; - - case kEtwGCRootKindHandle: - pvRootID = pvHandle; - break; - - case kEtwGCRootKindFinalizer: - _ASSERTE(pvRootID == NULL); - break; - - case kEtwGCRootKindOther: - default: - _ASSERTE(nRootKind == kEtwGCRootKindOther); - _ASSERTE(pvRootID == NULL); - break; - } - - // Convert GC root flags to ETW root flags - if (dwGCFlags & GC_CALL_INTERIOR) - rootFlags |= kEtwGCRootFlagsInterior; - if (dwGCFlags & GC_CALL_PINNED) - rootFlags |= kEtwGCRootFlagsPinning; - - // Add root edge to appropriate buffer - if (fDependentHandle) - { - _ASSERTE(pContext->cGCBulkRootConditionalWeakTableElementEdges < - ARRAY_SIZE(pContext->rgGCBulkRootConditionalWeakTableElementEdges)); - EventStructGCBulkRootConditionalWeakTableElementEdgeValue * pRCWTEEdgeValue = - &pContext->rgGCBulkRootConditionalWeakTableElementEdges[pContext->cGCBulkRootConditionalWeakTableElementEdges]; - pRCWTEEdgeValue->GCKeyNodeID = pRootedNode; - pRCWTEEdgeValue->GCValueNodeID = pSecondaryNodeForDependentHandle; - pRCWTEEdgeValue->GCRootID = pvRootID; - pContext->cGCBulkRootConditionalWeakTableElementEdges++; - - // If RCWTE edge buffer is now full, empty it into ETW - if (pContext->cGCBulkRootConditionalWeakTableElementEdges == - ARRAY_SIZE(pContext->rgGCBulkRootConditionalWeakTableElementEdges)) - { - FireEtwGCBulkRootConditionalWeakTableElementEdge( - pContext->iCurBulkRootConditionalWeakTableElementEdge, - pContext->cGCBulkRootConditionalWeakTableElementEdges, - GetClrInstanceId(), - sizeof(pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]), - &pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]); - - pContext->iCurBulkRootConditionalWeakTableElementEdge++; - pContext->ClearRootConditionalWeakTableElementEdges(); - } - } - else - { - _ASSERTE(pContext->cGcBulkRootEdges < ARRAY_SIZE(pContext->rgGcBulkRootEdges)); - EventStructGCBulkRootEdgeValue * pBulkRootEdgeValue = &pContext->rgGcBulkRootEdges[pContext->cGcBulkRootEdges]; - pBulkRootEdgeValue->RootedNodeAddress = pRootedNode; - pBulkRootEdgeValue->GCRootKind = nRootKind; - pBulkRootEdgeValue->GCRootFlag = rootFlags; - pBulkRootEdgeValue->GCRootID = pvRootID; - pContext->cGcBulkRootEdges++; - - // If root edge buffer is now full, empty it into ETW - if (pContext->cGcBulkRootEdges == ARRAY_SIZE(pContext->rgGcBulkRootEdges)) - { - FireEtwGCBulkRootEdge( - pContext->iCurBulkRootEdge, - pContext->cGcBulkRootEdges, - GetClrInstanceId(), - sizeof(pContext->rgGcBulkRootEdges[0]), - &pContext->rgGcBulkRootEdges[0]); - - pContext->iCurBulkRootEdge++; - pContext->ClearRootEdges(); - } - } -} - 
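// The GCBulkNode / GCBulkEdge comments above describe how a trace consumer is meant to
// pair the two streams: take a node, consume that node's EdgeCount entries from the edge
// stream, then move on to the next node. A minimal consumer-side sketch of that walk
// follows; the NodeValue/EdgeValue records are hypothetical stand-ins for the
// EventStructGCBulkNodeValue / EventStructGCBulkEdgeValue payloads and are not part of
// the runtime.

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical flattened payload records for a decoded trace.
struct NodeValue { uint64_t Address; uint64_t Size; uint64_t TypeID; uint64_t EdgeCount; };
struct EdgeValue { uint64_t Value; uint32_t ReferencingFieldID; };

// Reads the two streams in lockstep: each node owns the next EdgeCount entries of the
// edge stream, starting where the previous node's edges ended.
static void WalkNodeAndEdgeStreams(const std::vector<NodeValue>& nodes,
                                   const std::vector<EdgeValue>& edges)
{
    std::size_t iEdge = 0;
    for (const NodeValue& node : nodes)
    {
        for (uint64_t i = 0; i < node.EdgeCount && iEdge < edges.size(); ++i, ++iEdge)
        {
            // edges[iEdge].Value is the address of an object referenced by the
            // object at node.Address.
        }
    }
}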
-//--------------------------------------------------------------------------------------- -// -// Called during a heap walk for each object reference encountered. Batches up the -// corresponding node, edges, and type data for the ETW events. -// -// Arguments: -// * profilerWalkHeapContext - The shared profapi/etw context built up during the heap walk. -// * pObjReferenceSource - Object doing the pointing -// * typeID - Type of pObjReferenceSource -// * fDependentHandle - nonzero iff this is for a dependent handle -// * cRefs - Count of objects being pointed to -// * rgObjReferenceTargets - Array of objects being pointed to -// - -// static -VOID ETW::GCLog::ObjectReference( - ProfilerWalkHeapContext * profilerWalkHeapContext, - Object * pObjReferenceSource, - ULONGLONG typeID, - ULONGLONG cRefs, - Object ** rgObjReferenceTargets) -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - - // LogTypeAndParametersIfNecessary can take a lock - CAN_TAKE_LOCK; - } - CONTRACTL_END; - - EtwGcHeapDumpContext * pContext = - EtwGcHeapDumpContext::GetOrCreateInGCContext(&profilerWalkHeapContext->pvEtwContext); - if (pContext == NULL) - return; - - //--------------------------------------------------------------------------------------- - // GCBulkNode events - //--------------------------------------------------------------------------------------- - - // Add Node (pObjReferenceSource) to buffer - _ASSERTE(pContext->cGcBulkNodeValues < ARRAY_SIZE(pContext->rgGcBulkNodeValues)); - EventStructGCBulkNodeValue * pBulkNodeValue = &pContext->rgGcBulkNodeValues[pContext->cGcBulkNodeValues]; - pBulkNodeValue->Address = pObjReferenceSource; - pBulkNodeValue->Size = pObjReferenceSource->GetSize(); - pBulkNodeValue->TypeID = typeID; - pBulkNodeValue->EdgeCount = cRefs; - pContext->cGcBulkNodeValues++; - - // If Node buffer is now full, empty it into ETW - if (pContext->cGcBulkNodeValues == ARRAY_SIZE(pContext->rgGcBulkNodeValues)) - { - FireEtwGCBulkNode( - pContext->iCurBulkNodeEvent, - pContext->cGcBulkNodeValues, - GetClrInstanceId(), - sizeof(pContext->rgGcBulkNodeValues[0]), - &pContext->rgGcBulkNodeValues[0]); - - pContext->iCurBulkNodeEvent++; - pContext->ClearNodes(); - } - - //--------------------------------------------------------------------------------------- - // BulkType events - //--------------------------------------------------------------------------------------- - - // We send type information as necessary--only for nodes, and only for nodes that we - // haven't already sent type info for - if (typeID != 0) - { - ETW::TypeSystemLog::LogTypeAndParametersIfNecessary( - &pContext->bulkTypeEventLogger, // Batch up this type with others to minimize events - typeID, - - // During heap walk, GC holds the lock for us, so we can directly enter the - // hash to see if the type has already been logged - ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime - ); - } - - //--------------------------------------------------------------------------------------- - // GCBulkEdge events - //--------------------------------------------------------------------------------------- - - // Add Edges (rgObjReferenceTargets) to buffer. Buffer could fill up before all edges - // are added (it could even fill up multiple times during this one call if there are - // a lot of edges), so empty Edge buffer into ETW as we go along, as many times as we - // need. 
- - for (ULONGLONG i=0; i < cRefs; i++) - { - _ASSERTE(pContext->cGcBulkEdgeValues < ARRAY_SIZE(pContext->rgGcBulkEdgeValues)); - EventStructGCBulkEdgeValue * pBulkEdgeValue = &pContext->rgGcBulkEdgeValues[pContext->cGcBulkEdgeValues]; - pBulkEdgeValue->Value = rgObjReferenceTargets[i]; - // FUTURE: ReferencingFieldID - pBulkEdgeValue->ReferencingFieldID = 0; - pContext->cGcBulkEdgeValues++; - - // If Edge buffer is now full, empty it into ETW - if (pContext->cGcBulkEdgeValues == ARRAY_SIZE(pContext->rgGcBulkEdgeValues)) - { - FireEtwGCBulkEdge( - pContext->iCurBulkEdgeEvent, - pContext->cGcBulkEdgeValues, - GetClrInstanceId(), - sizeof(pContext->rgGcBulkEdgeValues[0]), - &pContext->rgGcBulkEdgeValues[0]); - - pContext->iCurBulkEdgeEvent++; - pContext->ClearEdges(); - } - } -} - -//--------------------------------------------------------------------------------------- -// -// Called by GC at end of heap dump to give us a convenient time to flush any remaining -// buffers of data to ETW -// -// Arguments: -// profilerWalkHeapContext - Context containing data we've batched up -// - -// static -VOID ETW::GCLog::EndHeapDump(ProfilerWalkHeapContext * profilerWalkHeapContext) -{ - LIMITED_METHOD_CONTRACT; - - // If context isn't already set up for us, then we haven't been collecting any data - // for ETW events. - EtwGcHeapDumpContext * pContext = (EtwGcHeapDumpContext *) profilerWalkHeapContext->pvEtwContext; - if (pContext == NULL) - return; - - // If the GC events are enabled, flush any remaining root, node, and / or edge data - if (s_forcedGCInProgress && - ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, - TRACE_LEVEL_INFORMATION, - CLR_GCHEAPDUMP_KEYWORD)) - { - if (pContext->cGcBulkRootEdges > 0) - { - FireEtwGCBulkRootEdge( - pContext->iCurBulkRootEdge, - pContext->cGcBulkRootEdges, - GetClrInstanceId(), - sizeof(pContext->rgGcBulkRootEdges[0]), - &pContext->rgGcBulkRootEdges[0]); - } - - if (pContext->cGCBulkRootConditionalWeakTableElementEdges > 0) - { - FireEtwGCBulkRootConditionalWeakTableElementEdge( - pContext->iCurBulkRootConditionalWeakTableElementEdge, - pContext->cGCBulkRootConditionalWeakTableElementEdges, - GetClrInstanceId(), - sizeof(pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]), - &pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]); - } - - if (pContext->cGcBulkNodeValues > 0) - { - FireEtwGCBulkNode( - pContext->iCurBulkNodeEvent, - pContext->cGcBulkNodeValues, - GetClrInstanceId(), - sizeof(pContext->rgGcBulkNodeValues[0]), - &pContext->rgGcBulkNodeValues[0]); - } - - if (pContext->cGcBulkEdgeValues > 0) - { - FireEtwGCBulkEdge( - pContext->iCurBulkEdgeEvent, - pContext->cGcBulkEdgeValues, - GetClrInstanceId(), - sizeof(pContext->rgGcBulkEdgeValues[0]), - &pContext->rgGcBulkEdgeValues[0]); - } - } - - // Ditto for type events - if (ETW_TRACING_CATEGORY_ENABLED( - MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, - TRACE_LEVEL_INFORMATION, - CLR_TYPE_KEYWORD)) - { - pContext->bulkTypeEventLogger.FireBulkTypeEvent(); - } - - // Delete any GC state built up in the context - profilerWalkHeapContext->pvEtwContext = NULL; - delete pContext; -} //--------------------------------------------------------------------------------------- diff --git a/src/coreclr/vm/eventtrace_bulktype.cpp b/src/coreclr/vm/eventtrace_bulktype.cpp new file mode 100644 index 00000000000000..e44850a80fd314 --- /dev/null +++ b/src/coreclr/vm/eventtrace_bulktype.cpp @@ -0,0 +1,1007 @@ +// Licensed to the .NET Foundation under one or more 
agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "common.h"
+
+#include "eventtrace.h"
+#include "winbase.h"
+#include "contract.h"
+#include "ex.h"
+#include "dbginterface.h"
+#include "finalizerthread.h"
+#include "clrversion.h"
+#include "typestring.h"
+
+#ifdef FEATURE_COMINTEROP
+#include "comcallablewrapper.h"
+#include "runtimecallablewrapper.h"
+#endif
+
+#include "eventtracepriv.h"
+
+//---------------------------------------------------------------------------------------
+// BulkComLogger: Batches up and logs RCW and CCW roots
+//---------------------------------------------------------------------------------------
+
+BulkComLogger::BulkComLogger(BulkTypeEventLogger *typeLogger)
+    : m_currRcw(0), m_currCcw(0), m_typeLogger(typeLogger), m_etwRcwData(0), m_etwCcwData(0), m_enumResult(0)
+{
+    CONTRACTL
+    {
+        THROWS;
+        GC_NOTRIGGER;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+    m_etwRcwData = new EventRCWEntry[kMaxRcwCount];
+    m_etwCcwData = new EventCCWEntry[kMaxCcwCount];
+}
+
+BulkComLogger::~BulkComLogger()
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+    FireBulkComEvent();
+
+    if (m_etwRcwData)
+        delete [] m_etwRcwData;
+
+    if (m_etwCcwData)
+        delete [] m_etwCcwData;
+
+    if (m_enumResult)
+    {
+        CCWEnumerationEntry *curr = m_enumResult;
+        while (curr)
+        {
+            CCWEnumerationEntry *next = curr->Next;
+            delete curr;
+            curr = next;
+        }
+    }
+}
+
+void BulkComLogger::FireBulkComEvent()
+{
+    WRAPPER_NO_CONTRACT;
+
+    FlushRcw();
+    FlushCcw();
+}
+
+void BulkComLogger::WriteRcw(RCW *pRcw, Object *obj)
+{
+    CONTRACTL
+    {
+        THROWS;
+        GC_TRIGGERS;
+        MODE_ANY;
+        PRECONDITION(pRcw != NULL);
+        PRECONDITION(obj != NULL);
+    }
+    CONTRACTL_END;
+
+    _ASSERTE(m_currRcw < kMaxRcwCount);
+
+#ifdef FEATURE_COMINTEROP
+    TypeHandle typeHandle = obj->GetGCSafeTypeHandleIfPossible();
+    if (typeHandle == NULL)
+    {
+        return;
+    }
+    EventRCWEntry &rcw = m_etwRcwData[m_currRcw];
+    rcw.ObjectID = (ULONGLONG)obj;
+    rcw.TypeID = (ULONGLONG)typeHandle.AsTAddr();
+    rcw.IUnk = (ULONGLONG)pRcw->GetIUnknown_NoAddRef();
+    rcw.VTable = (ULONGLONG)pRcw->GetVTablePtr();
+    rcw.RefCount = pRcw->GetRefCount();
+    rcw.Flags = 0;
+
+    if (++m_currRcw >= kMaxRcwCount)
+        FlushRcw();
+#endif
+}
+
+void BulkComLogger::FlushRcw()
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+    _ASSERTE(m_currRcw <= kMaxRcwCount);
+
+    if (m_currRcw == 0)
+        return;
+
+    if (m_typeLogger)
+    {
+        for (int i = 0; i < m_currRcw; ++i)
+            ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(m_typeLogger, m_etwRcwData[i].TypeID, ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime);
+    }
+
+    unsigned short instance = GetClrInstanceId();
+
+#if !defined(HOST_UNIX)
+    EVENT_DATA_DESCRIPTOR eventData[3];
+    EventDataDescCreate(&eventData[0], &m_currRcw, sizeof(const unsigned int));
+    EventDataDescCreate(&eventData[1], &instance, sizeof(const unsigned short));
+    EventDataDescCreate(&eventData[2], m_etwRcwData, sizeof(EventRCWEntry) * m_currRcw);
+
+    ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRCW, ARRAY_SIZE(eventData), eventData);
+#else
+    ULONG result = FireEtXplatGCBulkRCW(m_currRcw, instance, sizeof(EventRCWEntry) * m_currRcw, m_etwRcwData);
+#endif // !defined(HOST_UNIX)
+    result |= EventPipeWriteEventGCBulkRCW(m_currRcw, instance, sizeof(EventRCWEntry) * m_currRcw, m_etwRcwData);
+
+    _ASSERTE(result == ERROR_SUCCESS);
+
+    m_currRcw = 0;
+}
+
+void BulkComLogger::WriteCcw(ComCallWrapper *pCcw, Object **handle, 
Object *obj) +{ + CONTRACTL + { + THROWS; + GC_TRIGGERS; + MODE_ANY; + PRECONDITION(handle != NULL); + PRECONDITION(obj != NULL); + } + CONTRACTL_END; + + _ASSERTE(m_currCcw < kMaxCcwCount); + +#ifdef FEATURE_COMINTEROP + IUnknown *iUnk = NULL; + int refCount = 0; + ULONG flags = 0; + + if (pCcw) + { + iUnk = pCcw->GetOuter(); + if (iUnk == NULL) + iUnk = pCcw->GetBasicIP(true); + + refCount = pCcw->GetRefCount(); + + if (pCcw->IsWrapperActive()) + flags |= EventCCWEntry::Strong; + } + + TypeHandle typeHandle = obj->GetGCSafeTypeHandleIfPossible(); + if (typeHandle == NULL) + { + return; + } + + EventCCWEntry &ccw = m_etwCcwData[m_currCcw++]; + ccw.RootID = (ULONGLONG)handle; + ccw.ObjectID = (ULONGLONG)obj; + ccw.TypeID = (ULONGLONG)typeHandle.AsTAddr(); + ccw.IUnk = (ULONGLONG)iUnk; + ccw.RefCount = refCount; + ccw.JupiterRefCount = 0; + ccw.Flags = flags; + + if (m_currCcw >= kMaxCcwCount) + FlushCcw(); +#endif +} + +void BulkComLogger::FlushCcw() +{ + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + MODE_ANY; + } + CONTRACTL_END; + + _ASSERTE(m_currCcw <= kMaxCcwCount); + + if (m_currCcw == 0) + return; + + if (m_typeLogger) + { + for (int i = 0; i < m_currCcw; ++i) + ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(m_typeLogger, m_etwCcwData[i].TypeID, ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime); + } + + unsigned short instance = GetClrInstanceId(); + +#if !defined(HOST_UNIX) + EVENT_DATA_DESCRIPTOR eventData[3]; + EventDataDescCreate(&eventData[0], &m_currCcw, sizeof(const unsigned int)); + EventDataDescCreate(&eventData[1], &instance, sizeof(const unsigned short)); + EventDataDescCreate(&eventData[2], m_etwCcwData, sizeof(EventCCWEntry) * m_currCcw); + + ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootCCW, ARRAY_SIZE(eventData), eventData); +#else + ULONG result = FireEtXplatGCBulkRootCCW(m_currCcw, instance, sizeof(EventCCWEntry) * m_currCcw, m_etwCcwData); +#endif //!defined(HOST_UNIX) + result |= EventPipeWriteEventGCBulkRootCCW(m_currCcw, instance, sizeof(EventCCWEntry) * m_currCcw, m_etwCcwData); + + _ASSERTE(result == ERROR_SUCCESS); + + m_currCcw = 0; +} + +void BulkComLogger::LogAllComObjects() +{ + CONTRACTL + { + THROWS; + GC_TRIGGERS; + MODE_ANY; + } + CONTRACTL_END; + +#ifdef FEATURE_COMINTEROP + SyncBlockCache *cache = SyncBlockCache::GetSyncBlockCache(); + if (cache == NULL) + return; + + int count = cache->GetTableEntryCount(); + SyncTableEntry *table = SyncTableEntry::GetSyncTableEntry(); + + for (int i = 0; i < count; ++i) + { + SyncTableEntry &entry = table[i]; + Object *obj = entry.m_Object.Load(); + if (obj && entry.m_SyncBlock) + { + InteropSyncBlockInfo *interop = entry.m_SyncBlock->GetInteropInfoNoCreate(); + if (interop) + { + RCW *rcw = interop->GetRawRCW(); + if (rcw) + WriteRcw(rcw, obj); + } + } + } + + // We need to do work in HandleWalkCallback which may trigger a GC. We cannot do this while + // enumerating the handle table. Instead, we will build a list of RefCount handles we found + // during the handle table enumeration first (m_enumResult) during this enumeration: + GCHandleUtilities::GetGCHandleManager()->TraceRefCountedHandles(BulkComLogger::HandleWalkCallback, uintptr_t(this), 0); + + // Now that we have all of the object handles, we will walk all of the handles and write the + // etw events. 
+ for (CCWEnumerationEntry *curr = m_enumResult; curr; curr = curr->Next) + { + for (int i = 0; i < curr->Count; ++i) + { + Object **handle = curr->Handles[i]; + + Object *obj = NULL; + if (handle == NULL || (obj = *handle) == 0) + return; + + ObjHeader *header = obj->GetHeader(); + _ASSERTE(header != NULL); + + // We can catch the refcount handle too early where we don't have a CCW, WriteCCW + // handles this case. We still report the refcount handle without the CCW data. + ComCallWrapper *ccw = NULL; + + // Checking the index ensures that the syncblock is already created. The + // PassiveGetSyncBlock function does not check bounds, so we have to be sure + // the SyncBlock was already created. + int index = header->GetHeaderSyncBlockIndex(); + if (index > 0) + { + SyncBlock *syncBlk = header->PassiveGetSyncBlock(); + InteropSyncBlockInfo *interop = syncBlk->GetInteropInfoNoCreate(); + if (interop) + ccw = interop->GetCCW(); + } + + WriteCcw(ccw, handle, obj); + } + } + +#endif + +} + +void BulkComLogger::HandleWalkCallback(Object **handle, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2) +{ + CONTRACTL + { + THROWS; + GC_NOTRIGGER; + MODE_ANY; + PRECONDITION(param1 != NULL); // Should be the "this" pointer for BulkComLogger. + PRECONDITION(param2 == 0); // This is set by Ref_TraceRefCountHandles. + } + CONTRACTL_END; + + // Simple sanity check to ensure the parameters are what we expect them to be. + _ASSERTE(param2 == 0); + + if (handle != NULL) + ((BulkComLogger*)param1)->AddCcwHandle(handle); +} + + + +// Used during CCW enumeration to keep track of all object handles which point to a CCW. +void BulkComLogger::AddCcwHandle(Object **handle) +{ + CONTRACTL + { + THROWS; + GC_NOTRIGGER; + MODE_ANY; + PRECONDITION(handle != NULL); + } + CONTRACTL_END; + + if (m_enumResult == NULL) + m_enumResult = new CCWEnumerationEntry; + + CCWEnumerationEntry *curr = m_enumResult; + while (curr->Next) + curr = curr->Next; + + if (curr->Count == ARRAY_SIZE(curr->Handles)) + { + curr->Next = new CCWEnumerationEntry; + curr = curr->Next; + } + + curr->Handles[curr->Count++] = handle; +} + + + + +//--------------------------------------------------------------------------------------- +// BulkStaticsLogger: Batches up and logs static variable roots +//--------------------------------------------------------------------------------------- + + + +#include "domainassembly.h" + +BulkStaticsLogger::BulkStaticsLogger(BulkTypeEventLogger *typeLogger) + : m_buffer(0), m_used(0), m_count(0), m_domain(0), m_typeLogger(typeLogger) +{ + CONTRACTL + { + THROWS; + GC_NOTRIGGER; + MODE_ANY; + } + CONTRACTL_END; + + m_buffer = new BYTE[kMaxBytesValues]; +} + +BulkStaticsLogger::~BulkStaticsLogger() +{ + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + MODE_ANY; + } + CONTRACTL_END; + + if (m_used > 0) + FireBulkStaticsEvent(); + + if (m_buffer) + delete[] m_buffer; +} + +void BulkStaticsLogger::FireBulkStaticsEvent() +{ + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + MODE_ANY; + } + CONTRACTL_END; + + if (m_used <= 0 || m_count <= 0) + return; + + _ASSERTE(m_domain != NULL); + + unsigned short instance = GetClrInstanceId(); + unsigned __int64 appDomain = (unsigned __int64)m_domain; + +#if !defined(HOST_UNIX) + EVENT_DATA_DESCRIPTOR eventData[4]; + EventDataDescCreate(&eventData[0], &m_count, sizeof(const unsigned int) ); + EventDataDescCreate(&eventData[1], &appDomain, sizeof(unsigned __int64) ); + EventDataDescCreate(&eventData[2], &instance, sizeof(const unsigned short) ); + EventDataDescCreate(&eventData[3], m_buffer, 
m_used);
+
+    ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootStaticVar, ARRAY_SIZE(eventData), eventData);
+#else
+    ULONG result = FireEtXplatGCBulkRootStaticVar(m_count, appDomain, instance, m_used, m_buffer);
+#endif //!defined(HOST_UNIX)
+    result |= EventPipeWriteEventGCBulkRootStaticVar(m_count, appDomain, instance, m_used, m_buffer);
+
+    _ASSERTE(result == ERROR_SUCCESS);
+
+    m_used = 0;
+    m_count = 0;
+}
+
+void BulkStaticsLogger::WriteEntry(AppDomain *domain, Object **address, Object *obj, FieldDesc *fieldDesc)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY;
+        PRECONDITION(domain != NULL);
+        PRECONDITION(address != NULL);
+        PRECONDITION(obj != NULL);
+        PRECONDITION(fieldDesc != NULL);
+    }
+    CONTRACTL_END;
+
+    // Each bulk statics event is for one AppDomain. If we are now inspecting a new domain,
+    // we need to flush the built up events now.
+    if (m_domain != domain)
+    {
+        if (m_domain != NULL)
+            FireBulkStaticsEvent();
+
+        m_domain = domain;
+    }
+
+    TypeHandle typeHandle = obj->GetGCSafeTypeHandleIfPossible();
+    if (typeHandle == NULL)
+    {
+        return;
+    }
+    ULONGLONG th = (ULONGLONG)typeHandle.AsTAddr();
+    ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(m_typeLogger, th, ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime);
+
+    // We should have at least 512 bytes remaining in the buffer here.
+    int remaining = kMaxBytesValues - m_used;
+    _ASSERTE(kMaxBytesValues - m_used > 512);
+
+    int len = EventStaticEntry::WriteEntry(m_buffer + m_used, remaining, (ULONGLONG)address,
+                                           (ULONGLONG)obj, th, 0, fieldDesc);
+
+    // 512 bytes was not enough buffer? This shouldn't happen, so we'll skip emitting the
+    // event on error.
+    if (len > 0)
+    {
+        m_used += len;
+        m_count++;
+    }
+
+    // When we are close to running out of buffer, emit the event.
+    if (kMaxBytesValues - m_used < 512)
+        FireBulkStaticsEvent();
+}
+
+void BulkStaticsLogger::LogAllStatics()
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+    {
+        AppDomain *domain = ::GetAppDomain(); // There is only 1 AppDomain, so no iterator here.
+
+        AppDomain::AssemblyIterator assemblyIter = domain->IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded|kIncludeExecution));
+        CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+        while (assemblyIter.Next(pDomainAssembly.This()))
+        {
+            // Make sure the assembly is loaded.
+            if (!pDomainAssembly->IsLoaded())
+                continue;
+
+            CollectibleAssemblyHolder<Assembly *> pAssembly = pDomainAssembly->GetAssembly();
+            // Get the domain module from the module/appdomain pair.
+            Module *module = pDomainAssembly->GetModule();
+            if (module == NULL)
+                continue;
+
+            DomainAssembly *domainAssembly = module->GetDomainAssembly();
+            if (domainAssembly == NULL)
+                continue;
+
+            // Ensure the module has fully loaded.
+            if (!domainAssembly->IsActive())
+                continue;
+
+            DomainLocalModule *domainModule = module->GetDomainLocalModule();
+            if (domainModule == NULL)
+                continue;
+
+            // Now iterate all types within the module.
+            LookupMap<PTR_MethodTable>::Iterator mtIter = module->EnumerateTypeDefs();
+            while (mtIter.Next())
+            {
+                // I don't think mt can be null here, but the dac does a null check...
+ // IsFullyLoaded should be equivalent to 'GetLoadLevel() == CLASS_LOADED' + MethodTable *mt = mtIter.GetElement(); + if (mt == NULL || !mt->IsFullyLoaded()) + continue; + + EEClass *cls = mt->GetClass(); + _ASSERTE(cls != NULL); + + if (cls->GetNumStaticFields() <= 0) + continue; + + ApproxFieldDescIterator fieldIter(mt, ApproxFieldDescIterator::STATIC_FIELDS); + for (FieldDesc *field = fieldIter.Next(); field != NULL; field = fieldIter.Next()) + { + // Don't want thread local + _ASSERTE(field->IsStatic()); + if (field->IsSpecialStatic() || field->IsEnCNew()) + continue; + + // Static valuetype values are boxed. + CorElementType fieldType = field->GetFieldType(); + if (fieldType != ELEMENT_TYPE_CLASS && fieldType != ELEMENT_TYPE_VALUETYPE) + continue; + + BYTE *base = field->GetBaseInDomainLocalModule(domainModule); + if (base == NULL) + continue; + + Object **address = (Object**)field->GetStaticAddressHandle(base); + Object *obj = NULL; + if (address == NULL || ((obj = *address) == NULL)) + continue; + + WriteEntry(domain, address, *address, field); + } // foreach static field + } + } // foreach domain assembly + } // foreach AppDomain +} // BulkStaticsLogger::LogAllStatics + + +//--------------------------------------------------------------------------------------- +// BulkTypeValue / BulkTypeEventLogger: These take care of batching up types so they can +// be logged via ETW in bulk +//--------------------------------------------------------------------------------------- + +BulkTypeValue::BulkTypeValue() + : cTypeParameters(0) +#ifdef FEATURE_NATIVEAOT + , ullSingleTypeParameter(0) +#else // FEATURE_NATIVEAOT + , sName() +#endif // FEATURE_NATIVEAOT + , rgTypeParameters() +{ + LIMITED_METHOD_CONTRACT; + ZeroMemory(&fixedSizedData, sizeof(fixedSizedData)); +} + +//--------------------------------------------------------------------------------------- +// +// Clears a BulkTypeValue so it can be reused after the buffer is flushed to ETW +// + +void BulkTypeValue::Clear() +{ + CONTRACTL + { + THROWS; + GC_NOTRIGGER; + MODE_ANY; + } + CONTRACTL_END; + + ZeroMemory(&fixedSizedData, sizeof(fixedSizedData)); + cTypeParameters = 0; +#ifdef FEATURE_NATIVEAOT + ullSingleTypeParameter = 0; + rgTypeParameters.Release(); +#else // FEATURE_NATIVEAOT + sName.Clear(); + rgTypeParameters.Clear(); +#endif // FEATURE_NATIVEAOT +} + +//--------------------------------------------------------------------------------------- +// +// Fire an ETW event for all the types we batched so far, and then reset our state +// so we can start batching new types at the beginning of the array. +// + +void BulkTypeEventLogger::FireBulkTypeEvent() +{ + LIMITED_METHOD_CONTRACT; + + if (m_nBulkTypeValueCount == 0) + { + // No types were batched up, so nothing to send + return; + } + UINT16 nClrInstanceID = GetClrInstanceId(); + + if(m_pBulkTypeEventBuffer == NULL) + { + // The buffer could not be allocated when this object was created, so bail. 
+        return;
+    }
+
+    UINT iSize = 0;
+
+    for (int iTypeData = 0; iTypeData < m_nBulkTypeValueCount; iTypeData++)
+    {
+        BulkTypeValue& target = m_rgBulkTypeValues[iTypeData];
+
+        // Do fixed-size data as one bulk copy
+        memcpy(
+            m_pBulkTypeEventBuffer + iSize,
+            &(target.fixedSizedData),
+            sizeof(target.fixedSizedData));
+        iSize += sizeof(target.fixedSizedData);
+
+        // Do var-sized data individually per field
+
+        // Type name, written as a null-terminated UTF-16 string (two zero bytes if empty)
+        LPCWSTR wszName = target.sName.GetUnicode();
+        if (wszName == NULL)
+        {
+            m_pBulkTypeEventBuffer[iSize++] = 0;
+            m_pBulkTypeEventBuffer[iSize++] = 0;
+        }
+        else
+        {
+            UINT nameSize = (target.sName.GetCount() + 1) * sizeof(WCHAR);
+            memcpy(m_pBulkTypeEventBuffer + iSize, wszName, nameSize);
+            iSize += nameSize;
+        }
+
+        // Type parameter count
+        ULONG params = target.rgTypeParameters.GetCount();
+
+        ULONG *ptrInt = (ULONG*)(m_pBulkTypeEventBuffer + iSize);
+        *ptrInt = params;
+        iSize += sizeof(ULONG);
+
+        target.cTypeParameters = params;
+
+        // Type parameter array
+        if (target.cTypeParameters > 0)
+        {
+            memcpy(m_pBulkTypeEventBuffer + iSize, target.rgTypeParameters.GetElements(), sizeof(ULONGLONG) * target.cTypeParameters);
+            iSize += sizeof(ULONGLONG) * target.cTypeParameters;
+        }
+    }
+
+    FireEtwBulkType(m_nBulkTypeValueCount, nClrInstanceID, iSize, m_pBulkTypeEventBuffer);
+
+    // Reset state
+    m_nBulkTypeValueCount = 0;
+    m_nBulkTypeValueByteCount = 0;
+}
+
+#ifndef FEATURE_NATIVEAOT
+
+//---------------------------------------------------------------------------------------
+//
+// Batches a single type into the array, flushing the array to ETW if it fills up. Most
+// interaction with the type system (to analyze the type) is done here. This does not
+// recursively batch up any parameter types (for arrays or generics), but does add their
+// TypeHandles to the rgTypeParameters array. LogTypeAndParameters is responsible for
+// initiating any recursive calls to deal with type parameters.
+//
+// Arguments:
+//      th - TypeHandle to batch
+//
+// Return Value:
+//      Index into array of where this type got batched. -1 if there was a failure.
+//
+
+int BulkTypeEventLogger::LogSingleType(TypeHandle th)
+{
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY;
+        CAN_TAKE_LOCK;  // some of the type system stuff can take locks
+    }
+    CONTRACTL_END;
+
+    // If there's no room for another type, flush what we've got
+    if (m_nBulkTypeValueCount == ARRAY_SIZE(m_rgBulkTypeValues))
+    {
+        FireBulkTypeEvent();
+    }
+
+    _ASSERTE(m_nBulkTypeValueCount < (int)ARRAY_SIZE(m_rgBulkTypeValues));
+
+    BulkTypeValue * pVal = &m_rgBulkTypeValues[m_nBulkTypeValueCount];
+
+    // Clear out pVal before filling it out (array elements can get reused if there
+    // are enough types that we need to flush to multiple events). Clearing the
+    // contained SBuffer can throw, so deal with exceptions
+    BOOL fSucceeded = FALSE;
+    EX_TRY
+    {
+        pVal->Clear();
+        fSucceeded = TRUE;
+    }
+    EX_CATCH
+    {
+        fSucceeded = FALSE;
+    }
+    EX_END_CATCH(RethrowTerminalExceptions);
+    if (!fSucceeded)
+        return -1;
+
+    pVal->fixedSizedData.TypeID = (ULONGLONG) th.AsTAddr();
+    pVal->fixedSizedData.ModuleID = (ULONGLONG) (TADDR) th.GetModule();
+    pVal->fixedSizedData.TypeNameID = (th.GetMethodTable() == NULL) ?
0 : th.GetCl(); + pVal->fixedSizedData.Flags = 0; + pVal->fixedSizedData.CorElementType = (BYTE) th.GetInternalCorElementType(); + + if (th.IsArray()) + { + // Normal typedesc array + pVal->fixedSizedData.Flags |= kEtwTypeFlagsArray; + if (pVal->fixedSizedData.CorElementType == ELEMENT_TYPE_ARRAY) + { + // Multidimensional arrays set the rank bits, SzArrays do not set the rank bits + unsigned rank = th.GetRank(); + if (rank < kEtwTypeFlagsArrayRankMax) + { + // Only ranks less than kEtwTypeFlagsArrayRankMax are supported. + // Fortunately kEtwTypeFlagsArrayRankMax should be greater than the + // number of ranks the type loader will support + rank <<= kEtwTypeFlagsArrayRankShift; + _ASSERTE((rank & kEtwTypeFlagsArrayRankMask) == rank); + pVal->fixedSizedData.Flags |= rank; + } + } + // Fetch TypeHandle of array elements + fSucceeded = FALSE; + EX_TRY + { + pVal->rgTypeParameters.Append((ULONGLONG) th.GetArrayElementTypeHandle().AsTAddr()); + fSucceeded = TRUE; + } + EX_CATCH + { + fSucceeded = FALSE; + } + EX_END_CATCH(RethrowTerminalExceptions); + if (!fSucceeded) + return -1; + } + else if (th.IsTypeDesc()) + { + // Non-array Typedescs + PTR_TypeDesc pTypeDesc = th.AsTypeDesc(); + if (pTypeDesc->HasTypeParam()) + { + fSucceeded = FALSE; + EX_TRY + { + pVal->rgTypeParameters.Append((ULONGLONG) pTypeDesc->GetTypeParam().AsTAddr()); + fSucceeded = TRUE; + } + EX_CATCH + { + fSucceeded = FALSE; + } + EX_END_CATCH(RethrowTerminalExceptions); + if (!fSucceeded) + return -1; + } + } + else + { + // Non-array MethodTable + + PTR_MethodTable pMT = th.AsMethodTable(); + + // Make CorElementType more specific if this is a string MT + if (pMT->IsString()) + { + pVal->fixedSizedData.CorElementType = ELEMENT_TYPE_STRING; + } + else if (pMT->IsObjectClass()) + { + pVal->fixedSizedData.CorElementType = ELEMENT_TYPE_OBJECT; + } + + // Generic arguments + DWORD cTypeParameters = pMT->GetNumGenericArgs(); + if (cTypeParameters > 0) + { + Instantiation inst = pMT->GetInstantiation(); + fSucceeded = FALSE; + EX_TRY + { + for (DWORD i=0; i < cTypeParameters; i++) + { + pVal->rgTypeParameters.Append((ULONGLONG) inst[i].AsTAddr()); + } + fSucceeded = TRUE; + } + EX_CATCH + { + fSucceeded = FALSE; + } + EX_END_CATCH(RethrowTerminalExceptions); + if (!fSucceeded) + return -1; + } + + if (pMT->HasFinalizer()) + { + pVal->fixedSizedData.Flags |= kEtwTypeFlagsFinalizable; + } + if (pMT->IsDelegate()) + { + pVal->fixedSizedData.Flags |= kEtwTypeFlagsDelegate; + } + if (pMT->IsComObjectType()) + { + pVal->fixedSizedData.Flags |= kEtwTypeFlagsExternallyImplementedCOMObject; + } + } + + // If the profiler wants it, construct a name. Always normalize the string (even if + // type names are not requested) so that calls to sName.GetCount() can't throw + EX_TRY + { + if (ETW_TRACING_CATEGORY_ENABLED( + MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, + TRACE_LEVEL_INFORMATION, + CLR_GCHEAPANDTYPENAMES_KEYWORD)) + { + th.GetName(pVal->sName); + } + pVal->sName.Normalize(); + } + EX_CATCH + { + // If this failed, the name remains empty, which is ok; the event just + // won't have a name in it. 
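+        // Clear() also discards anything GetName() may have written before the exception.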
+ pVal->sName.Clear(); + } + EX_END_CATCH(RethrowTerminalExceptions); + + // Now that we know the full size of this type's data, see if it fits in our + // batch or whether we need to flush + + int cbVal = pVal->GetByteCountInEvent(); + if (cbVal > kMaxBytesTypeValues) + { + pVal->sName.Clear(); + cbVal = pVal->GetByteCountInEvent(); + + if (cbVal > kMaxBytesTypeValues) + { + // This type is apparently so huge, it's too big to squeeze into an event, even + // if it were the only type batched in the whole event. Bail + _ASSERTE(!"Type too big to log via ETW"); + return -1; + } + } + + if (m_nBulkTypeValueByteCount + cbVal > kMaxBytesTypeValues) + { + // Although this type fits into the array, its size is so big that the entire + // array can't be logged via ETW. So flush the array, and start over by + // calling ourselves--this refetches the type info and puts it at the + // beginning of the array. Since we know this type is small enough to be + // batched into an event on its own, this recursive call will not try to + // call itself again. + FireBulkTypeEvent(); + return LogSingleType(th); + } + + // The type fits into the batch, so update our state + m_nBulkTypeValueCount++; + m_nBulkTypeValueByteCount += cbVal; + return m_nBulkTypeValueCount - 1; // Index of type we just added +} + +//--------------------------------------------------------------------------------------- +// +// High-level method to batch a type and (recursively) its type parameters, flushing to +// ETW as needed. This is called by (static) +// ETW::TypeSystemLog::LogTypeAndParametersIfNecessary, which is what clients use to log +// type events +// +// Arguments: +// * thAsAddr - Type to batch +// * typeLogBehavior - Reminder of whether the type system log lock is held +// (useful if we need to recursively call back into TypeSystemLog), and whether +// we even care to check if the type was already logged +// + +void BulkTypeEventLogger::LogTypeAndParameters(ULONGLONG thAsAddr, ETW::TypeSystemLog::TypeLogBehavior typeLogBehavior) +{ + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + MODE_ANY; + CAN_TAKE_LOCK; // LogSingleType can take locks + } + CONTRACTL_END; + + TypeHandle th = TypeHandle::FromTAddr((TADDR) thAsAddr); + + // Batch up this type. 
This grabs useful info about the type, including any
+    // type parameters it may have, and sticks it in m_rgBulkTypeValues
+    int iBulkTypeEventData = LogSingleType(th);
+    if (iBulkTypeEventData == -1)
+    {
+        // There was a failure trying to log the type, so don't bother with its type
+        // parameters
+        return;
+    }
+
+    // Look at the type info we just batched, so we can get the type parameters
+    BulkTypeValue * pVal = &m_rgBulkTypeValues[iBulkTypeEventData];
+
+    // We're about to recursively call ourselves for the type parameters, so make a
+    // local copy of their type handles first (else, as we log them we could flush
+    // and clear out m_rgBulkTypeValues, thus trashing pVal)
+
+    StackSArray<ULONGLONG> rgTypeParameters;
+    DWORD cParams = pVal->rgTypeParameters.GetCount();
+
+    BOOL fSucceeded = FALSE;
+    EX_TRY
+    {
+        for (COUNT_T i = 0; i < cParams; i++)
+        {
+            rgTypeParameters.Append(pVal->rgTypeParameters[i]);
+        }
+        fSucceeded = TRUE;
+    }
+    EX_CATCH
+    {
+        fSucceeded = FALSE;
+    }
+    EX_END_CATCH(RethrowTerminalExceptions);
+    if (!fSucceeded)
+        return;
+
+    // Before we recurse, adjust the special-cased type-log behavior that allows a
+    // top-level type to be logged without lookup, but still requires lookups to avoid
+    // dupes of type parameters
+    if (typeLogBehavior == ETW::TypeSystemLog::kTypeLogBehaviorAlwaysLogTopLevelType)
+        typeLogBehavior = ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime;
+
+    // Recursively log any referenced parameter types
+    for (COUNT_T i = 0; i < cParams; i++)
+    {
+        ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(this, rgTypeParameters[i], typeLogBehavior);
+    }
+}
+
+#endif // FEATURE_NATIVEAOT
diff --git a/src/coreclr/vm/eventtrace_gcheap.cpp b/src/coreclr/vm/eventtrace_gcheap.cpp
new file mode 100644
index 00000000000000..50dfcea6e434a9
--- /dev/null
+++ b/src/coreclr/vm/eventtrace_gcheap.cpp
@@ -0,0 +1,1032 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "common.h"
+
+#include "eventtrace.h"
+#include "winbase.h"
+#include "contract.h"
+#include "ex.h"
+#include "dbginterface.h"
+#include "finalizerthread.h"
+#include "clrversion.h"
+#include "typestring.h"
+
+#include "eventtracepriv.h"
+
+//---------------------------------------------------------------------------------------
+// Code for sending GC heap object events is generally the same for both FEATURE_NATIVEAOT
+// and !FEATURE_NATIVEAOT builds
+//---------------------------------------------------------------------------------------
+
+bool s_forcedGCInProgress = false;
+class ForcedGCHolder
+{
+public:
+    ForcedGCHolder() { LIMITED_METHOD_CONTRACT; s_forcedGCInProgress = true; }
+    ~ForcedGCHolder() { LIMITED_METHOD_CONTRACT; s_forcedGCInProgress = false; }
+};
+
+
+// Simple helpers called by the GC to decide whether it needs to do a walk of heap
+// objects and / or roots.
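+//
+// A rough sketch of the expected call pattern on the GC side (illustrative
+// pseudo-code only, not the actual GC-to-runtime interface):
+//
+//     if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
+//         { /* scan roots, calling ETW::GCLog::RootReference for each one */ }
+//     if (ETW::GCLog::ShouldWalkHeapObjectsForEtw())
+//         { /* walk the heap, calling ETW::GCLog::ObjectReference per object */ }
+//
+// Apart from ShouldTrackMovementForEtw, these only return TRUE while a forced,
+// ETW-induced GC is in progress (see s_forcedGCInProgress above) with the
+// GCHeapDump keyword enabled.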
+ +BOOL ETW::GCLog::ShouldWalkHeapObjectsForEtw() +{ + LIMITED_METHOD_CONTRACT; + return s_forcedGCInProgress && + ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, + TRACE_LEVEL_INFORMATION, + CLR_GCHEAPDUMP_KEYWORD); +} + +BOOL ETW::GCLog::ShouldWalkHeapRootsForEtw() +{ + LIMITED_METHOD_CONTRACT; + return s_forcedGCInProgress && + ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, + TRACE_LEVEL_INFORMATION, + CLR_GCHEAPDUMP_KEYWORD); +} + +BOOL ETW::GCLog::ShouldTrackMovementForEtw() +{ + LIMITED_METHOD_CONTRACT; + return ETW_TRACING_CATEGORY_ENABLED( + MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, + TRACE_LEVEL_INFORMATION, + CLR_GCHEAPSURVIVALANDMOVEMENT_KEYWORD); +} + +BOOL ETW::GCLog::ShouldWalkStaticsAndCOMForEtw() +{ + LIMITED_METHOD_CONTRACT; + + return s_forcedGCInProgress && + ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, + TRACE_LEVEL_INFORMATION, + CLR_GCHEAPDUMP_KEYWORD); +} + +// Batches the list of moved/surviving references for the GCBulkMovedObjectRanges / +// GCBulkSurvivingObjectRanges events +struct EtwGcMovementContext +{ +public: + // An instance of EtwGcMovementContext is dynamically allocated and stored + // inside of MovedReferenceContextForEtwAndProfapi, which in turn is dynamically + // allocated and pointed to by a profiling_context pointer created by the GC on the stack. + // This is used to batch and send GCBulkSurvivingObjectRanges events and + // GCBulkMovedObjectRanges events. This method is passed a pointer to + // MovedReferenceContextForEtwAndProfapi::pctxEtw; if non-NULL it gets returned; + // else, a new EtwGcMovementContext is allocated, stored in that pointer, and + // then returned. Callers should test for NULL, which can be returned if out of + // memory + static EtwGcMovementContext* GetOrCreateInGCContext(EtwGcMovementContext** ppContext) + { + LIMITED_METHOD_CONTRACT; + + _ASSERTE(ppContext != NULL); + + EtwGcMovementContext* pContext = *ppContext; + if (pContext == NULL) + { + pContext = new (nothrow) EtwGcMovementContext; + *ppContext = pContext; + } + return pContext; + } + + EtwGcMovementContext() : + iCurBulkSurvivingObjectRanges(0), + iCurBulkMovedObjectRanges(0) + { + LIMITED_METHOD_CONTRACT; + Clear(); + } + + // Resets structure for reuse on construction, and after each flush. + // (Intentionally leave iCurBulk* as is, since they persist across flushes within a GC.) + void Clear() + { + LIMITED_METHOD_CONTRACT; + cBulkSurvivingObjectRanges = 0; + cBulkMovedObjectRanges = 0; + ZeroMemory(rgGCBulkSurvivingObjectRanges, sizeof(rgGCBulkSurvivingObjectRanges)); + ZeroMemory(rgGCBulkMovedObjectRanges, sizeof(rgGCBulkMovedObjectRanges)); + } + + //--------------------------------------------------------------------------------------- + // GCBulkSurvivingObjectRanges + //--------------------------------------------------------------------------------------- + + // Sequence number for each GCBulkSurvivingObjectRanges event + UINT iCurBulkSurvivingObjectRanges; + + // Number of surviving object ranges currently filled out in rgGCBulkSurvivingObjectRanges array + UINT cBulkSurvivingObjectRanges; + + // Struct array containing the primary data for each GCBulkSurvivingObjectRanges + // event. 
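+    // (cbMaxEtwEvent is the 64K ceiling ETW places on a single event's payload.)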
Fix the size so the total event stays well below the 64K limit (leaving + // lots of room for non-struct fields that come before the values data) + EventStructGCBulkSurvivingObjectRangesValue rgGCBulkSurvivingObjectRanges[ + (cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkSurvivingObjectRangesValue)]; + + //--------------------------------------------------------------------------------------- + // GCBulkMovedObjectRanges + //--------------------------------------------------------------------------------------- + + // Sequence number for each GCBulkMovedObjectRanges event + UINT iCurBulkMovedObjectRanges; + + // Number of Moved object ranges currently filled out in rgGCBulkMovedObjectRanges array + UINT cBulkMovedObjectRanges; + + // Struct array containing the primary data for each GCBulkMovedObjectRanges + // event. Fix the size so the total event stays well below the 64K limit (leaving + // lots of room for non-struct fields that come before the values data) + EventStructGCBulkMovedObjectRangesValue rgGCBulkMovedObjectRanges[ + (cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkMovedObjectRangesValue)]; +}; + +// Contains above struct for ETW, plus extra info (opaque to us) used by the profiling +// API to track its own information. +struct MovedReferenceContextForEtwAndProfapi +{ + // An instance of MovedReferenceContextForEtwAndProfapi is dynamically allocated and + // pointed to by a profiling_context pointer created by the GC on the stack. This is used to + // batch and send GCBulkSurvivingObjectRanges events and GCBulkMovedObjectRanges + // events and the corresponding callbacks for profapi profilers. This method is + // passed a pointer to a MovedReferenceContextForEtwAndProfapi; if non-NULL it gets + // returned; else, a new MovedReferenceContextForEtwAndProfapi is allocated, stored + // in that pointer, and then returned. Callers should test for NULL, which can be + // returned if out of memory + static MovedReferenceContextForEtwAndProfapi* CreateInGCContext(LPVOID pvContext) + { + LIMITED_METHOD_CONTRACT; + + _ASSERTE(pvContext != NULL); + + MovedReferenceContextForEtwAndProfapi* pContext = *(MovedReferenceContextForEtwAndProfapi**)pvContext; + + // Shouldn't be called if the context was already created. Perhaps someone made + // one too many BeginMovedReferences calls, or didn't have an EndMovedReferences + // in between? + _ASSERTE(pContext == NULL); + + pContext = new (nothrow) MovedReferenceContextForEtwAndProfapi; + *(MovedReferenceContextForEtwAndProfapi**)pvContext = pContext; + + return pContext; + } + + + MovedReferenceContextForEtwAndProfapi() : + pctxProfAPI(NULL), + pctxEtw(NULL) + + { + LIMITED_METHOD_CONTRACT; + } + + LPVOID pctxProfAPI; + EtwGcMovementContext* pctxEtw; +}; + + +//--------------------------------------------------------------------------------------- +// +// Called by the GC for each moved or surviving reference that it encounters. This +// batches the info into our context's buffer, and flushes that buffer to ETW as it fills +// up. +// +// Arguments: +// * pbMemBlockStart - Start of moved/surviving block +// * pbMemBlockEnd - Next pointer after end of moved/surviving block +// * cbRelocDistance - How far did the block move? (0 for non-compacted / surviving +// references; negative if moved to earlier addresses) +// * profilingContext - Where our context is stored +// * fCompacting - Is this a compacting GC? 
Used to decide whether to send the moved +// or surviving event +// + +// static +void ETW::GCLog::MovedReference( + BYTE* pbMemBlockStart, + BYTE* pbMemBlockEnd, + ptrdiff_t cbRelocDistance, + size_t profilingContext, + BOOL fCompacting, + BOOL fAllowProfApiNotification /* = TRUE */) +{ + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + MODE_ANY; + CAN_TAKE_LOCK; // EEToProfInterfaceImpl::AllocateMovedReferencesData takes lock + } + CONTRACTL_END; + + MovedReferenceContextForEtwAndProfapi* pCtxForEtwAndProfapi = + (MovedReferenceContextForEtwAndProfapi*) profilingContext; + if (pCtxForEtwAndProfapi == NULL) + { + _ASSERTE(!"MovedReference() encountered a NULL profilingContext"); + return; + } + +#ifdef PROFILING_SUPPORTED + // ProfAPI + if (fAllowProfApiNotification) + { + BEGIN_PROFILER_CALLBACK(CORProfilerTrackGC() || CORProfilerTrackGCMovedObjects()); + (&g_profControlBlock)->MovedReference(pbMemBlockStart, + pbMemBlockEnd, + cbRelocDistance, + &(pCtxForEtwAndProfapi->pctxProfAPI), + fCompacting); + END_PROFILER_CALLBACK(); + } +#endif // PROFILING_SUPPORTED + + // ETW + + if (!ShouldTrackMovementForEtw()) + return; + + EtwGcMovementContext* pContext = + EtwGcMovementContext::GetOrCreateInGCContext(&pCtxForEtwAndProfapi->pctxEtw); + if (pContext == NULL) + return; + + if (fCompacting) + { + // Moved references + + _ASSERTE(pContext->cBulkMovedObjectRanges < ARRAY_SIZE(pContext->rgGCBulkMovedObjectRanges)); + EventStructGCBulkMovedObjectRangesValue* pValue = + &pContext->rgGCBulkMovedObjectRanges[pContext->cBulkMovedObjectRanges]; + pValue->OldRangeBase = pbMemBlockStart; + pValue->NewRangeBase = pbMemBlockStart + cbRelocDistance; + pValue->RangeLength = pbMemBlockEnd - pbMemBlockStart; + pContext->cBulkMovedObjectRanges++; + + // If buffer is now full, empty it into ETW + if (pContext->cBulkMovedObjectRanges == ARRAY_SIZE(pContext->rgGCBulkMovedObjectRanges)) + { + FireEtwGCBulkMovedObjectRanges( + pContext->iCurBulkMovedObjectRanges, + pContext->cBulkMovedObjectRanges, + GetClrInstanceId(), + sizeof(pContext->rgGCBulkMovedObjectRanges[0]), + &pContext->rgGCBulkMovedObjectRanges[0]); + + pContext->iCurBulkMovedObjectRanges++; + pContext->Clear(); + } + } + else + { + // Surviving references + + _ASSERTE(pContext->cBulkSurvivingObjectRanges < ARRAY_SIZE(pContext->rgGCBulkSurvivingObjectRanges)); + EventStructGCBulkSurvivingObjectRangesValue* pValue = + &pContext->rgGCBulkSurvivingObjectRanges[pContext->cBulkSurvivingObjectRanges]; + pValue->RangeBase = pbMemBlockStart; + pValue->RangeLength = pbMemBlockEnd - pbMemBlockStart; + pContext->cBulkSurvivingObjectRanges++; + + // If buffer is now full, empty it into ETW + if (pContext->cBulkSurvivingObjectRanges == ARRAY_SIZE(pContext->rgGCBulkSurvivingObjectRanges)) + { + FireEtwGCBulkSurvivingObjectRanges( + pContext->iCurBulkSurvivingObjectRanges, + pContext->cBulkSurvivingObjectRanges, + GetClrInstanceId(), + sizeof(pContext->rgGCBulkSurvivingObjectRanges[0]), + &pContext->rgGCBulkSurvivingObjectRanges[0]); + + pContext->iCurBulkSurvivingObjectRanges++; + pContext->Clear(); + } + } +} + + +//--------------------------------------------------------------------------------------- +// +// Called by the GC just before it begins enumerating plugs. 
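+// (A "plug" is a contiguous run of live objects that the GC moves as a unit.)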
Gives us a chance to +// allocate our context structure, to allow us to batch plugs before firing events +// for them +// +// Arguments: +// * pProfilingContext - Points to location on stack (in GC function) where we can +// store a pointer to the context we allocate +// + +// static +VOID ETW::GCLog::BeginMovedReferences(size_t* pProfilingContext) +{ + LIMITED_METHOD_CONTRACT; + + MovedReferenceContextForEtwAndProfapi::CreateInGCContext(LPVOID(pProfilingContext)); +} + + +//--------------------------------------------------------------------------------------- +// +// Called by the GC at the end of a heap walk to give us a place to flush any remaining +// buffers of data to ETW or the profapi profiler +// +// Arguments: +// profilingContext - Our context we built up during the heap walk +// + +// static +VOID ETW::GCLog::EndMovedReferences(size_t profilingContext, BOOL fAllowProfApiNotification /* = TRUE */) +{ + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + MODE_ANY; + CAN_TAKE_LOCK; + } + CONTRACTL_END; + + MovedReferenceContextForEtwAndProfapi* pCtxForEtwAndProfapi = (MovedReferenceContextForEtwAndProfapi*) profilingContext; + if (pCtxForEtwAndProfapi == NULL) + { + _ASSERTE(!"EndMovedReferences() encountered a NULL profilingContext"); + return; + } + +#ifdef PROFILING_SUPPORTED + // ProfAPI + if (fAllowProfApiNotification) + { + BEGIN_PROFILER_CALLBACK(CORProfilerTrackGC() || CORProfilerTrackGCMovedObjects()); + (&g_profControlBlock)->EndMovedReferences(&(pCtxForEtwAndProfapi->pctxProfAPI)); + END_PROFILER_CALLBACK(); + } +#endif //PROFILING_SUPPORTED + + // ETW + + if (!ShouldTrackMovementForEtw()) + return; + + // If context isn't already set up for us, then we haven't been collecting any data + // for ETW events. + EtwGcMovementContext* pContext = pCtxForEtwAndProfapi->pctxEtw; + if (pContext == NULL) + return; + + // Flush any remaining moved or surviving range data + + if (pContext->cBulkMovedObjectRanges > 0) + { + FireEtwGCBulkMovedObjectRanges( + pContext->iCurBulkMovedObjectRanges, + pContext->cBulkMovedObjectRanges, + GetClrInstanceId(), + sizeof(pContext->rgGCBulkMovedObjectRanges[0]), + &pContext->rgGCBulkMovedObjectRanges[0]); + } + + if (pContext->cBulkSurvivingObjectRanges > 0) + { + FireEtwGCBulkSurvivingObjectRanges( + pContext->iCurBulkSurvivingObjectRanges, + pContext->cBulkSurvivingObjectRanges, + GetClrInstanceId(), + sizeof(pContext->rgGCBulkSurvivingObjectRanges[0]), + &pContext->rgGCBulkSurvivingObjectRanges[0]); + } + + pCtxForEtwAndProfapi->pctxEtw = NULL; + delete pContext; +} + +// This implements the public runtime provider's GCHeapCollectKeyword. It +// performs a full, gen-2, blocking GC. +VOID ETW::GCLog::ForceGC(LONGLONG l64ClientSequenceNumber) +{ + CONTRACTL + { + NOTHROW; + GC_TRIGGERS; + MODE_ANY; + } + CONTRACTL_END; + +#ifndef FEATURE_NATIVEAOT + if (!IsGarbageCollectorFullyInitialized()) + return; +#endif // FEATURE_NATIVEAOT + + InterlockedExchange64(&s_l64LastClientSequenceNumber, l64ClientSequenceNumber); + + ForceGCForDiagnostics(); +} + +//--------------------------------------------------------------------------------------- +// +// Contains code common to profapi and ETW scenarios where the profiler wants to force +// the CLR to perform a GC. The important work here is to create a managed thread for +// the current thread BEFORE the GC begins. On both ETW and profapi threads, there may +// not yet be a managed thread object. But some scenarios require a managed thread +// object be present. 
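+// (For example, the WinUI reference-tracker callbacks mentioned below touch managed references.)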
+// +// Return Value: +// HRESULT indicating success or failure +// +// Assumptions: +// Caller should ensure that the EE has fully started up and that the GC heap is +// initialized enough to actually perform a GC +// + +// static +HRESULT ETW::GCLog::ForceGCForDiagnostics() +{ + CONTRACTL + { + NOTHROW; + GC_TRIGGERS; + MODE_ANY; + } + CONTRACTL_END; + + HRESULT hr = E_FAIL; + +#ifndef FEATURE_NATIVEAOT + // Caller should ensure we're past startup. + _ASSERTE(IsGarbageCollectorFullyInitialized()); + + // In immersive apps the GarbageCollect() call below will call into the WinUI reference tracker, + // which will call back into the runtime to track references. This call + // chain would cause a Thread object to be created for this thread while code + // higher on the stack owns the ThreadStoreLock. This will lead to asserts + // since the ThreadStoreLock is non-reentrant. To avoid this we'll create + // the Thread object here instead. + if (GetThreadNULLOk() == NULL) + { + HRESULT hr = E_FAIL; + SetupThreadNoThrow(&hr); + if (FAILED(hr)) + return hr; + } + + ASSERT_NO_EE_LOCKS_HELD(); + + EX_TRY + { + // Need to switch to cooperative mode as the thread will access managed + // references (through reference tracker callbacks). + GCX_COOP(); +#endif // FEATURE_NATIVEAOT + + ForcedGCHolder forcedGCHolder; + + hr = GCHeapUtilities::GetGCHeap()->GarbageCollect( + -1, // all generations should be collected + false, // low_memory_p + collection_blocking); + +#ifndef FEATURE_NATIVEAOT + } + EX_CATCH { } + EX_END_CATCH(RethrowTerminalExceptions); +#endif // FEATURE_NATIVEAOT + + return hr; +} + +//--------------------------------------------------------------------------------------- +// WalkStaticsAndCOMForETW walks both CCW/RCW objects and static variables. +//--------------------------------------------------------------------------------------- + +VOID ETW::GCLog::WalkStaticsAndCOMForETW() +{ + CONTRACTL + { + NOTHROW; + GC_TRIGGERS; + } + CONTRACTL_END; + + EX_TRY + { + BulkTypeEventLogger typeLogger; + + // Walk RCWs/CCWs + BulkComLogger comLogger(&typeLogger); + comLogger.LogAllComObjects(); + + // Walk static variables + BulkStaticsLogger staticLogger(&typeLogger); + staticLogger.LogAllStatics(); + + // Ensure all loggers have written all events, fire type logger last to batch events + // (FireBulkComEvent or FireBulkStaticsEvent may queue up additional types). + comLogger.FireBulkComEvent(); + staticLogger.FireBulkStaticsEvent(); + typeLogger.FireBulkTypeEvent(); + } + EX_CATCH + { + } + EX_END_CATCH(SwallowAllExceptions); +} + + +// Holds state that batches of roots, nodes, edges, and types as the GC walks the heap +// at the end of a collection. +class EtwGcHeapDumpContext +{ +public: + // An instance of EtwGcHeapDumpContext is dynamically allocated and stored inside of + // ProfilingScanContext and ProfilerWalkHeapContext, which are context structures + // that the GC heap walker sends back to the callbacks. This method is passed a + // pointer to ProfilingScanContext::pvEtwContext or + // ProfilerWalkHeapContext::pvEtwContext; if non-NULL it gets returned; else, a new + // EtwGcHeapDumpContext is allocated, stored in that pointer, and then returned. 
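+    // (This is the same pattern as EtwGcMovementContext::GetOrCreateInGCContext above.)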
+ // Callers should test for NULL, which can be returned if out of memory + static EtwGcHeapDumpContext* GetOrCreateInGCContext(LPVOID* ppvEtwContext) + { + LIMITED_METHOD_CONTRACT; + + _ASSERTE(ppvEtwContext != NULL); + + EtwGcHeapDumpContext* pContext = (EtwGcHeapDumpContext*)*ppvEtwContext; + if (pContext == NULL) + { + pContext = new (nothrow) EtwGcHeapDumpContext; + *ppvEtwContext = pContext; + } + return pContext; + } + + EtwGcHeapDumpContext() : + iCurBulkRootEdge(0), + iCurBulkRootConditionalWeakTableElementEdge(0), + iCurBulkNodeEvent(0), + iCurBulkEdgeEvent(0), + bulkTypeEventLogger() + { + LIMITED_METHOD_CONTRACT; + ClearRootEdges(); + ClearRootConditionalWeakTableElementEdges(); + ClearNodes(); + ClearEdges(); + } + + // These helpers clear the individual buffers, for use after a flush and on + // construction. They intentionally leave the indices (iCur*) alone, since they + // persist across flushes within a GC + + void ClearRootEdges() + { + LIMITED_METHOD_CONTRACT; + cGcBulkRootEdges = 0; + ZeroMemory(rgGcBulkRootEdges, sizeof(rgGcBulkRootEdges)); + } + + void ClearRootConditionalWeakTableElementEdges() + { + LIMITED_METHOD_CONTRACT; + cGCBulkRootConditionalWeakTableElementEdges = 0; + ZeroMemory(rgGCBulkRootConditionalWeakTableElementEdges, sizeof(rgGCBulkRootConditionalWeakTableElementEdges)); + } + + void ClearNodes() + { + LIMITED_METHOD_CONTRACT; + cGcBulkNodeValues = 0; + ZeroMemory(rgGcBulkNodeValues, sizeof(rgGcBulkNodeValues)); + } + + void ClearEdges() + { + LIMITED_METHOD_CONTRACT; + cGcBulkEdgeValues = 0; + ZeroMemory(rgGcBulkEdgeValues, sizeof(rgGcBulkEdgeValues)); + } + + //--------------------------------------------------------------------------------------- + // GCBulkRootEdge + // + // A "root edge" is the relationship between a source "GCRootID" (i.e., stack + // variable, handle, static, etc.) and the target "RootedNodeAddress" (the managed + // object that gets rooted). + // + //--------------------------------------------------------------------------------------- + + // Sequence number for each GCBulkRootEdge event + UINT iCurBulkRootEdge; + + // Number of root edges currently filled out in rgGcBulkRootEdges array + UINT cGcBulkRootEdges; + + // Struct array containing the primary data for each GCBulkRootEdge event. Fix the size so + // the total event stays well below the 64K + // limit (leaving lots of room for non-struct fields that come before the root edge data) + EventStructGCBulkRootEdgeValue rgGcBulkRootEdges[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkRootEdgeValue)]; + + + //--------------------------------------------------------------------------------------- + // GCBulkRootConditionalWeakTableElementEdge + // + // These describe dependent handles, which simulate an edge connecting a key NodeID + // to a value NodeID. + // + //--------------------------------------------------------------------------------------- + + // Sequence number for each GCBulkRootConditionalWeakTableElementEdge event + UINT iCurBulkRootConditionalWeakTableElementEdge; + + // Number of root edges currently filled out in rgGCBulkRootConditionalWeakTableElementEdges array + UINT cGCBulkRootConditionalWeakTableElementEdges; + + // Struct array containing the primary data for each GCBulkRootConditionalWeakTableElementEdge event. 
Fix the size so + // the total event stays well below the 64K + // limit (leaving lots of room for non-struct fields that come before the root edge data) + EventStructGCBulkRootConditionalWeakTableElementEdgeValue rgGCBulkRootConditionalWeakTableElementEdges + [(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkRootConditionalWeakTableElementEdgeValue)]; + + //--------------------------------------------------------------------------------------- + // GCBulkNode + // + // A "node" is ANY managed object sitting on the heap, including RootedNodeAddresses + // as well as leaf nodes. + // + //--------------------------------------------------------------------------------------- + + // Sequence number for each GCBulkNode event + UINT iCurBulkNodeEvent; + + // Number of nodes currently filled out in rgGcBulkNodeValues array + UINT cGcBulkNodeValues; + + // Struct array containing the primary data for each GCBulkNode event. Fix the size so + // the total event stays well below the 64K + // limit (leaving lots of room for non-struct fields that come before the node data) + EventStructGCBulkNodeValue rgGcBulkNodeValues[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkNodeValue)]; + + //--------------------------------------------------------------------------------------- + // GCBulkEdge + // + // An "edge" is the relationship between a source node and its referenced target + // node. Edges are reported in bulk, separately from Nodes, but it is expected that + // the consumer read the Node and Edge streams together. One takes the first node + // from the Node stream, and then reads EdgeCount entries in the Edge stream, telling + // you all of that Node's targets. Then, one takes the next node in the Node stream, + // and reads the next entries in the Edge stream (using this Node's EdgeCount to + // determine how many) to find all of its targets. This continues on until the Node + // and Edge streams have been fully read. + // + // GCBulkRootEdges are not duplicated in the GCBulkEdge events. GCBulkEdge events + // begin at the GCBulkRootEdge.RootedNodeAddress and move forward. + // + //--------------------------------------------------------------------------------------- + + // Sequence number for each GCBulkEdge event + UINT iCurBulkEdgeEvent; + + // Number of nodes currently filled out in rgGcBulkEdgeValues array + UINT cGcBulkEdgeValues; + + // Struct array containing the primary data for each GCBulkEdge event. Fix the size so + // the total event stays well below the 64K + // limit (leaving lots of room for non-struct fields that come before the edge data) + EventStructGCBulkEdgeValue rgGcBulkEdgeValues[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkEdgeValue)]; + + + //--------------------------------------------------------------------------------------- + // BulkType + // + // Types are a bit more complicated to batch up, since their data is of varying + // size. BulkTypeEventLogger takes care of the pesky details for us + //--------------------------------------------------------------------------------------- + + BulkTypeEventLogger bulkTypeEventLogger; +}; + + + +//--------------------------------------------------------------------------------------- +// +// Called during a heap walk for each root reference encountered. 
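+// (A root here may be a stack slot, a handle, a finalizer-queue entry, etc.; see the
+// nRootKind switch below.)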
Batches up the root in +// the ETW context +// +// Arguments: +// * pvHandle - If the root is a handle, this points to the handle +// * pRootedNode - Points to object that is rooted +// * pSecondaryNodeForDependentHandle - For dependent handles, this is the +// secondary object +// * fDependentHandle - nonzero iff this is for a dependent handle +// * profilingScanContext - The shared profapi/etw context built up during the heap walk. +// * dwGCFlags - Bitmask of "GC_"-style flags set by GC +// * rootFlags - Bitmask of EtwGCRootFlags describing the root +// + +// static +VOID ETW::GCLog::RootReference( + LPVOID pvHandle, + Object* pRootedNode, + Object* pSecondaryNodeForDependentHandle, + BOOL fDependentHandle, + ProfilingScanContext* profilingScanContext, + DWORD dwGCFlags, + DWORD rootFlags) +{ + LIMITED_METHOD_CONTRACT; + + EtwGcHeapDumpContext* pContext = + EtwGcHeapDumpContext::GetOrCreateInGCContext(&profilingScanContext->pvEtwContext); + if (pContext == NULL) + return; + + // Determine root kind, root ID, and handle-specific flags + LPVOID pvRootID = NULL; + BYTE nRootKind = (BYTE)profilingScanContext->dwEtwRootKind; + switch (nRootKind) + { + case kEtwGCRootKindStack: +#if !defined (FEATURE_NATIVEAOT) && (defined(GC_PROFILING) || defined (DACCESS_COMPILE)) + pvRootID = profilingScanContext->pMD; +#endif // !defined (FEATURE_NATIVEAOT) && (defined(GC_PROFILING) || defined (DACCESS_COMPILE)) + break; + + case kEtwGCRootKindHandle: + pvRootID = pvHandle; + break; + + case kEtwGCRootKindFinalizer: + _ASSERTE(pvRootID == NULL); + break; + + case kEtwGCRootKindOther: + default: + _ASSERTE(nRootKind == kEtwGCRootKindOther); + _ASSERTE(pvRootID == NULL); + break; + } + + // Convert GC root flags to ETW root flags + if (dwGCFlags & GC_CALL_INTERIOR) + rootFlags |= kEtwGCRootFlagsInterior; + if (dwGCFlags & GC_CALL_PINNED) + rootFlags |= kEtwGCRootFlagsPinning; + + // Add root edge to appropriate buffer + if (fDependentHandle) + { + _ASSERTE(pContext->cGCBulkRootConditionalWeakTableElementEdges < + ARRAY_SIZE(pContext->rgGCBulkRootConditionalWeakTableElementEdges)); + EventStructGCBulkRootConditionalWeakTableElementEdgeValue* pRCWTEEdgeValue = + &pContext->rgGCBulkRootConditionalWeakTableElementEdges[pContext->cGCBulkRootConditionalWeakTableElementEdges]; + pRCWTEEdgeValue->GCKeyNodeID = pRootedNode; + pRCWTEEdgeValue->GCValueNodeID = pSecondaryNodeForDependentHandle; + pRCWTEEdgeValue->GCRootID = pvRootID; + pContext->cGCBulkRootConditionalWeakTableElementEdges++; + + // If RCWTE edge buffer is now full, empty it into ETW + if (pContext->cGCBulkRootConditionalWeakTableElementEdges == + ARRAY_SIZE(pContext->rgGCBulkRootConditionalWeakTableElementEdges)) + { + FireEtwGCBulkRootConditionalWeakTableElementEdge( + pContext->iCurBulkRootConditionalWeakTableElementEdge, + pContext->cGCBulkRootConditionalWeakTableElementEdges, + GetClrInstanceId(), + sizeof(pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]), + &pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]); + + pContext->iCurBulkRootConditionalWeakTableElementEdge++; + pContext->ClearRootConditionalWeakTableElementEdges(); + } + } + else + { + _ASSERTE(pContext->cGcBulkRootEdges < ARRAY_SIZE(pContext->rgGcBulkRootEdges)); + EventStructGCBulkRootEdgeValue* pBulkRootEdgeValue = &pContext->rgGcBulkRootEdges[pContext->cGcBulkRootEdges]; + pBulkRootEdgeValue->RootedNodeAddress = pRootedNode; + pBulkRootEdgeValue->GCRootKind = nRootKind; + pBulkRootEdgeValue->GCRootFlag = rootFlags; + pBulkRootEdgeValue->GCRootID = pvRootID; + 
pContext->cGcBulkRootEdges++; + + // If root edge buffer is now full, empty it into ETW + if (pContext->cGcBulkRootEdges == ARRAY_SIZE(pContext->rgGcBulkRootEdges)) + { + FireEtwGCBulkRootEdge( + pContext->iCurBulkRootEdge, + pContext->cGcBulkRootEdges, + GetClrInstanceId(), + sizeof(pContext->rgGcBulkRootEdges[0]), + &pContext->rgGcBulkRootEdges[0]); + + pContext->iCurBulkRootEdge++; + pContext->ClearRootEdges(); + } + } +} + +//--------------------------------------------------------------------------------------- +// +// Called during a heap walk for each object reference encountered. Batches up the +// corresponding node, edges, and type data for the ETW events. +// +// Arguments: +// * profilerWalkHeapContext - The shared profapi/etw context built up during the heap walk. +// * pObjReferenceSource - Object doing the pointing +// * typeID - Type of pObjReferenceSource +// * fDependentHandle - nonzero iff this is for a dependent handle +// * cRefs - Count of objects being pointed to +// * rgObjReferenceTargets - Array of objects being pointed to +// + +// static +VOID ETW::GCLog::ObjectReference( + ProfilerWalkHeapContext* profilerWalkHeapContext, + Object* pObjReferenceSource, + ULONGLONG typeID, + ULONGLONG cRefs, + Object** rgObjReferenceTargets) +{ + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + MODE_ANY; + + // LogTypeAndParametersIfNecessary can take a lock + CAN_TAKE_LOCK; + } + CONTRACTL_END; + + EtwGcHeapDumpContext* pContext = + EtwGcHeapDumpContext::GetOrCreateInGCContext(&profilerWalkHeapContext->pvEtwContext); + if (pContext == NULL) + return; + + //--------------------------------------------------------------------------------------- + // GCBulkNode events + //--------------------------------------------------------------------------------------- + + // Add Node (pObjReferenceSource) to buffer + _ASSERTE(pContext->cGcBulkNodeValues < ARRAY_SIZE(pContext->rgGcBulkNodeValues)); + EventStructGCBulkNodeValue* pBulkNodeValue = &pContext->rgGcBulkNodeValues[pContext->cGcBulkNodeValues]; + pBulkNodeValue->Address = pObjReferenceSource; + pBulkNodeValue->Size = pObjReferenceSource->GetSize(); + pBulkNodeValue->TypeID = typeID; + pBulkNodeValue->EdgeCount = cRefs; + pContext->cGcBulkNodeValues++; + + // If Node buffer is now full, empty it into ETW + if (pContext->cGcBulkNodeValues == ARRAY_SIZE(pContext->rgGcBulkNodeValues)) + { + FireEtwGCBulkNode( + pContext->iCurBulkNodeEvent, + pContext->cGcBulkNodeValues, + GetClrInstanceId(), + sizeof(pContext->rgGcBulkNodeValues[0]), + &pContext->rgGcBulkNodeValues[0]); + + pContext->iCurBulkNodeEvent++; + pContext->ClearNodes(); + } + + //--------------------------------------------------------------------------------------- + // BulkType events + //--------------------------------------------------------------------------------------- + + // We send type information as necessary--only for nodes, and only for nodes that we + // haven't already sent type info for + if (typeID != 0) + { + ETW::TypeSystemLog::LogTypeAndParametersIfNecessary( + &pContext->bulkTypeEventLogger, // Batch up this type with others to minimize events + typeID, + + // During heap walk, GC holds the lock for us, so we can directly enter the + // hash to see if the type has already been logged + ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime + ); + } + + //--------------------------------------------------------------------------------------- + // GCBulkEdge events + //--------------------------------------------------------------------------------------- 
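+    // (Consumers pair this stream with GCBulkNode: each node's EdgeCount says how
+    // many of the following edge records belong to that node.)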
+ + // Add Edges (rgObjReferenceTargets) to buffer. Buffer could fill up before all edges + // are added (it could even fill up multiple times during this one call if there are + // a lot of edges), so empty Edge buffer into ETW as we go along, as many times as we + // need. + + for (ULONGLONG i = 0; i < cRefs; i++) + { + _ASSERTE(pContext->cGcBulkEdgeValues < ARRAY_SIZE(pContext->rgGcBulkEdgeValues)); + EventStructGCBulkEdgeValue* pBulkEdgeValue = &pContext->rgGcBulkEdgeValues[pContext->cGcBulkEdgeValues]; + pBulkEdgeValue->Value = rgObjReferenceTargets[i]; + // FUTURE: ReferencingFieldID + pBulkEdgeValue->ReferencingFieldID = 0; + pContext->cGcBulkEdgeValues++; + + // If Edge buffer is now full, empty it into ETW + if (pContext->cGcBulkEdgeValues == ARRAY_SIZE(pContext->rgGcBulkEdgeValues)) + { + FireEtwGCBulkEdge( + pContext->iCurBulkEdgeEvent, + pContext->cGcBulkEdgeValues, + GetClrInstanceId(), + sizeof(pContext->rgGcBulkEdgeValues[0]), + &pContext->rgGcBulkEdgeValues[0]); + + pContext->iCurBulkEdgeEvent++; + pContext->ClearEdges(); + } + } +} + +//--------------------------------------------------------------------------------------- +// +// Called by GC at end of heap dump to give us a convenient time to flush any remaining +// buffers of data to ETW +// +// Arguments: +// profilerWalkHeapContext - Context containing data we've batched up +// + +// static +VOID ETW::GCLog::EndHeapDump(ProfilerWalkHeapContext* profilerWalkHeapContext) +{ + LIMITED_METHOD_CONTRACT; + + // If context isn't already set up for us, then we haven't been collecting any data + // for ETW events. + EtwGcHeapDumpContext* pContext = (EtwGcHeapDumpContext*)profilerWalkHeapContext->pvEtwContext; + if (pContext == NULL) + return; + + // If the GC events are enabled, flush any remaining root, node, and / or edge data + if (s_forcedGCInProgress && + ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, + TRACE_LEVEL_INFORMATION, + CLR_GCHEAPDUMP_KEYWORD)) + { + if (pContext->cGcBulkRootEdges > 0) + { + FireEtwGCBulkRootEdge( + pContext->iCurBulkRootEdge, + pContext->cGcBulkRootEdges, + GetClrInstanceId(), + sizeof(pContext->rgGcBulkRootEdges[0]), + &pContext->rgGcBulkRootEdges[0]); + } + + if (pContext->cGCBulkRootConditionalWeakTableElementEdges > 0) + { + FireEtwGCBulkRootConditionalWeakTableElementEdge( + pContext->iCurBulkRootConditionalWeakTableElementEdge, + pContext->cGCBulkRootConditionalWeakTableElementEdges, + GetClrInstanceId(), + sizeof(pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]), + &pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]); + } + + if (pContext->cGcBulkNodeValues > 0) + { + FireEtwGCBulkNode( + pContext->iCurBulkNodeEvent, + pContext->cGcBulkNodeValues, + GetClrInstanceId(), + sizeof(pContext->rgGcBulkNodeValues[0]), + &pContext->rgGcBulkNodeValues[0]); + } + + if (pContext->cGcBulkEdgeValues > 0) + { + FireEtwGCBulkEdge( + pContext->iCurBulkEdgeEvent, + pContext->cGcBulkEdgeValues, + GetClrInstanceId(), + sizeof(pContext->rgGcBulkEdgeValues[0]), + &pContext->rgGcBulkEdgeValues[0]); + } + } + + // Ditto for type events + if (ETW_TRACING_CATEGORY_ENABLED( + MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, + TRACE_LEVEL_INFORMATION, + CLR_TYPE_KEYWORD)) + { + pContext->bulkTypeEventLogger.FireBulkTypeEvent(); + } + + // Delete any GC state built up in the context + profilerWalkHeapContext->pvEtwContext = NULL; + delete pContext; +}