From 08d6595b99878cdd8a9d1424d8d3006abafc7ade Mon Sep 17 00:00:00 2001 From: Fokko Veegens <24793348+FokkoVeegens@users.noreply.github.com> Date: Fri, 30 Jan 2026 16:03:42 +0100 Subject: [PATCH 1/6] Enhance usage analysis with model tracking features - Added tier and multiplier attributes for various models in modelPricing.json - Introduced ModelSwitchingAnalysis type in main.ts to track model usage patterns - Implemented new UI section for displaying multi-model usage statistics, including average models per conversation, switching frequency, and models by tier for both daily and monthly views --- src/README.md | 4 +- src/extension.ts | 473 +++++++++++++++++++++++++++++++++++--- src/modelPricing.json | 153 ++++++++---- src/webview/usage/main.ts | 110 +++++++++ 4 files changed, 670 insertions(+), 70 deletions(-) diff --git a/src/README.md b/src/README.md index ad08cfe..484b25c 100644 --- a/src/README.md +++ b/src/README.md @@ -42,7 +42,9 @@ Contains pricing information for AI models, including input and output token cos "model-name": { "inputCostPerMillion": 1.75, "outputCostPerMillion": 14.0, - "category": "Model category" + "category": "Model category", + "tier": "standard|premium|unknown", + "multiplier": 1 } } } diff --git a/src/extension.ts b/src/extension.ts index d8d555d..33ff502 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -24,6 +24,8 @@ interface ModelPricing { inputCostPerMillion: number; outputCostPerMillion: number; category?: string; + tier?: 'standard' | 'premium' | 'unknown'; + multiplier?: number; } interface EditorUsage { @@ -84,6 +86,13 @@ interface SessionUsageAnalysis { modeUsage: ModeUsage; contextReferences: ContextReferenceUsage; mcpTools: McpToolUsage; + modelSwitching: { + uniqueModels: string[]; + modelCount: number; + switchCount: number; + tiers: { standard: string[]; premium: string[]; unknown: string[] }; + hasMixedTiers: boolean; + }; } interface ToolCallUsage { @@ -113,6 +122,19 @@ interface McpToolUsage { byTool: { 
[toolName: string]: number }; } +interface ModelSwitchingAnalysis { + modelsPerSession: number[]; // Array of unique model counts per session + totalSessions: number; + averageModelsPerSession: number; + maxModelsPerSession: number; + minModelsPerSession: number; + switchingFrequency: number; // % of sessions with >1 model + standardModels: string[]; // Unique standard models used + premiumModels: string[]; // Unique premium models used + unknownModels: string[]; // Unique models with unknown tier + mixedTierSessions: number; // Sessions using both standard and premium +} + interface UsageAnalysisStats { today: UsageAnalysisPeriod; month: UsageAnalysisPeriod; @@ -125,6 +147,7 @@ interface UsageAnalysisPeriod { modeUsage: ModeUsage; contextReferences: ContextReferenceUsage; mcpTools: McpToolUsage; + modelSwitching: ModelSwitchingAnalysis; } // Detailed session file information for diagnostics view @@ -174,6 +197,9 @@ interface SessionLogData { } class CopilotTokenTracker implements vscode.Disposable { + // Cache version - increment this when making changes that require cache invalidation + private static readonly CACHE_VERSION = 7; // Fix JSONL model extraction for Today data (2026-01-30) + private diagnosticsPanel?: vscode.WebviewPanel; private logViewerPanel?: vscode.WebviewPanel; private statusBarItem: vscode.StatusBarItem; @@ -195,13 +221,16 @@ class CopilotTokenTracker implements vscode.Disposable { private co2Per1kTokens = 0.2; // gCO2e per 1000 tokens, a rough estimate private co2AbsorptionPerTreePerYear = 21000; // grams of CO2 per tree per year private waterUsagePer1kTokens = 0.3; // liters of water per 1000 tokens, based on data center usage estimates + private _modelDebugFileCount = 0; // Counter for debug logging + private _cacheHits = 0; // Counter for cache hits during usage analysis + private _cacheMisses = 0; // Counter for cache misses during usage analysis // Model pricing data - loaded from modelPricing.json // Reference: OpenAI API Pricing 
(https://openai.com/api/pricing/) - Retrieved December 2025 // Reference: Anthropic Claude Pricing (https://www.anthropic.com/pricing) - Standard rates // Note: GitHub Copilot uses these models but pricing may differ from direct API usage // These are reference prices for cost estimation purposes only - private modelPricing: { [key: string]: ModelPricing } = modelPricingData.pricing; + private modelPricing: { [key: string]: ModelPricing } = modelPricingData.pricing as { [key: string]: ModelPricing }; // Helper method to get repository URL from package.json private getRepositoryUrl(): string { @@ -333,6 +362,14 @@ class CopilotTokenTracker implements vscode.Disposable { // Persistent cache storage methods private loadCacheFromStorage(): void { try { + // Check cache version first + const storedVersion = this.context.globalState.get('sessionFileCacheVersion'); + if (storedVersion !== CopilotTokenTracker.CACHE_VERSION) { + this.log(`Cache version mismatch (stored: ${storedVersion}, current: ${CopilotTokenTracker.CACHE_VERSION}). 
Clearing cache.`); + this.sessionFileCache = new Map(); + return; + } + const cacheData = this.context.globalState.get>('sessionFileCache'); if (cacheData) { this.sessionFileCache = new Map(Object.entries(cacheData)); @@ -352,7 +389,8 @@ class CopilotTokenTracker implements vscode.Disposable { // Convert Map to plain object for storage const cacheData = Object.fromEntries(this.sessionFileCache); await this.context.globalState.update('sessionFileCache', cacheData); - this.log(`Saved ${this.sessionFileCache.size} cached session files to storage`); + await this.context.globalState.update('sessionFileCacheVersion', CopilotTokenTracker.CACHE_VERSION); + this.log(`Saved ${this.sessionFileCache.size} cached session files to storage (version ${CopilotTokenTracker.CACHE_VERSION})`); } catch (error) { this.error('Error saving cache to storage:', error); } @@ -839,6 +877,11 @@ class CopilotTokenTracker implements vscode.Disposable { const todayStart = new Date(now.getFullYear(), now.getMonth(), now.getDate()); const monthStart = new Date(now.getFullYear(), now.getMonth(), 1); + this.log('🔍 [Usage Analysis] Starting calculation...'); + this._modelDebugFileCount = 0; // Reset debug counter + this._cacheHits = 0; // Reset cache hit counter + this._cacheMisses = 0; // Reset cache miss counter + const emptyPeriod = (): UsageAnalysisPeriod => ({ sessions: 0, toolCalls: { total: 0, byTool: {} }, @@ -852,7 +895,19 @@ class CopilotTokenTracker implements vscode.Disposable { terminal: 0, vscode: 0 }, - mcpTools: { total: 0, byServer: {}, byTool: {} } + mcpTools: { total: 0, byServer: {}, byTool: {} }, + modelSwitching: { + modelsPerSession: [], + totalSessions: 0, + averageModelsPerSession: 0, + maxModelsPerSession: 0, + minModelsPerSession: 0, + switchingFrequency: 0, + standardModels: [], + premiumModels: [], + unknownModels: [], + mixedTierSessions: 0 + } }); const todayStats = emptyPeriod(); @@ -860,8 +915,11 @@ class CopilotTokenTracker implements vscode.Disposable { try { const 
sessionFiles = await this.getCopilotSessionFiles(); - this.log(`Processing ${sessionFiles.length} session files for usage analysis`); + this.log(`🔍 [Usage Analysis] Processing ${sessionFiles.length} session files`); + let processed = 0; + const progressInterval = Math.max(1, Math.floor(sessionFiles.length / 20)); // Log every 5% + for (const sessionFile of sessionFiles) { try { const fileStats = fs.statSync(sessionFile); @@ -876,17 +934,38 @@ class CopilotTokenTracker implements vscode.Disposable { // Add to today stats if modified today if (fileStats.mtime >= todayStart) { todayStats.sessions++; + // Debug: log today session model data + const fileName = sessionFile.split(/[/\\]/).pop() || sessionFile; + const modelCount = analysis.modelSwitching?.modelCount || 0; + if (todayStats.sessions <= 10) { + this.log(`[DEBUG Today] Session #${todayStats.sessions}: ${fileName}, modelCount=${modelCount}, models=[${analysis.modelSwitching?.uniqueModels?.join(', ') || 'none'}]`); + } this.mergeUsageAnalysis(todayStats, analysis); } } + + processed++; + if (processed % progressInterval === 0) { + this.log(`🔍 [Usage Analysis] Progress: ${processed}/${sessionFiles.length} files (${Math.round(processed/sessionFiles.length*100)}%)`); + } } catch (fileError) { this.warn(`Error processing session file ${sessionFile} for usage analysis: ${fileError}`); + processed++; } } } catch (error) { this.error('Error calculating usage analysis stats:', error); } + // Log cache statistics + this.log(`🔍 [Usage Analysis] Cache stats: ${this._cacheHits} hits, ${this._cacheMisses} misses`); + + // Log model switching statistics for debugging (always log Today to debug the issue) + this.logModelSwitchingStats('Today', todayStats.modelSwitching); + if (monthStats.modelSwitching.totalSessions > 0) { + this.logModelSwitchingStats('This Month', monthStats.modelSwitching); + } + return { today: todayStats, month: monthStats, @@ -926,14 +1005,73 @@ class CopilotTokenTracker implements vscode.Disposable { 
for (const [tool, count] of Object.entries(analysis.mcpTools.byTool)) { period.mcpTools.byTool[tool] = (period.mcpTools.byTool[tool] || 0) + count; } + + // Merge model switching data + // Ensure modelSwitching exists (backward compatibility with old cache) + if (!analysis.modelSwitching) { + analysis.modelSwitching = { + uniqueModels: [], + modelCount: 0, + switchCount: 0, + tiers: { standard: [], premium: [], unknown: [] }, + hasMixedTiers: false + }; + } + + period.modelSwitching.totalSessions++; + period.modelSwitching.modelsPerSession.push(analysis.modelSwitching.modelCount); + + // Track unique models by tier + for (const model of analysis.modelSwitching.tiers.standard) { + if (!period.modelSwitching.standardModels.includes(model)) { + period.modelSwitching.standardModels.push(model); + } + } + for (const model of analysis.modelSwitching.tiers.premium) { + if (!period.modelSwitching.premiumModels.includes(model)) { + period.modelSwitching.premiumModels.push(model); + } + } + for (const model of analysis.modelSwitching.tiers.unknown) { + if (!period.modelSwitching.unknownModels.includes(model)) { + period.modelSwitching.unknownModels.push(model); + } + } + + // Count sessions with mixed tiers + if (analysis.modelSwitching.hasMixedTiers) { + period.modelSwitching.mixedTierSessions++; + } + + // Calculate aggregate statistics + if (period.modelSwitching.modelsPerSession.length > 0) { + const counts = period.modelSwitching.modelsPerSession; + period.modelSwitching.averageModelsPerSession = counts.reduce((a, b) => a + b, 0) / counts.length; + period.modelSwitching.maxModelsPerSession = Math.max(...counts); + period.modelSwitching.minModelsPerSession = Math.min(...counts); + period.modelSwitching.switchingFrequency = (counts.filter(c => c > 1).length / counts.length) * 100; + } + } + + private logModelSwitchingStats(label: string, stats: ModelSwitchingAnalysis): void { + this.log(`[${label}] Model Switching Summary:`); + this.log(` - Total Sessions: 
${stats.totalSessions}`); + this.log(` - Avg Models/Session: ${stats.averageModelsPerSession.toFixed(2)}`); + this.log(` - Max Models: ${stats.maxModelsPerSession}`); + this.log(` - Switching Freq: ${stats.switchingFrequency.toFixed(1)}%`); + this.log(` - Standard Models (${stats.standardModels.length}): ${stats.standardModels.join(', ') || 'none'}`); + this.log(` - Premium Models (${stats.premiumModels.length}): ${stats.premiumModels.join(', ') || 'none'}`); + this.log(` - Unknown Models (${stats.unknownModels.length}): ${stats.unknownModels.join(', ') || 'none'}`); + this.log(` - Mixed Tier Sessions: ${stats.mixedTierSessions}`); } private async countInteractionsInSession(sessionFile: string): Promise { try { const fileContent = await fs.promises.readFile(sessionFile, 'utf8'); - // Handle .jsonl files (Copilot CLI format and VS Code incremental format) - if (sessionFile.endsWith('.jsonl')) { + // Handle .jsonl files OR .json files with JSONL content (Copilot CLI format and VS Code incremental format) + const isJsonlContent = sessionFile.endsWith('.jsonl') || this.isJsonlContent(fileContent); + if (isJsonlContent) { const lines = fileContent.trim().split('\n'); let interactions = 0; for (const line of lines) { @@ -977,12 +1115,25 @@ class CopilotTokenTracker implements vscode.Disposable { private async getModelUsageFromSession(sessionFile: string): Promise { const modelUsage: ModelUsage = {}; + const fileName = sessionFile.split(/[/\\]/).pop() || sessionFile; + + // Debug: log every file processed (only log first 5 files to avoid noise) + if (!this._modelDebugFileCount) { this._modelDebugFileCount = 0; } + this._modelDebugFileCount++; + const shouldLogDebug = this._modelDebugFileCount <= 5; try { const fileContent = await fs.promises.readFile(sessionFile, 'utf8'); - // Handle .jsonl files (Copilot CLI format and VS Code incremental format) - if (sessionFile.endsWith('.jsonl')) { + // Detect JSONL content: either by extension or by content analysis + const 
isJsonlContent = sessionFile.endsWith('.jsonl') || this.isJsonlContent(fileContent); + + if (shouldLogDebug) { + this.log(`[DEBUG Model] Processing file ${this._modelDebugFileCount}: ${fileName}, isJsonl: ${isJsonlContent}, size: ${fileContent.length} bytes`); + } + + // Handle .jsonl files OR .json files with JSONL content (Copilot CLI format and VS Code incremental format) + if (isJsonlContent) { const lines = fileContent.trim().split('\n'); // Default model for CLI sessions - they may not specify the model per event let defaultModel = 'gpt-4o'; @@ -992,14 +1143,23 @@ class CopilotTokenTracker implements vscode.Disposable { try { const event = JSON.parse(line); - // Handle VS Code incremental format - extract model from session header - if (event.kind === 0 && event.v?.inputState?.selectedModel?.metadata?.id) { - defaultModel = event.v.inputState.selectedModel.metadata.id; + // Handle VS Code incremental format - extract model from session header (kind: 0) + // The schema has v.selectedModel.identifier or v.selectedModel.metadata.id + if (event.kind === 0) { + const modelId = event.v?.selectedModel?.identifier || + event.v?.selectedModel?.metadata?.id || + event.v?.inputState?.selectedModel?.metadata?.id; // Legacy fallback + if (modelId) { + defaultModel = modelId.replace(/^copilot\//, ''); + } } - // Handle model changes (kind: 1 with selectedModel update) - if (event.kind === 1 && event.k?.includes('selectedModel') && event.v?.metadata?.id) { - defaultModel = event.v.metadata.id; + // Handle model changes (kind: 2 with selectedModel update, NOT kind: 1 which is delete) + if (event.kind === 2 && event.k?.[0] === 'selectedModel') { + const modelId = event.v?.identifier || event.v?.metadata?.id; + if (modelId) { + defaultModel = modelId.replace(/^copilot\//, ''); + } } const model = event.model || defaultModel; @@ -1021,8 +1181,39 @@ class CopilotTokenTracker implements vscode.Disposable { // Handle VS Code incremental format (kind: 2 with requests) if 
(event.kind === 2 && event.k?.[0] === 'requests' && Array.isArray(event.v)) { for (const request of event.v) { + // Extract request-level modelId if available + let requestModel = model; + if (request.modelId) { + requestModel = request.modelId.replace(/^copilot\//, ''); + } else if (request.result?.metadata?.modelId) { + requestModel = request.result.metadata.modelId.replace(/^copilot\//, ''); + } else if (request.result?.details) { + // Parse model from details string like "Claude Opus 4.5 • 3x" + requestModel = this.getModelFromRequest(request); + } + + if (!modelUsage[requestModel]) { + modelUsage[requestModel] = { inputTokens: 0, outputTokens: 0 }; + } + if (request.message?.text) { - modelUsage[model].inputTokens += this.estimateTokensFromText(request.message.text, model); + modelUsage[requestModel].inputTokens += this.estimateTokensFromText(request.message.text, requestModel); + } + // Also process message.parts if available + if (request.message?.parts && Array.isArray(request.message.parts)) { + for (const part of request.message.parts) { + if (part.text && part.text !== request.message?.text) { + modelUsage[requestModel].inputTokens += this.estimateTokensFromText(part.text, requestModel); + } + } + } + // Process response items if present in the request + if (request.response && Array.isArray(request.response)) { + for (const responseItem of request.response) { + if (responseItem.value) { + modelUsage[requestModel].outputTokens += this.estimateTokensFromText(responseItem.value, requestModel); + } + } } } } @@ -1041,13 +1232,34 @@ class CopilotTokenTracker implements vscode.Disposable { // Skip malformed lines } } + // Debug: log JSONL model extraction results + if (shouldLogDebug) { + const models = Object.keys(modelUsage); + this.log(`[DEBUG Model] JSONL file ${fileName}: found ${models.length} models: ${models.join(', ') || 'none'}, defaultModel=${defaultModel}`); + } return modelUsage; } // Handle regular .json files const sessionContent = 
JSON.parse(fileContent); + + // Debug: log JSON structure for first few files + if (shouldLogDebug) { + const hasRequests = Array.isArray(sessionContent.requests); + const requestCount = hasRequests ? sessionContent.requests.length : 0; + const topLevelKeys = Object.keys(sessionContent).slice(0, 5).join(', '); + this.log(`[DEBUG Model] JSON file ${fileName}: hasRequests=${hasRequests}, requestCount=${requestCount}, keys=[${topLevelKeys}]`); + } if (sessionContent.requests && Array.isArray(sessionContent.requests)) { + // Debug: log first request to understand structure + if (sessionContent.requests.length > 0 && shouldLogDebug) { + const firstReq = sessionContent.requests[0]; + const model = this.getModelFromRequest(firstReq); + const reqKeys = Object.keys(firstReq).slice(0, 5).join(', '); + this.log(`[DEBUG Model] First request keys: [${reqKeys}], modelId=${firstReq.modelId}, result.metadata.modelId=${firstReq.result?.metadata?.modelId}, detected=${model}`); + } + for (const request of sessionContent.requests) { // Get model for this request const model = this.getModelFromRequest(request); @@ -1101,14 +1313,22 @@ class CopilotTokenTracker implements vscode.Disposable { terminal: 0, vscode: 0 }, - mcpTools: { total: 0, byServer: {}, byTool: {} } + mcpTools: { total: 0, byServer: {}, byTool: {} }, + modelSwitching: { + uniqueModels: [], + modelCount: 0, + switchCount: 0, + tiers: { standard: [], premium: [], unknown: [] }, + hasMixedTiers: false + } }; try { const fileContent = await fs.promises.readFile(sessionFile, 'utf8'); - // Handle .jsonl files (Copilot CLI format and VS Code incremental format) - if (sessionFile.endsWith('.jsonl')) { + // Handle .jsonl files OR .json files with JSONL content (Copilot CLI format and VS Code incremental format) + const isJsonlContent = sessionFile.endsWith('.jsonl') || this.isJsonlContent(fileContent); + if (isJsonlContent) { const lines = fileContent.trim().split('\n'); let sessionMode = 'ask'; // Default mode @@ -1199,6 
+1419,8 @@ class CopilotTokenTracker implements vscode.Disposable { // Skip malformed lines } } + // Calculate model switching for JSONL files before returning + await this.calculateModelSwitching(sessionFile, analysis); return analysis; } @@ -1320,9 +1542,83 @@ class CopilotTokenTracker implements vscode.Disposable { this.warn(`Error analyzing session usage from ${sessionFile}: ${error}`); } + // Calculate model switching statistics from session + await this.calculateModelSwitching(sessionFile, analysis); + return analysis; } + /** + * Calculate model switching statistics for a session file. + * This method updates the analysis.modelSwitching field in place. + */ + private async calculateModelSwitching(sessionFile: string, analysis: SessionUsageAnalysis): Promise { + try { + // Use non-cached method to avoid circular dependency + // (getSessionFileDataCached -> analyzeSessionUsage -> getModelUsageFromSessionCached -> getSessionFileDataCached) + const modelUsage = await this.getModelUsageFromSession(sessionFile); + const modelCount = modelUsage ? Object.keys(modelUsage).length : 0; + + // Debug: log model extraction results for first few files (regardless of success) + const fileName = sessionFile.split(/[/\\]/).pop() || sessionFile; + if (this._modelDebugFileCount <= 10) { + this.log(`[DEBUG analyzeSession] File #${this._modelDebugFileCount}: ${fileName}, models found: ${modelCount}${modelCount > 0 ? 
` (${Object.keys(modelUsage).join(', ')})` : ''}`); + } + + // Skip if modelUsage is undefined or empty (not a valid session file) + if (!modelUsage || modelCount === 0) { + return; + } + + // Get unique models from this session + const uniqueModels = Object.keys(modelUsage); + analysis.modelSwitching.uniqueModels = uniqueModels; + analysis.modelSwitching.modelCount = uniqueModels.length; + + // Classify models by tier + const standardModels: string[] = []; + const premiumModels: string[] = []; + const unknownModels: string[] = []; + + for (const model of uniqueModels) { + const tier = this.getModelTier(model); + if (tier === 'standard') { + standardModels.push(model); + } else if (tier === 'premium') { + premiumModels.push(model); + } else { + unknownModels.push(model); + } + } + + analysis.modelSwitching.tiers = { standard: standardModels, premium: premiumModels, unknown: unknownModels }; + analysis.modelSwitching.hasMixedTiers = standardModels.length > 0 && premiumModels.length > 0; + + // Count model switches by examining request sequence (for JSON files only - not JSONL) + const fileContent = await fs.promises.readFile(sessionFile, 'utf8'); + const isJsonlContent = sessionFile.endsWith('.jsonl') || this.isJsonlContent(fileContent); + if (!isJsonlContent) { + const sessionContent = JSON.parse(fileContent); + if (sessionContent.requests && Array.isArray(sessionContent.requests)) { + let previousModel: string | null = null; + let switchCount = 0; + + for (const request of sessionContent.requests) { + const currentModel = this.getModelFromRequest(request); + if (previousModel && currentModel !== previousModel) { + switchCount++; + } + previousModel = currentModel; + } + + analysis.modelSwitching.switchCount = switchCount; + } + } + } catch (error) { + this.warn(`Error calculating model switching for ${sessionFile}: ${error}`); + } + } + /** * Analyze text for context references like #file, #selection, @workspace */ @@ -1375,9 +1671,11 @@ class CopilotTokenTracker 
implements vscode.Disposable { // Check if we have valid cached data const cached = this.getCachedSessionData(sessionFilePath); if (cached && cached.mtime === mtime) { + this._cacheHits++; return cached; } + this._cacheMisses++; // Cache miss - read and process the file once to get all data const tokens = await this.estimateTokensFromSession(sessionFilePath); const interactions = await this.countInteractionsInSession(sessionFilePath); @@ -1413,7 +1711,7 @@ class CopilotTokenTracker implements vscode.Disposable { private async getUsageAnalysisFromSessionCached(sessionFile: string, mtime: number): Promise { const sessionData = await this.getSessionFileDataCached(sessionFile, mtime); - return sessionData.usageAnalysis || { + const analysis = sessionData.usageAnalysis || { toolCalls: { total: 0, byTool: {} }, modeUsage: { ask: 0, edit: 0, agent: 0 }, contextReferences: { @@ -1425,8 +1723,28 @@ class CopilotTokenTracker implements vscode.Disposable { terminal: 0, vscode: 0 }, - mcpTools: { total: 0, byServer: {}, byTool: {} } + mcpTools: { total: 0, byServer: {}, byTool: {} }, + modelSwitching: { + uniqueModels: [], + modelCount: 0, + switchCount: 0, + tiers: { standard: [], premium: [], unknown: [] }, + hasMixedTiers: false + } }; + + // Ensure modelSwitching field exists for backward compatibility with old cache + if (!analysis.modelSwitching) { + analysis.modelSwitching = { + uniqueModels: [], + modelCount: 0, + switchCount: 0, + tiers: { standard: [], premium: [], unknown: [] }, + hasMixedTiers: false + }; + } + + return analysis; } /** @@ -1468,8 +1786,9 @@ class CopilotTokenTracker implements vscode.Disposable { try { const fileContent = await fs.promises.readFile(sessionFile, 'utf8'); - // Handle .jsonl files (Copilot CLI format and VS Code incremental format) - if (sessionFile.endsWith('.jsonl')) { + // Handle .jsonl files OR .json files with JSONL content (Copilot CLI format and VS Code incremental format) + const isJsonlContent = sessionFile.endsWith('.jsonl') 
|| this.isJsonlContent(fileContent); + if (isJsonlContent) { const lines = fileContent.trim().split('\n'); const timestamps: number[] = []; @@ -2167,6 +2486,10 @@ class CopilotTokenTracker implements vscode.Disposable { if (entry.isDirectory()) { this.scanDirectoryForSessionFiles(fullPath, sessionFiles); } else if (entry.name.endsWith('.json') || entry.name.endsWith('.jsonl')) { + // Skip known non-session files (embeddings, indexes, etc.) + if (this.isNonSessionFile(entry.name)) { + continue; + } // Only add files that look like session files (have reasonable content) try { const stats = fs.statSync(fullPath); @@ -2183,12 +2506,29 @@ class CopilotTokenTracker implements vscode.Disposable { } } + /** + * Check if a filename is a known non-session file that should be excluded + */ + private isNonSessionFile(filename: string): boolean { + const nonSessionFilePatterns = [ + 'embeddings', // commandEmbeddings.json, settingEmbeddings.json + 'index', // index files + 'cache', // cache files + 'preferences', + 'settings', + 'config' + ]; + const lowerFilename = filename.toLowerCase(); + return nonSessionFilePatterns.some(pattern => lowerFilename.includes(pattern)); + } + private async estimateTokensFromSession(sessionFilePath: string): Promise { try { const fileContent = await fs.promises.readFile(sessionFilePath, 'utf8'); - // Handle .jsonl files (each line is a separate JSON object) - if (sessionFilePath.endsWith('.jsonl')) { + // Handle .jsonl files OR .json files with JSONL content (each line is a separate JSON object) + const isJsonlContent = sessionFilePath.endsWith('.jsonl') || this.isJsonlContent(fileContent); + if (isJsonlContent) { return this.estimateTokensFromJsonlSession(fileContent); } @@ -2279,10 +2619,17 @@ class CopilotTokenTracker implements vscode.Disposable { } private getModelFromRequest(request: any): string { - // Try to determine model from request metadata + // Try to determine model from request metadata (most reliable source) + // First check 
the top-level modelId field (VS Code format) + if (request.modelId) { + // Remove "copilot/" prefix if present + return request.modelId.replace(/^copilot\//, ''); + } + if (request.result && request.result.metadata && request.result.metadata.modelId) { - return request.result.metadata.modelId; + return request.result.metadata.modelId.replace(/^copilot\//, ''); } + if (request.result && request.result.details) { if (request.result.details.includes('Claude Sonnet 3.5')) { return 'claude-sonnet-3.5'; } if (request.result.details.includes('Claude Sonnet 3.7')) { return 'claude-sonnet-3.7'; } @@ -2302,6 +2649,59 @@ class CopilotTokenTracker implements vscode.Disposable { return 'gpt-4'; // default } + /** + * Detect if file content is JSONL format (multiple JSON objects, one per line) + * This handles cases where .json files actually contain JSONL content + */ + private isJsonlContent(content: string): boolean { + const trimmed = content.trim(); + // JSONL typically has multiple lines, each starting with { and ending with } + if (!trimmed.includes('\n')) { + return false; // Single line - not JSONL + } + const lines = trimmed.split('\n').filter(l => l.trim()); + if (lines.length < 2) { + return false; // Need multiple lines for JSONL + } + // Check if first two non-empty lines look like separate JSON objects + const firstLine = lines[0].trim(); + const secondLine = lines[1].trim(); + return firstLine.startsWith('{') && firstLine.endsWith('}') && + secondLine.startsWith('{') && secondLine.endsWith('}'); + } + + private getModelTier(modelId: string): 'standard' | 'premium' | 'unknown' { + // Determine tier based on multiplier: 0 = standard, >0 = premium + // Look up from modelPricing.json + const pricingInfo = this.modelPricing[modelId]; + if (pricingInfo && typeof pricingInfo.multiplier === 'number') { + return pricingInfo.multiplier === 0 ? 
'standard' : 'premium'; + } + + // Fallback: try to match partial model names + for (const [key, value] of Object.entries(this.modelPricing)) { + if (modelId.includes(key) || key.includes(modelId)) { + if (typeof value.multiplier === 'number') { + return value.multiplier === 0 ? 'standard' : 'premium'; + } + } + } + + return 'unknown'; + } + + private extractModelTierFromTooltip(tooltip: string): 'standard' | 'premium' | 'unknown' { + // Standard models have text like "does not count towards your premium request limit" + if (tooltip && tooltip.toLowerCase().includes('does not count towards your premium request limit')) { + return 'standard'; + } + // Premium models may have multiplier text like "3x" or "counted at a" + if (tooltip && (tooltip.includes('•') || tooltip.toLowerCase().includes('counted at'))) { + return 'premium'; + } + return 'unknown'; + } + private estimateTokensFromText(text: string, model: string = 'gpt-4'): number { // Token estimation based on character count and model let tokensPerChar = 0.25; // default @@ -2439,15 +2839,28 @@ class CopilotTokenTracker implements vscode.Disposable { public async showUsageAnalysis(): Promise { this.log('📊 Opening Usage Analysis dashboard'); - // If panel already exists, just reveal it + // If panel already exists, dispose it and recreate with fresh data if (this.analysisPanel) { - this.analysisPanel.reveal(); - this.log('📊 Usage Analysis dashboard revealed (already exists)'); - return; + this.log('📊 Closing existing panel to refresh data...'); + this.analysisPanel.dispose(); + this.analysisPanel = undefined; } // Get usage analysis stats const analysisStats = await this.calculateUsageAnalysisStats(); + + // Log the data being sent to webview for debugging + this.log('[DEBUG] Usage Analysis Data:'); + this.log(JSON.stringify({ + today: { + sessions: analysisStats.today.sessions, + modelSwitching: analysisStats.today.modelSwitching + }, + month: { + sessions: analysisStats.month.sessions, + modelSwitching: 
analysisStats.month.modelSwitching + } + }, null, 2)); // Create webview panel this.analysisPanel = vscode.window.createWebviewPanel( diff --git a/src/modelPricing.json b/src/modelPricing.json index dc9d197..a092066 100644 --- a/src/modelPricing.json +++ b/src/modelPricing.json @@ -2,7 +2,7 @@ "$schema": "http://json-schema.org/draft-07/schema#", "description": "Model pricing data - costs per million tokens for input and output", "metadata": { - "lastUpdated": "2026-01-16", + "lastUpdated": "2026-01-30", "sources": [ { "name": "OpenAI API Pricing", @@ -23,7 +23,8 @@ { "name": "GitHub Copilot Supported Models", "url": "https://docs.github.com/en/copilot/reference/ai-models/supported-models", - "retrievedDate": "2026-01-16" + "retrievedDate": "2026-01-30", + "note": "Source for tier/multiplier data" } ], "disclaimer": "GitHub Copilot uses these models but pricing may differ from direct API usage. These are reference prices for cost estimation purposes only." @@ -32,187 +33,261 @@ "gpt-5": { "inputCostPerMillion": 1.25, "outputCostPerMillion": 10.0, - "category": "GPT-5 models" + "category": "GPT-5 models", + "tier": "premium", + "multiplier": 1 }, "gpt-5-codex": { "inputCostPerMillion": 1.25, "outputCostPerMillion": 10.0, - "category": "GPT-5 models" + "category": "GPT-5 models", + "tier": "premium", + "multiplier": 1 }, "gpt-5-mini": { "inputCostPerMillion": 0.25, "outputCostPerMillion": 2.0, - "category": "GPT-5 models" + "category": "GPT-5 models", + "tier": "standard", + "multiplier": 0 }, "gpt-5.1": { "inputCostPerMillion": 1.25, "outputCostPerMillion": 10.0, - "category": "GPT-5 models" + "category": "GPT-5 models", + "tier": "premium", + "multiplier": 1 }, "gpt-5.1-codex": { "inputCostPerMillion": 1.25, "outputCostPerMillion": 10.0, - "category": "GPT-5 models" + "category": "GPT-5 models", + "tier": "premium", + "multiplier": 1 }, "gpt-5.1-codex-max": { "inputCostPerMillion": 1.75, "outputCostPerMillion": 14.0, - "category": "GPT-5 models" + "category": 
"GPT-5 models", + "tier": "premium", + "multiplier": 1 }, "gpt-5.1-codex-mini": { "inputCostPerMillion": 0.25, "outputCostPerMillion": 2.0, - "category": "GPT-5 models" + "category": "GPT-5 models", + "tier": "premium", + "multiplier": 0.33 }, "gpt-5.2": { "inputCostPerMillion": 1.75, "outputCostPerMillion": 14.0, - "category": "GPT-5 models" + "category": "GPT-5 models", + "tier": "premium", + "multiplier": 1 }, "gpt-5.2-codex": { "inputCostPerMillion": 1.75, "outputCostPerMillion": 14.0, - "category": "GPT-5 models" + "category": "GPT-5 models", + "tier": "premium", + "multiplier": 1 }, "gpt-5.2-pro": { "inputCostPerMillion": 21.0, "outputCostPerMillion": 168.0, - "category": "GPT-5 models" + "category": "GPT-5 models", + "tier": "premium", + "multiplier": 1 }, "gpt-4": { "inputCostPerMillion": 3.0, "outputCostPerMillion": 12.0, - "category": "GPT-4 models" + "category": "GPT-4 models", + "tier": "unknown", + "multiplier": 1 }, "gpt-4.1": { "inputCostPerMillion": 3.0, "outputCostPerMillion": 12.0, - "category": "GPT-4 models" + "category": "GPT-4 models", + "tier": "standard", + "multiplier": 0 }, "gpt-4.1-mini": { "inputCostPerMillion": 0.8, "outputCostPerMillion": 3.2, - "category": "GPT-4 models" + "category": "GPT-4 models", + "tier": "standard", + "multiplier": 0 }, "gpt-4.1-nano": { "inputCostPerMillion": 0.2, "outputCostPerMillion": 0.8, - "category": "GPT-4 models" + "category": "GPT-4 models", + "tier": "standard", + "multiplier": 0 }, "gpt-4o": { "inputCostPerMillion": 2.5, "outputCostPerMillion": 10.0, - "category": "GPT-4 models" + "category": "GPT-4 models", + "tier": "standard", + "multiplier": 0 }, "gpt-4o-mini": { "inputCostPerMillion": 0.15, "outputCostPerMillion": 0.6, - "category": "GPT-4 models" + "category": "GPT-4 models", + "tier": "standard", + "multiplier": 0 }, "claude-sonnet-3.5": { "inputCostPerMillion": 3.0, "outputCostPerMillion": 15.0, - "category": "Claude models (Anthropic)" + "category": "Claude models (Anthropic)", + "tier": 
"unknown", + "multiplier": 1 }, "claude-sonnet-3.7": { "inputCostPerMillion": 3.0, "outputCostPerMillion": 15.0, - "category": "Claude models (Anthropic)" + "category": "Claude models (Anthropic)", + "tier": "unknown", + "multiplier": 1 }, "claude-sonnet-4": { "inputCostPerMillion": 3.0, "outputCostPerMillion": 15.0, - "category": "Claude models (Anthropic)" + "category": "Claude models (Anthropic)", + "tier": "premium", + "multiplier": 1 }, "claude-sonnet-4.5": { "inputCostPerMillion": 3.0, "outputCostPerMillion": 15.0, - "category": "Claude models (Anthropic)" + "category": "Claude models (Anthropic)", + "tier": "premium", + "multiplier": 1 }, "claude-haiku": { "inputCostPerMillion": 0.25, "outputCostPerMillion": 1.25, - "category": "Claude models (Anthropic)" + "category": "Claude models (Anthropic)", + "tier": "standard", + "multiplier": 0 }, "claude-haiku-4.5": { "inputCostPerMillion": 1.0, "outputCostPerMillion": 5.0, - "category": "Claude models (Anthropic)" + "category": "Claude models (Anthropic)", + "tier": "premium", + "multiplier": 0.33 }, "claude-opus-4.1": { "inputCostPerMillion": 15.0, "outputCostPerMillion": 75.0, - "category": "Claude models (Anthropic)" + "category": "Claude models (Anthropic)", + "tier": "premium", + "multiplier": 10 }, "claude-opus-4.5": { "inputCostPerMillion": 5.0, "outputCostPerMillion": 25.0, - "category": "Claude models (Anthropic)" + "category": "Claude models (Anthropic)", + "tier": "premium", + "multiplier": 3 }, "o3-mini": { "inputCostPerMillion": 4.0, "outputCostPerMillion": 16.0, - "category": "OpenAI reasoning models" + "category": "OpenAI reasoning models", + "tier": "premium", + "multiplier": 1 }, "o4-mini": { "inputCostPerMillion": 4.0, "outputCostPerMillion": 16.0, - "category": "OpenAI reasoning models" + "category": "OpenAI reasoning models", + "tier": "premium", + "multiplier": 1 }, "gpt-3.5-turbo": { "inputCostPerMillion": 0.5, "outputCostPerMillion": 1.5, - "category": "Legacy models" + "category": "Legacy 
models", + "tier": "standard", + "multiplier": 0 }, "gemini-2.5-pro": { "inputCostPerMillion": 1.25, "outputCostPerMillion": 10.0, - "category": "Google Gemini models" + "category": "Google Gemini models", + "tier": "premium", + "multiplier": 1 }, "gemini-2.5-flash": { "inputCostPerMillion": 0.30, "outputCostPerMillion": 2.5, - "category": "Google Gemini models" + "category": "Google Gemini models", + "tier": "unknown", + "multiplier": 1 }, "gemini-2.5-flash-lite": { "inputCostPerMillion": 0.10, "outputCostPerMillion": 0.40, - "category": "Google Gemini models" + "category": "Google Gemini models", + "tier": "unknown", + "multiplier": 1 }, "gemini-2.0-flash": { "inputCostPerMillion": 0.10, "outputCostPerMillion": 0.40, - "category": "Google Gemini models" + "category": "Google Gemini models", + "tier": "standard", + "multiplier": 0 }, "gemini-2.0-flash-lite": { "inputCostPerMillion": 0.075, "outputCostPerMillion": 0.30, - "category": "Google Gemini models" + "category": "Google Gemini models", + "tier": "standard", + "multiplier": 0 }, "gemini-3-flash": { "inputCostPerMillion": 0.50, "outputCostPerMillion": 3.0, - "category": "Google Gemini models" + "category": "Google Gemini models", + "tier": "premium", + "multiplier": 0.33 }, "gemini-3-pro": { "inputCostPerMillion": 2.0, "outputCostPerMillion": 12.0, - "category": "Google Gemini models" + "category": "Google Gemini models", + "tier": "premium", + "multiplier": 1 }, "gemini-3-pro-preview": { "inputCostPerMillion": 2.0, "outputCostPerMillion": 12.0, - "category": "Google Gemini models" + "category": "Google Gemini models", + "tier": "premium", + "multiplier": 1 }, "grok-code-fast-1": { "inputCostPerMillion": 0.20, "outputCostPerMillion": 1.50, - "category": "xAI Grok models" + "category": "xAI Grok models", + "tier": "premium", + "multiplier": 0.25 }, "raptor-mini": { "inputCostPerMillion": 0.25, "outputCostPerMillion": 2.0, - "category": "GitHub Copilot fine-tuned models" + "category": "GitHub Copilot fine-tuned 
models", + "tier": "standard", + "multiplier": 0 } } } diff --git a/src/webview/usage/main.ts b/src/webview/usage/main.ts index 0a96f91..d61cb74 100644 --- a/src/webview/usage/main.ts +++ b/src/webview/usage/main.ts @@ -15,12 +15,26 @@ type ContextReferenceUsage = { type ToolCallUsage = { total: number; byTool: { [key: string]: number } }; type McpToolUsage = { total: number; byServer: { [key: string]: number }; byTool: { [key: string]: number } }; +type ModelSwitchingAnalysis = { + modelsPerSession: number[]; + totalSessions: number; + averageModelsPerSession: number; + maxModelsPerSession: number; + minModelsPerSession: number; + switchingFrequency: number; + standardModels: string[]; + premiumModels: string[]; + unknownModels: string[]; + mixedTierSessions: number; +}; + type UsageAnalysisPeriod = { sessions: number; toolCalls: ToolCallUsage; modeUsage: ModeUsage; contextReferences: ContextReferenceUsage; mcpTools: McpToolUsage; + modelSwitching: ModelSwitchingAnalysis; }; type UsageAnalysisStats = { @@ -363,6 +377,102 @@ function renderLayout(stats: UsageAnalysisStats): void { + +
+
🔀Multi-Model Usage
+
Track model diversity and switching patterns in your conversations
+
+
+

📅 Today

+
+
+
📊 Avg Models per Conversation
+
${stats.today.modelSwitching.averageModelsPerSession.toFixed(1)}
+
+
+
🔄 Switching Frequency
+
${stats.today.modelSwitching.switchingFrequency.toFixed(0)}%
+
Sessions with >1 model
+
+
+
📈 Max Models in Session
+
${stats.today.modelSwitching.maxModelsPerSession || 0}
+
+
+
+
Models by Tier:
+ ${stats.today.modelSwitching.standardModels.length > 0 ? ` +
+ 🔵 Standard: + ${stats.today.modelSwitching.standardModels.join(', ')} +
+ ` : ''} + ${stats.today.modelSwitching.premiumModels.length > 0 ? ` +
+ ⭐ Premium: + ${stats.today.modelSwitching.premiumModels.join(', ')} +
+ ` : ''} + ${stats.today.modelSwitching.unknownModels.length > 0 ? ` +
+ ❓ Unknown: + ${stats.today.modelSwitching.unknownModels.join(', ')} +
+ ` : ''} + ${stats.today.modelSwitching.mixedTierSessions > 0 ? ` +
+ 🔀 Mixed tier sessions: ${stats.today.modelSwitching.mixedTierSessions} +
+ ` : ''} +
+
+
+

📊 This Month

+
+
+
📊 Avg Models per Conversation
+
${stats.month.modelSwitching.averageModelsPerSession.toFixed(1)}
+
+
+
🔄 Switching Frequency
+
${stats.month.modelSwitching.switchingFrequency.toFixed(0)}%
+
Sessions with >1 model
+
+
+
📈 Max Models in Session
+
${stats.month.modelSwitching.maxModelsPerSession || 0}
+
+
+
+
Models by Tier:
+ ${stats.month.modelSwitching.standardModels.length > 0 ? ` +
+ 🔵 Standard: + ${stats.month.modelSwitching.standardModels.join(', ')} +
+ ` : ''} + ${stats.month.modelSwitching.premiumModels.length > 0 ? ` +
+ ⭐ Premium: + ${stats.month.modelSwitching.premiumModels.join(', ')} +
+ ` : ''} + ${stats.month.modelSwitching.unknownModels.length > 0 ? ` +
+ ❓ Unknown: + ${stats.month.modelSwitching.unknownModels.join(', ')} +
+ ` : ''} + ${stats.month.modelSwitching.mixedTierSessions > 0 ? ` +
+ 🔀 Mixed tier sessions: ${stats.month.modelSwitching.mixedTierSessions} +
+ ` : ''} +
+
+
+
+
📈Sessions Summary
From b3bba964868f9498849529b6918d47876c3561cf Mon Sep 17 00:00:00 2001 From: Fokko Veegens <24793348+FokkoVeegens@users.noreply.github.com> Date: Fri, 30 Jan 2026 16:12:36 +0100 Subject: [PATCH 2/6] Remove unused extractModelTierFromTooltip function Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/extension.ts | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/src/extension.ts b/src/extension.ts index fefa58a..aca65ef 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -2733,18 +2733,6 @@ class CopilotTokenTracker implements vscode.Disposable { return 'unknown'; } - private extractModelTierFromTooltip(tooltip: string): 'standard' | 'premium' | 'unknown' { - // Standard models have text like "does not count towards your premium request limit" - if (tooltip && tooltip.toLowerCase().includes('does not count towards your premium request limit')) { - return 'standard'; - } - // Premium models may have multiplier text like "3x" or "counted at a" - if (tooltip && (tooltip.includes('•') || tooltip.toLowerCase().includes('counted at'))) { - return 'premium'; - } - return 'unknown'; - } - private estimateTokensFromText(text: string, model: string = 'gpt-4'): number { // Token estimation based on character count and model let tokensPerChar = 0.25; // default From b20647a7aa9d41795bd9d213078158df343f0fce Mon Sep 17 00:00:00 2001 From: Fokko Veegens <24793348+FokkoVeegens@users.noreply.github.com> Date: Fri, 30 Jan 2026 16:19:37 +0100 Subject: [PATCH 3/6] Fixed missing commas --- src/modelPricing.json | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/modelPricing.json b/src/modelPricing.json index 8735cff..95bc44f 100644 --- a/src/modelPricing.json +++ b/src/modelPricing.json @@ -35,7 +35,7 @@ "outputCostPerMillion": 10.0, "category": "GPT-5 models", "tier": "premium", - "multiplier": 1 + "multiplier": 1, "displayNames": ["GPT-5"] }, "gpt-5-codex": { @@ -106,7 +106,7 @@ 
"outputCostPerMillion": 12.0, "category": "GPT-4 models", "tier": "unknown", - "multiplier": 1 + "multiplier": 1, "displayNames": ["GPT-4"] }, "gpt-4.1": { @@ -114,7 +114,7 @@ "outputCostPerMillion": 12.0, "category": "GPT-4 models", "tier": "standard", - "multiplier": 0 + "multiplier": 0, "displayNames": ["GPT-4.1"] }, "gpt-4.1-mini": { @@ -136,7 +136,7 @@ "outputCostPerMillion": 10.0, "category": "GPT-4 models", "tier": "standard", - "multiplier": 0 + "multiplier": 0, "displayNames": ["GPT-4o"] }, "gpt-4o-mini": { @@ -144,7 +144,7 @@ "outputCostPerMillion": 0.6, "category": "GPT-4 models", "tier": "standard", - "multiplier": 0 + "multiplier": 0, "displayNames": ["GPT-4o-mini", "GPT-4o Mini"] }, "claude-sonnet-3.5": { @@ -152,7 +152,7 @@ "outputCostPerMillion": 15.0, "category": "Claude models (Anthropic)", "tier": "unknown", - "multiplier": 1 + "multiplier": 1, "displayNames": ["Claude Sonnet 3.5"] }, "claude-sonnet-3.7": { @@ -160,7 +160,7 @@ "outputCostPerMillion": 15.0, "category": "Claude models (Anthropic)", "tier": "unknown", - "multiplier": 1 + "multiplier": 1, "displayNames": ["Claude Sonnet 3.7"] }, "claude-sonnet-4": { @@ -168,7 +168,7 @@ "outputCostPerMillion": 15.0, "category": "Claude models (Anthropic)", "tier": "premium", - "multiplier": 1 + "multiplier": 1, "displayNames": ["Claude Sonnet 4"] }, "claude-sonnet-4.5": { @@ -211,7 +211,7 @@ "outputCostPerMillion": 16.0, "category": "OpenAI reasoning models", "tier": "premium", - "multiplier": 1 + "multiplier": 1, "displayNames": ["o3-mini"] }, "o4-mini": { @@ -219,7 +219,7 @@ "outputCostPerMillion": 16.0, "category": "OpenAI reasoning models", "tier": "premium", - "multiplier": 1 + "multiplier": 1, "displayNames": ["o4-mini"] }, "gpt-3.5-turbo": { @@ -227,7 +227,7 @@ "outputCostPerMillion": 1.5, "category": "Legacy models", "tier": "standard", - "multiplier": 0 + "multiplier": 0, "displayNames": ["GPT-3.5-Turbo", "GPT-3.5 Turbo"] }, "gemini-2.5-pro": { @@ -235,7 +235,7 @@ "outputCostPerMillion": 
10.0, "category": "Google Gemini models", "tier": "premium", - "multiplier": 1 + "multiplier": 1, "displayNames": ["Gemini 2.5 Pro"] }, "gemini-2.5-flash": { @@ -278,7 +278,7 @@ "outputCostPerMillion": 12.0, "category": "Google Gemini models", "tier": "premium", - "multiplier": 1 + "multiplier": 1, "displayNames": ["Gemini 3 Pro"] }, "gemini-3-pro-preview": { @@ -286,7 +286,7 @@ "outputCostPerMillion": 12.0, "category": "Google Gemini models", "tier": "premium", - "multiplier": 1 + "multiplier": 1, "displayNames": ["Gemini 3 Pro (Preview)"] }, "grok-code-fast-1": { From 83b28425e04c25cebf46c8e46cf79dff716e1657 Mon Sep 17 00:00:00 2001 From: Fokko Veegens <24793348+FokkoVeegens@users.noreply.github.com> Date: Fri, 30 Jan 2026 16:21:32 +0100 Subject: [PATCH 4/6] Clarify comment on legacy Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/extension.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/extension.ts b/src/extension.ts index aca65ef..a1973b3 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -1185,7 +1185,9 @@ class CopilotTokenTracker implements vscode.Disposable { if (event.kind === 0) { const modelId = event.v?.selectedModel?.identifier || event.v?.selectedModel?.metadata?.id || - event.v?.inputState?.selectedModel?.metadata?.id; // Legacy fallback + // Legacy fallback: older Copilot Chat session logs stored selectedModel under v.inputState. + // This is kept for backward compatibility so we can still read existing logs from those versions. 
+ event.v?.inputState?.selectedModel?.metadata?.id; if (modelId) { defaultModel = modelId.replace(/^copilot\//, ''); } From f9a2844223ed44ea34c5ccf8b51bafeb07cdbc23 Mon Sep 17 00:00:00 2001 From: Rob Bos Date: Mon, 2 Feb 2026 18:53:59 +0100 Subject: [PATCH 5/6] Add data for last 30 days --- src/extension.ts | 96 +++++++-------------------------------- src/webview/usage/main.ts | 76 ++++++++++++++++++++++++++++--- 2 files changed, 86 insertions(+), 86 deletions(-) diff --git a/src/extension.ts b/src/extension.ts index b2eeabb..6a11ad3 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -132,6 +132,7 @@ interface ModelSwitchingAnalysis { interface UsageAnalysisStats { today: UsageAnalysisPeriod; + last30Days: UsageAnalysisPeriod; month: UsageAnalysisPeriod; lastUpdated: Date; } @@ -221,7 +222,6 @@ class CopilotTokenTracker implements vscode.Disposable { private co2Per1kTokens = 0.2; // gCO2e per 1000 tokens, a rough estimate private co2AbsorptionPerTreePerYear = 21000; // grams of CO2 per tree per year private waterUsagePer1kTokens = 0.3; // liters of water per 1000 tokens, based on data center usage estimates - private _modelDebugFileCount = 0; // Counter for debug logging private _cacheHits = 0; // Counter for cache hits during usage analysis private _cacheMisses = 0; // Counter for cache misses during usage analysis @@ -964,10 +964,10 @@ class CopilotTokenTracker implements vscode.Disposable { private async calculateUsageAnalysisStats(): Promise { const now = new Date(); const todayStart = new Date(now.getFullYear(), now.getMonth(), now.getDate()); + const last30DaysStart = new Date(now.getTime() - 30 * 24 * 60 * 60 * 1000); const monthStart = new Date(now.getFullYear(), now.getMonth(), 1); this.log('🔍 [Usage Analysis] Starting calculation...'); - this._modelDebugFileCount = 0; // Reset debug counter this._cacheHits = 0; // Reset cache hit counter this._cacheMisses = 0; // Reset cache miss counter @@ -1000,6 +1000,7 @@ class CopilotTokenTracker implements 
vscode.Disposable { }); const todayStats = emptyPeriod(); + const last30DaysStats = emptyPeriod(); const monthStats = emptyPeriod(); try { @@ -1013,22 +1014,23 @@ class CopilotTokenTracker implements vscode.Disposable { try { const fileStats = fs.statSync(sessionFile); - if (fileStats.mtime >= monthStart) { + // Check if file is within the last 30 days (widest range) + if (fileStats.mtime >= last30DaysStart) { const analysis = await this.getUsageAnalysisFromSessionCached(sessionFile, fileStats.mtime.getTime()); - // Add to month stats - monthStats.sessions++; - this.mergeUsageAnalysis(monthStats, analysis); + // Add to last 30 days stats + last30DaysStats.sessions++; + this.mergeUsageAnalysis(last30DaysStats, analysis); + + // Add to month stats if modified this calendar month + if (fileStats.mtime >= monthStart) { + monthStats.sessions++; + this.mergeUsageAnalysis(monthStats, analysis); + } // Add to today stats if modified today if (fileStats.mtime >= todayStart) { todayStats.sessions++; - // Debug: log today session model data - const fileName = sessionFile.split(/[/\\]/).pop() || sessionFile; - const modelCount = analysis.modelSwitching?.modelCount || 0; - if (todayStats.sessions <= 10) { - this.log(`[DEBUG Today] Session #${todayStats.sessions}: ${fileName}, modelCount=${modelCount}, models=[${analysis.modelSwitching?.uniqueModels?.join(', ') || 'none'}]`); - } this.mergeUsageAnalysis(todayStats, analysis); } } @@ -1049,14 +1051,9 @@ class CopilotTokenTracker implements vscode.Disposable { // Log cache statistics this.log(`🔍 [Usage Analysis] Cache stats: ${this._cacheHits} hits, ${this._cacheMisses} misses`); - // Log model switching statistics for debugging (always log Today to debug the issue) - this.logModelSwitchingStats('Today', todayStats.modelSwitching); - if (monthStats.modelSwitching.totalSessions > 0) { - this.logModelSwitchingStats('This Month', monthStats.modelSwitching); - } - return { today: todayStats, + last30Days: last30DaysStats, month: 
monthStats, lastUpdated: now }; @@ -1142,18 +1139,6 @@ class CopilotTokenTracker implements vscode.Disposable { } } - private logModelSwitchingStats(label: string, stats: ModelSwitchingAnalysis): void { - this.log(`[${label}] Model Switching Summary:`); - this.log(` - Total Sessions: ${stats.totalSessions}`); - this.log(` - Avg Models/Session: ${stats.averageModelsPerSession.toFixed(2)}`); - this.log(` - Max Models: ${stats.maxModelsPerSession}`); - this.log(` - Switching Freq: ${stats.switchingFrequency.toFixed(1)}%`); - this.log(` - Standard Models (${stats.standardModels.length}): ${stats.standardModels.join(', ') || 'none'}`); - this.log(` - Premium Models (${stats.premiumModels.length}): ${stats.premiumModels.join(', ') || 'none'}`); - this.log(` - Unknown Models (${stats.unknownModels.length}): ${stats.unknownModels.join(', ') || 'none'}`); - this.log(` - Mixed Tier Sessions: ${stats.mixedTierSessions}`); - } - private async countInteractionsInSession(sessionFile: string): Promise { try { const fileContent = await fs.promises.readFile(sessionFile, 'utf8'); @@ -1205,11 +1190,6 @@ class CopilotTokenTracker implements vscode.Disposable { private async getModelUsageFromSession(sessionFile: string): Promise { const modelUsage: ModelUsage = {}; const fileName = sessionFile.split(/[/\\]/).pop() || sessionFile; - - // Debug: log every file processed (only log first 5 files to avoid noise) - if (!this._modelDebugFileCount) { this._modelDebugFileCount = 0; } - this._modelDebugFileCount++; - const shouldLogDebug = this._modelDebugFileCount <= 5; try { const fileContent = await fs.promises.readFile(sessionFile, 'utf8'); @@ -1217,10 +1197,6 @@ class CopilotTokenTracker implements vscode.Disposable { // Detect JSONL content: either by extension or by content analysis const isJsonlContent = sessionFile.endsWith('.jsonl') || this.isJsonlContent(fileContent); - if (shouldLogDebug) { - this.log(`[DEBUG Model] Processing file ${this._modelDebugFileCount}: ${fileName}, isJsonl: 
${isJsonlContent}, size: ${fileContent.length} bytes`); - } - // Handle .jsonl files OR .json files with JSONL content (Copilot CLI format and VS Code incremental format) if (isJsonlContent) { const lines = fileContent.trim().split('\n'); @@ -1323,34 +1299,13 @@ class CopilotTokenTracker implements vscode.Disposable { // Skip malformed lines } } - // Debug: log JSONL model extraction results - if (shouldLogDebug) { - const models = Object.keys(modelUsage); - this.log(`[DEBUG Model] JSONL file ${fileName}: found ${models.length} models: ${models.join(', ') || 'none'}, defaultModel=${defaultModel}`); - } return modelUsage; } // Handle regular .json files const sessionContent = JSON.parse(fileContent); - - // Debug: log JSON structure for first few files - if (shouldLogDebug) { - const hasRequests = Array.isArray(sessionContent.requests); - const requestCount = hasRequests ? sessionContent.requests.length : 0; - const topLevelKeys = Object.keys(sessionContent).slice(0, 5).join(', '); - this.log(`[DEBUG Model] JSON file ${fileName}: hasRequests=${hasRequests}, requestCount=${requestCount}, keys=[${topLevelKeys}]`); - } if (sessionContent.requests && Array.isArray(sessionContent.requests)) { - // Debug: log first request to understand structure - if (sessionContent.requests.length > 0 && shouldLogDebug) { - const firstReq = sessionContent.requests[0]; - const model = this.getModelFromRequest(firstReq); - const reqKeys = Object.keys(firstReq).slice(0, 5).join(', '); - this.log(`[DEBUG Model] First request keys: [${reqKeys}], modelId=${firstReq.modelId}, result.metadata.modelId=${firstReq.result?.metadata?.modelId}, detected=${model}`); - } - for (const request of sessionContent.requests) { // Get model for this request const model = this.getModelFromRequest(request); @@ -1650,12 +1605,6 @@ class CopilotTokenTracker implements vscode.Disposable { const modelUsage = await this.getModelUsageFromSession(sessionFile); const modelCount = modelUsage ? 
Object.keys(modelUsage).length : 0; - // Debug: log model extraction results for first few files (regardless of success) - const fileName = sessionFile.split(/[/\\]/).pop() || sessionFile; - if (this._modelDebugFileCount <= 10) { - this.log(`[DEBUG analyzeSession] File #${this._modelDebugFileCount}: ${fileName}, models found: ${modelCount}${modelCount > 0 ? ` (${Object.keys(modelUsage).join(', ')})` : ''}`); - } - // Skip if modelUsage is undefined or empty (not a valid session file) if (!modelUsage || modelCount === 0) { return; @@ -2977,19 +2926,6 @@ class CopilotTokenTracker implements vscode.Disposable { // Get usage analysis stats const analysisStats = await this.calculateUsageAnalysisStats(); - - // Log the data being sent to webview for debugging - this.log('[DEBUG] Usage Analysis Data:'); - this.log(JSON.stringify({ - today: { - sessions: analysisStats.today.sessions, - modelSwitching: analysisStats.today.modelSwitching - }, - month: { - sessions: analysisStats.month.sessions, - modelSwitching: analysisStats.month.modelSwitching - } - }, null, 2)); // Create webview panel this.analysisPanel = vscode.window.createWebviewPanel( @@ -3725,7 +3661,6 @@ class CopilotTokenTracker implements vscode.Disposable { } break; case 'openSettings': - this.log('[DEBUG] openSettings message received from diagnostics webview'); await vscode.commands.executeCommand('workbench.action.openSettings', 'copilotTokenTracker.backend'); break; } @@ -4077,6 +4012,7 @@ class CopilotTokenTracker implements vscode.Disposable { const initialData = JSON.stringify({ today: stats.today, + last30Days: stats.last30Days, month: stats.month, lastUpdated: stats.lastUpdated.toISOString() }).replace(/
@@ -331,7 +333,7 @@ function renderLayout(stats: UsageAnalysisStats): void {
🔧Tool Usage
Functions and tools invoked by Copilot during interactions
-
+

📅 Today

@@ -346,6 +348,13 @@ function renderLayout(stats: UsageAnalysisStats): void { ${renderToolsTable(stats.month.toolCalls.byTool, 10)}
+
+

📆 Last 30 Days

+
+
Total Tool Calls: ${stats.last30Days.toolCalls.total}
+ ${renderToolsTable(stats.last30Days.toolCalls.byTool, 10)} +
+
@@ -353,7 +362,7 @@ function renderLayout(stats: UsageAnalysisStats): void {
🔌MCP Tools
Model Context Protocol (MCP) server and tool usage
-
+

📅 Today

@@ -374,6 +383,16 @@ function renderLayout(stats: UsageAnalysisStats): void { ` : '
No MCP tools used yet
'}
+
+

📆 Last 30 Days

+
+
Total MCP Calls: ${stats.last30Days.mcpTools.total}
+ ${stats.last30Days.mcpTools.total > 0 ? ` +
By Server:
${renderToolsTable(stats.last30Days.mcpTools.byServer, 8)}
+
By Tool:
${renderToolsTable(stats.last30Days.mcpTools.byTool, 8)}
+ ` : '
No MCP tools used yet
'} +
+
@@ -381,7 +400,7 @@ function renderLayout(stats: UsageAnalysisStats): void {
🔀Multi-Model Usage
Track model diversity and switching patterns in your conversations
-
+

📅 Today

@@ -427,7 +446,7 @@ function renderLayout(stats: UsageAnalysisStats): void {
-

📊 This Month

+

📊 This Month

📊 Avg Models per Conversation
@@ -470,6 +489,50 @@ function renderLayout(stats: UsageAnalysisStats): void { ` : ''}
+
+

📆 Last 30 Days

+
+
+
📊 Avg Models per Conversation
+
${stats.last30Days.modelSwitching.averageModelsPerSession.toFixed(1)}
+
+
+
🔄 Switching Frequency
+
${stats.last30Days.modelSwitching.switchingFrequency.toFixed(0)}%
+
Sessions with >1 model
+
+
+
📈 Max Models in Session
+
${stats.last30Days.modelSwitching.maxModelsPerSession || 0}
+
+
+
+
Models by Tier:
+ ${stats.last30Days.modelSwitching.standardModels.length > 0 ? ` +
+ 🔵 Standard: + ${stats.last30Days.modelSwitching.standardModels.join(', ')} +
+ ` : ''} + ${stats.last30Days.modelSwitching.premiumModels.length > 0 ? ` +
+ ⭐ Premium: + ${stats.last30Days.modelSwitching.premiumModels.join(', ')} +
+ ` : ''} + ${stats.last30Days.modelSwitching.unknownModels.length > 0 ? ` +
+ ❓ Unknown: + ${stats.last30Days.modelSwitching.unknownModels.join(', ')} +
+ ` : ''} + ${stats.last30Days.modelSwitching.mixedTierSessions > 0 ? ` +
+ 🔀 Mixed tier sessions: ${stats.last30Days.modelSwitching.mixedTierSessions} +
+ ` : ''} +
+
@@ -478,7 +541,8 @@ function renderLayout(stats: UsageAnalysisStats): void {
📈Sessions Summary
📅 Today Sessions
${stats.today.sessions}
-
📊 Month Sessions
${stats.month.sessions}
+
📊 This Month Sessions
${stats.month.sessions}
+
📆 Last 30 Days Sessions
${stats.last30Days.sessions}
From 3a3a0c13f20124bca32ec7ee672aeaeb842c9997 Mon Sep 17 00:00:00 2001 From: Rob Bos Date: Mon, 2 Feb 2026 18:58:30 +0100 Subject: [PATCH 6/6] Add data for last 30 days --- src/extension.ts | 64 +++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/src/extension.ts b/src/extension.ts index 6a11ad3..6f82f50 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -194,7 +194,7 @@ interface SessionLogData { class CopilotTokenTracker implements vscode.Disposable { // Cache version - increment this when making changes that require cache invalidation - private static readonly CACHE_VERSION = 7; // Fix JSONL model extraction for Today data (2026-01-30) + private static readonly CACHE_VERSION = 8; // Skip sessions with 0 models in avg calculation (2026-02-02) private diagnosticsPanel?: vscode.WebviewPanel; // Tracks whether the diagnostics panel has already received its session files @@ -1104,38 +1104,42 @@ class CopilotTokenTracker implements vscode.Disposable { }; } - period.modelSwitching.totalSessions++; - period.modelSwitching.modelsPerSession.push(analysis.modelSwitching.modelCount); - - // Track unique models by tier - for (const model of analysis.modelSwitching.tiers.standard) { - if (!period.modelSwitching.standardModels.includes(model)) { - period.modelSwitching.standardModels.push(model); + // Only count sessions with at least 1 model detected for model switching stats + // Sessions without detected models (modelCount === 0) should not affect the average + if (analysis.modelSwitching.modelCount > 0) { + period.modelSwitching.totalSessions++; + period.modelSwitching.modelsPerSession.push(analysis.modelSwitching.modelCount); + + // Track unique models by tier + for (const model of analysis.modelSwitching.tiers.standard) { + if (!period.modelSwitching.standardModels.includes(model)) { + period.modelSwitching.standardModels.push(model); + } } - } - for (const model of analysis.modelSwitching.tiers.premium) { 
- if (!period.modelSwitching.premiumModels.includes(model)) { - period.modelSwitching.premiumModels.push(model); + for (const model of analysis.modelSwitching.tiers.premium) { + if (!period.modelSwitching.premiumModels.includes(model)) { + period.modelSwitching.premiumModels.push(model); + } } - } - for (const model of analysis.modelSwitching.tiers.unknown) { - if (!period.modelSwitching.unknownModels.includes(model)) { - period.modelSwitching.unknownModels.push(model); + for (const model of analysis.modelSwitching.tiers.unknown) { + if (!period.modelSwitching.unknownModels.includes(model)) { + period.modelSwitching.unknownModels.push(model); + } + } + + // Count sessions with mixed tiers + if (analysis.modelSwitching.hasMixedTiers) { + period.modelSwitching.mixedTierSessions++; + } + + // Calculate aggregate statistics + if (period.modelSwitching.modelsPerSession.length > 0) { + const counts = period.modelSwitching.modelsPerSession; + period.modelSwitching.averageModelsPerSession = counts.reduce((a, b) => a + b, 0) / counts.length; + period.modelSwitching.maxModelsPerSession = Math.max(...counts); + period.modelSwitching.minModelsPerSession = Math.min(...counts); + period.modelSwitching.switchingFrequency = (counts.filter(c => c > 1).length / counts.length) * 100; } - } - - // Count sessions with mixed tiers - if (analysis.modelSwitching.hasMixedTiers) { - period.modelSwitching.mixedTierSessions++; - } - - // Calculate aggregate statistics - if (period.modelSwitching.modelsPerSession.length > 0) { - const counts = period.modelSwitching.modelsPerSession; - period.modelSwitching.averageModelsPerSession = counts.reduce((a, b) => a + b, 0) / counts.length; - period.modelSwitching.maxModelsPerSession = Math.max(...counts); - period.modelSwitching.minModelsPerSession = Math.min(...counts); - period.modelSwitching.switchingFrequency = (counts.filter(c => c > 1).length / counts.length) * 100; } }