diff --git a/src/apps/desktop/capabilities/default.json b/src/apps/desktop/capabilities/default.json index f3e3ec1e..31bacd46 100644 --- a/src/apps/desktop/capabilities/default.json +++ b/src/apps/desktop/capabilities/default.json @@ -51,7 +51,13 @@ "dialog:allow-message", "opener:default", "opener:allow-open-url", - "opener:allow-open-path", + { + "identifier": "opener:allow-open-path", + "allow": [ + { "path": "$APPDATA/**" }, + { "path": "$HOME/**" } + ] + }, "opener:allow-reveal-item-in-dir", "fs:default", "fs:allow-read-file", diff --git a/src/apps/desktop/src/api/insights_api.rs b/src/apps/desktop/src/api/insights_api.rs new file mode 100644 index 00000000..6ddde659 --- /dev/null +++ b/src/apps/desktop/src/api/insights_api.rs @@ -0,0 +1,67 @@ +use bitfun_core::agentic::insights::{InsightsReport, InsightsReportMeta, InsightsService}; +use log::{error, info}; +use serde::Deserialize; + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GenerateInsightsRequest { + pub days: Option, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct LoadInsightsReportRequest { + pub path: String, +} + +#[tauri::command] +pub async fn generate_insights( + request: GenerateInsightsRequest, +) -> Result { + let days = request.days.unwrap_or(30); + info!("Generating insights for the last {} days", days); + + InsightsService::generate(days) + .await + .map_err(|e| { + error!("Failed to generate insights: {}", e); + format!("Failed to generate insights: {}", e) + }) +} + +#[tauri::command] +pub async fn get_latest_insights() -> Result, String> { + InsightsService::load_latest_reports().await.map_err(|e| { + error!("Failed to load latest insights: {}", e); + format!("Failed to load latest insights: {}", e) + }) +} + +#[tauri::command] +pub async fn load_insights_report( + request: LoadInsightsReportRequest, +) -> Result { + InsightsService::load_report(&request.path).await.map_err(|e| { + error!("Failed to load 
insights report: {}", e); + format!("Failed to load insights report: {}", e) + }) +} + +#[tauri::command] +pub async fn has_insights_data( + request: GenerateInsightsRequest, +) -> Result { + let days = request.days.unwrap_or(30); + InsightsService::has_data(days).await.map_err(|e| { + error!("Failed to check insights data: {}", e); + format!("Failed to check insights data: {}", e) + }) +} + +#[tauri::command] +pub async fn cancel_insights_generation() -> Result<(), String> { + InsightsService::cancel().await.map_err(|e| { + error!("Failed to cancel insights generation: {}", e); + e + }) +} diff --git a/src/apps/desktop/src/api/mod.rs b/src/apps/desktop/src/api/mod.rs index 86d300d6..bdc521a5 100644 --- a/src/apps/desktop/src/api/mod.rs +++ b/src/apps/desktop/src/api/mod.rs @@ -34,5 +34,6 @@ pub mod system_api; pub mod terminal_api; pub mod token_usage_api; pub mod tool_api; +pub mod insights_api; pub use app_state::{AppState, AppStatistics, HealthStatus}; diff --git a/src/apps/desktop/src/lib.rs b/src/apps/desktop/src/lib.rs index f73fe00b..0ad2f237 100644 --- a/src/apps/desktop/src/lib.rs +++ b/src/apps/desktop/src/lib.rs @@ -636,6 +636,12 @@ pub async fn run() { // Browser API api::browser_api::browser_webview_eval, api::browser_api::browser_get_url, + // Insights API + api::insights_api::generate_insights, + api::insights_api::get_latest_insights, + api::insights_api::load_insights_report, + api::insights_api::has_insights_data, + api::insights_api::cancel_insights_generation, ]) .run(tauri::generate_context!()); if let Err(e) = run_result { diff --git a/src/crates/core/src/agentic/image_analysis/processor.rs b/src/crates/core/src/agentic/image_analysis/processor.rs index 33fc6276..fd8c8011 100644 --- a/src/crates/core/src/agentic/image_analysis/processor.rs +++ b/src/crates/core/src/agentic/image_analysis/processor.rs @@ -198,7 +198,8 @@ impl ImageAnalyzer { } fn parse_analysis_response(response: &str, image_id: &str) -> ImageAnalysisResult { - let json_str = 
Self::extract_json_from_markdown(response).unwrap_or(response); + let extracted = crate::util::extract_json_from_ai_response(response); + let json_str = extracted.as_deref().unwrap_or(response); if let Ok(parsed) = serde_json::from_str::(json_str) { return ImageAnalysisResult { @@ -253,27 +254,4 @@ impl ImageAnalyzer { } } - fn extract_json_from_markdown(text: &str) -> Option<&str> { - if let Some(start_idx) = text.find("<|begin_of_box|>") { - let content_start = start_idx + "<|begin_of_box|>".len(); - if let Some(end_idx) = text[content_start..].find("<|end_of_box|>") { - let json_content = &text[content_start..content_start + end_idx].trim(); - debug!("Extracted Zhipu AI box format JSON"); - return Some(json_content); - } - } - - let start_markers = ["```json\n", "```\n"]; - - for marker in &start_markers { - if let Some(start_idx) = text.find(marker) { - let content_start = start_idx + marker.len(); - if let Some(end_idx) = text[content_start..].find("```") { - return Some(&text[content_start..content_start + end_idx].trim()); - } - } - } - - None - } } diff --git a/src/crates/core/src/agentic/insights/cancellation.rs b/src/crates/core/src/agentic/insights/cancellation.rs new file mode 100644 index 00000000..6fd4cef1 --- /dev/null +++ b/src/crates/core/src/agentic/insights/cancellation.rs @@ -0,0 +1,49 @@ +use log::{debug, info, warn}; +use std::sync::Arc; +use tokio::sync::Mutex; +use tokio_util::sync::CancellationToken; + +type Slot = Arc>>; + +static SLOT: std::sync::OnceLock = std::sync::OnceLock::new(); + +fn get_slot() -> Slot { + SLOT.get_or_init(|| Arc::new(Mutex::new(None))).clone() +} + +/// Registers a new insights generation task, cancelling any previous one. 
+pub async fn register() -> CancellationToken { + let token = CancellationToken::new(); + let arc = get_slot(); + let mut slot = arc.lock().await; + if let Some(old) = slot.take() { + old.cancel(); + debug!("Cancelled previous insights generation"); + } + *slot = Some(token.clone()); + token +} + +/// Cancels the current insights generation task. +pub async fn cancel() -> Result<(), String> { + let arc = get_slot(); + let mut slot = arc.lock().await; + match slot.take() { + Some(token) => { + token.cancel(); + info!("Insights generation cancelled by user"); + Ok(()) + } + None => { + warn!("No insights generation in progress to cancel"); + Err("No insights generation in progress".into()) + } + } +} + +/// Unregisters the current task (call on completion). +pub async fn unregister() { + let arc = get_slot(); + let mut slot = arc.lock().await; + *slot = None; +} diff --git a/src/crates/core/src/agentic/insights/collector.rs b/src/crates/core/src/agentic/insights/collector.rs new file mode 100644 index 00000000..a1324aa7 --- /dev/null +++ b/src/crates/core/src/agentic/insights/collector.rs @@ -0,0 +1,729 @@ +use crate::agentic::core::{Message, MessageContent, MessageRole, ToolCall, ToolResult}; +use crate::agentic::insights::types::*; +use crate::agentic::persistence::PersistenceManager; +use crate::infrastructure::get_path_manager_arc; +use crate::service::session::{DialogTurnData, TurnStatus}; +use crate::service::workspace::get_global_workspace_service; +use crate::util::errors::BitFunResult; +use chrono::{DateTime, Utc}; +use log::{debug, warn}; +use std::collections::{HashMap, HashSet}; +use std::path::{Path, PathBuf}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +const MAX_TRANSCRIPT_CHARS: usize = 16000; +const MAX_TEXT_PER_MESSAGE: usize = 800; +const TAIL_RESERVE_CHARS: usize = 4000; +/// Gaps longer than this between messages are treated as "user away" and excluded +/// from both active duration and response time calculations. 
+const ACTIVITY_GAP_THRESHOLD_SECS: u64 = 30 * 60; + +pub struct InsightsCollector; + +impl InsightsCollector { + /// Stage 1: Collect session data from PersistenceManager across all workspaces + pub async fn collect(days: u32) -> BitFunResult<(BaseStats, Vec)> { + let path_manager = get_path_manager_arc(); + let pm = PersistenceManager::new(path_manager)?; + let cutoff = SystemTime::now() - Duration::from_secs(days as u64 * 86400); + + let workspace_paths = Self::collect_workspace_paths().await; + + let mut transcripts = Vec::new(); + let mut base_stats = BaseStats::default(); + let mut seen_session_ids = HashSet::new(); + + for ws_path in &workspace_paths { + let sessions = match pm.list_sessions(ws_path).await { + Ok(s) => s, + Err(e) => { + warn!("Skipping workspace {}: {}", ws_path.display(), e); + continue; + } + }; + + for summary in &sessions { + if summary.last_activity_at < cutoff { + continue; + } + + if !seen_session_ids.insert(summary.session_id.clone()) { + continue; + } + + let session = match pm.load_session(ws_path, &summary.session_id).await { + Ok(s) => s, + Err(e) => { + warn!( + "Skipping session {}: load failed: {}", + summary.session_id, e + ); + continue; + } + }; + + let turns = pm + .load_session_turns(ws_path, &summary.session_id) + .await + .unwrap_or_default(); + + let messages = + match Self::load_session_messages_with_turns( + &pm, ws_path, &summary.session_id, &turns, + ).await { + Ok(m) if !m.is_empty() => m, + Ok(_) => { + debug!( + "Skipping session {}: no messages found", + summary.session_id + ); + continue; + } + Err(e) => { + warn!( + "Skipping session {}: load messages failed: {}", + summary.session_id, e + ); + continue; + } + }; + + let mut transcript = + Self::build_transcript(&summary.session_id, &session, &messages); + transcript.workspace_path = Some(ws_path.to_string_lossy().to_string()); + Self::accumulate_stats(&mut base_stats, &session, &messages); + accumulate_code_stats_from_turns(&mut base_stats, &turns); + 
transcripts.push(transcript); + } + } + + base_stats.total_sessions = transcripts.len() as u32; + + if let Some(earliest) = transcripts.iter().min_by_key(|t| &t.created_at) { + base_stats.first_session_at = Some(earliest.created_at.clone()); + } + if let Some(latest) = transcripts.iter().max_by_key(|t| &t.created_at) { + base_stats.last_session_at = Some(latest.created_at.clone()); + } + + // Compute response time buckets from raw intervals + if !base_stats.response_times_raw.is_empty() { + base_stats.response_time_buckets = + bucket_response_times(&base_stats.response_times_raw); + let (median, avg) = + compute_response_time_stats(&base_stats.response_times_raw); + base_stats.median_response_time_secs = Some(median); + base_stats.avg_response_time_secs = Some(avg); + } + + debug!( + "Collected {} sessions with {} total messages", + transcripts.len(), + base_stats.total_messages + ); + + Ok((base_stats, transcripts)) + } + + /// Collect all known workspace paths that have session data + async fn collect_workspace_paths() -> Vec { + let mut paths = Vec::new(); + + if let Some(ws_service) = get_global_workspace_service() { + let workspaces = ws_service.list_workspaces().await; + for ws in workspaces { + if ws.root_path.join(".bitfun").join("sessions").exists() { + paths.push(ws.root_path); + } + } + } + + paths + } + + /// Load messages for a session, trying sources in priority order: + /// 1. Latest context snapshot (most complete, includes compression) + /// 2. 
Rebuild from pre-loaded turn data + async fn load_session_messages_with_turns( + pm: &PersistenceManager, + workspace_path: &Path, + session_id: &str, + turns: &[DialogTurnData], + ) -> BitFunResult> { + if let Ok(Some((_turn_index, messages))) = + pm.load_latest_turn_context_snapshot(workspace_path, session_id) + .await + { + if !messages.is_empty() { + return Ok(messages); + } + } + + if !turns.is_empty() { + return Ok(rebuild_messages_from_turns(turns)); + } + + Ok(vec![]) + } + + fn build_transcript( + session_id: &str, + session: &crate::agentic::core::Session, + messages: &[Message], + ) -> SessionTranscript { + let mut all_parts: Vec = Vec::new(); + let mut tool_names: Vec = Vec::new(); + let mut has_errors = false; + + for msg in messages { + match &msg.content { + MessageContent::Text(text) => { + let role_tag = match msg.role { + MessageRole::User => "[User]", + MessageRole::Assistant => "[Assistant]", + MessageRole::System => continue, + MessageRole::Tool => continue, + }; + let truncated = truncate_text(text, MAX_TEXT_PER_MESSAGE); + all_parts.push(format!("{}: {}", role_tag, truncated)); + } + MessageContent::Mixed { + text, tool_calls, .. + } => { + if !text.is_empty() { + let truncated = truncate_text(text, MAX_TEXT_PER_MESSAGE); + all_parts.push(format!("[Assistant]: {}", truncated)); + } + for tc in tool_calls { + if !tool_names.contains(&tc.tool_name) { + tool_names.push(tc.tool_name.clone()); + } + all_parts.push(format!("[Tool: {}]", tc.tool_name)); + } + } + MessageContent::ToolResult { + tool_name, + is_error, + .. + } => { + if *is_error { + has_errors = true; + all_parts.push(format!("[Tool Error: {}]", tool_name)); + } + } + MessageContent::Multimodal { text, .. 
} => { + if !text.is_empty() { + let truncated = truncate_text(text, MAX_TEXT_PER_MESSAGE); + all_parts.push(format!("[User]: {} [+images]", truncated)); + } + } + } + } + + let transcript = smart_truncate_parts(&all_parts, MAX_TRANSCRIPT_CHARS, TAIL_RESERVE_CHARS); + + let duration_minutes = Self::compute_active_duration(messages) / 60; + + let created_at = system_time_to_iso(session.created_at); + + SessionTranscript { + session_id: session_id.to_string(), + agent_type: session.agent_type.clone(), + session_name: session.session_name.clone(), + workspace_path: None, + duration_minutes, + message_count: messages.len() as u32, + turn_count: session.dialog_turn_ids.len() as u32, + created_at, + transcript, + tool_names, + has_errors, + } + } + + fn accumulate_stats( + base_stats: &mut BaseStats, + session: &crate::agentic::core::Session, + messages: &[Message], + ) { + base_stats.total_messages += messages.len() as u32; + base_stats.total_turns += session.dialog_turn_ids.len() as u32; + + let active_secs = Self::compute_active_duration(messages); + base_stats.total_duration_minutes += active_secs / 60; + + *base_stats + .agent_types + .entry(session.agent_type.clone()) + .or_insert(0) += 1; + + let mut last_assistant_time: Option = None; + for msg in messages { + if msg.role == MessageRole::User { + if let Ok(dur) = msg.timestamp.duration_since(UNIX_EPOCH) { + let dt = DateTime::::from(UNIX_EPOCH + dur); + let hour = dt.format("%H").to_string().parse::().unwrap_or(0); + *base_stats.hour_counts.entry(hour).or_insert(0) += 1; + } + } + + match &msg.content { + MessageContent::Mixed { tool_calls, .. } => { + for tc in tool_calls { + *base_stats + .tool_usage + .entry(tc.tool_name.clone()) + .or_insert(0) += 1; + } + } + MessageContent::ToolResult { + tool_name, + is_error, + .. 
+ } => { + if *is_error { + *base_stats + .tool_errors + .entry(tool_name.clone()) + .or_insert(0) += 1; + } + } + _ => {} + } + + match msg.role { + MessageRole::Assistant => { + last_assistant_time = Some(msg.timestamp); + } + MessageRole::User => { + if let Some(prev) = last_assistant_time { + if let Ok(duration) = msg.timestamp.duration_since(prev) { + let secs = duration.as_secs(); + if secs >= 2 && secs <= ACTIVITY_GAP_THRESHOLD_SECS { + base_stats.response_times_raw.push(secs as f64); + } + } + } + } + _ => {} + } + } + } + + /// Compute active usage duration by summing adjacent message gaps, + /// capping each gap at `ACTIVITY_GAP_THRESHOLD_SECS`. + fn compute_active_duration(messages: &[Message]) -> u64 { + if messages.len() < 2 { + return 0; + } + let mut total_secs: u64 = 0; + for pair in messages.windows(2) { + if let Ok(gap) = pair[1].timestamp.duration_since(pair[0].timestamp) { + let gap_secs = gap.as_secs(); + if gap_secs <= ACTIVITY_GAP_THRESHOLD_SECS { + total_secs += gap_secs; + } + } + } + total_secs + } + + /// Stage 3: Aggregate facets into InsightsAggregate + pub fn aggregate(base_stats: &BaseStats, facets: &[SessionFacet]) -> InsightsAggregate { + let mut goals: HashMap = HashMap::new(); + let mut outcomes: HashMap = HashMap::new(); + let mut satisfaction: HashMap = HashMap::new(); + let mut friction: HashMap = HashMap::new(); + let mut success: HashMap = HashMap::new(); + let mut languages: HashMap = HashMap::new(); + let mut session_types: HashMap = HashMap::new(); + let mut session_summaries = Vec::new(); + let mut friction_details = Vec::new(); + let mut user_instructions = Vec::new(); + + for facet in facets { + for (k, v) in &facet.goal_categories { + *goals.entry(k.clone()).or_insert(0) += v; + } + *outcomes.entry(facet.outcome.clone()).or_insert(0) += 1; + for (k, v) in &facet.user_satisfaction_counts { + *satisfaction.entry(k.clone()).or_insert(0) += v; + } + for (k, v) in &facet.friction_counts { + 
*friction.entry(k.clone()).or_insert(0) += v; + } + if !facet.primary_success.is_empty() && facet.primary_success != "none" { + *success + .entry(facet.primary_success.clone()) + .or_insert(0) += 1; + } + for lang in &facet.languages_used { + *languages.entry(lang.clone()).or_insert(0) += 1; + } + *session_types + .entry(facet.session_type.clone()) + .or_insert(0) += 1; + + if !facet.brief_summary.is_empty() { + session_summaries.push(facet.brief_summary.clone()); + } + if !facet.friction_detail.is_empty() { + friction_details.push(facet.friction_detail.clone()); + } + for instr in &facet.user_instructions { + if !user_instructions.contains(instr) { + user_instructions.push(instr.clone()); + } + } + } + + let mut top_tools: Vec<(String, u32)> = base_stats.tool_usage.iter().map(|(k, v)| (k.clone(), *v)).collect(); + top_tools.sort_by(|a, b| b.1.cmp(&a.1)); + top_tools.truncate(15); + + let mut top_goals: Vec<(String, u32)> = goals.iter().map(|(k, v)| (k.clone(), *v)).collect(); + top_goals.sort_by(|a, b| b.1.cmp(&a.1)); + top_goals.truncate(10); + + let hours = base_stats.total_duration_minutes as f32 / 60.0; + let date_range = DateRange { + start: base_stats + .first_session_at + .clone() + .unwrap_or_default(), + end: base_stats + .last_session_at + .clone() + .unwrap_or_default(), + }; + + let days_covered = compute_days_covered(&date_range); + let msgs_per_day = if days_covered > 0 { + base_stats.total_messages as f32 / days_covered as f32 + } else { + base_stats.total_messages as f32 + }; + + InsightsAggregate { + sessions: base_stats.total_sessions, + analyzed: facets.len() as u32, + date_range, + messages: base_stats.total_messages, + hours, + top_tools, + top_goals, + outcomes, + satisfaction, + friction, + success, + languages, + session_summaries, + friction_details, + user_instructions, + session_types, + tool_errors: base_stats.tool_errors.clone(), + hour_counts: base_stats.hour_counts.clone(), + agent_types: base_stats.agent_types.clone(), + 
msgs_per_day, + response_time_buckets: base_stats.response_time_buckets.clone(), + median_response_time_secs: base_stats.median_response_time_secs, + avg_response_time_secs: base_stats.avg_response_time_secs, + total_lines_added: base_stats.total_lines_added, + total_lines_removed: base_stats.total_lines_removed, + total_files_modified: base_stats.total_files_modified, + } + } +} + +/// Rebuild `Vec` from turn data, including tool call and tool result information +/// needed by `build_transcript` and `accumulate_stats`. +/// Preserves timestamps from turn data and marks cancelled turns with `[Cancelled]`. +fn rebuild_messages_from_turns(turns: &[DialogTurnData]) -> Vec { + let mut messages = Vec::new(); + + for turn in turns { + let user_ts = UNIX_EPOCH + Duration::from_millis(turn.start_time); + let mut user_msg = Message::user(turn.user_message.content.clone()); + user_msg.timestamp = user_ts; + messages.push(user_msg); + + for (round_idx, round) in turn.model_rounds.iter().enumerate() { + let assistant_text = round + .text_items + .iter() + .map(|item| item.content.clone()) + .filter(|c| !c.trim().is_empty()) + .collect::>() + .join("\n\n"); + + let tool_calls: Vec = round + .tool_items + .iter() + .map(|ti| ToolCall { + tool_id: ti.tool_call.id.clone(), + tool_name: ti.tool_name.clone(), + arguments: ti.tool_call.input.clone(), + is_error: false, + }) + .collect(); + + let round_ts = if let Some(end_time) = turn.end_time { + let start = turn.start_time; + let total_rounds = turn.model_rounds.len().max(1) as u64; + let step = (end_time.saturating_sub(start)) / (total_rounds + 1); + UNIX_EPOCH + Duration::from_millis(start + step * (round_idx as u64 + 1)) + } else { + UNIX_EPOCH + Duration::from_millis(turn.start_time + (round_idx as u64 + 1) * 1000) + }; + + if !tool_calls.is_empty() { + let mut msg = + Message::assistant_with_tools(assistant_text.clone(), tool_calls); + msg.timestamp = round_ts; + messages.push(msg); + } else if 
!assistant_text.trim().is_empty() { + let mut msg = Message::assistant(assistant_text); + msg.timestamp = round_ts; + messages.push(msg); + } + + for ti in &round.tool_items { + if let Some(result_data) = &ti.tool_result { + let mut msg = Message::tool_result(ToolResult { + tool_id: ti.tool_call.id.clone(), + tool_name: ti.tool_name.clone(), + result: result_data.result.clone(), + result_for_assistant: None, + is_error: !result_data.success, + duration_ms: result_data.duration_ms, + }); + msg.timestamp = round_ts; + messages.push(msg); + } + } + } + + if turn.status == TurnStatus::Cancelled { + let cancel_ts = turn + .end_time + .map(|t| UNIX_EPOCH + Duration::from_millis(t)) + .unwrap_or(user_ts); + let mut cancel_msg = Message::assistant("[Cancelled by user]".to_string()); + cancel_msg.timestamp = cancel_ts; + messages.push(cancel_msg); + } + } + + messages +} + +/// Keep head + tail of transcript parts, inserting an omission marker in the middle +/// when total length exceeds `max_chars`. This preserves the beginning (context/goals) +/// and end (final outcome) of a session. 
+fn smart_truncate_parts(parts: &[String], max_chars: usize, tail_reserve: usize) -> String { + let total: usize = parts.iter().map(|p| p.len() + 1).sum(); + if total <= max_chars { + return parts.join("\n"); + } + + let head_budget = max_chars.saturating_sub(tail_reserve); + let mut head_parts = Vec::new(); + let mut head_used = 0; + let mut head_end_idx = 0; + + for (i, part) in parts.iter().enumerate() { + let cost = part.len() + 1; + if head_used + cost > head_budget { + break; + } + head_parts.push(part.as_str()); + head_used += cost; + head_end_idx = i + 1; + } + + let mut tail_parts = Vec::new(); + let mut tail_used = 0; + let mut tail_start_idx = parts.len(); + + for (i, part) in parts.iter().enumerate().rev() { + if i < head_end_idx { + break; + } + let cost = part.len() + 1; + if tail_used + cost > tail_reserve { + break; + } + tail_parts.push(part.as_str()); + tail_used += cost; + tail_start_idx = i; + } + tail_parts.reverse(); + + let omitted = if tail_start_idx > head_end_idx { + tail_start_idx - head_end_idx + } else { + 0 + }; + + let mut result = head_parts.join("\n"); + if omitted > 0 { + result.push_str(&format!("\n\n[... 
{} messages omitted ...]\n\n", omitted)); + } + result.push_str(&tail_parts.join("\n")); + result +} + +fn truncate_text(text: &str, max_len: usize) -> String { + let trimmed = text.trim(); + if trimmed.len() <= max_len { + trimmed.to_string() + } else { + let mut end = max_len.min(trimmed.len()); + while end > 0 && !trimmed.is_char_boundary(end) { + end -= 1; + } + format!("{}...", &trimmed[..end]) + } +} + +fn system_time_to_iso(t: SystemTime) -> String { + match t.duration_since(UNIX_EPOCH) { + Ok(dur) => { + let dt = DateTime::::from(UNIX_EPOCH + dur); + dt.to_rfc3339() + } + Err(_) => "unknown".to_string(), + } +} + +fn bucket_response_times(raw: &[f64]) -> HashMap { + let buckets: &[(&str, f64, f64)] = &[ + ("2-10s", 2.0, 10.0), + ("10-30s", 10.0, 30.0), + ("30s-1m", 30.0, 60.0), + ("1-2m", 60.0, 120.0), + ("2-5m", 120.0, 300.0), + ("5-15m", 300.0, 900.0), + (">15m", 900.0, f64::MAX), + ]; + + let mut result: HashMap = HashMap::new(); + for &val in raw { + for &(label, lo, hi) in buckets { + if val >= lo && val < hi { + *result.entry(label.to_string()).or_insert(0) += 1; + break; + } + } + } + result +} + +fn compute_response_time_stats(raw: &[f64]) -> (f64, f64) { + if raw.is_empty() { + return (0.0, 0.0); + } + + let avg = raw.iter().sum::() / raw.len() as f64; + + let mut sorted = raw.to_vec(); + sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)); + let median = if sorted.len() % 2 == 0 { + let mid = sorted.len() / 2; + (sorted[mid - 1] + sorted[mid]) / 2.0 + } else { + sorted[sorted.len() / 2] + }; + + (median, avg) +} + +fn compute_days_covered(range: &DateRange) -> u32 { + let parse = |s: &str| -> Option> { + DateTime::parse_from_rfc3339(s).ok().map(|d| d.with_timezone(&Utc)) + }; + + match (parse(&range.start), parse(&range.end)) { + (Some(start), Some(end)) => { + let diff = end.signed_duration_since(start); + let days = diff.num_days().unsigned_abs() as u32; + days.max(1) + } + _ => 1, + } +} + +/// Extract code change 
statistics from persistent turn data. +/// +/// For Edit tool results: uses `old_end_line - start_line + 1` as lines removed +/// and `new_end_line - start_line + 1` as lines added, falling back to counting +/// newlines in `old_string`/`new_string`. +/// +/// For Write tool: counts newlines in the written content as lines added. +fn accumulate_code_stats_from_turns(base_stats: &mut BaseStats, turns: &[DialogTurnData]) { + let mut modified_files: HashSet = HashSet::new(); + + for turn in turns { + for round in &turn.model_rounds { + for ti in &round.tool_items { + let Some(ref result_data) = ti.tool_result else { + continue; + }; + if !result_data.success { + continue; + } + + match ti.tool_name.as_str() { + "Edit" => { + let result = &result_data.result; + + if let Some(fp) = result.get("file_path").and_then(|v| v.as_str()) { + modified_files.insert(fp.to_string()); + } + + let (lines_removed, lines_added) = + if let (Some(start), Some(old_end), Some(new_end)) = ( + result.get("start_line").and_then(|v| v.as_u64()), + result.get("old_end_line").and_then(|v| v.as_u64()), + result.get("new_end_line").and_then(|v| v.as_u64()), + ) { + let removed = old_end.saturating_sub(start) + 1; + let added = new_end.saturating_sub(start) + 1; + (removed as usize, added as usize) + } else { + let old_lines = result + .get("old_string") + .and_then(|v| v.as_str()) + .map(|s| s.lines().count().max(1)) + .unwrap_or(0); + let new_lines = result + .get("new_string") + .and_then(|v| v.as_str()) + .map(|s| s.lines().count().max(1)) + .unwrap_or(0); + (old_lines, new_lines) + }; + + base_stats.total_lines_removed += lines_removed; + base_stats.total_lines_added += lines_added; + } + "Write" => { + let result = &result_data.result; + + if let Some(fp) = result.get("file_path").and_then(|v| v.as_str()) { + modified_files.insert(fp.to_string()); + } + + let input = &ti.tool_call.input; + if let Some(content) = input.get("content").and_then(|v| v.as_str()) { + base_stats.total_lines_added += 
content.lines().count().max(1); + } + } + _ => {} + } + } + } + } + + base_stats.total_files_modified += modified_files.len(); +} diff --git a/src/crates/core/src/agentic/insights/html.rs b/src/crates/core/src/agentic/insights/html.rs new file mode 100644 index 00000000..9eab26f2 --- /dev/null +++ b/src/crates/core/src/agentic/insights/html.rs @@ -0,0 +1,1309 @@ +use crate::agentic::insights::types::*; + +/// All user-visible labels in the HTML report, supporting i18n. +pub struct HtmlLabels { + pub title: &'static str, + pub subtitle_template: &'static str, // "{msgs} messages across {sessions} sessions ({analyzed} analyzed) | {start} to {end}" + pub at_a_glance: &'static str, + pub whats_working: &'static str, + pub whats_hindering: &'static str, + pub quick_wins: &'static str, + pub looking_ahead: &'static str, + pub nav_work: &'static str, + pub nav_usage: &'static str, + pub nav_wins: &'static str, + pub nav_friction: &'static str, + pub nav_suggestions: &'static str, + pub nav_horizon: &'static str, + pub stat_sessions: &'static str, + pub stat_messages: &'static str, + pub stat_hours: &'static str, + pub stat_days: &'static str, + pub stat_msgs_per_day: &'static str, + pub stat_median_response: &'static str, + pub stat_avg_response: &'static str, + pub section_work: &'static str, + pub section_usage: &'static str, + pub section_wins: &'static str, + pub section_friction: &'static str, + pub section_suggestions: &'static str, + pub section_horizon: &'static str, + pub chart_goals: &'static str, + pub chart_tools: &'static str, + pub chart_languages: &'static str, + pub chart_session_types: &'static str, + pub chart_tool_errors: &'static str, + pub chart_agent_types: &'static str, + pub chart_response_time: &'static str, + pub chart_time_of_day: &'static str, + pub chart_what_helped: &'static str, + pub chart_outcomes: &'static str, + pub chart_friction_types: &'static str, + pub chart_satisfaction: &'static str, + pub time_morning: &'static str, + pub 
time_afternoon: &'static str, + pub time_evening: &'static str, + pub time_night: &'static str, + pub sessions_suffix: &'static str, + pub no_data: &'static str, + pub no_project_areas: &'static str, + pub no_interaction_style: &'static str, + pub no_big_wins: &'static str, + pub no_friction: &'static str, + pub no_horizon: &'static str, + pub md_additions: &'static str, + pub copy_all_checked: &'static str, + pub features_to_try: &'static str, + pub usage_patterns: &'static str, + pub try_this_prompt: &'static str, + pub copied: &'static str, + pub median_label: &'static str, + pub average_label: &'static str, + pub stat_lines: &'static str, + pub stat_files: &'static str, +} + +impl HtmlLabels { + pub fn for_locale(locale: &str) -> Self { + if locale.starts_with("zh") { + Self::zh() + } else { + Self::en() + } + } + + pub fn en() -> Self { + HtmlLabels { + title: "BitFun Insights", + subtitle_template: "{msgs} messages across {sessions} sessions ({analyzed} analyzed) | {start} to {end}", + at_a_glance: "At a Glance", + whats_working: "What's working:", + whats_hindering: "What's hindering you:", + quick_wins: "Quick wins to try:", + looking_ahead: "Looking ahead:", + nav_work: "What You Work On", + nav_usage: "How You Use BitFun", + nav_wins: "Impressive Things", + nav_friction: "Where Things Go Wrong", + nav_suggestions: "Suggestions", + nav_horizon: "On the Horizon", + stat_sessions: "Sessions", + stat_messages: "Messages", + stat_hours: "Hours", + stat_days: "Days", + stat_msgs_per_day: "Msgs/Day", + stat_median_response: "Median Response", + stat_avg_response: "Avg Response", + section_work: "What You Work On", + section_usage: "How You Use BitFun", + section_wins: "Impressive Things You Did", + section_friction: "Where Things Go Wrong", + section_suggestions: "Suggestions", + section_horizon: "On the Horizon", + chart_goals: "What You Wanted", + chart_tools: "Top Tools Used", + chart_languages: "Languages", + chart_session_types: "Session Types", + 
chart_tool_errors: "Tool Errors Encountered", + chart_agent_types: "Agent Types", + chart_response_time: "User Response Time Distribution", + chart_time_of_day: "Messages by Time of Day", + chart_what_helped: "What Helped Most", + chart_outcomes: "Outcomes", + chart_friction_types: "Primary Friction Types", + chart_satisfaction: "Satisfaction (Inferred)", + time_morning: "Morning (6-12)", + time_afternoon: "Afternoon (12-18)", + time_evening: "Evening (18-24)", + time_night: "Night (0-6)", + sessions_suffix: "sessions", + no_data: "No data", + no_project_areas: "No project areas identified.", + no_interaction_style: "No interaction style data available.", + no_big_wins: "No big wins identified yet.", + no_friction: "No significant friction points found.", + no_horizon: "No horizon workflows identified.", + md_additions: "BITFUN.md Additions", + copy_all_checked: "Copy All Checked", + features_to_try: "Features to Try", + usage_patterns: "Usage Patterns", + try_this_prompt: "Try this prompt:", + copied: "Copied!", + median_label: "Median", + average_label: "Average", + stat_lines: "Lines", + stat_files: "Files", + } + } + + pub fn zh() -> Self { + HtmlLabels { + title: "BitFun 洞察", + subtitle_template: "{msgs} 条消息,{sessions} 个会话({analyzed} 个已分析)| {start} 至 {end}", + at_a_glance: "概览", + whats_working: "做得好的:", + whats_hindering: "遇到的阻碍:", + quick_wins: "快速提升:", + looking_ahead: "展望未来:", + nav_work: "工作领域", + nav_usage: "使用方式", + nav_wins: "亮眼成果", + nav_friction: "问题所在", + nav_suggestions: "建议", + nav_horizon: "未来展望", + stat_sessions: "会话", + stat_messages: "消息", + stat_hours: "小时", + stat_days: "天", + stat_msgs_per_day: "消息/天", + stat_median_response: "中位响应", + stat_avg_response: "平均响应", + section_work: "工作领域", + section_usage: "你如何使用 BitFun", + section_wins: "亮眼成果", + section_friction: "问题所在", + section_suggestions: "建议", + section_horizon: "未来展望", + chart_goals: "你的需求", + chart_tools: "常用工具", + chart_languages: "编程语言", + chart_session_types: "会话类型", + 
chart_tool_errors: "工具错误统计", + chart_agent_types: "智能体类型", + chart_response_time: "用户响应时间分布", + chart_time_of_day: "按时段分布", + chart_what_helped: "最有帮助的方面", + chart_outcomes: "结果分布", + chart_friction_types: "主要摩擦类型", + chart_satisfaction: "满意度(推断)", + time_morning: "上午 (6-12)", + time_afternoon: "下午 (12-18)", + time_evening: "晚上 (18-24)", + time_night: "凌晨 (0-6)", + sessions_suffix: "个会话", + no_data: "暂无数据", + no_project_areas: "未识别到项目领域。", + no_interaction_style: "暂无交互风格数据。", + no_big_wins: "暂未识别到亮眼成果。", + no_friction: "未发现明显摩擦点。", + no_horizon: "暂未识别到未来工作流。", + md_additions: "BITFUN.md 补充", + copy_all_checked: "复制选中项", + features_to_try: "推荐功能", + usage_patterns: "使用模式", + try_this_prompt: "试试这个提示:", + copied: "已复制!", + median_label: "中位数", + average_label: "平均值", + stat_lines: "行", + stat_files: "文件", + } + } +} + +pub fn generate_html(report: &InsightsReport, locale: &str) -> String { + let l = HtmlLabels::for_locale(locale); + + let subtitle = l.subtitle_template + .replace("{msgs}", &report.total_messages.to_string()) + .replace("{sessions}", &report.total_sessions.to_string()) + .replace("{analyzed}", &report.analyzed_sessions.to_string()) + .replace("{start}", &report.date_range.start[..10.min(report.date_range.start.len())]) + .replace("{end}", &report.date_range.end[..10.min(report.date_range.end.len())]); + + let at_a_glance = render_at_a_glance(&report.at_a_glance, &l); + let nav_toc = render_nav_toc(&l); + let stats_row = render_stats_row(report, &l); + let project_areas = render_project_areas(&report.project_areas, &l); + let basic_charts = render_basic_charts(&report.stats, &l); + let interaction_style = render_interaction_style(&report.interaction_style, &l); + let usage_charts = render_usage_charts(&report.stats, &l); + let wins_intro_html = if report.wins_intro.is_empty() { + String::new() + } else { + format!( + r#"

{}

"#, + markdown_inline(&report.wins_intro) + ) + }; + let big_wins = render_big_wins(&report.big_wins, &l); + let outcome_charts = render_outcome_charts(&report.stats, &l); + let friction_intro_html = if report.friction_intro.is_empty() { + String::new() + } else { + format!( + r#"

{}

"#, + markdown_inline(&report.friction_intro) + ) + }; + let friction = render_friction_categories(&report.friction_categories, &l); + let friction_charts = render_friction_charts(&report.stats, &l); + let suggestions = render_suggestions(&report.suggestions, &l); + let horizon = render_horizon(&report.horizon_intro, &report.on_the_horizon, &l); + let fun_ending = render_fun_ending(&report.fun_ending); + + let js_with_labels = JS_SCRIPT + .replace("__COPIED__", l.copied) + .replace("__COPY_ALL_CHECKED__", l.copy_all_checked); + + format!( + r#" + + + + {page_title} + + + + +
+

{page_title}

+

{subtitle}

+ + {at_a_glance} + {nav_toc} + {stats_row} + +

{section_work}

+ {project_areas} + + {basic_charts} + +

{section_usage}

+ {interaction_style} + + {usage_charts} + +

{section_wins}

+ {wins_intro} + {big_wins} + + {outcome_charts} + +

{section_friction}

+ {friction_intro} + {friction} + + {friction_charts} + +

{section_suggestions}

+ {suggestions} + +

{section_horizon}

+ {horizon} + + {fun_ending} +
+ + +"#, + CSS = CSS_STYLES, + JS = js_with_labels, + page_title = html_escape(l.title), + subtitle = html_escape(&subtitle), + section_work = html_escape(l.section_work), + section_usage = html_escape(l.section_usage), + section_wins = html_escape(l.section_wins), + section_friction = html_escape(l.section_friction), + section_suggestions = html_escape(l.section_suggestions), + section_horizon = html_escape(l.section_horizon), + at_a_glance = at_a_glance, + nav_toc = nav_toc, + stats_row = stats_row, + project_areas = project_areas, + basic_charts = basic_charts, + interaction_style = interaction_style, + usage_charts = usage_charts, + wins_intro = wins_intro_html, + big_wins = big_wins, + outcome_charts = outcome_charts, + friction_intro = friction_intro_html, + friction = friction, + friction_charts = friction_charts, + suggestions = suggestions, + horizon = horizon, + fun_ending = fun_ending, + ) +} + +fn render_at_a_glance(aag: &AtAGlance, l: &HtmlLabels) -> String { + format!( + r##"
+
{title}
+
+
{working} {working_text} {nav_wins} →
+
{hindering} {hindering_text} {nav_friction} →
+
{quick} {quick_text} {nav_suggestions} →
+
{ahead} {ahead_text} {nav_horizon} →
+
+
"##, + title = html_escape(l.at_a_glance), + working = html_escape(l.whats_working), + working_text = markdown_inline(&aag.whats_working), + hindering = html_escape(l.whats_hindering), + hindering_text = markdown_inline(&aag.whats_hindering), + quick = html_escape(l.quick_wins), + quick_text = markdown_inline(&aag.quick_wins), + ahead = html_escape(l.looking_ahead), + ahead_text = markdown_inline(&aag.looking_ahead), + nav_wins = html_escape(l.section_wins), + nav_friction = html_escape(l.section_friction), + nav_suggestions = html_escape(l.section_suggestions), + nav_horizon = html_escape(l.section_horizon), + ) +} + +fn render_nav_toc(l: &HtmlLabels) -> String { + format!( + r##""##, + html_escape(l.nav_work), + html_escape(l.nav_usage), + html_escape(l.nav_wins), + html_escape(l.nav_friction), + html_escape(l.nav_suggestions), + html_escape(l.nav_horizon), + ) +} + +fn render_stats_row(report: &InsightsReport, l: &HtmlLabels) -> String { + let response_time_stats = match ( + report.stats.median_response_time_secs, + report.stats.avg_response_time_secs, + ) { + (Some(median), Some(avg)) => format!( + r#"
{}
{}
+
{}
{}
"#, + format_duration_short(median), + html_escape(l.stat_median_response), + format_duration_short(avg), + html_escape(l.stat_avg_response), + ), + _ => String::new(), + }; + + let code_stats = if report.stats.total_lines_added > 0 || report.stats.total_lines_removed > 0 + { + format!( + r#"
+{}/-{}
{}
+
{}
{}
"#, + format_number(report.stats.total_lines_added), + format_number(report.stats.total_lines_removed), + html_escape(l.stat_lines), + format_number(report.stats.total_files_modified), + html_escape(l.stat_files), + ) + } else { + String::new() + }; + + format!( + r#"
+{code_stats} +
{sessions}
{l_sessions}
+
{messages}
{l_messages}
+
{hours:.1}
{l_hours}
+
{days}
{l_days}
+
{mpd:.1}
{l_mpd}
+{response_time_stats} +
"#, + sessions = report.total_sessions, + messages = report.total_messages, + hours = report.stats.total_hours, + days = report.days_covered, + mpd = report.stats.msgs_per_day, + l_sessions = html_escape(l.stat_sessions), + l_messages = html_escape(l.stat_messages), + l_hours = html_escape(l.stat_hours), + l_days = html_escape(l.stat_days), + l_mpd = html_escape(l.stat_msgs_per_day), + ) +} + +fn format_duration_short(secs: f64) -> String { + if secs < 60.0 { + format!("{:.0}s", secs) + } else if secs < 3600.0 { + format!("{:.1}m", secs / 60.0) + } else { + format!("{:.1}h", secs / 3600.0) + } +} + +fn format_number(n: usize) -> String { + if n >= 1_000_000 { + format!("{:.1}M", n as f64 / 1_000_000.0) + } else if n >= 1_000 { + format!("{:.1}K", n as f64 / 1_000.0) + } else { + n.to_string() + } +} + +fn render_project_areas(areas: &[ProjectArea], l: &HtmlLabels) -> String { + if areas.is_empty() { + return format!(r#"
{}
"#, html_escape(l.no_project_areas)); + } + + let items: Vec = areas + .iter() + .map(|a| { + format!( + r#"
+
+ {name} + ~{count} {suffix} +
+
{desc}
+
"#, + name = html_escape(&a.name), + count = a.session_count, + suffix = html_escape(l.sessions_suffix), + desc = markdown_inline(&a.description), + ) + }) + .collect(); + + format!( + r#"
{}
"#, + items.join("\n") + ) +} + +// ============ Charts split by section ============ + +fn render_basic_charts(stats: &InsightsStats, l: &HtmlLabels) -> String { + let goals_chart = render_bar_chart(l.chart_goals, &stats.top_goals, "#2563eb", 6); + let tools_chart = render_bar_chart(l.chart_tools, &stats.top_tools, "#0891b2", 6); + + let mut lang_items: Vec<(String, u32)> = stats.languages.iter().map(|(k, v)| (k.clone(), *v)).collect(); + lang_items.sort_by(|a, b| b.1.cmp(&a.1)); + lang_items.truncate(6); + let lang_chart = render_bar_chart(l.chart_languages, &lang_items, "#10b981", 6); + + let mut type_items: Vec<(String, u32)> = stats.session_types.iter().map(|(k, v)| (k.clone(), *v)).collect(); + type_items.sort_by(|a, b| b.1.cmp(&a.1)); + type_items.truncate(6); + let types_chart = render_bar_chart(l.chart_session_types, &type_items, "#8b5cf6", 6); + + let row1 = wrap_charts_row(&goals_chart, &tools_chart); + let row2 = wrap_charts_row(&lang_chart, &types_chart); + format!("{}{}", row1, row2) +} + +fn render_usage_charts(stats: &InsightsStats, l: &HtmlLabels) -> String { + let mut html = String::new(); + + if !stats.response_time_buckets.is_empty() { + let response_time_chart = render_response_time_chart(&stats.response_time_buckets, stats, l); + html.push_str(&response_time_chart); + } + + let time_of_day_chart = render_time_of_day_chart(&stats.hour_counts, l); + + let mut tool_error_items: Vec<(String, u32)> = stats.tool_errors.iter().map(|(k, v)| (k.clone(), *v)).collect(); + tool_error_items.sort_by(|a, b| b.1.cmp(&a.1)); + tool_error_items.truncate(6); + let tool_errors_chart = render_bar_chart(l.chart_tool_errors, &tool_error_items, "#dc2626", 6); + + let mut agent_types_chart = String::new(); + if !stats.agent_types.is_empty() { + let mut agent_type_items: Vec<(String, u32)> = stats.agent_types.iter().map(|(k, v)| (k.clone(), *v)).collect(); + agent_type_items.sort_by(|a, b| b.1.cmp(&a.1)); + agent_type_items.truncate(6); + agent_types_chart = 
render_bar_chart(l.chart_agent_types, &agent_type_items, "#f97316", 6); + } + + html.push_str(&wrap_charts_row(&time_of_day_chart, &tool_errors_chart)); + if !agent_types_chart.is_empty() { + html.push_str(&wrap_charts_row(&agent_types_chart, "")); + } + + html +} + +fn render_outcome_charts(stats: &InsightsStats, l: &HtmlLabels) -> String { + let has_success = !stats.success.is_empty(); + let has_outcomes = !stats.outcomes.is_empty(); + + if !has_success && !has_outcomes { + return String::new(); + } + + let mut success_items: Vec<(String, u32)> = stats.success.iter().map(|(k, v)| (k.clone(), *v)).collect(); + success_items.sort_by(|a, b| b.1.cmp(&a.1)); + success_items.truncate(6); + let success_chart = render_bar_chart(l.chart_what_helped, &success_items, "#16a34a", 6); + + let mut outcome_items: Vec<(String, u32)> = stats.outcomes.iter().map(|(k, v)| (k.clone(), *v)).collect(); + outcome_items.sort_by(|a, b| b.1.cmp(&a.1)); + outcome_items.truncate(6); + let outcomes_chart = render_bar_chart(l.chart_outcomes, &outcome_items, "#8b5cf6", 6); + + wrap_charts_row(&success_chart, &outcomes_chart) +} + +fn render_friction_charts(stats: &InsightsStats, l: &HtmlLabels) -> String { + let has_friction = !stats.friction.is_empty(); + let has_satisfaction = !stats.satisfaction.is_empty(); + + if !has_friction && !has_satisfaction { + return String::new(); + } + + let mut friction_items: Vec<(String, u32)> = stats.friction.iter().map(|(k, v)| (k.clone(), *v)).collect(); + friction_items.sort_by(|a, b| b.1.cmp(&a.1)); + friction_items.truncate(6); + let friction_chart = render_bar_chart(l.chart_friction_types, &friction_items, "#dc2626", 6); + + let mut satisfaction_items: Vec<(String, u32)> = stats.satisfaction.iter().map(|(k, v)| (k.clone(), *v)).collect(); + satisfaction_items.sort_by(|a, b| b.1.cmp(&a.1)); + satisfaction_items.truncate(6); + let satisfaction_chart = render_bar_chart(l.chart_satisfaction, &satisfaction_items, "#eab308", 6); + + 
wrap_charts_row(&friction_chart, &satisfaction_chart) +} + +/// Wraps one or two chart cards into a layout row. +/// - Two non-empty cards → 2-column grid `.charts-row`. +/// - One non-empty card → standalone full-width (no grid wrapper, just margin). +/// - Both empty → empty string. +fn wrap_charts_row(card_a: &str, card_b: &str) -> String { + match (card_a.is_empty(), card_b.is_empty()) { + (true, true) => String::new(), + (false, true) => format!(r#"
{}
"#, card_a), + (true, false) => format!(r#"
{}
"#, card_b), + (false, false) => format!(r#"
{}{}
"#, card_a, card_b), + } +} + +// ============ Chart helpers ============ + +fn render_response_time_chart(buckets: &std::collections::HashMap, stats: &InsightsStats, l: &HtmlLabels) -> String { + let bucket_order = ["2-10s", "10-30s", "30s-1m", "1-2m", "2-5m", "5-15m", ">15m"]; + let ordered_items: Vec<(String, u32)> = bucket_order + .iter() + .filter_map(|&label| { + buckets.get(label).and_then(|&v| if v > 0 { Some((label.to_string(), v)) } else { None }) + }) + .collect(); + + if ordered_items.is_empty() { + return String::new(); + } + + let max_val = ordered_items.iter().map(|(_, v)| *v).max().unwrap_or(1) as f64; + let bars: String = ordered_items.iter().map(|(label, value)| { + let pct = (*value as f64 / max_val) * 100.0; + format!( + r#"
{}
{}
"#, + html_escape(label), pct, value, + ) + }).collect(); + + let footer = match (stats.median_response_time_secs, stats.avg_response_time_secs) { + (Some(median), Some(avg)) => format!( + r#"
{}: {:.1}s • {}: {:.1}s
"#, + html_escape(l.median_label), median, html_escape(l.average_label), avg, + ), + _ => String::new(), + }; + + format!( + r#"
{}
{}{}
"#, + html_escape(l.chart_response_time), bars, footer, + ) +} + +fn render_time_of_day_chart(hour_counts: &std::collections::HashMap, l: &HtmlLabels) -> String { + if hour_counts.is_empty() { + return format!( + r#"
{}
{}
"#, + html_escape(l.chart_time_of_day), html_escape(l.no_data), + ); + } + + let hour_json: Vec = (0..24) + .map(|h| format!("\"{}\":{}", h, hour_counts.get(&h).copied().unwrap_or(0))) + .collect(); + + format!( + r#"
+
+ {title} + +
+
+ +
"#, + title = html_escape(l.chart_time_of_day), + hour_data = hour_json.join(","), + lm = l.time_morning, + la = l.time_afternoon, + le = l.time_evening, + ln = l.time_night, + ) +} + +fn render_bar_chart(title: &str, items: &[(String, u32)], color: &str, max_items: usize) -> String { + let non_zero: Vec<&(String, u32)> = items.iter().filter(|(_, v)| *v > 0).collect(); + + if non_zero.is_empty() { + return String::new(); + } + + let max_val = non_zero.iter().map(|(_, v)| *v).max().unwrap_or(1) as f64; + let bars: Vec = non_zero + .iter() + .take(max_items) + .map(|(label, value)| { + let pct = (*value as f64 / max_val) * 100.0; + let display_label = label + .replace('_', " ") + .split_whitespace() + .map(|w| { + let mut c = w.chars(); + match c.next() { + None => String::new(), + Some(f) => f.to_uppercase().to_string() + c.as_str(), + } + }) + .collect::>() + .join(" "); + format!( + r#"
+
{}
+
+
{}
+
"#, + html_escape(&display_label), + pct, + color, + value, + ) + }) + .collect(); + + format!( + r#"
{}
{}
"#, + html_escape(title), + bars.join("\n"), + ) +} + +// ============ Content sections ============ + +fn render_interaction_style(style: &InteractionStyle, l: &HtmlLabels) -> String { + if style.narrative.is_empty() && style.key_patterns.is_empty() { + return format!(r#"
{}
"#, html_escape(l.no_interaction_style)); + } + + let patterns_html = if style.key_patterns.is_empty() { + String::new() + } else { + let items: Vec = style + .key_patterns + .iter() + .map(|p| format!(r#"
{}
"#, markdown_inline(p))) + .collect(); + items.join("\n") + }; + + format!( + r#"
+

{}

+ {} +
"#, + markdown_inline(&style.narrative), + patterns_html, + ) +} + +fn render_big_wins(wins: &[BigWin], l: &HtmlLabels) -> String { + if wins.is_empty() { + return format!(r#"
{}
"#, html_escape(l.no_big_wins)); + } + + let items: Vec = wins + .iter() + .map(|w| { + let impact_html = if w.impact.is_empty() { + String::new() + } else { + format!( + r#"
{}
"#, + markdown_inline(&w.impact) + ) + }; + format!( + r#"
+
{}
+
{}
+ {} +
"#, + html_escape(&w.title), + markdown_inline(&w.description), + impact_html, + ) + }) + .collect(); + + format!(r#"
{}
"#, items.join("\n")) +} + +fn render_friction_categories(categories: &[FrictionCategory], l: &HtmlLabels) -> String { + if categories.is_empty() { + return format!(r#"
{}
"#, html_escape(l.no_friction)); + } + + let items: Vec = categories + .iter() + .map(|f| { + let examples_html = if f.examples.is_empty() { + String::new() + } else { + let lis: Vec = f + .examples + .iter() + .map(|e| format!("
  • {}
  • ", markdown_inline(e))) + .collect(); + format!( + r#"
      {}
    "#, + lis.join("\n") + ) + }; + + let suggestion_html = if f.suggestion.is_empty() { + String::new() + } else { + format!( + r#"
    {}
    "#, + markdown_inline(&f.suggestion) + ) + }; + + format!( + r#"
    +
    {}
    +
    {}
    + {} + {} +
    "#, + html_escape(&f.category), + markdown_inline(&f.description), + examples_html, + suggestion_html, + ) + }) + .collect(); + + format!( + r#"
    {}
    "#, + items.join("\n") + ) +} + +fn render_suggestions(suggestions: &InsightsSuggestions, l: &HtmlLabels) -> String { + let mut sections = Vec::new(); + + if !suggestions.bitfun_md_additions.is_empty() { + let items: Vec = suggestions + .bitfun_md_additions + .iter() + .enumerate() + .map(|(i, md)| { + format!( + r#"
    + +
    {}
    + +
    {}
    +
    "#, + html_escape(&md.content), + js_escape(&md.content), + html_escape(&md.rationale), + i = i, + ) + }) + .collect(); + + sections.push(format!( + r#"
    +

    {md_title}

    +
    + +
    + {items} +
    "#, + md_title = html_escape(l.md_additions), + copy_all = html_escape(l.copy_all_checked), + items = items.join("\n"), + )); + } + + if !suggestions.features_to_try.is_empty() { + let items: Vec = suggestions + .features_to_try + .iter() + .map(|f| { + let code_html = if f.example_usage.is_empty() { + String::new() + } else { + format!( + r#"
    + {} + +
    "#, + html_escape(&f.example_usage), + js_escape(&f.example_usage), + ) + }; + + format!( + r#"
    +
    {}
    +
    {}
    +
    {}
    + {} +
    "#, + html_escape(&f.feature), + markdown_inline(&f.description), + markdown_inline(&f.benefit), + code_html, + ) + }) + .collect(); + + sections.push(format!( + r#"

    {}

    +
    {}
    "#, + html_escape(l.features_to_try), + items.join("\n") + )); + } + + if !suggestions.usage_patterns.is_empty() { + let items: Vec = suggestions + .usage_patterns + .iter() + .map(|p| { + let detail_html = if p.detail.is_empty() { + String::new() + } else { + format!( + r#"
    {}
    "#, + markdown_inline(&p.detail) + ) + }; + + let prompt_html = if p.suggested_prompt.is_empty() { + String::new() + } else { + format!( + r#"
    +
    {}
    + {} + +
    "#, + html_escape(l.try_this_prompt), + html_escape(&p.suggested_prompt), + js_escape(&p.suggested_prompt), + ) + }; + + format!( + r#"
    +
    {}
    +
    {}
    + {} + {} +
    "#, + html_escape(&p.pattern), + markdown_inline(&p.description), + detail_html, + prompt_html, + ) + }) + .collect(); + + sections.push(format!( + r#"

    {}

    +
    {}
    "#, + html_escape(l.usage_patterns), + items.join("\n") + )); + } + + sections.join("\n") +} + +fn render_horizon(intro: &str, workflows: &[HorizonWorkflow], l: &HtmlLabels) -> String { + if workflows.is_empty() { + return format!(r#"
    {}
    "#, html_escape(l.no_horizon)); + } + + let intro_html = if intro.is_empty() { + String::new() + } else { + format!( + r#"

    {}

    "#, + markdown_inline(intro) + ) + }; + + let items: Vec = workflows + .iter() + .map(|h| { + let how_to_try_html = if h.how_to_try.is_empty() { + String::new() + } else { + format!( + r#"
    {}
    "#, + markdown_inline(&h.how_to_try) + ) + }; + + let prompt_html = if h.copyable_prompt.is_empty() { + String::new() + } else { + let escaped = html_escape(&h.copyable_prompt); + let js_escaped = h.copyable_prompt.replace('\\', "\\\\").replace('\'', "\\'").replace('\n', "\\n"); + format!( + r#"
    +
    {try_prompt}
    +
    + {code} + +
    +
    "#, + try_prompt = html_escape(l.try_this_prompt), + code = escaped, + js_code = js_escaped, + ) + }; + + format!( + r#"
    +
    {}
    +
    {}
    + {} + {} +
    "#, + html_escape(&h.title), + markdown_inline(&h.whats_possible), + how_to_try_html, + prompt_html, + ) + }) + .collect(); + + format!( + r#"{}
    {}
    "#, + intro_html, + items.join("\n") + ) +} + +fn render_fun_ending(ending: &Option) -> String { + match ending { + Some(fe) => format!( + r#"
    +
    {}
    +
    {}
    +
    "#, + html_escape(&fe.headline), + markdown_inline(&fe.detail), + ), + None => String::new(), + } +} + +// ============ Utilities ============ + +fn html_escape(s: &str) -> String { + s.replace('&', "&") + .replace('<', "<") + .replace('>', ">") + .replace('"', """) + .replace('\'', "'") +} + +/// Convert simple markdown inline formatting to HTML. +/// Handles **bold** and *italic* after html_escape. +fn markdown_inline(s: &str) -> String { + let escaped = html_escape(s); + let mut result = String::with_capacity(escaped.len() + 64); + let chars: Vec = escaped.chars().collect(); + let len = chars.len(); + let mut i = 0; + + while i < len { + if i + 1 < len && chars[i] == '*' && chars[i + 1] == '*' { + if let Some(end) = find_closing_double_star(&chars, i + 2) { + result.push_str(""); + for &c in &chars[i + 2..end] { + result.push(c); + } + result.push_str(""); + i = end + 2; + continue; + } + } + if chars[i] == '*' && (i + 1 < len && chars[i + 1] != '*') { + if let Some(end) = find_closing_single_star(&chars, i + 1) { + result.push_str(""); + for &c in &chars[i + 1..end] { + result.push(c); + } + result.push_str(""); + i = end + 1; + continue; + } + } + result.push(chars[i]); + i += 1; + } + + result +} + +fn find_closing_double_star(chars: &[char], start: usize) -> Option { + let len = chars.len(); + let mut i = start; + while i + 1 < len { + if chars[i] == '*' && chars[i + 1] == '*' { + if i > start { + return Some(i); + } + } + i += 1; + } + None +} + +fn find_closing_single_star(chars: &[char], start: usize) -> Option { + let len = chars.len(); + let mut i = start; + while i < len { + if chars[i] == '*' { + if i + 1 >= len || chars[i + 1] != '*' { + if i > start { + return Some(i); + } + } + } + i += 1; + } + None +} + +fn js_escape(s: &str) -> String { + s.replace('\\', "\\\\") + .replace('\'', "\\'") + .replace('\n', "\\n") + .replace('\r', "") +} + +const CSS_STYLES: &str = r#" + * { box-sizing: border-box; margin: 0; padding: 0; } + body { font-family: 
'Inter', -apple-system, BlinkMacSystemFont, sans-serif; background: #f8fafc; color: #334155; line-height: 1.65; padding: 48px 24px; } + .container { max-width: 800px; margin: 0 auto; } + h1 { font-size: 32px; font-weight: 700; color: #0f172a; margin-bottom: 8px; } + h2 { font-size: 20px; font-weight: 600; color: #0f172a; margin-top: 48px; margin-bottom: 16px; } + h3 { font-size: 16px; font-weight: 600; color: #0f172a; margin-top: 24px; margin-bottom: 12px; } + .subtitle { color: #64748b; font-size: 15px; margin-bottom: 32px; } + .nav-toc { display: flex; flex-wrap: wrap; gap: 8px; margin: 24px 0 32px 0; padding: 16px; background: white; border-radius: 8px; border: 1px solid #e2e8f0; } + .nav-toc a { font-size: 12px; color: #64748b; text-decoration: none; padding: 6px 12px; border-radius: 6px; background: #f1f5f9; transition: all 0.15s; } + .nav-toc a:hover { background: #e2e8f0; color: #334155; } + .stats-row { display: flex; gap: 24px; margin-bottom: 40px; padding: 20px 0; border-top: 1px solid #e2e8f0; border-bottom: 1px solid #e2e8f0; flex-wrap: wrap; } + .stat { text-align: center; } + .stat-value { font-size: 24px; font-weight: 700; color: #0f172a; } + .stat-label { font-size: 11px; color: #64748b; text-transform: uppercase; } + .at-a-glance { background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%); border: 1px solid #f59e0b; border-radius: 12px; padding: 20px 24px; margin-bottom: 32px; } + .glance-title { font-size: 16px; font-weight: 700; color: #92400e; margin-bottom: 16px; } + .glance-sections { display: flex; flex-direction: column; gap: 12px; } + .glance-section { font-size: 14px; color: #78350f; line-height: 1.6; } + .glance-section strong { color: #92400e; } + .see-more { color: #b45309; text-decoration: none; font-size: 13px; white-space: nowrap; } + .see-more:hover { text-decoration: underline; } + .project-areas { display: flex; flex-direction: column; gap: 12px; margin-bottom: 32px; } + .project-area { background: white; border: 1px solid 
#e2e8f0; border-radius: 8px; padding: 16px; } + .area-header { display: flex; justify-content: space-between; align-items: center; margin-bottom: 8px; } + .area-name { font-weight: 600; font-size: 15px; color: #0f172a; } + .area-count { font-size: 12px; color: #64748b; background: #f1f5f9; padding: 2px 8px; border-radius: 4px; } + .area-desc { font-size: 14px; color: #475569; line-height: 1.5; } + .narrative { background: white; border: 1px solid #e2e8f0; border-radius: 8px; padding: 20px; margin-bottom: 24px; } + .narrative p { margin-bottom: 12px; font-size: 14px; color: #475569; line-height: 1.7; } + .key-insight { background: #f0fdf4; border: 1px solid #bbf7d0; border-radius: 8px; padding: 12px 16px; margin-top: 12px; font-size: 14px; color: #166534; } + .section-intro { font-size: 14px; color: #475569; line-height: 1.6; margin-bottom: 16px; } + .big-wins { display: flex; flex-direction: column; gap: 12px; margin-bottom: 24px; } + .big-win { background: #f0fdf4; border: 1px solid #bbf7d0; border-radius: 8px; padding: 16px; } + .big-win-title { font-weight: 600; font-size: 15px; color: #166534; margin-bottom: 8px; } + .big-win-desc { font-size: 14px; color: #15803d; line-height: 1.5; } + .big-win-impact { font-size: 12px; color: #166534; opacity: 0.8; font-style: italic; margin-top: 6px; } + .friction-categories { display: flex; flex-direction: column; gap: 16px; margin-bottom: 24px; } + .friction-category { background: #fef2f2; border: 1px solid #fca5a5; border-radius: 8px; padding: 16px; } + .friction-title { font-weight: 600; font-size: 15px; color: #991b1b; margin-bottom: 6px; } + .friction-desc { font-size: 13px; color: #7f1d1d; margin-bottom: 10px; } + .friction-examples { margin: 0 0 0 20px; font-size: 13px; color: #334155; } + .friction-examples li { margin-bottom: 4px; } + .claude-md-section { background: #eff6ff; border: 1px solid #bfdbfe; border-radius: 8px; padding: 16px; margin-bottom: 20px; } + .claude-md-section h3 { font-size: 14px; font-weight: 
600; color: #1e40af; margin: 0 0 12px 0; } + .claude-md-actions { margin-bottom: 12px; padding-bottom: 12px; border-bottom: 1px solid #dbeafe; } + .copy-all-btn { background: #2563eb; color: white; border: none; border-radius: 4px; padding: 6px 12px; font-size: 12px; cursor: pointer; font-weight: 500; transition: all 0.2s; } + .copy-all-btn:hover { background: #1d4ed8; } + .copy-all-btn.copied { background: #16a34a; } + .claude-md-item { display: flex; flex-wrap: wrap; align-items: flex-start; gap: 8px; padding: 10px 0; border-bottom: 1px solid #dbeafe; } + .claude-md-item:last-child { border-bottom: none; } + .cmd-checkbox { margin-top: 2px; } + .cmd-code { background: white; padding: 8px 12px; border-radius: 4px; font-size: 12px; color: #1e40af; border: 1px solid #bfdbfe; font-family: monospace; display: block; white-space: pre-wrap; word-break: break-word; flex: 1; } + .cmd-why { font-size: 12px; color: #64748b; width: 100%; padding-left: 24px; margin-top: 4px; } + .features-section, .patterns-section { display: flex; flex-direction: column; gap: 12px; margin: 16px 0; } + .feature-card { background: #f0fdf4; border: 1px solid #86efac; border-radius: 8px; padding: 16px; } + .pattern-card { background: #f0f9ff; border: 1px solid #7dd3fc; border-radius: 8px; padding: 16px; } + .feature-title, .pattern-title { font-weight: 600; font-size: 15px; color: #0f172a; margin-bottom: 6px; } + .feature-oneliner { font-size: 14px; color: #475569; margin-bottom: 8px; } + .pattern-summary { font-size: 14px; color: #475569; margin-bottom: 8px; } + .feature-why { font-size: 13px; color: #334155; line-height: 1.5; } + .feature-code { background: #f8fafc; padding: 12px; border-radius: 6px; margin-top: 12px; border: 1px solid #e2e8f0; display: flex; align-items: flex-start; gap: 8px; } + .feature-code code { flex: 1; font-family: monospace; font-size: 12px; color: #334155; white-space: pre-wrap; } + .pattern-prompt { background: #f8fafc; padding: 12px; border-radius: 6px; margin-top: 
12px; border: 1px solid #e2e8f0; } + .pattern-prompt code { font-family: monospace; font-size: 12px; color: #334155; display: block; white-space: pre-wrap; margin-bottom: 8px; } + .prompt-label { font-size: 11px; font-weight: 600; text-transform: uppercase; color: #64748b; margin-bottom: 6px; } + .copy-btn { background: #e2e8f0; border: none; border-radius: 4px; padding: 4px 8px; font-size: 11px; cursor: pointer; color: #475569; flex-shrink: 0; } + .copy-btn:hover { background: #cbd5e1; } + .charts-row { display: grid; grid-template-columns: 1fr 1fr; gap: 24px; margin: 24px 0; } + .charts-row-single { grid-template-columns: 1fr; } + .chart-card { background: white; border: 1px solid #e2e8f0; border-radius: 8px; padding: 16px; } + .chart-title { font-size: 12px; font-weight: 600; color: #64748b; text-transform: uppercase; margin-bottom: 12px; } + .bar-row { display: flex; align-items: center; margin-bottom: 6px; } + .bar-label { width: 100px; font-size: 11px; color: #475569; flex-shrink: 0; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; } + .bar-track { flex: 1; height: 6px; background: #f1f5f9; border-radius: 3px; margin: 0 8px; } + .bar-fill { height: 100%; border-radius: 3px; } + .bar-value { width: 28px; font-size: 11px; font-weight: 500; color: #64748b; text-align: right; } + .empty { color: #94a3b8; font-size: 13px; padding: 12px 0; } + .tz-select { font-size: 11px; padding: 2px 6px; border: 1px solid #e2e8f0; border-radius: 4px; background: #f8fafc; color: #475569; cursor: pointer; } + .horizon-section { display: flex; flex-direction: column; gap: 16px; } + .horizon-card { background: linear-gradient(135deg, #faf5ff 0%, #f5f3ff 100%); border: 1px solid #c4b5fd; border-radius: 8px; padding: 16px; } + .horizon-title { font-weight: 600; font-size: 15px; color: #5b21b6; margin-bottom: 8px; } + .horizon-possible { font-size: 14px; color: #334155; margin-bottom: 10px; line-height: 1.5; } + .horizon-steps { margin: 0 0 0 20px; font-size: 13px; 
color: #6b21a8; } + .horizon-steps li { margin-bottom: 4px; } + .horizon-tip { font-size: 13px; color: #5b21b6; background: #ede9fe; border-radius: 6px; padding: 8px 12px; margin-top: 10px; line-height: 1.5; } + .horizon-prompt { margin-top: 10px; } + .fun-ending { background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%); border: 1px solid #fbbf24; border-radius: 12px; padding: 24px; margin-top: 40px; text-align: center; } + .fun-headline { font-size: 18px; font-weight: 600; color: #78350f; margin-bottom: 8px; } + .fun-detail { font-size: 14px; color: #92400e; } + @media (max-width: 640px) { .charts-row { grid-template-columns: 1fr; } .stats-row { justify-content: center; } } +"#; + +const JS_SCRIPT: &str = r#" + function copyText(btn, text) { + navigator.clipboard.writeText(text).then(function() { + var orig = btn.textContent; + btn.textContent = ' __COPIED__ '; + btn.style.background = '#16a34a'; + btn.style.color = 'white'; + setTimeout(function() { + btn.textContent = orig; + btn.style.background = ''; + btn.style.color = ''; + }, 2000); + }); + } + + function copyAllChecked(btn) { + var section = btn.closest('.claude-md-section'); + var items = section.querySelectorAll('.claude-md-item'); + var texts = []; + items.forEach(function(item) { + var cb = item.querySelector('.cmd-checkbox'); + if (cb && cb.checked) { + var code = item.querySelector('.cmd-code'); + if (code) texts.push(code.textContent.trim()); + } + }); + if (texts.length === 0) return; + navigator.clipboard.writeText(texts.join('\n\n')).then(function() { + btn.textContent = '__COPIED__'; + btn.classList.add('copied'); + setTimeout(function() { + btn.textContent = '__COPY_ALL_CHECKED__'; + btn.classList.remove('copied'); + }, 2000); + }); + } + + (function initTimezoneSelector() { + var sel = document.getElementById('tz-selector'); + if (!sel || !window.__hourCountsUTC) return; + var common = [ + 'UTC', + 'America/New_York','America/Chicago','America/Denver','America/Los_Angeles', + 
'Europe/London','Europe/Paris','Europe/Berlin', + 'Asia/Tokyo','Asia/Shanghai','Asia/Kolkata','Asia/Singapore', + 'Australia/Sydney','Pacific/Auckland' + ]; + var localTz = Intl.DateTimeFormat().resolvedOptions().timeZone; + if (common.indexOf(localTz) === -1) common.unshift(localTz); + common.forEach(function(tz) { + var opt = document.createElement('option'); + opt.value = tz; + opt.textContent = tz.replace(/_/g,' '); + if (tz === localTz) opt.selected = true; + sel.appendChild(opt); + }); + updateTimeChart(); + })(); + + function updateTimeChart() { + var sel = document.getElementById('tz-selector'); + var container = document.getElementById('time-bars'); + if (!sel || !container || !window.__hourCountsUTC) return; + var tz = sel.value; + var shifted = {}; + for (var h = 0; h < 24; h++) { + var utcCount = window.__hourCountsUTC[h] || 0; + if (utcCount === 0) continue; + var d = new Date(Date.UTC(2024,0,1,h,0,0)); + var localH = parseInt(d.toLocaleString('en-US',{hour:'numeric',hour12:false,timeZone:tz})); + shifted[localH] = (shifted[localH]||0) + utcCount; + } + var labels = window.__timeLabels; + var periods = [ + {label:labels.morning, hours:[6,7,8,9,10,11]}, + {label:labels.afternoon, hours:[12,13,14,15,16,17]}, + {label:labels.evening, hours:[18,19,20,21,22,23]}, + {label:labels.night, hours:[0,1,2,3,4,5]} + ]; + var maxVal = 0; + var data = periods.map(function(p) { + var count = 0; + p.hours.forEach(function(h){count += shifted[h]||0;}); + if (count > maxVal) maxVal = count; + return {label:p.label, count:count}; + }); + var html = ''; + data.forEach(function(d) { + var pct = maxVal > 0 ? (d.count/maxVal*100) : 0; + html += '
    '+d.label+'' + +'
    ' + +''+d.count+'
    '; + }); + container.innerHTML = html; + } +"#; diff --git a/src/crates/core/src/agentic/insights/mod.rs b/src/crates/core/src/agentic/insights/mod.rs new file mode 100644 index 00000000..fe08b18c --- /dev/null +++ b/src/crates/core/src/agentic/insights/mod.rs @@ -0,0 +1,8 @@ +pub mod cancellation; +pub mod collector; +pub mod html; +pub mod service; +pub mod types; + +pub use service::InsightsService; +pub use types::*; diff --git a/src/crates/core/src/agentic/insights/prompts/areas.md b/src/crates/core/src/agentic/insights/prompts/areas.md new file mode 100644 index 00000000..b254e7ae --- /dev/null +++ b/src/crates/core/src/agentic/insights/prompts/areas.md @@ -0,0 +1,16 @@ +Analyze this BitFun usage data and identify project areas. + +RESPOND WITH ONLY A VALID JSON OBJECT: +{ + "areas": [ + {"name": "Area name", "session_count": N, "description": "2-3 sentences about what was worked on and how BitFun was used."} + ] +} + +Include 4-5 areas. Skip internal BitFun operations. + +DATA: +{aggregate_json} + +SESSION SUMMARIES: +{summaries} diff --git a/src/crates/core/src/agentic/insights/prompts/at_a_glance.md b/src/crates/core/src/agentic/insights/prompts/at_a_glance.md new file mode 100644 index 00000000..2e7872a0 --- /dev/null +++ b/src/crates/core/src/agentic/insights/prompts/at_a_glance.md @@ -0,0 +1,38 @@ +You're writing an "At a Glance" summary for a BitFun usage insights report. The goal is to help users understand their usage and improve how they use AI-assisted coding, especially as models improve. + +You have access to the full analysis results below. Synthesize them into a concise 4-part summary. + +Use this 4-part structure: + +1. **What's working** - What is the user's unique style of interacting with the AI and what are some impactful things they've done? You can include one or two details, but keep it high level since things might not be fresh in the user's memory. Don't be fluffy or overly complimentary. 
Also, don't focus on the tool calls they use. + +2. **What's hindering you** - Cover both (a) AI's fault (misunderstandings, wrong approaches, bugs) and (b) user-side friction (not providing enough context, environment issues -- ideally more general than just one project) in a single paragraph. Be honest but constructive. + +3. **Quick wins to try** - Specific BitFun features they could try, or a workflow technique if you think it's really compelling. Reference the suggestions analysis below. (Avoid stuff like "Ask AI to confirm before taking actions" or "Type out more context up front" which are less compelling.) + +4. **Looking ahead** - As we move to much more capable models over the next 3-6 months, what should they prepare for? What workflows that seem impossible now will become possible? + +Keep each section to 2-3 not-too-long sentences. Don't overwhelm the user. Don't mention specific numerical stats or underlined_categories from the session data below. Use a coaching tone. + +RESPOND WITH ONLY A VALID JSON OBJECT. Every value MUST be a plain string (never a nested object or array): +{ + "whats_working": "plain string, not an object", + "whats_hindering": "plain string combining both AI-side and user-side points, not an object", + "quick_wins": "plain string, not an object", + "looking_ahead": "plain string, not an object" +} + +SESSION DATA: +{aggregate_json} + +## Project Areas +{areas} + +## Suggestions +{suggestions} + +## Big Wins & Friction Analysis +{wins_and_friction} + +## Interaction Style +{interaction_style} diff --git a/src/crates/core/src/agentic/insights/prompts/facet_extraction.md b/src/crates/core/src/agentic/insights/prompts/facet_extraction.md new file mode 100644 index 00000000..faa2a0c0 --- /dev/null +++ b/src/crates/core/src/agentic/insights/prompts/facet_extraction.md @@ -0,0 +1,45 @@ +Analyze this BitFun session and extract structured facets. + +CRITICAL GUIDELINES: + +1. 
**goal_categories**: Count ONLY what the USER explicitly asked for. + - DO NOT count AI's autonomous codebase exploration + - DO NOT count work AI decided to do on its own + - ONLY count when user says "can you...", "please...", "I need...", "let's..." + +2. **user_satisfaction_counts**: Base ONLY on explicit user signals. + - "Yay!", "great!", "perfect!" → happy + - "thanks", "looks good", "that works" → satisfied + - "ok, now let's..." (continuing without complaint) → likely_satisfied + - "that's not right", "try again" → dissatisfied + - "this is broken", "I give up" → frustrated + +3. **friction_counts**: Be specific about what went wrong. + - misunderstood_request: AI interpreted incorrectly + - wrong_approach: Right goal, wrong solution method + - buggy_code: Code didn't work correctly + - user_rejected_action: User said no/stop to a tool call + - excessive_changes: Over-engineered or changed too much + - rate_limit: Hit usage limit + - context_lost: AI lost track of conversation context + +4. 
If very short or just warmup, use warmup_minimal for goal_category + +SESSION: +{session_transcript} + +RESPOND WITH ONLY A VALID JSON OBJECT matching this schema: +{ + "underlying_goal": "What the user fundamentally wanted to achieve", + "goal_categories": {"category_name": count, ...}, + "outcome": "fully_achieved|mostly_achieved|partially_achieved|not_achieved|unclear_from_transcript", + "user_satisfaction_counts": {"level": count, ...}, + "claude_helpfulness": "unhelpful|slightly_helpful|moderately_helpful|very_helpful|essential", + "session_type": "single_task|multi_task|iterative_refinement|exploration|quick_question", + "friction_counts": {"friction_type": count, ...}, + "friction_detail": "One sentence describing friction or empty", + "primary_success": "fast_accurate_search|correct_code_edits|good_explanations|proactive_help|multi_file_changes|good_debugging", + "brief_summary": "One sentence: what user wanted and whether they got it", + "languages_used": ["programming_language1", "programming_language2"], + "user_instructions": ["Any explicit instructions user gave to AI about how to behave"] +} diff --git a/src/crates/core/src/agentic/insights/prompts/friction.md b/src/crates/core/src/agentic/insights/prompts/friction.md new file mode 100644 index 00000000..0efbd01e --- /dev/null +++ b/src/crates/core/src/agentic/insights/prompts/friction.md @@ -0,0 +1,28 @@ +Analyze this BitFun usage data and identify where friction occurs. Use second person ("you"). + +Write a brief **intro** (1 sentence summarizing the overall friction situation). + +Then identify 2-3 **friction_categories** — major friction themes. 
For each: +- Split clearly between (a) AI's fault (misunderstandings, wrong approaches, bugs) and (b) user-side friction +- Provide specific examples from the session data +- Suggest concrete improvements +- Include the approximate count of sessions affected + +RESPOND WITH ONLY A VALID JSON OBJECT: +{ + "intro": "1 sentence summarizing friction", + "friction_categories": [ + {"category": "Concrete category name", "count": N, "description": "1-2 sentences explaining this category. Use 'you' not 'the user'.", "examples": ["Specific example with consequence", "Another example"], "suggestion": "Concrete suggestion for improvement"} + ] +} + +Include 2-3 friction categories. + +DATA: +{aggregate_json} + +SESSION SUMMARIES: +{summaries} + +FRICTION DETAILS: +{friction_details} diff --git a/src/crates/core/src/agentic/insights/prompts/fun_ending.md b/src/crates/core/src/agentic/insights/prompts/fun_ending.md new file mode 100644 index 00000000..93dab567 --- /dev/null +++ b/src/crates/core/src/agentic/insights/prompts/fun_ending.md @@ -0,0 +1,15 @@ +Analyze this BitFun usage data and find a memorable moment. + +RESPOND WITH ONLY A VALID JSON OBJECT: +{ + "headline": "A memorable QUALITATIVE moment from the transcripts - not a statistic. Something human, funny, or surprising.", + "detail": "Brief context about when/where this happened" +} + +Find something genuinely interesting or amusing from the session summaries. + +DATA: +{aggregate_json} + +SESSION SUMMARIES: +{summaries} diff --git a/src/crates/core/src/agentic/insights/prompts/horizon.md b/src/crates/core/src/agentic/insights/prompts/horizon.md new file mode 100644 index 00000000..aa0e7f92 --- /dev/null +++ b/src/crates/core/src/agentic/insights/prompts/horizon.md @@ -0,0 +1,20 @@ +Analyze this BitFun usage data and identify future opportunities. 
+ +RESPOND WITH ONLY A VALID JSON OBJECT: +{ + "intro": "1 sentence about evolving AI-assisted development", + "opportunities": [ + {"title": "Short title (4-8 words)", "whats_possible": "2-3 ambitious sentences about autonomous workflows", "how_to_try": "1-2 sentences mentioning relevant tooling", "copyable_prompt": "Detailed prompt to try"} + ] +} + +Include 3 opportunities. Think BIG - autonomous workflows, parallel agents, iterating against tests. + +DATA: +{aggregate_json} + +SESSION SUMMARIES: +{summaries} + +FRICTION DETAILS: +{friction_details} diff --git a/src/crates/core/src/agentic/insights/prompts/interaction_style.md b/src/crates/core/src/agentic/insights/prompts/interaction_style.md new file mode 100644 index 00000000..40f54e7a --- /dev/null +++ b/src/crates/core/src/agentic/insights/prompts/interaction_style.md @@ -0,0 +1,24 @@ +Analyze this BitFun usage data and describe the user's interaction style. Use second person ("you"). + +Write a **narrative** (2-3 paragraphs) about how this user interacts with the AI: +- What kind of tasks do they delegate vs. do themselves? +- How do they give instructions — detailed upfront or iterative? +- How do they react to mistakes — patient, corrective, frustrated? +- What's their typical session flow — short bursts or long deep dives? +- Do they use the AI more for exploration, implementation, debugging, or review? + +Then identify 2-4 **key_patterns** — short, insightful observations about their usage style. Each pattern should be a single sentence that captures a recurring behavior. + +Don't mention specific numerical stats. Use a coaching tone. Be honest but constructive. + +RESPOND WITH ONLY A VALID JSON OBJECT: +{ + "narrative": "2-3 paragraphs about how this user works with AI. 
Use markdown for emphasis.", + "key_patterns": ["pattern1", "pattern2", "pattern3"] +} + +DATA: +{aggregate_json} + +SESSION SUMMARIES: +{summaries} diff --git a/src/crates/core/src/agentic/insights/prompts/suggestions.md b/src/crates/core/src/agentic/insights/prompts/suggestions.md new file mode 100644 index 00000000..0ac26ef4 --- /dev/null +++ b/src/crates/core/src/agentic/insights/prompts/suggestions.md @@ -0,0 +1,104 @@ +Analyze this BitFun usage data and suggest improvements. + +## BITFUN FEATURES REFERENCE (pick from these for features_to_try): + +1. **Skills**: Create reusable prompt templates as markdown files that run with a single button or command. + - How to use: Create `.bitfun/skills/commit/SKILL.md` with instructions. Then trigger it from the Skills panel. + - Good for: repetitive workflows - commit messages, code reviews, testing, deployment, or complex multi-step workflows + - Example SKILL.md content: + ```markdown + # Commit Skill + Review all staged changes with `git diff --cached`. + Write a conventional commit message following the project's style. + Run `git commit -m ""` and report the result. + ``` + - Advanced: Skills can reference other files, include conditional logic, and chain multiple steps. + +2. **SubAgents (Task Agents)**: Custom agents you define for specific domains or tasks. SubAgents run in parallel and return results to the parent agent. + - How to use: Create agents in `.bitfun/agents/` with custom prompts and tool configurations. + - Good for: domain-specific tasks, parallel exploration, focused code review + - Example agent config (`.bitfun/agents/security-reviewer/agent.json`): + ```json + { + "name": "Security Reviewer", + "description": "Reviews code for security vulnerabilities", + "prompt_file": "prompt.md", + "tools": ["Read", "Grep", "Glob"] + } + ``` + - Parallel exploration: Launch multiple SubAgents to investigate different parts of the codebase simultaneously, then synthesize their findings. + +3. 
**MCP Servers**: Connect BitFun to external tools, databases, and APIs via Model Context Protocol. + - How to use: Configure MCP servers in settings to connect to external services. + - Good for: database queries, API integration, connecting to internal tools + - Common integrations: + - **Database**: Query PostgreSQL/MySQL directly from chat — `SELECT * FROM users WHERE ...` + - **GitHub**: Create issues, review PRs, manage releases without leaving BitFun + - **Slack/Discord**: Post messages, read channels, manage notifications + - **Notion/Linear**: Create and update project management items + - Example config: + ```json + { + "mcpServers": { + "postgres": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-postgres", "postgresql://localhost/mydb"] + } + } + } + ``` + +4. **Multiple Modes**: Switch between Agentic, Cowork, Plan, and Debug modes for different tasks. + - How to use: Select the appropriate mode from the mode switcher based on your task. + - Mode comparison: + | Mode | Best for | AI behavior | + |------|----------|-------------| + | **Agentic** | Autonomous implementation | AI plans and executes independently | + | **Cowork** | Collaborative editing | AI suggests, you approve each change | + | **Plan** | Architecture & design | AI creates detailed plans before coding | + | **Debug** | Troubleshooting | AI systematically investigates issues | + - Tip: Start with Plan mode for complex tasks, then switch to Agentic for implementation. + +5. **CLI Exec (Headless)**: Run BitFun non-interactively from scripts and CI/CD pipelines. 
+ - How to use: `bitfun exec "fix lint errors" --tools "Edit,Read,Bash"` + - Good for: CI/CD integration, batch code fixes, automated reviews + - CI/CD examples: + ```bash + # Pre-commit hook: auto-fix lint errors + bitfun exec "fix all lint errors in staged files" --tools "Edit,Read,Bash" + + # PR review bot + bitfun exec "review changes in this PR for security issues" --tools "Read,Grep,Glob" + + # Automated documentation + bitfun exec "update API docs for all changed endpoints" --tools "Read,Edit,Glob" + ``` + +RESPOND WITH ONLY A VALID JSON OBJECT: +{ + "bitfun_md_additions": [ + {"section": "Section name in BITFUN.md", "content": "A specific line or block to add based on workflow patterns", "rationale": "1 sentence explaining why this would help based on actual sessions"} + ], + "features_to_try": [ + {"feature": "Feature name from BITFUN FEATURES REFERENCE above", "description": "What it does", "example_usage": "Actual command or config to copy", "benefit": "Why this would help YOU based on your sessions"} + ], + "usage_patterns": [ + {"pattern": "Short title", "description": "1-2 sentence summary of the pattern", "detail": "3-4 sentences explaining how this applies to YOUR work", "suggested_prompt": "A specific prompt to copy and try"} + ] +} + +IMPORTANT for bitfun_md_additions: PRIORITIZE instructions that appear MULTIPLE TIMES in the user data. If user told AI the same thing in 2+ sessions (e.g., 'always run tests', 'use TypeScript'), that's a PRIME candidate - they shouldn't have to repeat themselves. + +IMPORTANT for features_to_try: Pick 2-3 from the BITFUN FEATURES REFERENCE above. Include concrete, copy-pasteable example_usage for each. Tailor the benefit to the user's actual workflow patterns. 
+ +DATA: +{aggregate_json} + +SESSION SUMMARIES: +{summaries} + +FRICTION DETAILS: +{friction_details} + +USER INSTRUCTIONS TO AI: +{user_instructions} diff --git a/src/crates/core/src/agentic/insights/prompts/wins.md b/src/crates/core/src/agentic/insights/prompts/wins.md new file mode 100644 index 00000000..f5d0b79b --- /dev/null +++ b/src/crates/core/src/agentic/insights/prompts/wins.md @@ -0,0 +1,25 @@ +Analyze this BitFun usage data and identify what's working well for this user. Use second person ("you"). + +Write a brief **intro** (1 sentence of context about the user's overall usage). + +Then identify 2-3 **impressive_workflows** — impressive accomplishments or effective workflows the user demonstrated. Focus on: +- Unique or effective ways they used AI tools +- Successful outcomes from their sessions +- Smart workflow patterns they've developed +Don't be fluffy or overly complimentary. Be specific about what made these impressive. + +RESPOND WITH ONLY A VALID JSON OBJECT: +{ + "intro": "1 sentence of context", + "impressive_workflows": [ + {"title": "Short title (3-6 words)", "description": "2-3 sentences describing the impressive workflow or approach. Use 'you' not 'the user'.", "impact": "One sentence about the concrete impact."} + ] +} + +Include 2-3 impressive workflows. 
+ +DATA: +{aggregate_json} + +SESSION SUMMARIES: +{summaries} diff --git a/src/crates/core/src/agentic/insights/service.rs b/src/crates/core/src/agentic/insights/service.rs new file mode 100644 index 00000000..ace6c46a --- /dev/null +++ b/src/crates/core/src/agentic/insights/service.rs @@ -0,0 +1,1591 @@ +use crate::agentic::insights::cancellation; +use crate::agentic::insights::collector::InsightsCollector; +use crate::agentic::insights::html::generate_html; +use crate::agentic::insights::types::*; +use crate::infrastructure::ai::get_global_ai_client_factory; +use crate::infrastructure::ai::AIClient; +use crate::infrastructure::events::{emit_global_event, BackendEvent}; +use crate::infrastructure::get_path_manager_arc; +use crate::service::config::get_global_config_service; +use crate::service::config::AppConfig; +use crate::service::workspace::get_global_workspace_service; +use crate::util::errors::{BitFunError, BitFunResult}; +use crate::util::types::Message; +use log::{debug, info, warn}; +use serde_json::Value; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; +use std::sync::Arc; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use tokio::sync::Semaphore; +use tokio_util::sync::CancellationToken; + +const FACET_PROMPT_TEMPLATE: &str = include_str!("prompts/facet_extraction.md"); +const SUGGESTIONS_PROMPT_TEMPLATE: &str = include_str!("prompts/suggestions.md"); +const AREAS_PROMPT_TEMPLATE: &str = include_str!("prompts/areas.md"); +const WINS_PROMPT_TEMPLATE: &str = include_str!("prompts/wins.md"); +const FRICTION_PROMPT_TEMPLATE: &str = include_str!("prompts/friction.md"); +const INTERACTION_STYLE_PROMPT_TEMPLATE: &str = include_str!("prompts/interaction_style.md"); +const AT_A_GLANCE_PROMPT_TEMPLATE: &str = include_str!("prompts/at_a_glance.md"); +const HORIZON_PROMPT_TEMPLATE: &str = include_str!("prompts/horizon.md"); +const FUN_ENDING_PROMPT_TEMPLATE: &str = include_str!("prompts/fun_ending.md"); + +const MAX_CONCURRENT_FACET_EXTRACTIONS: 
usize = 5; + +pub struct InsightsService; + +impl InsightsService { + async fn get_user_language() -> String { + match get_global_config_service().await { + Ok(config_service) => { + match config_service.get_config::(Some("app")).await { + Ok(app_config) => app_config.language, + Err(_) => "en-US".to_string(), + } + } + Err(_) => "en-US".to_string(), + } + } + + fn build_language_instruction(lang: &str) -> String { + let json_rule = concat!( + "\n\nCRITICAL JSON RULE: Inside JSON string values you MUST escape every literal double-quote as \\\".", + " Do NOT place unescaped \" characters inside string values.", + " For example, write \"he said \\\"hello\\\"\" instead of \"he said \"hello\"\".", + ); + + if lang.starts_with("en") { + json_rule.to_string() + } else { + let lang_name = match lang { + "zh-CN" => "Simplified Chinese (简体中文)", + "zh-TW" => "Traditional Chinese (繁體中文)", + "ja" | "ja-JP" => "Japanese (日本語)", + "ko" | "ko-KR" => "Korean (한국어)", + "fr" | "fr-FR" => "French (Français)", + "de" | "de-DE" => "German (Deutsch)", + "es" | "es-ES" => "Spanish (Español)", + "pt" | "pt-BR" => "Portuguese (Português)", + "ru" | "ru-RU" => "Russian (Русский)", + _ => lang, + }; + format!( + "\n\nIMPORTANT: All descriptive text, summaries, suggestions, and narrative content in your response MUST be written in {}. Keep JSON keys and enum values in English.{}", + lang_name, json_rule + ) + } + } + + /// Main entry: run the full insights pipeline + pub async fn generate(days: u32) -> BitFunResult { + let token = cancellation::register().await; + let result = Self::generate_inner(days, &token).await; + cancellation::unregister().await; + result + } + + /// Cancel the current insights generation. 
+ pub async fn cancel() -> Result<(), String> { + cancellation::cancel().await + } + + async fn generate_inner( + days: u32, + token: &CancellationToken, + ) -> BitFunResult { + let user_lang = Self::get_user_language().await; + let lang_instruction = Self::build_language_instruction(&user_lang); + debug!("Insights generation using language: {}", user_lang); + + // Stage 1: Data Collection + Self::emit_progress("Collecting session data...", "data_collection", 0, 0).await; + let (base_stats, transcripts) = InsightsCollector::collect(days).await?; + + if transcripts.is_empty() { + return Err(BitFunError::service( + "No sessions found in the specified time range", + )); + } + + info!( + "Collected {} sessions, {} messages", + transcripts.len(), + base_stats.total_messages + ); + + Self::check_cancelled(token)?; + + // Stage 2: Parallel Facet Extraction (fast model) + let ai_factory = get_global_ai_client_factory() + .await + .map_err(|e| BitFunError::service(format!("Failed to get AI client factory: {}", e)))?; + let ai_client_fast = ai_factory + .get_client_resolved("fast") + .await + .map_err(|e| BitFunError::service(format!("Failed to resolve fast model: {}", e)))?; + + // Primary model for analysis stages — falls back to fast if not configured + let ai_client_primary = match ai_factory.get_client_resolved("primary").await { + Ok(client) => client, + Err(_) => { + warn!("Primary model not configured, falling back to fast model for analysis"); + ai_client_fast.clone() + } + }; + + let facets = + Self::extract_facets_adaptive(&ai_client_fast, &transcripts, &lang_instruction, token) + .await?; + + info!("Extracted facets for {} sessions", facets.len()); + + Self::check_cancelled(token)?; + + // Stage 3: Aggregation (Rust-side, no AI) + Self::emit_progress("Aggregating analysis...", "aggregation", 0, 0).await; + let aggregate = InsightsCollector::aggregate(&base_stats, &facets); + + Self::check_cancelled(token)?; + + // Stage 4a: Parallel analysis (primary model) — 7 
independent tasks + Self::emit_progress("Analyzing patterns...", "analysis", 0, 0).await; + + let (suggestions, areas, wins_friction, interaction, horizon, fun_ending) = + Self::generate_analysis_parallel( + &ai_client_primary, + &aggregate, + &lang_instruction, + ) + .await; + + Self::check_cancelled(token)?; + + // Stage 4b: Synthesis (primary model) — at_a_glance depends on 4a results + Self::emit_progress("Writing summary...", "synthesis", 0, 0).await; + + let at_a_glance = Self::generate_synthesis( + &ai_client_primary, + &aggregate, + &suggestions, + &areas, + &wins_friction, + &interaction, + &lang_instruction, + ) + .await; + + Self::check_cancelled(token)?; + + // Stage 5: Assembly + Self::emit_progress("Assembling report...", "assembly", 0, 0).await; + let report = Self::assemble_report( + base_stats, + aggregate, + suggestions, + areas, + wins_friction, + interaction, + at_a_glance, + horizon, + fun_ending, + ); + + let report = Self::save_report(report, &user_lang).await?; + + Self::emit_progress("Complete!", "complete", 0, 0).await; + info!("Insights report generated successfully"); + + Ok(report) + } + + fn check_cancelled(token: &CancellationToken) -> BitFunResult<()> { + if token.is_cancelled() { + Err(BitFunError::service("Insights generation cancelled")) + } else { + Ok(()) + } + } + + // ============ Stage 2: Facet Extraction ============ + + async fn extract_facets_adaptive( + ai_client: &Arc, + transcripts: &[SessionTranscript], + lang_instruction: &str, + token: &CancellationToken, + ) -> BitFunResult> { + let total = transcripts.len(); + let semaphore = Arc::new(Semaphore::new(MAX_CONCURRENT_FACET_EXTRACTIONS)); + let counter = Arc::new(AtomicUsize::new(0)); + let rate_limited = Arc::new(AtomicBool::new(false)); + let cancelled = Arc::new(AtomicBool::new(false)); + + let handles: Vec<_> = transcripts + .iter() + .enumerate() + .map(|(idx, t)| { + let client = ai_client.clone(); + let sem = semaphore.clone(); + let transcript = t.clone(); + 
let cnt = counter.clone(); + let rl = rate_limited.clone(); + let cl = cancelled.clone(); + let lang = lang_instruction.to_string(); + let child_token = token.clone(); + + tokio::spawn(async move { + let _permit = sem + .acquire() + .await + .map_err(|e| BitFunError::service(format!("Semaphore error: {}", e)))?; + + if cl.load(Ordering::Relaxed) || child_token.is_cancelled() { + return Err(BitFunError::service("Insights generation cancelled")); + } + + if rl.load(Ordering::Relaxed) { + return Err(BitFunError::service("skipped_rate_limited")); + } + + let n = cnt.fetch_add(1, Ordering::Relaxed) + 1; + Self::emit_progress( + &format!("Analyzing session {}/{}...", n, total), + "facet_extraction", + n, + total, + ) + .await; + + let result = + Self::extract_single_facet(&client, &transcript, &lang).await; + + if let Err(ref e) = result { + if is_rate_limit_error(e) { + rl.store(true, Ordering::Relaxed); + } + } + + result.map(|facet| (idx, facet)) + }) + }) + .collect(); + + let mut facets = Vec::new(); + let mut failed_indices: Vec = Vec::new(); + let mut hit_rate_limit = false; + + for (idx, handle) in handles.into_iter().enumerate() { + if token.is_cancelled() { + return Err(BitFunError::service("Insights generation cancelled")); + } + match handle.await { + Ok(Ok((_orig_idx, facet))) => facets.push(facet), + Ok(Err(e)) => { + let err_str = e.to_string(); + if err_str.contains("cancelled") { + return Err(e); + } + if err_str.contains("skipped_rate_limited") || is_rate_limit_error(&e) { + hit_rate_limit = true; + failed_indices.push(idx); + } else { + warn!("Facet extraction failed for session {}: {}", idx, e); + } + } + Err(e) => warn!("Facet task panicked: {}", e), + } + } + + if hit_rate_limit && !failed_indices.is_empty() { + let retry_count = failed_indices.len(); + warn!( + "Rate limit detected, retrying {} sessions sequentially", + retry_count + ); + Self::emit_progress( + &format!( + "Rate limited. 
Retrying {} sessions sequentially...", + retry_count + ), + "facet_retry", + 0, + retry_count, + ) + .await; + + tokio::time::sleep(Duration::from_secs(3)).await; + + for (i, idx) in failed_indices.iter().enumerate() { + Self::check_cancelled(token)?; + + Self::emit_progress( + &format!("Retrying session {}/{}...", i + 1, retry_count), + "facet_retry", + i + 1, + retry_count, + ) + .await; + + match Self::extract_single_facet( + ai_client, + &transcripts[*idx], + lang_instruction, + ) + .await + { + Ok(facet) => facets.push(facet), + Err(e) => warn!( + "Sequential retry also failed for session {}: {}", + idx, e + ), + } + + if i + 1 < retry_count { + tokio::time::sleep(Duration::from_millis(500)).await; + } + } + } + + Ok(facets) + } + + async fn extract_single_facet( + ai_client: &Arc, + transcript: &SessionTranscript, + lang_instruction: &str, + ) -> BitFunResult { + let session_info = format!( + "Session: {}\nAgent: {}\nName: {}\nDate: {}\nDuration: {} min\n\n{}", + transcript.session_id, + transcript.agent_type, + transcript.session_name, + transcript.created_at, + transcript.duration_minutes, + transcript.transcript + ); + + let prompt = format!( + "{}{}", + FACET_PROMPT_TEMPLATE.replace("{session_transcript}", &session_info), + lang_instruction + ); + let messages = vec![Message::user(prompt)]; + + let response = ai_client + .send_message(messages, None) + .await + .map_err(|e| BitFunError::service(format!("AI call failed: {}", e)))?; + + let json_str = extract_json_from_response(&response.text)?; + let value: Value = serde_json::from_str(&json_str).map_err(|e| { + BitFunError::Deserialization(format!("Failed to parse facet JSON: {}", e)) + })?; + + Ok(SessionFacet { + session_id: transcript.session_id.clone(), + underlying_goal: value["underlying_goal"] + .as_str() + .unwrap_or("") + .to_string(), + goal_categories: parse_string_u32_map(&value["goal_categories"]), + outcome: value["outcome"].as_str().unwrap_or("unclear_from_transcript").to_string(), + 
user_satisfaction_counts: parse_string_u32_map(&value["user_satisfaction_counts"]), + claude_helpfulness: value["claude_helpfulness"] + .as_str() + .unwrap_or("moderately_helpful") + .to_string(), + session_type: value["session_type"] + .as_str() + .unwrap_or("single_task") + .to_string(), + friction_counts: parse_string_u32_map(&value["friction_counts"]), + friction_detail: value["friction_detail"] + .as_str() + .unwrap_or("") + .to_string(), + primary_success: value["primary_success"] + .as_str() + .unwrap_or("") + .to_string(), + brief_summary: value["brief_summary"] + .as_str() + .unwrap_or("") + .to_string(), + languages_used: value["languages_used"] + .as_array() + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().map(String::from)) + .collect() + }) + .unwrap_or_default(), + user_instructions: value["user_instructions"] + .as_array() + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().map(String::from)) + .collect() + }) + .unwrap_or_default(), + }) + } + + // ============ Stage 4a: Parallel Analysis ============ + + async fn generate_analysis_parallel( + ai_client: &Arc, + aggregate: &InsightsAggregate, + lang_instruction: &str, + ) -> (InsightsSuggestions, Vec, WinsFrictionResult, InteractionStyleResult, HorizonResult, Option) { + let aggregate_json = + serde_json::to_string_pretty(aggregate).unwrap_or_else(|_| "{}".to_string()); + let summaries_text = format!("- {}", aggregate.session_summaries.join("\n- ")); + let friction_text = format!("- {}", aggregate.friction_details.join("\n- ")); + + let semaphore = Arc::new(Semaphore::new(3)); + + // Task 1: Suggestions + let client_1 = ai_client.clone(); + let agg_1 = aggregate.clone(); + let lang_1 = lang_instruction.to_string(); + let sem_1 = semaphore.clone(); + let suggestions_handle = tokio::spawn(async move { + let _permit = sem_1.acquire().await.unwrap(); + Self::generate_suggestions(&client_1, &agg_1, &lang_1).await + }); + + // Task 2: Areas + let client_2 = ai_client.clone(); + let agg_2 = 
aggregate.clone(); + let lang_2 = lang_instruction.to_string(); + let sem_2 = semaphore.clone(); + let areas_handle = tokio::spawn(async move { + let _permit = sem_2.acquire().await.unwrap(); + Self::identify_areas(&client_2, &agg_2, &lang_2).await + }); + + // Task 3a: Wins + let client_3a = ai_client.clone(); + let agg_json_3a = aggregate_json.clone(); + let summaries_3a = summaries_text.clone(); + let lang_3a = lang_instruction.to_string(); + let sem_3a = semaphore.clone(); + let wins_handle = tokio::spawn(async move { + let _permit = sem_3a.acquire().await.unwrap(); + Self::analyze_wins(&client_3a, &agg_json_3a, &summaries_3a, &lang_3a).await + }); + + // Task 3b: Friction + let client_3b = ai_client.clone(); + let agg_json_3b = aggregate_json.clone(); + let summaries_3b = summaries_text.clone(); + let friction_3b = friction_text.clone(); + let lang_3b = lang_instruction.to_string(); + let sem_3b = semaphore.clone(); + let friction_handle = tokio::spawn(async move { + let _permit = sem_3b.acquire().await.unwrap(); + Self::analyze_friction(&client_3b, &agg_json_3b, &summaries_3b, &friction_3b, &lang_3b).await + }); + + // Task 4: Interaction Style + let client_4 = ai_client.clone(); + let agg_json_4 = aggregate_json.clone(); + let summaries_4 = summaries_text.clone(); + let lang_4 = lang_instruction.to_string(); + let sem_4 = semaphore.clone(); + let interaction_handle = tokio::spawn(async move { + let _permit = sem_4.acquire().await.unwrap(); + Self::analyze_interaction_style(&client_4, &agg_json_4, &summaries_4, &lang_4).await + }); + + // Task 5: Horizon + let client_5 = ai_client.clone(); + let agg_json_5 = aggregate_json.clone(); + let summaries_5 = summaries_text.clone(); + let friction_5 = friction_text.clone(); + let lang_5 = lang_instruction.to_string(); + let sem_5 = semaphore.clone(); + let horizon_handle = tokio::spawn(async move { + let _permit = sem_5.acquire().await.unwrap(); + Self::generate_horizon(&client_5, &agg_json_5, &summaries_5, 
&friction_5, &lang_5).await + }); + + // Task 6: Fun Ending + let client_6 = ai_client.clone(); + let agg_json_6 = aggregate_json.clone(); + let summaries_6 = summaries_text.clone(); + let lang_6 = lang_instruction.to_string(); + let sem_6 = semaphore.clone(); + let fun_ending_handle = tokio::spawn(async move { + let _permit = sem_6.acquire().await.unwrap(); + Self::generate_fun_ending(&client_6, &agg_json_6, &summaries_6, &lang_6).await + }); + + // Collect results with retry on transient failures + let suggestions = Self::resolve_with_retry( + suggestions_handle, + "Suggestions", + || async { Self::generate_suggestions(ai_client, aggregate, lang_instruction).await }, + || default_suggestions(), + ).await; + + let areas = Self::resolve_with_retry( + areas_handle, + "Areas", + || async { Self::identify_areas(ai_client, aggregate, lang_instruction).await }, + Vec::new, + ).await; + + let wins_result = Self::resolve_with_retry( + wins_handle, + "Wins", + || async { + Self::analyze_wins( + ai_client, &aggregate_json, &summaries_text, lang_instruction, + ).await + }, + WinsResult::default, + ).await; + + let friction_result = Self::resolve_with_retry( + friction_handle, + "Friction", + || async { + Self::analyze_friction( + ai_client, &aggregate_json, &summaries_text, &friction_text, lang_instruction, + ).await + }, + FrictionResult::default, + ).await; + + let wins_friction = WinsFrictionResult { + wins_intro: wins_result.intro, + big_wins: wins_result.big_wins, + friction_intro: friction_result.intro, + friction_categories: friction_result.friction_categories, + }; + + let interaction = Self::resolve_with_retry( + interaction_handle, + "Interaction Style", + || async { + Self::analyze_interaction_style( + ai_client, &aggregate_json, &summaries_text, lang_instruction, + ).await + }, + InteractionStyleResult::default, + ).await; + + let horizon = Self::resolve_with_retry( + horizon_handle, + "Horizon", + || async { + Self::generate_horizon( + ai_client, 
&aggregate_json, &summaries_text, &friction_text, lang_instruction, + ).await + }, + HorizonResult::default, + ).await; + + let fun_ending = Self::resolve_with_retry( + fun_ending_handle, + "Fun Ending", + || async { + Self::generate_fun_ending( + ai_client, &aggregate_json, &summaries_text, lang_instruction, + ).await + }, + || None, + ).await; + + (suggestions, areas, wins_friction, interaction, horizon, fun_ending) + } + + /// Generic helper to resolve a spawned task with retry on transient failures. + /// + /// Retries on rate-limit errors, empty AI responses, and JSON extraction failures. + async fn resolve_with_retry( + handle: tokio::task::JoinHandle>, + label: &str, + retry_fn: RetryFn, + default_fn: DefaultFn, + ) -> T + where + RetryFut: std::future::Future>, + RetryFn: FnOnce() -> RetryFut, + DefaultFn: FnOnce() -> T, + { + let result = handle + .await + .map_err(|e| BitFunError::service(format!("{} task panicked: {}", label, e))); + + match result { + Ok(Ok(val)) => val, + Ok(Err(e)) if is_retryable_error(&e) => { + warn!("{} failed (retryable): {}, retrying after delay", label, e); + Self::emit_progress( + &format!("Retrying {}...", label.to_lowercase()), + "analysis_retry", + 0, + 0, + ) + .await; + tokio::time::sleep(Duration::from_secs(3)).await; + retry_fn().await.unwrap_or_else(|e| { + warn!("{} retry failed: {}, using defaults", label, e); + default_fn() + }) + } + Ok(Err(e)) => { + warn!("{} failed: {}, using defaults", label, e); + default_fn() + } + Err(e) => { + warn!("{} task error: {}, using defaults", label, e); + default_fn() + } + } + } + + // ============ Stage 4b: Synthesis ============ + + async fn generate_synthesis( + ai_client: &Arc, + aggregate: &InsightsAggregate, + suggestions: &InsightsSuggestions, + areas: &[ProjectArea], + wins_friction: &WinsFrictionResult, + interaction: &InteractionStyleResult, + lang_instruction: &str, + ) -> AtAGlance { + let aggregate_json = + serde_json::to_string_pretty(aggregate).unwrap_or_else(|_| 
"{}".to_string()); + + let areas_text = areas + .iter() + .map(|a| format!("- {}: {}", a.name, a.description)) + .collect::>() + .join("\n"); + let suggestions_text = + serde_json::to_string_pretty(suggestions).unwrap_or_else(|_| "{}".to_string()); + let wins_friction_text = + serde_json::to_string_pretty(wins_friction).unwrap_or_else(|_| "{}".to_string()); + let interaction_text = + serde_json::to_string_pretty(interaction).unwrap_or_else(|_| "{}".to_string()); + + match Self::generate_at_a_glance( + ai_client, + &aggregate_json, + &areas_text, + &suggestions_text, + &wins_friction_text, + &interaction_text, + lang_instruction, + ) + .await + { + Ok(val) => val, + Err(e) => { + warn!("At a Glance generation failed: {}, using defaults", e); + AtAGlance::default() + } + } + } + + // ============ Individual Analysis Methods ============ + + async fn generate_suggestions( + ai_client: &Arc, + aggregate: &InsightsAggregate, + lang_instruction: &str, + ) -> BitFunResult { + let aggregate_json = + serde_json::to_string_pretty(aggregate).unwrap_or_else(|_| "{}".to_string()); + let summaries = aggregate.session_summaries.join("\n- "); + let friction_details = aggregate.friction_details.join("\n- "); + let user_instructions = if aggregate.user_instructions.is_empty() { + "None captured".to_string() + } else { + aggregate.user_instructions.join("\n- ") + }; + + let prompt = format!( + "{}{}", + SUGGESTIONS_PROMPT_TEMPLATE + .replace("{aggregate_json}", &aggregate_json) + .replace("{summaries}", &format!("- {}", summaries)) + .replace("{friction_details}", &format!("- {}", friction_details)) + .replace("{user_instructions}", &user_instructions), + lang_instruction + ); + + let messages = vec![Message::user(prompt)]; + let response = ai_client + .send_message(messages, None) + .await + .map_err(|e| BitFunError::service(format!("Suggestions AI call failed: {}", e)))?; + + info!("Suggestions response: len={}, finish={:?}", response.text.len(), response.finish_reason); + 
debug!("Suggestions text: {}", safe_truncate(&response.text, 300)); + + let json_str = extract_json_from_response(&response.text)?; + let value: Value = serde_json::from_str(&json_str).map_err(|e| { + BitFunError::Deserialization(format!( + "Failed to parse suggestions JSON: {}. Raw: {}", + e, + safe_truncate(&json_str, 500) + )) + })?; + + debug!( + "Suggestions parsed: md_additions={}, features={}, patterns={}", + value["bitfun_md_additions"].as_array().map(|a| a.len()).unwrap_or(0), + value["features_to_try"].as_array().map(|a| a.len()).unwrap_or(0), + value["usage_patterns"].as_array().map(|a| a.len()).unwrap_or(0), + ); + + Ok(InsightsSuggestions { + bitfun_md_additions: value["bitfun_md_additions"] + .as_array() + .map(|arr| { + arr.iter() + .filter_map(|v| { + Some(MdAddition { + section: v["section"].as_str()?.to_string(), + content: v["content"].as_str()?.to_string(), + rationale: v["rationale"] + .as_str() + .or(v["why"].as_str()) + .unwrap_or("") + .to_string(), + }) + }) + .collect() + }) + .unwrap_or_default(), + features_to_try: value["features_to_try"] + .as_array() + .map(|arr| { + arr.iter() + .filter_map(|v| { + Some(FeatureRecommendation { + feature: v["feature"].as_str()?.to_string(), + description: v["description"] + .as_str() + .or(v["one_liner"].as_str()) + .unwrap_or("") + .to_string(), + example_usage: v["example_usage"] + .as_str() + .or(v["example_code"].as_str()) + .unwrap_or("") + .to_string(), + benefit: v["benefit"] + .as_str() + .or(v["why_for_you"].as_str()) + .unwrap_or("") + .to_string(), + }) + }) + .collect() + }) + .unwrap_or_default(), + usage_patterns: value["usage_patterns"] + .as_array() + .map(|arr| { + arr.iter() + .filter_map(|v| { + Some(UsagePattern { + pattern: v["pattern"] + .as_str() + .or(v["title"].as_str()) + .unwrap_or("") + .to_string(), + description: v["description"] + .as_str() + .or(v["suggestion"].as_str()) + .unwrap_or("") + .to_string(), + detail: v["detail"] + .as_str() + .unwrap_or("") + .to_string(), 
+ suggested_prompt: v["suggested_prompt"] + .as_str() + .or(v["copyable_prompt"].as_str()) + .unwrap_or("") + .to_string(), + }) + }) + .collect() + }) + .unwrap_or_default(), + }) + } + + async fn identify_areas( + ai_client: &Arc, + aggregate: &InsightsAggregate, + lang_instruction: &str, + ) -> BitFunResult> { + let aggregate_json = + serde_json::to_string_pretty(aggregate).unwrap_or_else(|_| "{}".to_string()); + let summaries = aggregate.session_summaries.join("\n- "); + + let prompt = format!( + "{}{}", + AREAS_PROMPT_TEMPLATE + .replace("{aggregate_json}", &aggregate_json) + .replace("{summaries}", &format!("- {}", summaries)), + lang_instruction + ); + + let messages = vec![Message::user(prompt)]; + let response = ai_client + .send_message(messages, None) + .await + .map_err(|e| BitFunError::service(format!("Areas AI call failed: {}", e)))?; + + info!("Areas response: len={}, finish={:?}", response.text.len(), response.finish_reason); + debug!("Areas text: {}", safe_truncate(&response.text, 300)); + + let json_str = extract_json_from_response(&response.text)?; + let value: Value = serde_json::from_str(&json_str).map_err(|e| { + BitFunError::Deserialization(format!("Failed to parse areas JSON: {}", e)) + })?; + + Ok(value["areas"] + .as_array() + .map(|arr| { + arr.iter() + .filter_map(|v| { + Some(ProjectArea { + name: v["name"].as_str()?.to_string(), + session_count: v["session_count"].as_u64().unwrap_or(0) as u32, + description: v["description"].as_str()?.to_string(), + }) + }) + .collect() + }) + .unwrap_or_default()) + } + + async fn analyze_wins( + ai_client: &Arc, + aggregate_json: &str, + summaries: &str, + lang_instruction: &str, + ) -> BitFunResult { + let prompt = format!( + "{}{}", + WINS_PROMPT_TEMPLATE + .replace("{aggregate_json}", aggregate_json) + .replace("{summaries}", summaries), + lang_instruction + ); + + let messages = vec![Message::user(prompt)]; + let response = ai_client + .send_message(messages, None) + .await + .map_err(|e| 
BitFunError::service(format!("Wins AI call failed: {}", e)))?; + + info!("Wins response: len={}, finish={:?}", response.text.len(), response.finish_reason); + debug!("Wins text: {}", safe_truncate(&response.text, 300)); + + let json_str = extract_json_from_response(&response.text)?; + let value: Value = serde_json::from_str(&json_str).map_err(|e| { + BitFunError::Deserialization(format!("Failed to parse wins JSON: {}", e)) + })?; + + Ok(WinsResult { + intro: value["intro"].as_str().unwrap_or("").to_string(), + big_wins: value["impressive_workflows"] + .as_array() + .map(|arr| { + arr.iter() + .filter_map(|v| { + Some(BigWin { + title: v["title"].as_str()?.to_string(), + description: v["description"].as_str()?.to_string(), + impact: v["impact"].as_str().unwrap_or("").to_string(), + }) + }) + .collect() + }) + .unwrap_or_default(), + }) + } + + async fn analyze_friction( + ai_client: &Arc, + aggregate_json: &str, + summaries: &str, + friction_details: &str, + lang_instruction: &str, + ) -> BitFunResult { + let prompt = format!( + "{}{}", + FRICTION_PROMPT_TEMPLATE + .replace("{aggregate_json}", aggregate_json) + .replace("{summaries}", summaries) + .replace("{friction_details}", friction_details), + lang_instruction + ); + + let messages = vec![Message::user(prompt)]; + let response = ai_client + .send_message(messages, None) + .await + .map_err(|e| BitFunError::service(format!("Friction AI call failed: {}", e)))?; + + info!("Friction response: len={}, finish={:?}", response.text.len(), response.finish_reason); + debug!("Friction text: {}", safe_truncate(&response.text, 300)); + + let json_str = extract_json_from_response(&response.text)?; + let value: Value = serde_json::from_str(&json_str).map_err(|e| { + BitFunError::Deserialization(format!("Failed to parse friction JSON: {}", e)) + })?; + + Ok(FrictionResult { + intro: value["intro"].as_str().unwrap_or("").to_string(), + friction_categories: value["friction_categories"] + .as_array() + .map(|arr| { + arr.iter() + 
.filter_map(|v| { + Some(FrictionCategory { + category: v["category"].as_str()?.to_string(), + count: v["count"].as_u64().unwrap_or(0) as u32, + description: v["description"].as_str()?.to_string(), + examples: v["examples"] + .as_array() + .map(|a| { + a.iter() + .filter_map(|e| e.as_str().map(String::from)) + .collect() + }) + .unwrap_or_default(), + suggestion: v["suggestion"].as_str().unwrap_or("").to_string(), + }) + }) + .collect() + }) + .unwrap_or_default(), + }) + } + + async fn analyze_interaction_style( + ai_client: &Arc, + aggregate_json: &str, + summaries: &str, + lang_instruction: &str, + ) -> BitFunResult { + let prompt = format!( + "{}{}", + INTERACTION_STYLE_PROMPT_TEMPLATE + .replace("{aggregate_json}", aggregate_json) + .replace("{summaries}", summaries), + lang_instruction + ); + + let messages = vec![Message::user(prompt)]; + let response = ai_client + .send_message(messages, None) + .await + .map_err(|e| BitFunError::service(format!("Interaction Style AI call failed: {}", e)))?; + + info!("Interaction Style response: len={}, finish={:?}", response.text.len(), response.finish_reason); + debug!("Interaction Style text: {}", safe_truncate(&response.text, 300)); + + let json_str = extract_json_from_response(&response.text)?; + let value: Value = serde_json::from_str(&json_str).map_err(|e| { + BitFunError::Deserialization(format!("Failed to parse interaction style JSON: {}", e)) + })?; + + Ok(InteractionStyleResult { + narrative: value["narrative"].as_str().unwrap_or("").to_string(), + key_patterns: value["key_patterns"] + .as_array() + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().map(String::from)) + .collect() + }) + .unwrap_or_default(), + }) + } + + async fn generate_at_a_glance( + ai_client: &Arc, + aggregate_json: &str, + areas_text: &str, + suggestions_text: &str, + wins_friction_text: &str, + interaction_text: &str, + lang_instruction: &str, + ) -> BitFunResult { + let prompt = format!( + "{}{}", + AT_A_GLANCE_PROMPT_TEMPLATE + 
.replace("{aggregate_json}", aggregate_json) + .replace("{areas}", areas_text) + .replace("{suggestions}", suggestions_text) + .replace("{wins_and_friction}", wins_friction_text) + .replace("{interaction_style}", interaction_text), + lang_instruction + ); + + let messages = vec![Message::user(prompt)]; + let response = ai_client + .send_message(messages, None) + .await + .map_err(|e| BitFunError::service(format!("At a Glance AI call failed: {}", e)))?; + + info!("At a Glance response: len={}, finish={:?}", response.text.len(), response.finish_reason); + debug!("At a Glance text: {}", safe_truncate(&response.text, 300)); + + let json_str = extract_json_from_response(&response.text)?; + let value: Value = serde_json::from_str(&json_str).map_err(|e| { + BitFunError::Deserialization(format!("Failed to parse at-a-glance JSON: {}", e)) + })?; + + let looking_ahead = { + let v = json_value_to_string(&value["looking_ahead"]); + if v.is_empty() { + json_value_to_string(&value["ambitious_workflows"]) + } else { + v + } + }; + + Ok(AtAGlance { + whats_working: json_value_to_string(&value["whats_working"]), + whats_hindering: json_value_to_string(&value["whats_hindering"]), + quick_wins: json_value_to_string(&value["quick_wins"]), + looking_ahead, + }) + } + + async fn generate_horizon( + ai_client: &Arc, + aggregate_json: &str, + summaries: &str, + friction_details: &str, + lang_instruction: &str, + ) -> BitFunResult { + let prompt = format!( + "{}{}", + HORIZON_PROMPT_TEMPLATE + .replace("{aggregate_json}", aggregate_json) + .replace("{summaries}", summaries) + .replace("{friction_details}", friction_details), + lang_instruction + ); + + let messages = vec![Message::user(prompt)]; + let response = ai_client + .send_message(messages, None) + .await + .map_err(|e| BitFunError::service(format!("Horizon AI call failed: {}", e)))?; + + info!("Horizon response: len={}, finish={:?}", response.text.len(), response.finish_reason); + debug!("Horizon text: {}", 
safe_truncate(&response.text, 300)); + + let json_str = extract_json_from_response(&response.text)?; + let value: Value = serde_json::from_str(&json_str).map_err(|e| { + BitFunError::Deserialization(format!("Failed to parse horizon JSON: {}", e)) + })?; + + Ok(HorizonResult { + intro: value["intro"].as_str().unwrap_or("").to_string(), + opportunities: value["opportunities"] + .as_array() + .map(|arr| { + arr.iter() + .filter_map(|v| { + Some(HorizonWorkflow { + title: v["title"].as_str()?.to_string(), + whats_possible: v["whats_possible"].as_str()?.to_string(), + how_to_try: v["how_to_try"].as_str().unwrap_or("").to_string(), + copyable_prompt: v["copyable_prompt"].as_str().unwrap_or("").to_string(), + }) + }) + .collect() + }) + .unwrap_or_default(), + }) + } + + async fn generate_fun_ending( + ai_client: &Arc, + aggregate_json: &str, + summaries: &str, + lang_instruction: &str, + ) -> BitFunResult> { + let prompt = format!( + "{}{}", + FUN_ENDING_PROMPT_TEMPLATE + .replace("{aggregate_json}", aggregate_json) + .replace("{summaries}", summaries), + lang_instruction + ); + + let messages = vec![Message::user(prompt)]; + let response = ai_client + .send_message(messages, None) + .await + .map_err(|e| BitFunError::service(format!("Fun Ending AI call failed: {}", e)))?; + + info!("Fun Ending response: len={}, finish={:?}", response.text.len(), response.finish_reason); + debug!("Fun Ending text: {}", safe_truncate(&response.text, 300)); + + let json_str = extract_json_from_response(&response.text)?; + let value: Value = serde_json::from_str(&json_str).map_err(|e| { + BitFunError::Deserialization(format!("Failed to parse fun ending JSON: {}", e)) + })?; + + Ok(Some(FunEnding { + headline: value["headline"] + .as_str() + .or(value["title"].as_str()) + .unwrap_or("") + .to_string(), + detail: value["detail"] + .as_str() + .or(value["message"].as_str()) + .unwrap_or("") + .to_string(), + })) + } + + // ============ Stage 5: Assembly ============ + + fn assemble_report( + 
_base_stats: BaseStats, + aggregate: InsightsAggregate, + suggestions: InsightsSuggestions, + areas: Vec, + wins_friction: WinsFrictionResult, + interaction: InteractionStyleResult, + at_a_glance: AtAGlance, + horizon: HorizonResult, + fun_ending: Option, + ) -> InsightsReport { + let days_covered = if !aggregate.date_range.start.is_empty() + && !aggregate.date_range.end.is_empty() + { + let parse = |s: &str| -> Option> { + chrono::DateTime::parse_from_rfc3339(s) + .ok() + .map(|d| d.with_timezone(&chrono::Utc)) + }; + match ( + parse(&aggregate.date_range.start), + parse(&aggregate.date_range.end), + ) { + (Some(start), Some(end)) => { + end.signed_duration_since(start).num_days().unsigned_abs() as u32 + } + _ => 1, + } + .max(1) + } else { + 1 + }; + + InsightsReport { + generated_at: SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(), + date_range: aggregate.date_range.clone(), + total_sessions: aggregate.sessions, + analyzed_sessions: aggregate.analyzed, + total_messages: aggregate.messages, + days_covered, + stats: InsightsStats { + total_hours: aggregate.hours, + msgs_per_day: aggregate.msgs_per_day, + top_tools: aggregate.top_tools.clone(), + top_goals: aggregate.top_goals.clone(), + outcomes: aggregate.outcomes.clone(), + satisfaction: aggregate.satisfaction.clone(), + session_types: aggregate.session_types.clone(), + languages: aggregate.languages.clone(), + hour_counts: aggregate.hour_counts.clone(), + agent_types: aggregate.agent_types.clone(), + response_time_buckets: aggregate.response_time_buckets.clone(), + median_response_time_secs: aggregate.median_response_time_secs, + avg_response_time_secs: aggregate.avg_response_time_secs, + friction: aggregate.friction.clone(), + success: aggregate.success.clone(), + tool_errors: aggregate.tool_errors.clone(), + total_lines_added: aggregate.total_lines_added, + total_lines_removed: aggregate.total_lines_removed, + total_files_modified: aggregate.total_files_modified, + }, + 
at_a_glance, + interaction_style: InteractionStyle { + narrative: interaction.narrative, + key_patterns: interaction.key_patterns, + }, + project_areas: areas, + wins_intro: wins_friction.wins_intro, + big_wins: wins_friction.big_wins, + friction_intro: wins_friction.friction_intro, + friction_categories: wins_friction.friction_categories, + suggestions, + horizon_intro: horizon.intro, + on_the_horizon: horizon.opportunities, + fun_ending, + html_report_path: None, + } + } + + // ============ Save / Load / Utility ============ + + async fn save_report(mut report: InsightsReport, locale: &str) -> BitFunResult { + let path_manager = get_path_manager_arc(); + let usage_dir = path_manager.user_data_dir().join("usage-data"); + tokio::fs::create_dir_all(&usage_dir) + .await + .map_err(|e| BitFunError::io(format!("Failed to create usage-data dir: {}", e)))?; + + let timestamp = report.generated_at; + + let html_content = generate_html(&report, locale); + let html_path = usage_dir.join(format!("insights-{}.html", timestamp)); + tokio::fs::write(&html_path, &html_content) + .await + .map_err(|e| BitFunError::io(format!("Failed to write HTML report: {}", e)))?; + + report.html_report_path = Some(html_path.to_string_lossy().to_string()); + + let json_path = usage_dir.join(format!("insights-{}.json", timestamp)); + let json_str = serde_json::to_string_pretty(&report) + .map_err(|e| BitFunError::serialization(format!("Failed to serialize report: {}", e)))?; + tokio::fs::write(&json_path, &json_str) + .await + .map_err(|e| BitFunError::io(format!("Failed to write report JSON: {}", e)))?; + + info!( + "Report saved: json={}, html={}", + json_path.display(), + html_path.display() + ); + + Self::cleanup_old_reports(&usage_dir, 5).await; + + Ok(report) + } + + async fn cleanup_old_reports(usage_dir: &std::path::Path, keep: usize) { + let mut entries = match tokio::fs::read_dir(usage_dir).await { + Ok(dir) => dir, + Err(_) => return, + }; + + let mut json_files: Vec = Vec::new(); + 
while let Ok(Some(entry)) = entries.next_entry().await { + let name = entry.file_name().to_string_lossy().to_string(); + if name.starts_with("insights-") && name.ends_with(".json") { + json_files.push(entry.path()); + } + } + + json_files.sort(); + json_files.reverse(); + + for old in json_files.into_iter().skip(keep) { + let _ = tokio::fs::remove_file(&old).await; + let html = old.with_extension("html"); + let _ = tokio::fs::remove_file(&html).await; + } + } + + pub async fn has_data(days: u32) -> BitFunResult { + let path_manager = get_path_manager_arc(); + let pm = PersistenceManager::new(path_manager)?; + let cutoff = SystemTime::now() - std::time::Duration::from_secs(days as u64 * 86400); + + if let Some(ws_service) = get_global_workspace_service() { + let workspaces = ws_service.list_workspaces().await; + for ws in workspaces { + if !ws.root_path.join(".bitfun").join("sessions").exists() { + continue; + } + if let Ok(sessions) = pm.list_sessions(&ws.root_path).await { + if sessions.iter().any(|s| s.last_activity_at >= cutoff) { + return Ok(true); + } + } + } + } + + Ok(false) + } + + pub async fn load_report(path: &str) -> BitFunResult { + let json_str = tokio::fs::read_to_string(path) + .await + .map_err(|e| BitFunError::io(format!("Failed to read report file: {}", e)))?; + let report: InsightsReport = serde_json::from_str(&json_str) + .map_err(|e| BitFunError::Deserialization(format!("Failed to parse report: {}", e)))?; + Ok(report) + } + + pub async fn load_latest_reports() -> BitFunResult> { + let path_manager = get_path_manager_arc(); + let usage_dir = path_manager.user_data_dir().join("usage-data"); + + if !usage_dir.exists() { + return Ok(vec![]); + } + + let mut entries = tokio::fs::read_dir(&usage_dir) + .await + .map_err(|e| BitFunError::io(format!("Failed to read usage-data dir: {}", e)))?; + + let mut json_files: Vec = Vec::new(); + while let Ok(Some(entry)) = entries.next_entry().await { + let name = 
entry.file_name().to_string_lossy().to_string(); + if name.starts_with("insights-") && name.ends_with(".json") { + json_files.push(entry.path()); + } + } + + json_files.sort(); + json_files.reverse(); + + let mut reports = Vec::new(); + for json_path in json_files.iter().take(10) { + match tokio::fs::read_to_string(json_path).await { + Ok(json_str) => match serde_json::from_str::(&json_str) { + Ok(report) => { + let top_goals: Vec = report + .stats + .top_goals + .iter() + .take(3) + .map(|(name, _)| name.clone()) + .collect(); + let mut lang_entries: Vec<_> = + report.stats.languages.iter().collect(); + lang_entries + .sort_by(|(_, a), (_, b)| b.cmp(a)); + let languages: Vec = lang_entries + .iter() + .take(3) + .map(|(name, _)| name.to_string()) + .collect(); + + reports.push(InsightsReportMeta { + generated_at: report.generated_at, + total_sessions: report.total_sessions, + analyzed_sessions: report.analyzed_sessions, + date_range: report.date_range, + path: json_path.to_string_lossy().to_string(), + total_messages: report.total_messages, + days_covered: report.days_covered, + total_hours: report.stats.total_hours, + top_goals, + languages, + }); + } + Err(e) => { + warn!("Failed to parse report {}: {}", json_path.display(), e); + } + }, + Err(e) => { + warn!("Failed to read report {}: {}", json_path.display(), e); + } + } + } + + Ok(reports) + } + + async fn emit_progress(message: &str, stage: &str, current: usize, total: usize) { + let payload = serde_json::json!({ + "message": message, + "stage": stage, + "current": current, + "total": total, + }); + if let Err(e) = emit_global_event(BackendEvent::Custom { + event_name: "insights-progress".to_string(), + payload, + }) + .await + { + debug!("Failed to emit progress event: {}", e); + } + } +} + +use crate::agentic::persistence::PersistenceManager; + +// ============ Intermediate result types (internal to service) ============ + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +struct 
WinsFrictionResult { + #[serde(default)] + wins_intro: String, + big_wins: Vec, + #[serde(default)] + friction_intro: String, + friction_categories: Vec, +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +struct WinsResult { + intro: String, + big_wins: Vec, +} + +impl WinsResult { + fn default() -> Self { + Self { + intro: String::new(), + big_wins: Vec::new(), + } + } +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +struct FrictionResult { + intro: String, + friction_categories: Vec, +} + +impl FrictionResult { + fn default() -> Self { + Self { + intro: String::new(), + friction_categories: Vec::new(), + } + } +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +struct InteractionStyleResult { + narrative: String, + key_patterns: Vec, +} + +impl InteractionStyleResult { + fn default() -> Self { + Self { + narrative: String::new(), + key_patterns: Vec::new(), + } + } +} + +#[derive(Debug, Clone)] +struct HorizonResult { + intro: String, + opportunities: Vec, +} + +impl HorizonResult { + fn default() -> Self { + Self { + intro: String::new(), + opportunities: Vec::new(), + } + } +} + +impl AtAGlance { + fn default() -> Self { + Self { + whats_working: "Analysis in progress...".to_string(), + whats_hindering: String::new(), + quick_wins: String::new(), + looking_ahead: String::new(), + } + } +} + +// ============ Helper functions ============ + +fn is_rate_limit_error(e: &BitFunError) -> bool { + let msg = e.to_string().to_lowercase(); + msg.contains("429") + || msg.contains("rate limit") + || msg.contains("too many requests") + || msg.contains("rate_limit") +} + +fn is_retryable_error(e: &BitFunError) -> bool { + if is_rate_limit_error(e) { + return true; + } + let msg = e.to_string().to_lowercase(); + msg.contains("cannot extract json") + || msg.contains("sse stream closed") + || msg.contains("stream closed before") + || msg.contains("connection reset") +} + +fn default_suggestions() -> InsightsSuggestions { + 
InsightsSuggestions { + bitfun_md_additions: Vec::new(), + features_to_try: Vec::new(), + usage_patterns: Vec::new(), + } +} + +fn parse_string_u32_map(value: &Value) -> std::collections::HashMap { + let mut map = std::collections::HashMap::new(); + if let Some(obj) = value.as_object() { + for (k, v) in obj { + if let Some(n) = v.as_u64() { + map.insert(k.clone(), n as u32); + } else if let Some(n) = v.as_f64() { + map.insert(k.clone(), n as u32); + } + } + } + map +} + +fn safe_truncate(s: &str, max_bytes: usize) -> &str { + if s.len() <= max_bytes { + return s; + } + let mut end = max_bytes; + while end > 0 && !s.is_char_boundary(end) { + end -= 1; + } + &s[..end] +} + +fn extract_json_from_response(response: &str) -> BitFunResult { + crate::util::extract_json_from_ai_response(response).ok_or_else(|| { + BitFunError::service("Cannot extract JSON from AI response") + }) +} + +/// Extract a string from a JSON value that may be a plain string or a nested object. +/// When the value is an object, concatenate all string values with spaces. 
+fn json_value_to_string(value: &Value) -> String { + match value { + Value::String(s) => s.clone(), + Value::Object(map) => map + .values() + .filter_map(|v| match v { + Value::String(s) => Some(s.as_str()), + _ => None, + }) + .collect::>() + .join(" "), + Value::Array(arr) => arr + .iter() + .filter_map(|v| v.as_str()) + .collect::>() + .join(" "), + _ => String::new(), + } +} diff --git a/src/crates/core/src/agentic/insights/types.rs b/src/crates/core/src/agentic/insights/types.rs new file mode 100644 index 00000000..66168158 --- /dev/null +++ b/src/crates/core/src/agentic/insights/types.rs @@ -0,0 +1,296 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +// ============ Stage 1: Data Collection ============ + +/// Compact session transcript built from PersistenceManager data +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SessionTranscript { + pub session_id: String, + pub agent_type: String, + pub session_name: String, + pub workspace_path: Option, + pub duration_minutes: u64, + pub message_count: u32, + pub turn_count: u32, + pub created_at: String, + /// Compact text transcript ([User]: ... [Tool: xxx] [Assistant]: ...) 
+ pub transcript: String, + pub tool_names: Vec, + pub has_errors: bool, +} + +/// Basic statistics accumulated during data collection (pre-AI) +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct BaseStats { + pub total_sessions: u32, + pub total_messages: u32, + pub total_turns: u32, + pub total_duration_minutes: u64, + pub first_session_at: Option, + pub last_session_at: Option, + pub tool_usage: HashMap, + pub tool_errors: HashMap, + pub hour_counts: HashMap, + pub agent_types: HashMap, + /// Raw response time intervals in seconds (intermediate, not serialized to report) + #[serde(skip)] + pub response_times_raw: Vec, + #[serde(default)] + pub response_time_buckets: HashMap, + #[serde(default)] + pub median_response_time_secs: Option, + #[serde(default)] + pub avg_response_time_secs: Option, + #[serde(default)] + pub total_lines_added: usize, + #[serde(default)] + pub total_lines_removed: usize, + #[serde(default)] + pub total_files_modified: usize, +} + +// ============ Stage 2: Facet Extraction (AI) ============ + +/// AI-extracted facets per session (aligned with Claude Code) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SessionFacet { + pub session_id: String, + pub underlying_goal: String, + pub goal_categories: HashMap, + /// fully_achieved | partially_achieved | abandoned | unknown + pub outcome: String, + pub user_satisfaction_counts: HashMap, + pub claude_helpfulness: String, + pub session_type: String, + pub friction_counts: HashMap, + pub friction_detail: String, + pub primary_success: String, + pub brief_summary: String, + #[serde(default)] + pub languages_used: Vec, + #[serde(default)] + pub user_instructions: Vec, +} + +// ============ Stage 3: Aggregation ============ + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DateRange { + pub start: String, + pub end: String, +} + +/// Aggregated data from all sessions (Rust-side computation) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct 
InsightsAggregate { + pub sessions: u32, + pub analyzed: u32, + pub date_range: DateRange, + pub messages: u32, + pub hours: f32, + pub top_tools: Vec<(String, u32)>, + pub top_goals: Vec<(String, u32)>, + pub outcomes: HashMap, + pub satisfaction: HashMap, + pub friction: HashMap, + pub success: HashMap, + pub languages: HashMap, + pub session_summaries: Vec, + pub friction_details: Vec, + pub user_instructions: Vec, + pub session_types: HashMap, + pub tool_errors: HashMap, + pub hour_counts: HashMap, + pub agent_types: HashMap, + pub msgs_per_day: f32, + #[serde(default)] + pub response_time_buckets: HashMap, + #[serde(default)] + pub median_response_time_secs: Option, + #[serde(default)] + pub avg_response_time_secs: Option, + #[serde(default)] + pub total_lines_added: usize, + #[serde(default)] + pub total_lines_removed: usize, + #[serde(default)] + pub total_files_modified: usize, +} + +// ============ Stage 4: AI Analysis Results ============ + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AtAGlance { + pub whats_working: String, + pub whats_hindering: String, + pub quick_wins: String, + pub looking_ahead: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct InteractionStyle { + pub narrative: String, + pub key_patterns: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ProjectArea { + pub name: String, + pub session_count: u32, + pub description: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BigWin { + pub title: String, + pub description: String, + pub impact: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FrictionCategory { + pub category: String, + pub count: u32, + pub description: String, + pub examples: Vec, + pub suggestion: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MdAddition { + pub section: String, + pub content: String, + pub rationale: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub 
struct FeatureRecommendation { + pub feature: String, + pub description: String, + pub example_usage: String, + pub benefit: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UsagePattern { + pub pattern: String, + pub description: String, + #[serde(default)] + pub detail: String, + pub suggested_prompt: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct InsightsSuggestions { + pub bitfun_md_additions: Vec, + pub features_to_try: Vec, + pub usage_patterns: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct HorizonWorkflow { + pub title: String, + pub whats_possible: String, + pub how_to_try: String, + #[serde(default)] + pub copyable_prompt: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FunEnding { + pub headline: String, + pub detail: String, +} + +// ============ Stage 5: Final Report ============ + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct InsightsStats { + pub total_hours: f32, + pub msgs_per_day: f32, + pub top_tools: Vec<(String, u32)>, + pub top_goals: Vec<(String, u32)>, + pub outcomes: HashMap, + pub satisfaction: HashMap, + pub session_types: HashMap, + pub languages: HashMap, + pub hour_counts: HashMap, + pub agent_types: HashMap, + #[serde(default)] + pub response_time_buckets: HashMap, + #[serde(default)] + pub median_response_time_secs: Option, + #[serde(default)] + pub avg_response_time_secs: Option, + #[serde(default)] + pub friction: HashMap, + #[serde(default)] + pub success: HashMap, + #[serde(default)] + pub tool_errors: HashMap, + #[serde(default)] + pub total_lines_added: usize, + #[serde(default)] + pub total_lines_removed: usize, + #[serde(default)] + pub total_files_modified: usize, +} + +/// The final insights report (shared between backend and frontend) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct InsightsReport { + pub generated_at: u64, + pub date_range: DateRange, + pub total_sessions: u32, + pub 
analyzed_sessions: u32, + pub total_messages: u32, + pub days_covered: u32, + + pub stats: InsightsStats, + + pub at_a_glance: AtAGlance, + pub interaction_style: InteractionStyle, + pub project_areas: Vec, + #[serde(default)] + pub wins_intro: String, + pub big_wins: Vec, + #[serde(default)] + pub friction_intro: String, + pub friction_categories: Vec, + pub suggestions: InsightsSuggestions, + #[serde(default)] + pub horizon_intro: String, + pub on_the_horizon: Vec, + pub fun_ending: Option, + + /// Path to the generated HTML file + pub html_report_path: Option, +} + +/// Metadata for listing saved reports +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct InsightsReportMeta { + pub generated_at: u64, + pub total_sessions: u32, + pub analyzed_sessions: u32, + pub date_range: DateRange, + pub path: String, + #[serde(default)] + pub total_messages: u32, + #[serde(default)] + pub days_covered: u32, + #[serde(default)] + pub total_hours: f32, + #[serde(default)] + pub top_goals: Vec, + #[serde(default)] + pub languages: Vec, +} + +// ============ API Request/Response ============ + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct GenerateInsightsRequest { + pub days: Option, +} diff --git a/src/crates/core/src/agentic/mod.rs b/src/crates/core/src/agentic/mod.rs index e9513a9f..b44a4a69 100644 --- a/src/crates/core/src/agentic/mod.rs +++ b/src/crates/core/src/agentic/mod.rs @@ -31,6 +31,9 @@ pub mod workspace; mod util; +// Insights module +pub mod insights; + pub use agents::*; pub use coordination::*; pub use core::*; diff --git a/src/crates/core/src/function_agents/git-func-agent/ai_service.rs b/src/crates/core/src/function_agents/git-func-agent/ai_service.rs index f2d18259..a9c86acd 100644 --- a/src/crates/core/src/function_agents/git-func-agent/ai_service.rs +++ b/src/crates/core/src/function_agents/git-func-agent/ai_service.rs @@ -114,11 +114,12 @@ impl AIAnalysisService { } fn parse_commit_response(&self, response: &str) -> AgentResult { - 
let json_str = self.extract_json_from_response(response)?; + let json_str = crate::util::extract_json_from_ai_response(response) + .ok_or_else(|| AgentError::analysis_error("Cannot extract JSON from response"))?; + + let value: Value = serde_json::from_str(&json_str) + .map_err(|e| AgentError::analysis_error(format!("Failed to parse AI response: {}", e)))?; - let value: Value = serde_json::from_str(&json_str).map_err(|e| { - AgentError::analysis_error(format!("Failed to parse AI response: {}", e)) - })?; Ok(AICommitAnalysis { commit_type: self.parse_commit_type(value["type"].as_str().unwrap_or("chore"))?, @@ -137,33 +138,6 @@ impl AIAnalysisService { }) } - fn extract_json_from_response(&self, response: &str) -> AgentResult { - let trimmed = response.trim(); - - if trimmed.starts_with('{') { - return Ok(trimmed.to_string()); - } - - if let Some(start) = trimmed.find("```json") { - let json_start = start + 7; - if let Some(end_offset) = trimmed[json_start..].find("```") { - let json_end = json_start + end_offset; - let json_str = trimmed[json_start..json_end].trim(); - return Ok(json_str.to_string()); - } - } - - if let Some(start) = trimmed.find('{') { - if let Some(end) = trimmed.rfind('}') { - let json_str = &trimmed[start..=end]; - return Ok(json_str.to_string()); - } - } - - Err(AgentError::analysis_error( - "Cannot extract JSON from response", - )) - } fn truncate_diff_if_needed(&self, diff: &str, max_chars: usize) -> String { if diff.len() <= max_chars { diff --git a/src/crates/core/src/function_agents/startchat-func-agent/ai_service.rs b/src/crates/core/src/function_agents/startchat-func-agent/ai_service.rs index 57fca80a..9f0447f4 100644 --- a/src/crates/core/src/function_agents/startchat-func-agent/ai_service.rs +++ b/src/crates/core/src/function_agents/startchat-func-agent/ai_service.rs @@ -139,25 +139,20 @@ impl AIWorkStateService { } fn parse_complete_analysis(&self, response: &str) -> AgentResult { - let json_str = if let Some(start) = 
response.find('{') { - if let Some(end) = response.rfind('}') { - &response[start..=end] - } else { - response - } - } else { - response - }; + let json_str = crate::util::extract_json_from_ai_response(response) + .ok_or_else(|| { + error!("Failed to extract JSON from analysis response: {}", response); + AgentError::internal_error("Failed to extract JSON from analysis response") + })?; debug!("Parsing JSON response: length={}", json_str.len()); - let parsed: serde_json::Value = serde_json::from_str(json_str).map_err(|e| { - error!( - "Failed to parse complete analysis response: {}, response: {}", - e, response - ); - AgentError::internal_error(format!("Failed to parse complete analysis response: {}", e)) - })?; + let parsed: serde_json::Value = serde_json::from_str(&json_str) + .map_err(|e| { + error!("Failed to parse complete analysis response: {}, response: {}", e, response); + AgentError::internal_error(format!("Failed to parse complete analysis response: {}", e)) + })?; + let summary = parsed["summary"] .as_str() diff --git a/src/crates/core/src/util/json_extract.rs b/src/crates/core/src/util/json_extract.rs new file mode 100644 index 00000000..7c9b2f17 --- /dev/null +++ b/src/crates/core/src/util/json_extract.rs @@ -0,0 +1,416 @@ +/// Robust JSON extraction from AI model responses. +/// +/// AI models often wrap JSON in markdown code blocks (`` ```json ... ``` ``), +/// or include leading/trailing prose. This module provides a single public +/// helper that handles all common formats and falls back gracefully. +/// +/// When the extracted text is not valid JSON (e.g. the model emitted unescaped +/// quotes inside string values), a best-effort repair pass is attempted before +/// giving up. + +use log::{debug, warn}; + +/// Extract a JSON object string from an AI response. +/// +/// Tries the following strategies in order: +/// 1. Raw JSON — response starts with `{` after trimming. +/// 2. Markdown code block — `` ```json\n...\n``` `` or `` ```\n...\n``` ``. 
+/// 3. Zhipu AI box format — `<|begin_of_box|>...<|end_of_box|>`. +/// 4. Greedy brace match — first `{` to last `}`. +/// +/// Each candidate is validated with `serde_json::from_str`. If validation +/// fails, a repair pass ([`try_repair_json`]) is attempted before moving on. +pub fn extract_json_from_ai_response(response: &str) -> Option { + let trimmed = response.trim(); + + if trimmed.is_empty() { + return None; + } + + // Collect candidates from the various extraction strategies. + let mut candidates: Vec = Vec::new(); + + // Strategy 1: raw JSON object + if trimmed.starts_with('{') { + candidates.push(trimmed.to_string()); + } + + // Strategy 2: markdown code blocks (```json ... ``` or ``` ... ```) + if let Some(extracted) = extract_from_code_block(trimmed) { + candidates.push(extracted); + } + + // Strategy 3: Zhipu AI box format + if let Some(extracted) = extract_from_zhipu_box(trimmed) { + candidates.push(extracted); + } + + // Strategy 4: greedy first-`{` to last-`}` + if let Some(extracted) = extract_greedy_braces(trimmed) { + candidates.push(extracted); + } + + // First pass: try each candidate as-is. + for candidate in &candidates { + if serde_json::from_str::(candidate).is_ok() { + return Some(candidate.clone()); + } + } + + // Second pass: attempt repair on each candidate. + for candidate in &candidates { + if let Some(repaired) = try_repair_json(candidate) { + debug!("JSON repair succeeded (original length={}, repaired length={})", candidate.len(), repaired.len()); + return Some(repaired); + } + } + + warn!( + "Cannot extract valid JSON from AI response (length={}). 
Preview: {}", + response.len(), + safe_preview(trimmed, 300), + ); + None +} + +fn extract_from_code_block(text: &str) -> Option { + let start_markers = ["```json\n", "```json\r\n", "```\n", "```\r\n"]; + + for marker in &start_markers { + if let Some(start_idx) = text.find(marker) { + let content_start = start_idx + marker.len(); + if let Some(end_offset) = text[content_start..].find("```") { + let json_str = text[content_start..content_start + end_offset].trim(); + if !json_str.is_empty() { + return Some(json_str.to_string()); + } + } + } + } + None +} + +fn extract_from_zhipu_box(text: &str) -> Option { + let begin_tag = "<|begin_of_box|>"; + let end_tag = "<|end_of_box|>"; + if let Some(start_idx) = text.find(begin_tag) { + let content_start = start_idx + begin_tag.len(); + if let Some(end_offset) = text[content_start..].find(end_tag) { + let json_str = text[content_start..content_start + end_offset].trim(); + if !json_str.is_empty() { + return Some(json_str.to_string()); + } + } + } + None +} + +fn extract_greedy_braces(text: &str) -> Option { + let start = text.find('{')?; + let end = text.rfind('}')?; + if end > start { + Some(text[start..=end].to_string()) + } else { + None + } +} + +/// Best-effort repair of malformed JSON produced by AI models. +/// +/// Common breakage: the model writes unescaped `"` inside string values +/// (e.g. Chinese text like `"你到底是什么模型"` where the inner quotes are +/// plain ASCII U+0022). This function walks the JSON character-by-character, +/// tracking brace/bracket depth and string state, and escapes interior quotes +/// that would otherwise break the parse. 
+fn try_repair_json(input: &str) -> Option { + let trimmed = input.trim(); + if !trimmed.starts_with('{') || !trimmed.ends_with('}') { + return None; + } + + let mut out = String::with_capacity(trimmed.len() + 64); + let chars: Vec = trimmed.chars().collect(); + let len = chars.len(); + let mut i = 0; + + // We track whether we are inside a JSON string that was opened by a + // *structural* quote (i.e. one that is part of JSON syntax). + let mut in_string = false; + + while i < len { + let ch = chars[i]; + + if !in_string { + out.push(ch); + if ch == '"' { + in_string = true; + } + i += 1; + continue; + } + + // We are inside a string. + if ch == '\\' { + // Escape sequence — copy verbatim. + out.push(ch); + if i + 1 < len { + out.push(chars[i + 1]); + i += 2; + } else { + i += 1; + } + continue; + } + + if ch == '"' { + // Is this the *closing* structural quote, or a rogue interior quote? + // Heuristic: look at what follows the quote (skipping whitespace). + // A structural close-quote is followed by `,` `}` `]` `:` or EOF. + let next_significant = next_non_whitespace(&chars, i + 1); + if is_structural_follower(next_significant) { + // Structural close. + out.push('"'); + in_string = false; + } else { + // Rogue interior quote — escape it. + out.push('\\'); + out.push('"'); + } + i += 1; + continue; + } + + out.push(ch); + i += 1; + } + + if serde_json::from_str::(&out).is_ok() { + Some(out) + } else { + None + } +} + +fn next_non_whitespace(chars: &[char], start: usize) -> Option { + chars[start..].iter().find(|c| !c.is_ascii_whitespace()).copied() +} + +/// Characters that legitimately follow a closing `"` in JSON. 
+fn is_structural_follower(ch: Option) -> bool { + match ch { + None => true, // EOF + Some(',' | '}' | ']' | ':') => true, + _ => false, + } +} + +fn safe_preview(s: &str, max_bytes: usize) -> &str { + if s.len() <= max_bytes { + return s; + } + let mut end = max_bytes; + while end > 0 && !s.is_char_boundary(end) { + end -= 1; + } + &s[..end] +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn raw_json_object() { + let input = r#"{"key": "value"}"#; + assert_eq!( + extract_json_from_ai_response(input), + Some(input.to_string()) + ); + } + + #[test] + fn json_in_code_block() { + let input = "```json\n{\"key\": \"value\"}\n```"; + assert_eq!( + extract_json_from_ai_response(input), + Some(r#"{"key": "value"}"#.to_string()) + ); + } + + #[test] + fn json_in_plain_code_block() { + let input = "```\n{\"key\": \"value\"}\n```"; + assert_eq!( + extract_json_from_ai_response(input), + Some(r#"{"key": "value"}"#.to_string()) + ); + } + + #[test] + fn json_with_leading_prose() { + let input = "Here is the result:\n{\"key\": \"value\"}"; + assert_eq!( + extract_json_from_ai_response(input), + Some(r#"{"key": "value"}"#.to_string()) + ); + } + + #[test] + fn json_with_trailing_prose() { + let input = "{\"key\": \"value\"}\nHope this helps!"; + assert_eq!( + extract_json_from_ai_response(input), + Some(r#"{"key": "value"}"#.to_string()) + ); + } + + #[test] + fn zhipu_box_format() { + let input = "<|begin_of_box|>{\"key\": \"value\"}<|end_of_box|>"; + assert_eq!( + extract_json_from_ai_response(input), + Some(r#"{"key": "value"}"#.to_string()) + ); + } + + #[test] + fn nested_json_with_arrays() { + let input = "```json\n{\"items\": [{\"name\": \"a\"}, {\"name\": \"b\"}]}\n```"; + assert_eq!( + extract_json_from_ai_response(input), + Some(r#"{"items": [{"name": "a"}, {"name": "b"}]}"#.to_string()) + ); + } + + #[test] + fn multiline_json_in_code_block() { + let input = r#"```json +{ + "narrative": "Hello **world**.\n\nSecond paragraph.", + "key_patterns": 
["pattern1", "pattern2"] +} +```"#; + let result = extract_json_from_ai_response(input); + assert!(result.is_some()); + let parsed: serde_json::Value = serde_json::from_str(&result.unwrap()).unwrap(); + assert_eq!(parsed["key_patterns"].as_array().unwrap().len(), 2); + } + + #[test] + fn json_with_chinese_quotes_in_values() { + let input = "```json\n{\"text\": \"用户询问\\\"模型是什么\\\"的问题\"}\n```"; + let result = extract_json_from_ai_response(input); + assert!(result.is_some()); + } + + #[test] + fn empty_input_returns_none() { + assert_eq!(extract_json_from_ai_response(""), None); + assert_eq!(extract_json_from_ai_response(" "), None); + } + + #[test] + fn no_json_returns_none() { + assert_eq!( + extract_json_from_ai_response("This is just plain text."), + None + ); + } + + #[test] + fn invalid_json_in_code_block_falls_through() { + let input = "```json\n{\"key\": \"value\"\n```"; + // Missing closing brace — code block extraction finds it but validation fails. + // Greedy brace match also finds `{...}` but it's still invalid. 
+ assert_eq!(extract_json_from_ai_response(input), None); + } + + #[test] + fn greedy_brace_fallback() { + let input = "Some text before {\"ok\": true} and after"; + assert_eq!( + extract_json_from_ai_response(input), + Some(r#"{"ok": true}"#.to_string()) + ); + } + + #[test] + fn code_block_with_crlf() { + let input = "```json\r\n{\"key\": \"value\"}\r\n```"; + assert_eq!( + extract_json_from_ai_response(input), + Some(r#"{"key": "value"}"#.to_string()) + ); + } + + // ── Repair: unescaped interior quotes ── + + #[test] + fn repair_unescaped_chinese_style_quotes() { + // AI writes: "headline": "用户问AI"你是什么模型"" — inner quotes are ASCII U+0022 + let input = "```json\n{\"headline\": \"用户问AI\"你是什么模型\"\", \"detail\": \"ok\"}\n```"; + let result = extract_json_from_ai_response(input); + assert!(result.is_some(), "repair should succeed"); + let parsed: serde_json::Value = serde_json::from_str(&result.unwrap()).unwrap(); + assert!(parsed["headline"].as_str().unwrap().contains("你是什么模型")); + assert_eq!(parsed["detail"].as_str().unwrap(), "ok"); + } + + #[test] + fn repair_multiple_rogue_quotes_in_one_value() { + // "text": "他说"你好"然后又说"再见"" + let input = r#"{"text": "他说"你好"然后又说"再见"", "other": "fine"}"#; + let result = extract_json_from_ai_response(input); + assert!(result.is_some(), "repair should handle multiple rogue quotes"); + let parsed: serde_json::Value = serde_json::from_str(&result.unwrap()).unwrap(); + assert!(parsed["text"].as_str().unwrap().contains("你好")); + assert!(parsed["text"].as_str().unwrap().contains("再见")); + } + + #[test] + fn repair_rogue_quotes_in_array_values() { + let input = r#"{"items": ["他说"你好"", "正常值"]}"#; + let result = extract_json_from_ai_response(input); + assert!(result.is_some()); + let parsed: serde_json::Value = serde_json::from_str(&result.unwrap()).unwrap(); + assert_eq!(parsed["items"].as_array().unwrap().len(), 2); + } + + #[test] + fn repair_preserves_already_escaped_quotes() { + let input = r#"{"text": "properly \"escaped\" 
quotes"}"#; + let result = extract_json_from_ai_response(input); + assert!(result.is_some()); + let parsed: serde_json::Value = serde_json::from_str(&result.unwrap()).unwrap(); + assert!(parsed["text"].as_str().unwrap().contains("escaped")); + } + + #[test] + fn repair_real_world_fun_ending() { + // Reproduces the exact pattern from the failing log + let input = "```json\n{\n \"headline\": \"用户直接问AI\"你到底是什么模型\",AI巧妙地回避了问题\",\n \"detail\": \"AI像个守口如瓶的特工\"\n}\n```"; + let result = extract_json_from_ai_response(input); + assert!(result.is_some(), "should repair the fun ending JSON"); + let parsed: serde_json::Value = serde_json::from_str(&result.unwrap()).unwrap(); + assert!(parsed["headline"].as_str().unwrap().contains("你到底是什么模型")); + } + + #[test] + fn repair_real_world_interaction_style() { + // Reproduces: "narrative": "...围绕着"这个项目是什么?""现在改了什么?"..." + let input = "```json\n{\n \"narrative\": \"会话围绕着\"这个项目是什么?\"和\"现在改了什么?\"展开\",\n \"key_patterns\": [\"pattern1\"]\n}\n```"; + let result = extract_json_from_ai_response(input); + assert!(result.is_some(), "should repair interaction style JSON"); + let parsed: serde_json::Value = serde_json::from_str(&result.unwrap()).unwrap(); + assert!(parsed["narrative"].as_str().unwrap().contains("这个项目是什么")); + } + + #[test] + fn repair_does_not_break_valid_json() { + let input = r#"{"key": "value", "num": 42, "arr": [1, 2]}"#; + assert_eq!( + extract_json_from_ai_response(input), + Some(input.to_string()) + ); + } +} diff --git a/src/crates/core/src/util/mod.rs b/src/crates/core/src/util/mod.rs index 6e0da5c7..9422a8c7 100644 --- a/src/crates/core/src/util/mod.rs +++ b/src/crates/core/src/util/mod.rs @@ -3,6 +3,7 @@ pub mod errors; pub mod front_matter_markdown; pub mod json_checker; +pub mod json_extract; pub mod process_manager; pub mod token_counter; pub mod types; @@ -10,6 +11,7 @@ pub mod types; pub use errors::*; pub use front_matter_markdown::FrontMatterMarkdown; pub use json_checker::JsonChecker; +pub use 
json_extract::extract_json_from_ai_response; pub use process_manager::*; pub use token_counter::*; pub use types::*; diff --git a/src/web-ui/src/app/components/NavPanel/components/PersistentFooterActions.tsx b/src/web-ui/src/app/components/NavPanel/components/PersistentFooterActions.tsx index 03ff0365..9ec14984 100644 --- a/src/web-ui/src/app/components/NavPanel/components/PersistentFooterActions.tsx +++ b/src/web-ui/src/app/components/NavPanel/components/PersistentFooterActions.tsx @@ -10,6 +10,7 @@ import { useToolbarModeContext } from '@/flow_chat/components/toolbar-mode/Toolb import { useCurrentWorkspace } from '@/infrastructure/contexts/WorkspaceContext'; import { useNotification } from '@/shared/notification-system'; import NotificationButton from '../../TitleBar/NotificationButton'; +import InsightsButton from '../../TitleBar/InsightsButton'; import { AboutDialog } from '../../AboutDialog'; import { RemoteConnectDialog } from '../../RemoteConnectDialog'; import { @@ -222,6 +223,10 @@ const PersistentFooterActions: React.FC = () => { + setShowAbout(false)} /> setShowRemoteConnect(false)} /> @@ -239,6 +244,7 @@ const PersistentFooterActions: React.FC = () => { onAgree={handleAgreeDisclaimer} /> + ); }; diff --git a/src/web-ui/src/app/components/TitleBar/InsightsButton.scss b/src/web-ui/src/app/components/TitleBar/InsightsButton.scss new file mode 100644 index 00000000..8a29b422 --- /dev/null +++ b/src/web-ui/src/app/components/TitleBar/InsightsButton.scss @@ -0,0 +1,65 @@ +@use '../../../component-library/styles/tokens.scss' as *; + +.insights-btn { + display: flex; + align-items: center; + justify-content: center; + min-width: 28px; + height: 28px; + border: none; + border-radius: $size-radius-sm; + background: transparent; + color: var(--color-text-muted); + cursor: pointer; + padding: 0 $size-gap-1; + transition: color $motion-fast $easing-standard, + background $motion-fast $easing-standard; + + &:hover { + background: var(--element-bg-soft); + color: 
var(--color-text-primary); + } + + &:active { + transform: scale(0.95); + } + + svg { + display: block; + } + + // ── Generating state ────────────────────────────── + &--generating { + color: var(--color-accent-400); + } + + &__progress { + display: flex; + align-items: center; + gap: 4px; + } + + &__progress-text { + font-size: $font-size-xs; + color: var(--color-text-secondary); + white-space: nowrap; + } + + &__spinner { + animation: insights-btn-spin 1s linear infinite; + } +} + +@keyframes insights-btn-spin { + from { transform: rotate(0deg); } + to { transform: rotate(360deg); } +} + +@media (prefers-reduced-motion: reduce) { + .insights-btn__spinner { + animation: none; + } + .insights-btn { + transition: none; + } +} diff --git a/src/web-ui/src/app/components/TitleBar/InsightsButton.tsx b/src/web-ui/src/app/components/TitleBar/InsightsButton.tsx new file mode 100644 index 00000000..984bf955 --- /dev/null +++ b/src/web-ui/src/app/components/TitleBar/InsightsButton.tsx @@ -0,0 +1,73 @@ +import React, { useCallback } from 'react'; +import { BarChart3 } from 'lucide-react'; +import { Tooltip } from '@/component-library'; +import { useI18n } from '@/infrastructure/i18n/hooks/useI18n'; +import { useSceneStore } from '@/app/stores/sceneStore'; +import { useMyAgentStore } from '@/app/scenes/my-agent/myAgentStore'; +import { useInsightsStore } from '@/app/scenes/my-agent/insightsStore'; +import './InsightsButton.scss'; + +interface InsightsButtonProps { + className?: string; + tooltipPlacement?: 'top' | 'bottom' | 'left' | 'right'; +} + +const InsightsButton: React.FC = ({ className, tooltipPlacement = 'bottom' }) => { + const { t } = useI18n('common'); + const generating = useInsightsStore((s) => s.generating); + const progress = useInsightsStore((s) => s.progress); + + const handleClick = useCallback(() => { + useMyAgentStore.getState().setActiveView('insights'); + useSceneStore.getState().openScene('my-agent'); + }, []); + + const progressText = generating && 
progress.total > 0 + ? `${progress.current}/${progress.total}` + : undefined; + + const tooltipContent = generating + ? progress.message || t('insights.generating') + : t('insights.buttonTooltip'); + + return ( + + + + ); +}; + +export default InsightsButton; diff --git a/src/web-ui/src/app/components/TitleBar/TitleBar.tsx b/src/web-ui/src/app/components/TitleBar/TitleBar.tsx index 1afccc91..30fab75d 100644 --- a/src/web-ui/src/app/components/TitleBar/TitleBar.tsx +++ b/src/web-ui/src/app/components/TitleBar/TitleBar.tsx @@ -355,6 +355,7 @@ const TitleBar: React.FC = ({ log.debug('Workspace selected', { workspace }); }} /> + ); }; diff --git a/src/web-ui/src/app/scenes/my-agent/InsightsScene.scss b/src/web-ui/src/app/scenes/my-agent/InsightsScene.scss new file mode 100644 index 00000000..f6562fe7 --- /dev/null +++ b/src/web-ui/src/app/scenes/my-agent/InsightsScene.scss @@ -0,0 +1,912 @@ +@use '../../../component-library/styles/tokens.scss' as *; + +.insights-scene { + display: flex; + flex-direction: column; + height: 100%; + overflow: hidden; + + &__header { + display: flex; + align-items: center; + justify-content: space-between; + padding: $size-gap-5 $size-gap-6 0; + flex-shrink: 0; + } + + &__header-left { + display: flex; + align-items: center; + gap: $size-gap-3; + + h2 { + font-size: $font-size-xl; + font-weight: $font-weight-semibold; + color: var(--color-text-primary); + margin: 0; + } + + svg { + color: var(--color-text-muted); + } + } + + &__error { + display: flex; + align-items: center; + gap: $size-gap-2; + margin: $size-gap-3 $size-gap-6 0; + padding: $size-gap-3 $size-gap-4; + background: $color-error-bg; + border: 1px solid $color-error-border; + border-radius: $size-radius-base; + font-size: $font-size-sm; + color: $color-error; + + span { flex: 1; } + + button { + background: none; + border: none; + color: inherit; + cursor: pointer; + font-size: $font-size-xl; + line-height: 1; + padding: 0 $size-gap-1; + opacity: $opacity-disabled; + &:hover { 
opacity: 1; } + } + } + + &__generate { + padding: $size-gap-5 $size-gap-6 0; + flex-shrink: 0; + } + + &__generate-label, + &__history-label { + font-size: 11px; + font-weight: $font-weight-semibold; + text-transform: uppercase; + color: var(--color-text-muted); + margin-bottom: $size-gap-3; + letter-spacing: 0.5px; + display: flex; + align-items: center; + gap: $size-gap-3; + } + + &__history-hint { + font-weight: $font-weight-normal; + text-transform: none; + font-size: 10px; + color: var(--color-text-muted); + opacity: 0.7; + letter-spacing: 0; + } + + &__generate-row { + display: flex; + align-items: center; + gap: $size-gap-3; + } + + &__day-selector { + display: flex; + gap: 0; + border: 1px solid $border-base; + border-radius: $size-radius-base; + overflow: hidden; + } + + &__day-btn { + padding: $size-gap-2 $size-gap-4; + border: none; + background: transparent; + color: var(--color-text-muted); + font-size: $font-size-xs; + font-weight: $font-weight-medium; + cursor: pointer; + transition: all $motion-fast $easing-standard; + border-right: 1px solid $border-base; + + &:last-child { border-right: none; } + + &:hover { + background: $element-bg-soft; + color: var(--color-text-primary); + } + + &.is-active { + background: $color-accent-200; + color: $color-accent-500; + font-weight: $font-weight-semibold; + } + } + + &__generate-btn { + display: flex; + align-items: center; + gap: $size-gap-2; + padding: $size-gap-2 $size-gap-5; + border: 1px solid $color-accent-600; + border-radius: $size-radius-base; + background: $color-accent-200; + color: $color-accent-500; + font-size: $font-size-sm; + font-weight: $font-weight-semibold; + cursor: pointer; + transition: all $motion-fast $easing-standard; + + &:hover { + background: $color-accent-300; + } + + &:active { + background: $color-accent-400; + } + } + + &__cancel-btn { + display: flex; + align-items: center; + gap: $size-gap-2; + padding: $size-gap-2 $size-gap-5; + border: 1px solid $color-error-border; + 
border-radius: $size-radius-base; + background: $color-error-bg; + color: $color-error; + font-size: $font-size-sm; + font-weight: $font-weight-medium; + cursor: pointer; + transition: all $motion-fast $easing-standard; + + &:hover { + background: rgba(199, 112, 112, 0.18); + border-color: $color-error; + } + } + + &__spinner { + animation: insights-spin 1s linear infinite; + } + + &__progress-info { + display: flex; + align-items: center; + gap: $size-gap-2; + font-size: $font-size-xs; + color: var(--color-text-muted); + animation: insights-fade-in $motion-base $easing-standard; + min-width: 0; + } + + &__progress-count { + font-weight: $font-weight-semibold; + color: $color-accent-600; + white-space: nowrap; + } + + &__progress-message { + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + } + + &__history { + padding: $size-gap-5 $size-gap-6; + flex: 1; + overflow-y: auto; + min-height: 0; + } + + &__loading { + display: flex; + justify-content: center; + padding: $size-gap-6; + color: var(--color-text-muted); + } + + &__empty { + text-align: center; + padding: $size-gap-8 0; + color: var(--color-text-muted); + font-size: $font-size-sm; + } + + &__report-list { + display: flex; + flex-direction: column; + gap: $size-gap-2; + } +} + +// ============ Report Meta Card ============ + +.insights-meta-card { + @include card-base; + display: flex; + flex-direction: column; + gap: $size-gap-2; + padding: $size-gap-3 $size-gap-4; + cursor: pointer; + text-align: left; + width: 100%; + + &:hover { + @include card-hover; + border-color: $border-accent-soft; + } + + &__top { + display: flex; + align-items: center; + justify-content: space-between; + } + + &__date { + font-size: $font-size-sm; + font-weight: $font-weight-medium; + color: var(--color-text-primary); + } + + &__range { + font-size: 11px; + color: var(--color-text-muted); + white-space: nowrap; + } + + &__stats-row { + display: flex; + flex-wrap: wrap; + gap: $size-gap-2; + } + + &__stat { + 
display: inline-flex; + align-items: center; + gap: 3px; + font-size: 11px; + color: var(--color-text-muted); + background: $element-bg-subtle; + padding: 1px $size-gap-2; + border-radius: $size-radius-sm; + } + + &__tags { + display: flex; + flex-wrap: wrap; + gap: $size-gap-1; + } + + &__tag { + font-size: 10px; + padding: 1px $size-gap-2; + border-radius: $size-radius-sm; + background: $color-accent-100; + color: $color-accent-600; + border: 1px solid $border-accent-soft; + + &--lang { + background: $color-success-bg; + color: $color-success; + border-color: $color-success-border; + } + } +} + +// ============ Report View ============ + +.insights-report-header { + display: flex; + align-items: center; + justify-content: space-between; + padding: $size-gap-3 $size-gap-6; + border-bottom: 1px solid $border-subtle; + flex-shrink: 0; + + &__back { + display: flex; + align-items: center; + gap: $size-gap-2; + padding: $size-gap-1 $size-gap-3; + border: none; + border-radius: $size-radius-sm; + background: transparent; + color: var(--color-text-secondary); + font-size: $font-size-sm; + cursor: pointer; + transition: all $motion-fast $easing-standard; + + &:hover { + background: $element-bg-soft; + color: var(--color-text-primary); + } + } + + &__actions { + display: flex; + align-items: center; + gap: $size-gap-2; + } + + &__html-btn { + display: flex; + align-items: center; + gap: $size-gap-1; + padding: $size-gap-1 $size-gap-3; + border: 1px solid $border-base; + border-radius: $size-radius-sm; + background: transparent; + color: var(--color-text-secondary); + font-size: $font-size-xs; + cursor: pointer; + transition: all $motion-fast $easing-standard; + + &:hover:not(:disabled) { + background: $element-bg-soft; + color: var(--color-text-primary); + } + + &:disabled { + opacity: $opacity-disabled; + cursor: not-allowed; + } + } +} + +.insights-report-subtitle { + display: flex; + align-items: center; + gap: $size-gap-4; + padding: $size-gap-3 $size-gap-6; + 
font-size: $font-size-xs; + color: var(--color-text-muted); + border-bottom: 1px solid $border-subtle; + flex-shrink: 0; + flex-wrap: wrap; + + span { + display: flex; + align-items: center; + gap: $size-gap-1; + } +} + +.insights-report-body { + flex: 1; + overflow-y: auto; + padding: $size-gap-5 $size-gap-6; + min-height: 0; + + &::-webkit-scrollbar { width: 6px; } + &::-webkit-scrollbar-track { background: transparent; } + &::-webkit-scrollbar-thumb { + background: $border-subtle; + border-radius: 3px; + } +} + +.insights-scene--report { + overflow: hidden; +} + +// ============ At a Glance ============ + +.insights-glance { + background: $color-accent-100; + border: 1px solid $border-accent-soft; + border-radius: $size-radius-lg; + padding: $size-gap-4 $size-gap-5; + margin-bottom: $size-gap-5; + + &__title { + font-size: $font-size-sm; + font-weight: $font-weight-bold; + color: $color-accent-500; + margin-bottom: $size-gap-3; + } + + &__sections { + display: flex; + flex-direction: column; + gap: $size-gap-3; + } + + &__item { + font-size: $font-size-sm; + color: var(--color-text-secondary); + line-height: $line-height-relaxed; + + strong { + color: $color-accent-500; + } + } +} + +// ============ Interaction Style ============ + +.insights-interaction { + @include card-base; + padding: $size-gap-4; + + &__narrative { + font-size: $font-size-sm; + color: var(--color-text-secondary); + line-height: 1.7; + margin: 0 0 $size-gap-3; + } + + &__patterns { + display: flex; + flex-direction: column; + gap: $size-gap-2; + } + + &__pattern { + background: $color-success-bg; + border: 1px solid $color-success-border; + border-radius: $size-radius-sm; + padding: $size-gap-2 $size-gap-3; + font-size: $font-size-xs; + color: $color-success; + } +} + +// ============ Stats ============ + +.insights-stats-row { + display: flex; + gap: $size-gap-3; + padding: $size-gap-4 0; + border-top: 1px solid $border-subtle; + border-bottom: 1px solid $border-subtle; + margin-bottom: 
$size-gap-5; + flex-wrap: wrap; +} + +.insights-stat { + display: flex; + flex-direction: column; + align-items: center; + flex: 1; + min-width: 80px; + + &__icon { + color: var(--color-text-muted); + margin-bottom: $size-gap-1; + } + + &__value { + font-size: $font-size-2xl; + font-weight: $font-weight-bold; + color: var(--color-text-primary); + } + + &__label { + font-size: 10px; + color: var(--color-text-muted); + text-transform: uppercase; + } +} + +// ============ Charts ============ + +.insights-charts-row { + display: grid; + grid-template-columns: 1fr 1fr; + gap: $size-gap-4; + margin-top: $size-gap-4; + margin-bottom: $size-gap-5; + + &--full { + grid-template-columns: 1fr; + } +} + +.insights-chart-card { + @include card-base; + background: var(--color-bg-primary, $element-bg-subtle); + padding: $size-gap-3; + + &__title { + font-size: 11px; + font-weight: $font-weight-semibold; + color: var(--color-text-muted); + text-transform: uppercase; + margin-bottom: $size-gap-3; + } +} + +.insights-bar-row { + display: flex; + align-items: center; + margin-bottom: $size-gap-1; + + &__label { + width: 100px; + font-size: 10px; + color: var(--color-text-secondary); + flex-shrink: 0; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + } + + &__track { + flex: 1; + height: 5px; + background: $element-bg-subtle; + border-radius: 3px; + margin: 0 $size-gap-2; + } + + &__fill { + height: 100%; + border-radius: 3px; + transition: width $motion-base $easing-standard; + } + + &__value { + width: 28px; + font-size: 10px; + font-weight: $font-weight-medium; + color: var(--color-text-muted); + text-align: right; + } +} + +// ============ Section common ============ + +.insights-section { + margin-bottom: $size-gap-6; + + h3 { + font-size: $font-size-base; + font-weight: $font-weight-semibold; + color: var(--color-text-primary); + margin-bottom: $size-gap-3; + } + + h4 { + font-size: $font-size-sm; + font-weight: $font-weight-semibold; + color: 
var(--color-text-secondary); + margin-bottom: $size-gap-2; + margin-top: $size-gap-4; + } +} + +// ============ Project Areas ============ + +.insights-areas { + display: flex; + flex-direction: column; + gap: $size-gap-2; +} + +.insights-area-card { + @include card-base; + padding: $size-gap-3; + + &__header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: $size-gap-2; + } + + &__name { + font-weight: $font-weight-semibold; + font-size: $font-size-sm; + color: var(--color-text-primary); + } + + &__count { + font-size: 11px; + color: var(--color-text-muted); + background: $element-bg-subtle; + padding: 2px $size-gap-2; + border-radius: $size-radius-sm; + } + + &__desc { + font-size: $font-size-xs; + color: var(--color-text-secondary); + line-height: $line-height-base; + margin: 0; + } +} + +// ============ Big Wins ============ + +.insights-wins { + display: flex; + flex-direction: column; + gap: $size-gap-2; +} + +.insights-win-card { + background: $color-success-bg; + border: 1px solid $color-success-border; + border-radius: $size-radius-base; + padding: $size-gap-3; + + &__title { + font-weight: $font-weight-semibold; + font-size: $font-size-sm; + color: $color-success; + margin-bottom: $size-gap-1; + } + + &__desc { + font-size: $font-size-xs; + color: $color-success; + opacity: 0.85; + line-height: $line-height-base; + margin: 0; + } + + &__impact { + font-size: 11px; + color: $color-success; + opacity: 0.7; + margin: $size-gap-1 0 0; + font-style: italic; + } +} + +// ============ Friction ============ + +.insights-friction { + display: flex; + flex-direction: column; + gap: $size-gap-3; +} + +.insights-friction-card { + background: $color-error-bg; + border: 1px solid $color-error-border; + border-radius: $size-radius-base; + padding: $size-gap-3; + + &__title { + font-weight: $font-weight-semibold; + font-size: $font-size-sm; + color: $color-error; + margin-bottom: $size-gap-1; + } + + &__desc { + font-size: 
$font-size-xs; + color: $color-error; + opacity: 0.85; + margin: 0 0 $size-gap-2; + } + + &__examples { + margin: 0 0 $size-gap-2 $size-gap-4; + font-size: 11px; + color: var(--color-text-secondary); + + li { margin-bottom: 2px; } + } + + &__suggestion { + background: $element-bg-subtle; + border: 1px solid $color-error-border; + border-radius: $size-radius-sm; + padding: $size-gap-2 $size-gap-3; + font-size: $font-size-xs; + color: $color-error; + } +} + +// ============ Suggestions ============ + +.insights-md-section { + background: $color-info-bg; + border: 1px solid $color-info-border; + border-radius: $size-radius-base; + padding: $size-gap-3; + margin-bottom: $size-gap-3; +} + +.insights-md-item { + padding: $size-gap-2 0; + border-bottom: 1px solid $color-info-border; + + &:last-child { border-bottom: none; } + + &__section { + font-size: 10px; + font-weight: $font-weight-semibold; + text-transform: uppercase; + color: var(--color-text-muted); + margin-bottom: $size-gap-1; + } + + &__rationale { + font-size: 11px; + color: var(--color-text-muted); + margin: $size-gap-1 0 0; + } +} + +.insights-feature-card, +.insights-pattern-card { + border-radius: $size-radius-base; + padding: $size-gap-3; + margin-bottom: $size-gap-2; +} + +.insights-feature-card { + background: $color-success-bg; + border: 1px solid $color-success-border; + + &__title { + font-weight: $font-weight-semibold; + font-size: $font-size-sm; + color: var(--color-text-primary); + margin-bottom: $size-gap-1; + } + + &__desc { + font-size: $font-size-xs; + color: var(--color-text-secondary); + margin: 0 0 $size-gap-1; + } + + &__benefit { + font-size: 11px; + color: var(--color-text-secondary); + margin: 0; + } +} + +.insights-pattern-card { + background: $color-info-bg; + border: 1px solid $color-info-border; + + &__title { + font-weight: $font-weight-semibold; + font-size: $font-size-sm; + color: var(--color-text-primary); + margin-bottom: $size-gap-1; + } + + &__desc { + font-size: 
$font-size-xs; + color: var(--color-text-secondary); + margin: 0; + } +} + +// ============ Copyable ============ + +.insights-copyable { + margin-top: $size-gap-2; + + &__label { + font-size: 10px; + font-weight: $font-weight-semibold; + text-transform: uppercase; + color: var(--color-text-muted); + margin-bottom: $size-gap-1; + } + + &__row { + display: flex; + align-items: flex-start; + gap: $size-gap-2; + } + + &__code { + flex: 1; + background: $element-bg-subtle; + padding: $size-gap-2 $size-gap-3; + border-radius: $size-radius-sm; + font-size: 11px; + font-family: $font-family-mono; + color: var(--color-text-primary); + border: 1px solid $border-base; + white-space: pre-wrap; + word-break: break-word; + } + + &__btn { + display: flex; + align-items: center; + justify-content: center; + width: 24px; + height: 24px; + border: none; + border-radius: $size-radius-sm; + background: $element-bg-soft; + color: var(--color-text-secondary); + cursor: pointer; + flex-shrink: 0; + transition: all $motion-fast $easing-standard; + + &:hover { background: $element-bg-medium; } + } +} + +// ============ Horizon ============ + +.insights-horizon { + display: flex; + flex-direction: column; + gap: $size-gap-3; +} + +.insights-horizon-card { + background: $color-purple-200; + border: 1px solid $border-purple; + border-radius: $size-radius-base; + padding: $size-gap-3; + + &__title { + font-weight: $font-weight-semibold; + font-size: $font-size-sm; + color: $color-purple-500; + margin-bottom: $size-gap-2; + } + + &__desc { + font-size: $font-size-xs; + color: var(--color-text-secondary); + line-height: $line-height-base; + margin: 0 0 $size-gap-2; + } + + &__steps { + margin: 0 0 0 $size-gap-4; + font-size: 11px; + color: $color-purple-500; + + li { margin-bottom: 2px; } + } +} + +// ============ Fun Ending ============ + +.insights-fun-ending { + background: $color-warning-bg; + border: 1px solid $color-warning-border; + border-radius: $size-radius-lg; + padding: $size-gap-5; 
+ margin-top: $size-gap-6; + text-align: center; + + &__headline { + font-size: $font-size-lg; + font-weight: $font-weight-semibold; + color: $color-warning; + margin-bottom: $size-gap-2; + } + + &__message { + font-size: $font-size-sm; + color: var(--color-text-secondary); + margin: 0; + } +} + +// ============ Animations ============ + +@keyframes insights-spin { + from { transform: rotate(0deg); } + to { transform: rotate(360deg); } +} + +@keyframes insights-fade-in { + from { opacity: 0; } + to { opacity: 1; } +} + +@media (prefers-reduced-motion: reduce) { + .insights-scene__spinner { + animation: none; + } +} diff --git a/src/web-ui/src/app/scenes/my-agent/InsightsScene.tsx b/src/web-ui/src/app/scenes/my-agent/InsightsScene.tsx new file mode 100644 index 00000000..90a74a14 --- /dev/null +++ b/src/web-ui/src/app/scenes/my-agent/InsightsScene.tsx @@ -0,0 +1,720 @@ +import React, { useEffect, useCallback } from 'react'; +import { + ExternalLink, Copy, Check, ArrowLeft, Loader2, AlertTriangle, + BarChart3, Clock, MessageSquare, Calendar, TrendingUp, X, + FileCode, FolderEdit, +} from 'lucide-react'; +import { openPath } from '@tauri-apps/plugin-opener'; +import { useI18n } from '@/infrastructure/i18n/hooks/useI18n'; +import type { InsightsReport, InsightsReportMeta, InsightsStats } from '@/infrastructure/api/insightsApi'; +import { useInsightsStore } from './insightsStore'; +import { createLogger } from '@/shared/utils/logger'; +import { notificationService } from '@/shared/notification-system'; +import './InsightsScene.scss'; + +const log = createLogger('InsightsScene'); + +const DAY_OPTIONS = [7, 14, 30, 90] as const; + +const InsightsScene: React.FC = () => { + const { t } = useI18n('common'); + const { + view, reportMetas, currentReport, generating, progress, + selectedDays, error, loadingMetas, + setSelectedDays, fetchReportMetas, loadReport, generateReport, cancelGeneration, backToList, clearError, + } = useInsightsStore(); + + useEffect(() => { + 
fetchReportMetas(); + }, [fetchReportMetas]); + + if (view === 'report' && currentReport) { + return ; + } + + return ( +
    +
    +
    + +

    {t('insights.title')}

    +
    +
    + + {error && ( +
    + + {error} + +
    + )} + +
    +
    {t('insights.generateNew')}
    +
    +
    + {DAY_OPTIONS.map((d) => ( + + ))} +
    + {generating ? ( + + ) : ( + + )} + {generating && progress.message && ( +
    + {progress.current > 0 && progress.total > 0 && ( + {progress.current}/{progress.total} + )} + {progress.message} +
    + )} +
    +
    + +
    +
    + {t('insights.history')} + {t('insights.keepLatest5')} +
    + {loadingMetas ? ( +
    + +
    + ) : reportMetas.length === 0 ? ( +
    {t('insights.noReports')}
    + ) : ( +
    + {reportMetas.map((meta) => ( + + ))} +
    + )} +
    +
    + ); +}; + +const ReportMetaCard: React.FC<{ + meta: InsightsReportMeta; + onSelect: (meta: InsightsReportMeta) => void; +}> = ({ meta, onSelect }) => { + const { t } = useI18n('common'); + const date = new Date(meta.generated_at * 1000); + const dateStr = date.toLocaleDateString(undefined, { year: 'numeric', month: 'short', day: 'numeric' }); + const timeStr = date.toLocaleTimeString(undefined, { hour: '2-digit', minute: '2-digit' }); + const rangeStart = meta.date_range.start.slice(0, 10); + const rangeEnd = meta.date_range.end.slice(0, 10); + const formatGoal = (g: string) => g.replace(/_/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase()); + + return ( + + ); +}; + +// ============ Report View ============ + +const ReportView: React.FC<{ report: InsightsReport; onBack: () => void }> = ({ report, onBack }) => { + const { t } = useI18n('common'); + + const handleOpenHtml = useCallback(async () => { + if (report.html_report_path) { + try { + await openPath(report.html_report_path); + } catch (error) { + log.error('Failed to open HTML report', error); + notificationService.error( + String(error), + { title: t('insights.openHtmlFailed'), duration: 5000 } + ); + } + } + }, [report.html_report_path, t]); + + const dateStart = report.date_range.start.slice(0, 10); + const dateEnd = report.date_range.end.slice(0, 10); + + return ( +
    +
    + +
    + +
    +
    + +
    + {report.total_messages} {t('insights.messages')} + {report.total_sessions} {t('insights.sessions')} ({report.analyzed_sessions} {t('insights.analyzed')}) + {dateStart} ~ {dateEnd} +
    + +
    + + + + {/* What You Work On */} + {report.project_areas.length > 0 && ( +
    +

    {t('insights.projectAreas')}

    +
    + {report.project_areas.map((area) => ( +
    +
    + {area.name} + ~{area.session_count} {t('insights.sessions')} +
    +

    +
    + ))} +
    +
    + )} + + + {/* How You Use BitFun */} + {report.interaction_style.narrative && } + + + {/* Impressive Things You Did */} + {report.big_wins.length > 0 && ( +
    +

    {t('insights.bigWins')}

    +
    + {report.big_wins.map((win) => ( +
    +
    {win.title}
    +

    + {win.impact &&

    } +
    + ))} +
    +
    + )} + + + {/* Where Things Go Wrong */} + {report.friction_categories.length > 0 && ( +
    +

    {t('insights.friction')}

    +
    + {report.friction_categories.map((f) => ( +
    +
    {f.category}
    +

    + {f.examples.length > 0 && ( +
      + {f.examples.map((ex, j) =>
    • )} +
    + )} + {f.suggestion &&
    } +
    + ))} +
    +
    + )} + + + + + {report.on_the_horizon.length > 0 && ( +
    +

    {t('insights.horizon')}

    + {report.horizon_intro && ( +

    + )} +
    + {report.on_the_horizon.map((h) => ( +
    +
    {h.title}
    +

    + {h.how_to_try && ( +

    + )} +
    + ))} +
    +
    + )} + + {report.fun_ending && ( +
    +
    {report.fun_ending.headline}
    +

    +
    + )} +
    +
    + ); +}; + +// ============ Sub-components ============ + +const AtAGlanceSection: React.FC<{ report: InsightsReport }> = ({ report }) => { + const { at_a_glance } = report; + const { t } = useI18n('common'); + + return ( +
    +
    {t('insights.atAGlance')}
    +
    +
    + {t('insights.whatsWorking')}: +
    +
    + {t('insights.whatsHindering')}: +
    +
    + {t('insights.quickWins')}: +
    +
    + {t('insights.lookingAhead')}: +
    +
    +
    + ); +}; + +const InteractionStyleSection: React.FC<{ report: InsightsReport }> = ({ report }) => { + const { interaction_style } = report; + const { t } = useI18n('common'); + + return ( +
    +

    {t('insights.interactionStyle')}

    +
    +

    + {interaction_style.key_patterns.length > 0 && ( +
    + {interaction_style.key_patterns.map((pattern, i) => ( +
    + ))} +
    + )} +
    +
    + ); +}; + +const RESPONSE_TIME_ORDER = ['2-10s', '10-30s', '30s-1m', '1-2m', '2-5m', '5-15m', '>15m']; + +const TIME_OF_DAY_PERIODS: { label: string; hours: number[] }[] = [ + { label: 'Morning (6-12)', hours: [6, 7, 8, 9, 10, 11] }, + { label: 'Afternoon (12-18)', hours: [12, 13, 14, 15, 16, 17] }, + { label: 'Evening (18-24)', hours: [18, 19, 20, 21, 22, 23] }, + { label: 'Night (0-6)', hours: [0, 1, 2, 3, 4, 5] }, +]; + +const formatDurationShort = (secs: number): string => { + if (secs < 60) return `${Math.round(secs)}s`; + if (secs < 3600) return `${(secs / 60).toFixed(1)}m`; + return `${(secs / 3600).toFixed(1)}h`; +}; + +const formatNumber = (n: number): string => n.toLocaleString(); + +const StatsRow: React.FC<{ report: InsightsReport }> = ({ report }) => { + const { t } = useI18n('common'); + const { stats } = report; + const hasCodeChanges = (stats.total_lines_added ?? 0) > 0 || (stats.total_lines_removed ?? 0) > 0; + + return ( +
    + {hasCodeChanges && ( + } + value={`+${formatNumber(stats.total_lines_added)}/-${formatNumber(stats.total_lines_removed)}`} + label={t('insights.lines')} + /> + )} + {hasCodeChanges && (stats.total_files_modified ?? 0) > 0 && ( + } value={formatNumber(stats.total_files_modified)} label={t('insights.files')} /> + )} + } value={report.total_sessions.toString()} label={t('insights.sessions')} /> + } value={report.total_messages.toString()} label={t('insights.messages')} /> + } value={stats.total_hours.toFixed(1)} label={t('insights.hours')} /> + } value={report.days_covered.toString()} label={t('insights.days')} /> + } value={stats.msgs_per_day.toFixed(1)} label={t('insights.msgsPerDay')} /> + {stats.median_response_time_secs != null && ( + } value={formatDurationShort(stats.median_response_time_secs)} label={t('insights.medianResponseTime')} /> + )} + {stats.avg_response_time_secs != null && ( + } value={formatDurationShort(stats.avg_response_time_secs)} label={t('insights.avgResponseTime')} /> + )} +
    + ); +}; + +const ChartsRow: React.FC<{ children: React.ReactNode }> = ({ children }) => { + const visible = React.Children.toArray(children).filter(Boolean); + if (visible.length === 0) return null; + const isSingle = visible.length === 1; + return ( +
    + {visible} +
    + ); +}; + +const BasicCharts: React.FC<{ stats: InsightsStats }> = ({ stats }) => { + const { t } = useI18n('common'); + const hasGoals = stats.top_goals.some(([, v]) => v > 0); + const hasTools = stats.top_tools.some(([, v]) => v > 0); + const langItems = Object.entries(stats.languages).sort(([, a], [, b]) => b - a).slice(0, 6); + const hasLangs = langItems.some(([, v]) => v > 0); + const typeItems = Object.entries(stats.session_types).sort(([, a], [, b]) => b - a).slice(0, 6); + const hasTypes = typeItems.some(([, v]) => v > 0); + + return ( + <> + + {hasGoals && } + {hasTools && } + + {(hasLangs || hasTypes) && ( + + {hasLangs && } + {hasTypes && } + + )} + + ); +}; + +const UsageCharts: React.FC<{ stats: InsightsStats }> = ({ stats }) => { + const { t } = useI18n('common'); + + const responseTimeBuckets = stats.response_time_buckets || {}; + const hasResponseTime = Object.keys(responseTimeBuckets).length > 0; + const hourCounts = stats.hour_counts || {}; + const hasTimeOfDay = Object.keys(hourCounts).length > 0; + const toolErrors = stats.tool_errors || {}; + const hasToolErrors = Object.keys(toolErrors).length > 0; + const agentTypes = stats.agent_types || {}; + const hasAgentTypes = Object.keys(agentTypes).length > 0; + + const sortedResponseTime: [string, number][] = RESPONSE_TIME_ORDER + .filter((label) => responseTimeBuckets[label] != null) + .map((label) => [label, responseTimeBuckets[label]]); + + const timeOfDayItems: [string, number][] = TIME_OF_DAY_PERIODS.map(({ label, hours }) => { + const count = hours.reduce((sum, h) => sum + (hourCounts[h] ?? 
0), 0); + return [label, count]; + }); + + if (!hasResponseTime && !hasTimeOfDay && !hasToolErrors && !hasAgentTypes) return null; + + return ( + <> + {hasResponseTime && ( + + + + )} + {(hasTimeOfDay || hasToolErrors) && ( + + {hasTimeOfDay && ( + + )} + {hasToolErrors && ( + b - a).slice(0, 6)} + color="#dc2626" + max={6} + /> + )} + + )} + {hasAgentTypes && ( + + b - a)} + color="#f97316" + max={6} + /> + + )} + + ); +}; + +const OutcomeCharts: React.FC<{ stats: InsightsStats }> = ({ stats }) => { + const { t } = useI18n('common'); + const success = stats.success || {}; + const outcomes = stats.outcomes || {}; + const hasSuccess = Object.keys(success).length > 0; + const hasOutcomes = Object.keys(outcomes).length > 0; + + if (!hasSuccess && !hasOutcomes) return null; + + return ( + + {hasSuccess && ( + b - a).slice(0, 6)} + color="#16a34a" + max={6} + /> + )} + {hasOutcomes && ( + b - a).slice(0, 6)} + color="#8b5cf6" + max={6} + /> + )} + + ); +}; + +const FrictionCharts: React.FC<{ stats: InsightsStats }> = ({ stats }) => { + const { t } = useI18n('common'); + const friction = stats.friction || {}; + const satisfaction = stats.satisfaction || {}; + const hasFriction = Object.keys(friction).length > 0; + const hasSatisfaction = Object.keys(satisfaction).length > 0; + + if (!hasFriction && !hasSatisfaction) return null; + + return ( + + {hasFriction && ( + b - a).slice(0, 6)} + color="#dc2626" + max={6} + /> + )} + {hasSatisfaction && ( + b - a).slice(0, 6)} + color="#eab308" + max={6} + /> + )} + + ); +}; + +const StatItem: React.FC<{ icon: React.ReactNode; value: string; label: string }> = ({ icon, value, label }) => ( +
    +
    {icon}
    +
    {value}
    +
    {label}
    +
    +); + +const BarChart: React.FC<{ title: string; items: [string, number][]; color: string; max: number }> = ({ title, items, color, max }) => { + const nonZero = items.filter(([, v]) => v > 0); + const displayed = nonZero.slice(0, max); + const maxVal = Math.max(...displayed.map(([, v]) => v), 1); + + if (displayed.length === 0) return null; + + return ( +
    +
    {title}
    + {displayed.map(([label, value]) => { + const pct = (value / maxVal) * 100; + const displayLabel = label.replace(/_/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase()); + return ( +
    + {displayLabel} +
    +
    +
    + {value} +
    + ); + })} +
    + ); +}; + +const SuggestionsSection: React.FC<{ report: InsightsReport }> = ({ report }) => { + const { suggestions } = report; + const { t } = useI18n('common'); + const hasSuggestions = + suggestions.bitfun_md_additions.length > 0 || + suggestions.features_to_try.length > 0 || + suggestions.usage_patterns.length > 0; + + if (!hasSuggestions) return null; + + return ( +
    +

    {t('insights.suggestions')}

    + + {suggestions.bitfun_md_additions.length > 0 && ( +
    +

    {t('insights.mdAdditions')}

    + {suggestions.bitfun_md_additions.map((md, i) => ( +
    + {md.section &&
    {md.section}
    } + +

    {md.rationale}

    +
    + ))} +
    + )} + + {suggestions.features_to_try.length > 0 && ( +
    +

    {t('insights.featuresToTry')}

    + {suggestions.features_to_try.map((f) => ( +
    +
    {f.feature}
    +

    +

    + {f.example_usage && } +
    + ))} +
    + )} + + {suggestions.usage_patterns.length > 0 && ( +
    +

    {t('insights.usagePatterns')}

    + {suggestions.usage_patterns.map((p) => ( +
    +
    {p.pattern}
    +

    + {p.suggested_prompt && } +
    + ))} +
    + )} +
    + ); +}; + +const MarkdownInline: React.FC<{ text: string }> = ({ text }) => { + const parts: React.ReactNode[] = []; + const regex = /\*\*(.+?)\*\*|\*(.+?)\*/g; + let lastIndex = 0; + let match: RegExpExecArray | null; + + while ((match = regex.exec(text)) !== null) { + if (match.index > lastIndex) { + parts.push(text.slice(lastIndex, match.index)); + } + if (match[1] != null) { + parts.push({match[1]}); + } else if (match[2] != null) { + parts.push({match[2]}); + } + lastIndex = regex.lastIndex; + } + + if (lastIndex < text.length) { + parts.push(text.slice(lastIndex)); + } + + return <>{parts}; +}; + +const CopyableCode: React.FC<{ text: string; label?: string }> = ({ text, label }) => { + const [copied, setCopied] = React.useState(false); + + const handleCopy = useCallback(async () => { + try { + await navigator.clipboard.writeText(text); + setCopied(true); + setTimeout(() => setCopied(false), 2000); + } catch (e) { + log.error('Failed to copy', e); + } + }, [text]); + + return ( +
    + {label &&
    {label}
    } +
    + {text} + +
    +
    + ); +}; + +export default InsightsScene; diff --git a/src/web-ui/src/app/scenes/my-agent/MyAgentScene.tsx b/src/web-ui/src/app/scenes/my-agent/MyAgentScene.tsx index b16fedc1..72518fcb 100644 --- a/src/web-ui/src/app/scenes/my-agent/MyAgentScene.tsx +++ b/src/web-ui/src/app/scenes/my-agent/MyAgentScene.tsx @@ -8,6 +8,7 @@ import './MyAgentScene.scss'; const ProfileScene = lazy(() => import('../profile/ProfileScene')); const AgentsScene = lazy(() => import('../agents/AgentsScene')); const SkillsScene = lazy(() => import('../skills/SkillsScene')); +const InsightsScene = lazy(() => import('./InsightsScene')); interface MyAgentSceneProps { workspacePath?: string; @@ -96,6 +97,7 @@ const MyAgentScene: React.FC = ({ workspacePath }) => { )} {activeView === 'agents' && } {activeView === 'skills' && } + {activeView === 'insights' && }
    ); diff --git a/src/web-ui/src/app/scenes/my-agent/insightsStore.ts b/src/web-ui/src/app/scenes/my-agent/insightsStore.ts new file mode 100644 index 00000000..1904c2f8 --- /dev/null +++ b/src/web-ui/src/app/scenes/my-agent/insightsStore.ts @@ -0,0 +1,145 @@ +import { create } from 'zustand'; +import { listen } from '@tauri-apps/api/event'; +import { insightsApi, type InsightsReport, type InsightsReportMeta, type InsightsProgressEvent } from '@/infrastructure/api/insightsApi'; +import { createLogger } from '@/shared/utils/logger'; + +const log = createLogger('InsightsStore'); + +const RETRY_STAGES = new Set(['facet_retry', 'recommendations_retry']); + +export type InsightsView = 'list' | 'report'; + +interface InsightsProgress { + stage: string; + message: string; + current: number; + total: number; + isRetrying: boolean; +} + +interface InsightsState { + view: InsightsView; + reportMetas: InsightsReportMeta[]; + currentReport: InsightsReport | null; + generating: boolean; + progress: InsightsProgress; + selectedDays: number; + error: string; + loadingMetas: boolean; + + setSelectedDays: (days: number) => void; + fetchReportMetas: () => Promise; + loadReport: (meta: InsightsReportMeta) => Promise; + generateReport: () => Promise; + cancelGeneration: () => Promise; + backToList: () => void; + clearError: () => void; +} + +const defaultProgress: InsightsProgress = { + stage: '', + message: '', + current: 0, + total: 0, + isRetrying: false, +}; + +export const useInsightsStore = create((set, get) => ({ + view: 'list', + reportMetas: [], + currentReport: null, + generating: false, + progress: { ...defaultProgress }, + selectedDays: 30, + error: '', + loadingMetas: false, + + setSelectedDays: (days) => set({ selectedDays: days }), + + fetchReportMetas: async () => { + set({ loadingMetas: true }); + try { + const metas = await insightsApi.getLatestInsights(); + set({ reportMetas: metas, loadingMetas: false }); + } catch (err) { + log.error('Failed to fetch report 
metas', err); + set({ loadingMetas: false }); + } + }, + + loadReport: async (meta) => { + try { + const report = await insightsApi.loadReport(meta.path); + set({ currentReport: report, view: 'report', error: '' }); + } catch (err) { + log.error('Failed to load report', err); + set({ error: String(err) }); + } + }, + + generateReport: async () => { + const { selectedDays, generating } = get(); + if (generating) return; + + set({ + generating: true, + error: '', + progress: { ...defaultProgress, message: 'Starting...' }, + }); + + const unlisten = await listen('insights-progress', (event) => { + const { message, stage, current, total } = event.payload; + set({ + progress: { + stage, + message, + current, + total, + isRetrying: RETRY_STAGES.has(stage), + }, + }); + }); + + try { + const report = await insightsApi.generateInsights(selectedDays); + log.info('Insights report generated', { + sessions: report.total_sessions, + analyzed: report.analyzed_sessions, + }); + set({ + currentReport: report, + view: 'report', + generating: false, + progress: { ...defaultProgress }, + }); + get().fetchReportMetas(); + } catch (err) { + log.error('Failed to generate insights', err); + set({ + generating: false, + view: 'list', + error: String(err), + progress: { ...defaultProgress }, + }); + } finally { + unlisten(); + } + }, + + cancelGeneration: async () => { + if (!get().generating) return; + try { + await insightsApi.cancelGeneration(); + } catch (err) { + log.error('Failed to cancel insights generation', err); + } + set({ + generating: false, + progress: { ...defaultProgress }, + }); + }, + + backToList: () => set({ view: 'list', currentReport: null }), + + clearError: () => set({ error: '' }), +})); diff --git a/src/web-ui/src/app/scenes/my-agent/myAgentConfig.ts b/src/web-ui/src/app/scenes/my-agent/myAgentConfig.ts index aadb6307..f99872f2 100644 --- a/src/web-ui/src/app/scenes/my-agent/myAgentConfig.ts +++ b/src/web-ui/src/app/scenes/my-agent/myAgentConfig.ts @@ -1,6 +1,6 
@@ import type { PanelType } from '@/app/types'; -export type MyAgentView = 'profile' | 'agents' | 'skills'; +export type MyAgentView = 'profile' | 'agents' | 'skills' | 'insights'; export interface MyAgentNavItem { id: MyAgentView; @@ -30,6 +30,13 @@ export const MY_AGENT_NAV_CATEGORIES: MyAgentNavCategory[] = [ { id: 'skills', panelTab: 'skills', labelKey: 'nav.items.skills' }, ], }, + { + id: 'analytics', + nameKey: 'nav.myAgent.categories.analytics', + items: [ + { id: 'insights', panelTab: 'sessions', labelKey: 'nav.items.insights' }, + ], + }, ]; export const DEFAULT_MY_AGENT_VIEW: MyAgentView = 'profile'; diff --git a/src/web-ui/src/infrastructure/api/index.ts b/src/web-ui/src/infrastructure/api/index.ts index 19833a41..2af3d2bb 100644 --- a/src/web-ui/src/infrastructure/api/index.ts +++ b/src/web-ui/src/infrastructure/api/index.ts @@ -29,9 +29,10 @@ import { sessionAPI } from './service-api/SessionAPI'; import { i18nAPI } from './service-api/I18nAPI'; import { btwAPI } from './service-api/BtwAPI'; import { tokenUsageApi } from './tokenUsageApi'; +import { insightsApi } from './insightsApi'; // Export API modules -export { workspaceAPI, configAPI, aiApi, toolAPI, agentAPI, systemAPI, projectAPI, diffAPI, snapshotAPI, globalAPI, contextAPI, gitAPI, gitAgentAPI, gitRepoHistoryAPI, startchatAgentAPI, sessionAPI, i18nAPI, btwAPI, tokenUsageApi }; +export { workspaceAPI, configAPI, aiApi, toolAPI, agentAPI, systemAPI, projectAPI, diffAPI, snapshotAPI, globalAPI, contextAPI, gitAPI, gitAgentAPI, gitRepoHistoryAPI, startchatAgentAPI, sessionAPI, i18nAPI, btwAPI, tokenUsageApi, insightsApi }; // Export types export type { GitRepoHistory }; @@ -57,6 +58,7 @@ export const bitfunAPI = { i18n: i18nAPI, btw: btwAPI, tokenUsage: tokenUsageApi, + insights: insightsApi, }; // Default export diff --git a/src/web-ui/src/infrastructure/api/insightsApi.ts b/src/web-ui/src/infrastructure/api/insightsApi.ts new file mode 100644 index 00000000..de955bec --- /dev/null +++ 
b/src/web-ui/src/infrastructure/api/insightsApi.ts @@ -0,0 +1,173 @@ +import { invoke } from '@tauri-apps/api/core'; + +// ============ Types (strict 1:1 mirror of Rust types) ============ + +export interface DateRange { + start: string; + end: string; +} + +export interface AtAGlance { + whats_working: string; + whats_hindering: string; + quick_wins: string; + looking_ahead: string; +} + +export interface InteractionStyle { + narrative: string; + key_patterns: string[]; +} + +export interface ProjectArea { + name: string; + session_count: number; + description: string; +} + +export interface BigWin { + title: string; + description: string; + impact: string; +} + +export interface FrictionCategory { + category: string; + count: number; + description: string; + examples: string[]; + suggestion: string; +} + +export interface MdAddition { + section: string; + content: string; + rationale: string; +} + +export interface FeatureRecommendation { + feature: string; + description: string; + example_usage: string; + benefit: string; +} + +export interface UsagePattern { + pattern: string; + description: string; + detail: string; + suggested_prompt: string; +} + +export interface InsightsSuggestions { + bitfun_md_additions: MdAddition[]; + features_to_try: FeatureRecommendation[]; + usage_patterns: UsagePattern[]; +} + +export interface HorizonWorkflow { + title: string; + whats_possible: string; + how_to_try: string; + copyable_prompt: string; +} + +export interface FunEnding { + headline: string; + detail: string; +} + +export interface InsightsStats { + total_hours: number; + msgs_per_day: number; + top_tools: [string, number][]; + top_goals: [string, number][]; + outcomes: Record; + satisfaction: Record; + session_types: Record; + languages: Record; + hour_counts: Record; + agent_types: Record; + response_time_buckets: Record; + median_response_time_secs: number | null; + avg_response_time_secs: number | null; + friction: Record; + success: Record; + tool_errors: 
Record; + total_lines_added: number; + total_lines_removed: number; + total_files_modified: number; +} + +export interface InsightsReport { + generated_at: number; + date_range: DateRange; + total_sessions: number; + analyzed_sessions: number; + total_messages: number; + days_covered: number; + + stats: InsightsStats; + + at_a_glance: AtAGlance; + interaction_style: InteractionStyle; + project_areas: ProjectArea[]; + big_wins: BigWin[]; + friction_categories: FrictionCategory[]; + suggestions: InsightsSuggestions; + horizon_intro: string; + on_the_horizon: HorizonWorkflow[]; + fun_ending: FunEnding | null; + + html_report_path: string | null; +} + +export interface InsightsReportMeta { + generated_at: number; + total_sessions: number; + analyzed_sessions: number; + date_range: DateRange; + path: string; + total_messages: number; + days_covered: number; + total_hours: number; + top_goals: string[]; + languages: string[]; +} + +export interface InsightsProgressEvent { + message: string; + stage: string; + current: number; + total: number; +} + +// ============ API client ============ + +export const insightsApi = { + async generateInsights(days?: number): Promise { + return invoke('generate_insights', { + request: { days: days ?? 30 }, + }); + }, + + async getLatestInsights(): Promise { + return invoke('get_latest_insights'); + }, + + async loadReport(path: string): Promise { + return invoke('load_insights_report', { + request: { path }, + }); + }, + + async hasInsightsData(days?: number): Promise { + return invoke('has_insights_data', { + request: { days: days ?? 
30 }, + }); + }, + + async cancelGeneration(): Promise { + return invoke('cancel_insights_generation'); + }, +}; diff --git a/src/web-ui/src/locales/en-US/common.json b/src/web-ui/src/locales/en-US/common.json index 4f8251d4..741340b5 100644 --- a/src/web-ui/src/locales/en-US/common.json +++ b/src/web-ui/src/locales/en-US/common.json @@ -83,7 +83,8 @@ "tools": "Tools", "terminal": "Shell", "git": "Git", - "miniApps": "Mini App" + "miniApps": "Mini App", + "insights": "Insights" }, "tooltips": { "persona": "Agent profile — identity, rules, memory, knowledge & capabilities", @@ -108,7 +109,8 @@ "title": "My Agent", "categories": { "identity": "Identity", - "collaboration": "Collaboration" + "collaboration": "Collaboration", + "analytics": "Analytics" } }, "shell": { @@ -724,5 +726,59 @@ "newProject": "New Project", "newProjectDesc": "Create a brand new project from scratch." } + }, + "insights": { + "title": "Insights", + "buttonTooltip": "Insights", + "generating": "Generating insights...", + "generateNew": "Generate New Report", + "generateBtn": "Generate", + "history": "Report History", + "noReports": "No reports yet. 
Generate your first insights report above.", + "backToList": "Back to list", + "openHtml": "Open Full Report", + "atAGlance": "At a Glance", + "whatsWorking": "What's working", + "whatsHindering": "What's hindering you", + "quickWins": "Quick wins to try", + "lookingAhead": "Looking ahead", + "interactionStyle": "How You Use BitFun", + "sessions": "Sessions", + "messages": "Messages", + "analyzed": "analyzed", + "hours": "Hours", + "days": "Days", + "msgsPerDay": "Msgs/Day", + "topGoals": "What You Wanted", + "topTools": "Top Tools Used", + "languages": "Languages", + "sessionTypes": "Session Types", + "projectAreas": "What You Work On", + "bigWins": "Impressive Things You Did", + "friction": "Where Things Go Wrong", + "suggestions": "Suggestions", + "mdAdditions": "BITFUN.md Additions", + "featuresToTry": "Features to Try", + "usagePatterns": "Usage Patterns", + "tryThisPrompt": "Try this prompt", + "horizon": "On the Horizon", + "error": "Failed to generate insights report", + "impact": "Impact", + "rateLimited": "Rate limited, retrying sequentially...", + "keepLatest5": "Only the 5 most recent reports are kept", + "openHtmlFailed": "Failed to open report", + "cancelBtn": "Stop", + "outcomes": "Outcomes", + "satisfaction": "Satisfaction (Inferred)", + "responseTime": "Response Time Distribution", + "agentTypes": "Agent Types", + "medianResponseTime": "Median Response", + "avgResponseTime": "Avg Response", + "frictionTypes": "Primary Friction Types", + "whatHelpedMost": "What Helped Most", + "toolErrors": "Tool Errors Encountered", + "timeOfDay": "Messages by Time of Day", + "lines": "Lines", + "files": "Files" } } diff --git a/src/web-ui/src/locales/zh-CN/common.json b/src/web-ui/src/locales/zh-CN/common.json index 468bac6f..73ad8037 100644 --- a/src/web-ui/src/locales/zh-CN/common.json +++ b/src/web-ui/src/locales/zh-CN/common.json @@ -83,7 +83,8 @@ "tools": "工具", "terminal": "Shell", "git": "Git", - "miniApps": "小应用" + "miniApps": "小应用", + "insights": "洞察" }, 
"tooltips": { "persona": "智能体档案 — 身份、规则、记忆、知识与能力", @@ -108,7 +109,8 @@ "title": "我的智能体", "categories": { "identity": "身份", - "collaboration": "协作" + "collaboration": "协作", + "analytics": "分析" } }, "shell": { @@ -724,5 +726,59 @@ "newProject": "新建项目", "newProjectDesc": "从零开始创建一个全新的项目" } + }, + "insights": { + "title": "洞察", + "buttonTooltip": "洞察", + "generating": "正在生成洞察...", + "generateNew": "生成新报告", + "generateBtn": "生成", + "history": "历史报告", + "noReports": "暂无报告,点击上方按钮生成第一份洞察报告。", + "backToList": "返回列表", + "openHtml": "打开完整报告", + "atAGlance": "概览", + "whatsWorking": "做得好的", + "whatsHindering": "遇到的阻碍", + "quickWins": "快速提升", + "lookingAhead": "展望未来", + "interactionStyle": "你如何使用 BitFun", + "sessions": "会话", + "messages": "消息", + "analyzed": "已分析", + "hours": "小时", + "days": "天", + "msgsPerDay": "消息/天", + "topGoals": "你的需求", + "topTools": "常用工具", + "languages": "编程语言", + "sessionTypes": "会话类型", + "projectAreas": "工作领域", + "bigWins": "亮眼成果", + "friction": "问题所在", + "suggestions": "建议", + "mdAdditions": "BITFUN.md 补充", + "featuresToTry": "推荐功能", + "usagePatterns": "使用模式", + "tryThisPrompt": "试试这个提示", + "horizon": "未来展望", + "error": "生成洞察报告失败", + "impact": "影响", + "rateLimited": "请求频率受限,正在逐个重试...", + "keepLatest5": "仅保留最近 5 份报告", + "openHtmlFailed": "打开报告失败", + "cancelBtn": "停止", + "outcomes": "结果分布", + "satisfaction": "满意度(推断)", + "responseTime": "响应时间分布", + "agentTypes": "智能体类型", + "medianResponseTime": "中位响应时间", + "avgResponseTime": "平均响应时间", + "frictionTypes": "主要摩擦类型", + "whatHelpedMost": "最有帮助的方面", + "toolErrors": "工具错误统计", + "timeOfDay": "按时段分布", + "lines": "行", + "files": "文件" } }