diff --git a/.gitignore b/.gitignore index 67b13f93..afe4dd29 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ __pycache__/ # Distribution / packaging .Python +.python-version build/ develop-eggs/ dist/ diff --git a/CLAUDE.md b/CLAUDE.md index 0e193d92..2283b125 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -117,6 +117,7 @@ claude-code-log /path/to/directory --from-date "3 days ago" --to-date "yesterday - `claude_code_log/parser.py` - Data extraction and parsing from JSONL files - `claude_code_log/renderer.py` - HTML generation and template rendering +- `claude_code_log/renderer_timings.py` - Performance timing instrumentation - `claude_code_log/converter.py` - High-level conversion orchestration - `claude_code_log/cli.py` - Command-line interface with project discovery - `claude_code_log/models.py` - Pydantic models for transcript data structures @@ -219,6 +220,45 @@ HTML coverage reports are generated in `htmlcov/index.html`. - **Lint and fix**: `ruff check --fix` - **Type checking**: `uv run pyright` and `uv run ty check` +### Performance Profiling + +Enable timing instrumentation to identify performance bottlenecks: + +```bash +# Enable timing output +CLAUDE_CODE_LOG_DEBUG_TIMING=1 claude-code-log path/to/file.jsonl + +# Or export for a session +export CLAUDE_CODE_LOG_DEBUG_TIMING=1 +claude-code-log path/to/file.jsonl +``` + +This outputs detailed timing for each rendering phase: + +``` +[TIMING] Initialization 0.001s (total: 0.001s) +[TIMING] Deduplication (1234 messages) 0.050s (total: 0.051s) +[TIMING] Session summary processing 0.012s (total: 0.063s) +[TIMING] Main message processing loop 5.234s (total: 5.297s) +[TIMING] Template rendering (30MB chars) 15.432s (total: 20.729s) + +[TIMING] Loop statistics: +[TIMING] Total messages: 1234 +[TIMING] Average time per message: 4.2ms +[TIMING] Slowest 10 messages: +[TIMING] Message abc-123 (#42, assistant): 245.3ms +[TIMING] ... 
+ +[TIMING] Pygments highlighting: +[TIMING] Total operations: 89 +[TIMING] Total time: 1.234s +[TIMING] Slowest 10 operations: +[TIMING] def-456: 50.2ms +[TIMING] ... +``` + +The timing module is in `claude_code_log/renderer_timings.py`. + ### Testing & Style Guide - **Unit and Integration Tests**: See [test/README.md](test/README.md) for comprehensive testing documentation diff --git a/claude_code_log/models.py b/claude_code_log/models.py index c0cc169a..e2149402 100644 --- a/claude_code_log/models.py +++ b/claude_code_log/models.py @@ -74,6 +74,7 @@ class ToolResultContent(BaseModel): tool_use_id: str content: Union[str, List[Dict[str, Any]]] is_error: Optional[bool] = None + agentId: Optional[str] = None # Reference to agent file for sub-agent messages class ThinkingContent(BaseModel): @@ -202,6 +203,7 @@ class UserTranscriptEntry(BaseTranscriptEntry): type: Literal["user"] message: UserMessage toolUseResult: Optional[ToolUseResult] = None + agentId: Optional[str] = None # From toolUseResult when present class AssistantTranscriptEntry(BaseTranscriptEntry): @@ -226,17 +228,23 @@ class SystemTranscriptEntry(BaseTranscriptEntry): class QueueOperationTranscriptEntry(BaseModel): - """Queue operations (enqueue/dequeue) for message queueing tracking. + """Queue operations (enqueue/dequeue/remove) for message queueing tracking. - These are internal operations that track when messages are queued and dequeued. + enqueue/dequeue are internal operations that track when messages are queued and dequeued. They are parsed but not rendered, as the content duplicates actual user messages. + + 'remove' operations are out-of-band user inputs made visible to the agent while working + for "steering" purposes. These should be rendered as user messages with a 'steering' CSS class. + Content can be a list of ContentItems or a simple string (for 'remove' operations). 
""" type: Literal["queue-operation"] - operation: Literal["enqueue", "dequeue"] + operation: Literal["enqueue", "dequeue", "remove", "popAll"] timestamp: str sessionId: str - content: Optional[List[ContentItem]] = None # Only present for enqueue operations + content: Optional[Union[List[ContentItem], str]] = ( + None # List for enqueue, str for remove/popAll + ) TranscriptEntry = Union[ @@ -414,7 +422,7 @@ def parse_transcript_entry(data: Dict[str, Any]) -> TranscriptEntry: return SystemTranscriptEntry.model_validate(data) elif entry_type == "queue-operation": - # Parse content if present (only in enqueue operations) + # Parse content if present (in enqueue and remove operations) data_copy = data.copy() if "content" in data_copy and isinstance(data_copy["content"], list): data_copy["content"] = parse_message_content(data_copy["content"]) diff --git a/claude_code_log/parser.py b/claude_code_log/parser.py index b76e61b8..a34ca5af 100644 --- a/claude_code_log/parser.py +++ b/claude_code_log/parser.py @@ -10,6 +10,7 @@ from .models import ( TranscriptEntry, + UserTranscriptEntry, SummaryTranscriptEntry, parse_transcript_entry, ContentItem, @@ -120,8 +121,22 @@ def load_transcript( from_date: Optional[str] = None, to_date: Optional[str] = None, silent: bool = False, + _loaded_files: Optional[set[Path]] = None, ) -> List[TranscriptEntry]: - """Load and parse JSONL transcript file, using cache if available.""" + """Load and parse JSONL transcript file, using cache if available. + + Args: + _loaded_files: Internal parameter to track loaded files and prevent infinite recursion. 
+ """ + # Initialize loaded files set on first call + if _loaded_files is None: + _loaded_files = set() + + # Prevent infinite recursion by checking if this file is already being loaded + if jsonl_path in _loaded_files: + return [] + + _loaded_files.add(jsonl_path) # Try to load from cache first if cache_manager is not None: # Use filtered loading if date parameters are provided @@ -139,11 +154,12 @@ def load_transcript( # Parse from source file messages: List[TranscriptEntry] = [] + agent_ids: set[str] = set() # Collect agentId references while parsing with open(jsonl_path, "r", encoding="utf-8", errors="replace") as f: if not silent: print(f"Processing {jsonl_path}...") - for line_no, line in enumerate(f): + for line_no, line in enumerate(f, 1): # Start counting from 1 line = line.strip() if line: try: @@ -154,6 +170,25 @@ def load_transcript( ) continue + # Check for agentId BEFORE Pydantic parsing + # agentId can be at top level OR nested in toolUseResult + # For UserTranscriptEntry, we need to copy it to top level so Pydantic preserves it + if "agentId" in entry_dict: + agent_id = entry_dict.get("agentId") + if agent_id: + agent_ids.add(agent_id) + elif "toolUseResult" in entry_dict: + tool_use_result = entry_dict.get("toolUseResult") + if ( + isinstance(tool_use_result, dict) + and "agentId" in tool_use_result + ): + agent_id_value = tool_use_result.get("agentId") # type: ignore[reportUnknownVariableType, reportUnknownMemberType] + if isinstance(agent_id_value, str): + agent_ids.add(agent_id_value) + # Copy agentId to top level for Pydantic to preserve + entry_dict["agentId"] = agent_id_value + entry_type: str | None = entry_dict.get("type") if entry_type in [ @@ -166,6 +201,14 @@ def load_transcript( # Parse using Pydantic models entry = parse_transcript_entry(entry_dict) messages.append(entry) + elif ( + entry_type + in [ + "file-history-snapshot", # Internal Claude Code file backup metadata + ] + ): + # Silently skip internal message types we don't render 
+ pass else: print( f"Line {line_no} of {jsonl_path} is not a recognised message type: {line}" @@ -195,6 +238,47 @@ def load_transcript( "\n{traceback.format_exc()}" ) + # Load agent files if any were referenced + # Build a map of agentId -> agent messages + agent_messages_map: dict[str, List[TranscriptEntry]] = {} + if agent_ids: + parent_dir = jsonl_path.parent + for agent_id in agent_ids: + agent_file = parent_dir / f"agent-{agent_id}.jsonl" + # Skip if the agent file is the same as the current file (self-reference) + if agent_file == jsonl_path: + continue + if agent_file.exists(): + if not silent: + print(f"Loading agent file {agent_file}...") + # Recursively load the agent file (it might reference other agents) + agent_messages = load_transcript( + agent_file, + cache_manager, + from_date, + to_date, + silent=True, + _loaded_files=_loaded_files, + ) + agent_messages_map[agent_id] = agent_messages + + # Insert agent messages at their point of use + if agent_messages_map: + # Iterate through messages and insert agent messages after the message + # that references them (via UserTranscriptEntry.agentId) + result_messages: List[TranscriptEntry] = [] + for message in messages: + result_messages.append(message) + + # Check if this is a UserTranscriptEntry with agentId + if isinstance(message, UserTranscriptEntry) and message.agentId: + agent_id = message.agentId + if agent_id in agent_messages_map: + # Insert agent messages right after this message + result_messages.extend(agent_messages_map[agent_id]) + + messages = result_messages + # Save to cache if cache manager is available if cache_manager is not None: cache_manager.save_cached_entries(jsonl_path, messages) diff --git a/claude_code_log/renderer.py b/claude_code_log/renderer.py index 1a674919..477f584b 100644 --- a/claude_code_log/renderer.py +++ b/claude_code_log/renderer.py @@ -2,6 +2,9 @@ """Render Claude transcript data to HTML format.""" import json +import os +import re +import time from pathlib import 
Path from typing import List, Optional, Dict, Any, cast, TYPE_CHECKING @@ -12,14 +15,15 @@ import mistune from jinja2 import Environment, FileSystemLoader, select_autoescape from pygments import highlight # type: ignore[reportUnknownVariableType] -from pygments.lexers import get_lexer_for_filename, TextLexer # type: ignore[reportUnknownVariableType] +from pygments.lexers import TextLexer # type: ignore[reportUnknownVariableType] from pygments.formatters import HtmlFormatter # type: ignore[reportUnknownVariableType] from pygments.util import ClassNotFound # type: ignore[reportUnknownVariableType] from .models import ( TranscriptEntry, - SummaryTranscriptEntry, + AssistantTranscriptEntry, SystemTranscriptEntry, + SummaryTranscriptEntry, QueueOperationTranscriptEntry, ContentItem, TextContent, @@ -38,6 +42,13 @@ should_use_as_session_starter, create_session_preview, ) +from .renderer_timings import ( + DEBUG_TIMING, + report_timing_statistics, + timing_stat, + set_timing_var, + log_timing, +) from .cache import get_library_version @@ -232,7 +243,9 @@ def block_code(code: str, info: Optional[str] = None) -> str: cssclass="highlight", wrapcode=True, ) - return str(highlight(code, lexer, formatter)) # type: ignore[reportUnknownArgumentType] + # Track Pygments timing if enabled + with timing_stat("_pygments_timings"): + return str(highlight(code, lexer, formatter)) # type: ignore[reportUnknownArgumentType] else: # No language hint, use default rendering return original_render(code, info) @@ -244,21 +257,23 @@ def block_code(code: str, info: Optional[str] = None) -> str: def render_markdown(text: str) -> str: """Convert markdown text to HTML using mistune with Pygments syntax highlighting.""" - # Configure mistune with GitHub-flavored markdown features - renderer = mistune.create_markdown( - plugins=[ - "strikethrough", - "footnotes", - "table", - "url", - "task_lists", - "def_list", - _create_pygments_plugin(), - ], - escape=False, # Don't escape HTML since we want to 
render markdown properly - hard_wrap=True, # Line break for newlines (checklists in Assistant messages) - ) - return str(renderer(text)) + # Track markdown rendering time if enabled + with timing_stat("_markdown_timings"): + # Configure mistune with GitHub-flavored markdown features + renderer = mistune.create_markdown( + plugins=[ + "strikethrough", + "footnotes", + "table", + "url", + "task_lists", + "def_list", + _create_pygments_plugin(), + ], + escape=False, # Don't escape HTML since we want to render markdown properly + hard_wrap=True, # Line break for newlines (checklists in Assistant messages) + ) + return str(renderer(text)) def extract_command_info(text_content: str) -> tuple[str, str, str]: @@ -372,9 +387,68 @@ def _highlight_code_with_pygments( Returns: HTML string with syntax-highlighted code """ + # PERFORMANCE FIX: Use Pygments' public API to build filename pattern mapping, avoiding filesystem I/O + # get_lexer_for_filename performs I/O operations (file existence checks, reading bytes) + # which causes severe slowdowns, especially on Windows with antivirus scanning + # Solution: Build a reverse mapping from filename patterns to lexer aliases using get_all_lexers() (done once) + import fnmatch + from pygments.lexers import get_lexer_by_name, get_all_lexers # type: ignore[reportUnknownVariableType] + + # Build pattern->alias mapping on first call (cached as function attribute) + # OPTIMIZATION: Create both direct extension lookup and full pattern cache + if not hasattr(_highlight_code_with_pygments, "_pattern_cache"): + pattern_cache: dict[str, str] = {} + extension_cache: dict[str, str] = {} # Fast lookup for simple *.ext patterns + + # Use public API: get_all_lexers() returns (name, aliases, patterns, mimetypes) tuples + for name, aliases, patterns, mimetypes in get_all_lexers(): # type: ignore[reportUnknownVariableType] + if aliases and patterns: + # Use first alias as the lexer name + lexer_alias = aliases[0] + # Map each filename pattern to this 
lexer alias + for pattern in patterns: + pattern_lower = pattern.lower() + pattern_cache[pattern_lower] = lexer_alias + # Extract simple extension patterns (*.ext) for fast lookup + if ( + pattern_lower.startswith("*.") + and "*" not in pattern_lower[2:] + and "?" not in pattern_lower[2:] + ): + ext = pattern_lower[2:] # Remove "*." + # Prefer first match for each extension + if ext not in extension_cache: + extension_cache[ext] = lexer_alias + + _highlight_code_with_pygments._pattern_cache = pattern_cache # type: ignore[attr-defined] + _highlight_code_with_pygments._extension_cache = extension_cache # type: ignore[attr-defined] + + # Get basename for matching (patterns are like "*.py") + basename = os.path.basename(file_path).lower() + try: - # Try to get lexer based on filename - lexer = get_lexer_for_filename(file_path, code) # type: ignore[reportUnknownVariableType] + # Get caches + pattern_cache = _highlight_code_with_pygments._pattern_cache # type: ignore[attr-defined] + extension_cache = _highlight_code_with_pygments._extension_cache # type: ignore[attr-defined] + + # OPTIMIZATION: Try fast extension lookup first (O(1) dict lookup) + lexer_alias = None + if "." in basename: + ext = basename.split(".")[-1] # Get last extension (handles .tar.gz, etc.) 
+ lexer_alias = extension_cache.get(ext) + + # Fall back to pattern matching only if extension lookup failed + if lexer_alias is None: + for pattern, lex_alias in pattern_cache.items(): + if fnmatch.fnmatch(basename, pattern): + lexer_alias = lex_alias + break + + # Get lexer or use TextLexer as fallback + if lexer_alias: + lexer = get_lexer_by_name(lexer_alias, stripall=True) # type: ignore[reportUnknownVariableType] + else: + lexer = TextLexer() # type: ignore[reportUnknownVariableType] except ClassNotFound: # Fall back to plain text lexer lexer = TextLexer() # type: ignore[reportUnknownVariableType] @@ -387,8 +461,52 @@ def _highlight_code_with_pygments( linenostart=linenostart, ) - # Highlight the code - return str(highlight(code, lexer, formatter)) # type: ignore[reportUnknownArgumentType] + # Highlight the code with timing if enabled + with timing_stat("_pygments_timings"): + return str(highlight(code, lexer, formatter)) # type: ignore[reportUnknownArgumentType] + + +def _truncate_highlighted_preview(highlighted_html: str, max_lines: int) -> str: + """Truncate Pygments highlighted HTML to first N lines. + + HtmlFormatter(linenos="table") produces a single
+    <table> with two <pre> blocks: one holding LINE_NUMS
+    and one holding CODE. We truncate the content of each <pre>
+    tag to the first max_lines lines.
+
+ Args:
+ highlighted_html: Full Pygments-highlighted HTML
+ max_lines: Maximum number of lines to include in preview
+
+ Returns:
+ Truncated HTML with same structure but fewer lines
+ """
+
+ def truncate_pre_content(match: re.Match[str]) -> str:
+ """Truncate content inside a tag to max_lines."""
+ prefix, content, suffix = match.groups()
+ lines = content.split("\n")
+ truncated = "\n".join(lines[:max_lines])
+ return prefix + truncated + suffix
+
+ # Truncate linenos content (line numbers separated by newlines)
+ result = re.sub(
+        r'(<div class="linenodiv"><pre>)(.*?)(</pre>)',
+ truncate_pre_content,
+ highlighted_html,
+ flags=re.DOTALL,
+ )
+
+ # Truncate code content
+ result = re.sub(
+        r'(<pre[^>]*>)(.*?)(</pre>)',
+ truncate_pre_content,
+ result,
+ flags=re.DOTALL,
+ )
+
+ return result
def format_read_tool_content(tool_use: ToolUseContent) -> str: # noqa: ARG001
@@ -716,6 +834,24 @@ def _render_line_diff(old_line: str, new_line: str) -> str:
return "".join(old_parts) + "".join(new_parts)
+def format_task_tool_content(tool_use: ToolUseContent) -> str:
+ """Format Task tool content with markdown-rendered prompt.
+
+ Task tool spawns sub-agents. We render the prompt as the main content.
+ The sidechain user message (which would duplicate this prompt) is skipped.
+ """
+ prompt = tool_use.input.get("prompt", "")
+
+ if not prompt:
+ # No prompt, show parameters table as fallback
+ return render_params_table(tool_use.input)
+
+ # Render prompt as markdown with Pygments syntax highlighting
+ rendered_html = render_markdown(prompt)
+
+ return f'{rendered_html}'
+
+
def get_tool_summary(tool_use: ToolUseContent) -> Optional[str]:
"""Extract a one-line summary from tool parameters for display in header.
@@ -735,6 +871,12 @@ def get_tool_summary(tool_use: ToolUseContent) -> Optional[str]:
if file_path:
return file_path
+ elif tool_name == "Task":
+ # Return description if present
+ description = params.get("description")
+ if description:
+ return description
+
# No summary for other tools
return None
@@ -765,6 +907,10 @@ def format_tool_use_content(tool_use: ToolUseContent) -> str:
if tool_use.name == "Write":
return format_write_tool_content(tool_use)
+ # Special handling for Task (agent spawning)
+ if tool_use.name == "Task":
+ return format_task_tool_content(tool_use)
+
# Default: render as key/value table using shared renderer
return render_params_table(tool_use.input)
@@ -889,7 +1035,7 @@ def format_tool_result_content(
Args:
tool_result: The tool result content
file_path: Optional file path for context (used for Read/Edit/Write tool rendering)
- tool_name: Optional tool name for specialized rendering (e.g., "Write", "Read", "Edit")
+ tool_name: Optional tool name for specialized rendering (e.g., "Write", "Read", "Edit", "Task")
"""
# Handle both string and structured content
if isinstance(tool_result.content, str):
@@ -949,10 +1095,11 @@ def format_tool_result_content(
# Try to parse as Read tool result if file_path is provided
if file_path and tool_name == "Read" and not has_images:
parsed_result = _parse_read_tool_result(raw_content)
+
if parsed_result:
code_content, system_reminder, line_offset = parsed_result
- # Highlight code with Pygments using correct line offset
+ # Highlight code with Pygments using correct line offset (single call)
highlighted_html = _highlight_code_with_pygments(
code_content, file_path, linenostart=line_offset
)
@@ -963,11 +1110,12 @@ def format_tool_result_content(
# Make collapsible if content has more than 12 lines
lines = code_content.split("\n")
if len(lines) > 12:
- # Get preview (first ~5 lines)
- preview_lines = lines[:5]
- preview_html = _highlight_code_with_pygments(
- "\n".join(preview_lines), file_path, linenostart=line_offset
- )
+ # Extract preview from already-highlighted HTML to avoid double-highlighting
+                # HtmlFormatter(linenos="table") produces a single <table> with two <td>s:
+                #   <td class="linenos">...<pre>LINE_NUMS</pre>...</td>
+                #   <td class="code">...<pre>CODE</pre>...</td>
+                # We truncate content within each <pre> to first 5 lines
+ preview_html = _truncate_highlighted_preview(highlighted_html, 5)
result_parts.append(f"""
@@ -1031,6 +1179,12 @@ def format_tool_result_content(
result_parts.append("")
return "".join(result_parts)
+ # Special handling for Task tool: render result as markdown with Pygments (agent's final message)
+ # Deduplication is now handled retroactively by replacing the sub-assistant content
+ if tool_name == "Task" and not has_images:
+ rendered_html = render_markdown(raw_content)
+ return f'{rendered_html}'
+
# Check if this looks like Bash tool output and process ANSI codes
# Bash tool results often contain ANSI escape sequences and terminal output
if _looks_like_bash_output(raw_content):
@@ -1255,11 +1409,13 @@ def extract_ide_notifications(text: str) -> tuple[List[str], str]:
return notifications, remaining_text.strip()
-def render_user_message_content(content_list: List[ContentItem]) -> tuple[str, bool]:
+def render_user_message_content(
+ content_list: List[ContentItem],
+) -> tuple[str, bool, bool]:
"""Render user message content with IDE tag extraction and compacted summary handling.
Returns:
- A tuple of (content_html, is_compacted)
+ A tuple of (content_html, is_compacted, is_memory_input)
"""
# Check first text item
if content_list and hasattr(content_list[0], "text"):
@@ -1270,7 +1426,22 @@ def render_user_message_content(content_list: List[ContentItem]) -> tuple[str, b
# Render entire content as markdown for compacted summaries
# Use "assistant" to trigger markdown rendering instead of pre-formatted text
content_html = render_message_content(content_list, "assistant")
- return content_html, True
+ return content_html, True, False
+
+ # Check for user memory input
+ memory_match = re.search(
+ r"(.*?) ",
+ first_text,
+ re.DOTALL,
+ )
+ if memory_match:
+ memory_content = memory_match.group(1).strip()
+ # Render the memory content as user message
+ memory_content_list: List[ContentItem] = [
+ TextContent(type="text", text=memory_content)
+ ]
+ content_html = render_message_content(memory_content_list, "user")
+ return content_html, False, True
# Extract IDE notifications from first text item
ide_notifications_html, remaining_text = extract_ide_notifications(first_text)
@@ -1293,7 +1464,7 @@ def render_user_message_content(content_list: List[ContentItem]) -> tuple[str, b
# No text in first item or empty list, render normally
content_html = render_message_content(content_list, "user")
- return content_html, False
+ return content_html, False, False
def render_message_content(content: List[ContentItem], message_type: str) -> str:
@@ -1333,46 +1504,27 @@ def render_message_content(content: List[ContentItem], message_type: str) -> str
elif type(item) is ToolUseContent or (
hasattr(item, "type") and item_type == "tool_use"
):
- # Handle both ToolUseContent and Anthropic ToolUseBlock
- # Convert Anthropic type to our format if necessary
- if not isinstance(item, ToolUseContent):
- # Create a ToolUseContent from Anthropic ToolUseBlock
- tool_use_item = ToolUseContent(
- type="tool_use",
- id=getattr(item, "id", ""),
- name=getattr(item, "name", ""),
- input=getattr(item, "input", {}),
- )
- else:
- tool_use_item = item
- rendered_parts.append(format_tool_use_content(tool_use_item)) # type: ignore
+ # Tool use items should not appear here - they are filtered out before this function
+ print(
+ "Warning: tool_use content should not be processed in render_message_content",
+ flush=True,
+ )
elif type(item) is ToolResultContent or (
hasattr(item, "type") and item_type == "tool_result"
):
- # Handle both ToolResultContent and Anthropic types
- if not isinstance(item, ToolResultContent):
- # Convert from Anthropic type if needed
- tool_result_item = ToolResultContent(
- type="tool_result",
- tool_use_id=getattr(item, "tool_use_id", ""),
- content=getattr(item, "content", ""),
- is_error=getattr(item, "is_error", False),
- )
- else:
- tool_result_item = item
- rendered_parts.append(format_tool_result_content(tool_result_item)) # type: ignore
+ # Tool result items should not appear here - they are filtered out before this function
+ print(
+ "Warning: tool_result content should not be processed in render_message_content",
+ flush=True,
+ )
elif type(item) is ThinkingContent or (
hasattr(item, "type") and item_type == "thinking"
):
- # Handle both ThinkingContent and Anthropic ThinkingBlock
- if not isinstance(item, ThinkingContent):
- # Convert from Anthropic type if needed
- thinking_item = ThinkingContent(
- type="thinking", thinking=getattr(item, "thinking", str(item))
- )
- else:
- thinking_item = item
- rendered_parts.append(format_thinking_content(thinking_item)) # type: ignore
+ # Thinking items should not appear here - they are filtered out before this function
+ print(
+ "Warning: thinking content should not be processed in render_message_content",
+ flush=True,
+ )
elif type(item) is ImageContent:
rendered_parts.append(format_image_content(item)) # type: ignore
@@ -1391,6 +1543,82 @@ def _get_template_environment() -> Environment:
return env
+def _format_type_counts(type_counts: dict[str, int]) -> str:
+ """Format type counts into human-readable label.
+
+ Args:
+ type_counts: Dictionary of message type to count
+
+ Returns:
+ Human-readable label like "3 assistant, 4 tools" or "8 messages"
+
+ Examples:
+ {"assistant": 3, "tool_use": 4} -> "3 assistant, 4 tools"
+ {"tool_use": 2, "tool_result": 2} -> "2 tool pairs"
+ {"assistant": 1} -> "1 assistant"
+ {"thinking": 3} -> "3 thoughts"
+ """
+ if not type_counts:
+ return "0 messages"
+
+ # Type name mapping for better readability
+ type_labels = {
+ "assistant": ("assistant", "assistants"),
+ "user": ("user", "users"),
+ "tool_use": ("tool", "tools"),
+ "tool_result": ("result", "results"),
+ "thinking": ("thought", "thoughts"),
+ "system": ("system", "systems"),
+ "system-warning": ("warning", "warnings"),
+ "system-error": ("error", "errors"),
+ "system-info": ("info", "infos"),
+ "sidechain": ("task", "tasks"),
+ }
+
+ # Handle special case: tool_use and tool_result together = "tool pairs"
+ # Create a modified counts dict that combines tool pairs
+ modified_counts = dict(type_counts)
+ if (
+ "tool_use" in modified_counts
+ and "tool_result" in modified_counts
+ and modified_counts["tool_use"] == modified_counts["tool_result"]
+ ):
+ # Replace tool_use and tool_result with tool_pair
+ pair_count = modified_counts["tool_use"]
+ del modified_counts["tool_use"]
+ del modified_counts["tool_result"]
+ modified_counts["tool_pair"] = pair_count
+
+ # Add tool_pair label
+ type_labels_with_pairs = {
+ **type_labels,
+ "tool_pair": ("tool pair", "tool pairs"),
+ }
+
+ # Build label parts
+ parts: list[str] = []
+ for msg_type, count in sorted(
+ modified_counts.items(), key=lambda x: x[1], reverse=True
+ ):
+ singular, plural = type_labels_with_pairs.get(
+ msg_type, (msg_type, f"{msg_type}s")
+ )
+ label = singular if count == 1 else plural
+ parts.append(f"{count} {label}")
+
+ # Return combined label
+ if len(parts) == 1:
+ return parts[0]
+ elif len(parts) == 2:
+ return f"{parts[0]}, {parts[1]}"
+ else:
+ # For 3+ types, show top 2 and "X more"
+ remaining = sum(type_counts.values()) - sum(
+ type_counts[t] for t in list(type_counts.keys())[:2]
+ )
+ return f"{parts[0]}, {parts[1]}, {remaining} more"
+
+
class TemplateMessage:
"""Structured message data for template rendering."""
@@ -1409,6 +1637,11 @@ def __init__(
title_hint: Optional[str] = None,
has_markdown: bool = False,
message_title: Optional[str] = None,
+ message_id: Optional[str] = None,
+ ancestry: Optional[List[str]] = None,
+ has_children: bool = False,
+ uuid: Optional[str] = None,
+ parent_uuid: Optional[str] = None,
):
self.type = message_type
self.content_html = content_html
@@ -1426,12 +1659,33 @@ def __init__(
self.token_usage = token_usage
self.tool_use_id = tool_use_id
self.title_hint = title_hint
+ self.message_id = message_id
+ self.ancestry = ancestry or []
+ self.has_children = has_children
self.has_markdown = has_markdown
+ self.uuid = uuid
+ self.parent_uuid = parent_uuid
+ # Fold/unfold counts
+ self.immediate_children_count = 0 # Direct children only
+ self.total_descendants_count = 0 # All descendants recursively
+ # Type-aware counting for smarter labels
+ self.immediate_children_by_type: dict[
+ str, int
+ ] = {} # {"assistant": 2, "tool_use": 3}
+ self.total_descendants_by_type: dict[str, int] = {} # All descendants by type
# Pairing metadata
self.is_paired = False
self.pair_role: Optional[str] = None # "pair_first", "pair_last", "pair_middle"
self.pair_duration: Optional[str] = None # Duration for pair_last messages
+ def get_immediate_children_label(self) -> str:
+ """Generate human-readable label for immediate children."""
+ return _format_type_counts(self.immediate_children_by_type)
+
+ def get_total_descendants_label(self) -> str:
+ """Generate human-readable label for all descendants."""
+ return _format_type_counts(self.total_descendants_by_type)
+
class TemplateProject:
"""Structured project data for template rendering."""
@@ -1976,35 +2230,35 @@ def _process_regular_message(
message_type: str,
is_sidechain: bool,
) -> tuple[str, str, str, str]:
- """Process regular message and return (css_class, content_html, message_type, message_title)."""
+ """Process regular message and return (css_class, content_html, message_type, message_title).
+
+ Note: Sidechain user messages (Sub-assistant prompts) are now skipped entirely
+ in the main processing loop since they duplicate the Task tool input prompt.
+ """
css_class = f"{message_type}"
message_title = message_type.title() # Default title
+ is_compacted = False
# Handle user-specific preprocessing
if message_type == "user":
- # Sub-assistant prompts (sidechain user messages) should be rendered as markdown
- if is_sidechain:
- content_html = render_message_content(text_only_content, "assistant")
- is_compacted = False
- else:
- content_html, is_compacted = render_user_message_content(text_only_content)
- if is_compacted:
- css_class = f"{message_type} compacted"
- message_title = "User (compacted conversation)"
+ # Note: sidechain user messages are skipped before reaching this function
+ content_html, is_compacted, is_memory_input = render_user_message_content(
+ text_only_content
+ )
+ if is_compacted:
+ css_class = f"{message_type} compacted"
+ message_title = "User (compacted conversation)"
+ elif is_memory_input:
+ message_title = "Memory"
else:
# Non-user messages: render directly
content_html = render_message_content(text_only_content, message_type)
- is_compacted = False
if is_sidechain:
css_class = f"{css_class} sidechain"
- # Update message title for display
- if not is_compacted: # Don't override compacted message title
- message_title = (
- "π Sub-assistant prompt"
- if message_type == "user"
- else "π Sub-assistant"
- )
+ # Update message title for display (only non-user types reach here)
+ if not is_compacted:
+ message_title = "π Sub-assistant"
return css_class, content_html, message_type, message_title
@@ -2027,12 +2281,14 @@ def _identify_message_pairs(messages: List[TemplateMessage]) -> None:
Uses a two-pass algorithm:
1. First pass: Build index of tool_use_id -> message index for tool_use and tool_result
+ Build index of uuid -> message index for parent-child system messages
2. Second pass: Sequential scan for adjacent pairs (system+output, bash, thinking+assistant)
- and match tool_use/tool_result using the index
+ and match tool_use/tool_result and uuid-based pairs using the index
"""
# Pass 1: Build index of tool_use messages and tool_result messages by tool_use_id
tool_use_index: Dict[str, int] = {} # tool_use_id -> message index
tool_result_index: Dict[str, int] = {} # tool_use_id -> message index
+ uuid_index: Dict[str, int] = {} # uuid -> message index for parent-child pairing
for i, msg in enumerate(messages):
if msg.tool_use_id:
@@ -2040,6 +2296,9 @@ def _identify_message_pairs(messages: List[TemplateMessage]) -> None:
tool_use_index[msg.tool_use_id] = i
elif "tool_result" in msg.css_class:
tool_result_index[msg.tool_use_id] = i
+ # Build UUID index for system messages (both parent and child)
+ if msg.uuid and "system" in msg.css_class:
+ uuid_index[msg.uuid] = i
# Pass 2: Sequential scan to identify pairs
i = 0
@@ -2072,6 +2331,16 @@ def _identify_message_pairs(messages: List[TemplateMessage]) -> None:
result_msg.is_paired = True
result_msg.pair_role = "pair_last"
+ # Check for UUID-based parent-child system message pair (no distance limit)
+ if "system" in current.css_class and current.parent_uuid:
+ if current.parent_uuid in uuid_index:
+ parent_idx = uuid_index[current.parent_uuid]
+ parent_msg = messages[parent_idx]
+ parent_msg.is_paired = True
+ parent_msg.pair_role = "pair_first"
+ current.is_paired = True
+ current.pair_role = "pair_last"
+
# Check for bash-input + bash-output pair (adjacent only)
if current.css_class == "bash-input" and i + 1 < len(messages):
next_msg = messages[i + 1]
@@ -2200,132 +2469,346 @@ def generate_session_html(
)
+def _get_message_hierarchy_level(css_class: str, is_sidechain: bool) -> int:
+ """Determine the hierarchy level for a message based on its type and sidechain status.
+
+ Correct hierarchy based on logical nesting:
+ - Level 0: Session headers
+ - Level 1: User messages
+ - Level 2: System messages, Assistant, Thinking
+ - Level 3: Tool use/result (nested under assistant)
+ - Level 4: Sidechain assistant/thinking (nested under Task tool result)
+ - Level 5: Sidechain tools (nested under sidechain assistant)
+
+ Note: Sidechain user messages (Sub-assistant prompts) are now skipped entirely
+ since they duplicate the Task tool input prompt.
+
+ Returns:
+ Integer hierarchy level (1-5, session headers are 0)
+ """
+ # User messages at level 1 (under session)
+ # Note: sidechain user messages are skipped before reaching this function
+ if "user" in css_class and not is_sidechain:
+ return 1
+
+ # System messages at level 2 (siblings to assistant, under user)
+ if "system" in css_class and not is_sidechain:
+ return 2
+
+ # Sidechain assistant/thinking at level 4 (nested under Task tool result)
+ if is_sidechain and ("assistant" in css_class or "thinking" in css_class):
+ return 4
+
+ # Sidechain tools at level 5
+ if is_sidechain and ("tool" in css_class):
+ return 5
+
+ # Main assistant/thinking at level 2 (nested under user)
+ if "assistant" in css_class or "thinking" in css_class:
+ return 2
+
+ # Main tools at level 3 (nested under assistant)
+ if "tool" in css_class:
+ return 3
+
+ # Default to level 1
+ return 1
+
+
+def _update_hierarchy_stack(
+ hierarchy_stack: List[tuple[int, str]],
+ current_level: int,
+ message_id_counter: int,
+) -> tuple[str, List[str], int]:
+ """Update the hierarchy stack and return message ID and ancestry.
+
+ Args:
+ hierarchy_stack: Current stack of (level, message_id) tuples
+ current_level: Hierarchy level of the current message
+ message_id_counter: Current message ID counter
+
+ Returns:
+ Tuple of (message_id, ancestry, updated_counter)
+ - message_id: Unique ID for this message (e.g., "d-42")
+ - ancestry: List of ancestor message IDs (e.g., ["d-10", "d-23", "d-35"])
+ - updated_counter: Incremented message ID counter
+ """
+ # Pop stack until we find the appropriate parent level
+ # The parent is the last message at a level strictly less than current_level
+ while hierarchy_stack and hierarchy_stack[-1][0] >= current_level:
+ hierarchy_stack.pop()
+
+ # Build ancestry from remaining stack
+ ancestry = [msg_id for _, msg_id in hierarchy_stack]
+
+ # Generate new message ID
+ message_id = f"d-{message_id_counter}"
+ message_id_counter += 1
+
+ # Push current message onto stack (it could be a parent for future messages)
+ hierarchy_stack.append((current_level, message_id))
+
+ return (message_id, ancestry, message_id_counter)
+
+
+def _mark_messages_with_children(messages: List[TemplateMessage]) -> None:
+ """Mark messages that have children and calculate descendant counts.
+
+ Efficiently calculates:
+ - has_children: Whether message has any children
+ - immediate_children_count: Count of direct children only
+ - total_descendants_count: Count of all descendants recursively
+
+ Time complexity: O(n) where n is the number of messages.
+
+ Args:
+ messages: List of template messages to process
+ """
+ # Build index of messages by ID for O(1) lookup
+ message_by_id: dict[str, TemplateMessage] = {}
+ for message in messages:
+ if message.message_id:
+ message_by_id[message.message_id] = message
+
+ # Process each message and update counts for ancestors
+ for message in messages:
+ if not message.ancestry:
+ continue # Top-level message, no parents
+
+ # Skip counting pair_last messages (second in a pair)
+ # Pairs are visually presented as a single unit, so we only count the first
+ if message.is_paired and message.pair_role == "pair_last":
+ continue
+
+ # Get immediate parent (last in ancestry list)
+ immediate_parent_id = message.ancestry[-1]
+
+ # Get message type for categorization
+ msg_type = message.css_class or message.type
+
+ # Increment immediate parent's child count
+ if immediate_parent_id in message_by_id:
+ parent = message_by_id[immediate_parent_id]
+ parent.immediate_children_count += 1
+ parent.has_children = True
+ # Track by type
+ parent.immediate_children_by_type[msg_type] = (
+ parent.immediate_children_by_type.get(msg_type, 0) + 1
+ )
+
+ # Increment descendant count for ALL ancestors
+ for ancestor_id in message.ancestry:
+ if ancestor_id in message_by_id:
+ ancestor = message_by_id[ancestor_id]
+ ancestor.total_descendants_count += 1
+ # Track by type
+ ancestor.total_descendants_by_type[msg_type] = (
+ ancestor.total_descendants_by_type.get(msg_type, 0) + 1
+ )
+
+
def generate_html(
messages: List[TranscriptEntry],
title: Optional[str] = None,
combined_transcript_link: Optional[str] = None,
) -> str:
"""Generate HTML from transcript messages using Jinja2 templates."""
- if not title:
- title = "Claude Transcript"
-
- # Deduplicate messages caused by Claude Code version upgrade during session
- # Only deduplicate when same message.id appears with DIFFERENT versions
- # Streaming fragments (same message.id, same version) are kept as separate messages
- from claude_code_log.models import AssistantTranscriptEntry, UserTranscriptEntry
- from packaging.version import parse as parse_version
- from collections import defaultdict
-
- # Group messages by their unique identifier
- message_groups: Dict[str, List[tuple[int, str, TranscriptEntry]]] = defaultdict(
- list
- )
-
- for idx, message in enumerate(messages):
- unique_id = None
- version_str = getattr(message, "version", "0.0.0")
-
- # Determine unique identifier based on message type
- if isinstance(message, AssistantTranscriptEntry):
- # Assistant messages: use message.id
- if hasattr(message.message, "id"):
- unique_id = f"msg:{message.message.id}" # type: ignore
-
- elif isinstance(message, UserTranscriptEntry):
- # User messages (tool results): use tool_use_id
- if hasattr(message, "message") and message.message.content:
- for item in message.message.content:
- if hasattr(item, "tool_use_id"):
- unique_id = f"tool:{item.tool_use_id}" # type: ignore
- break
+ # Performance timing
+ t_start = time.time()
+
+ with log_timing("Initialization", t_start):
+ if not title:
+ title = "Claude Transcript"
+
+ # Deduplicate messages by (message_type, timestamp)
+ # Messages with the exact same timestamp are duplicates by definition -
+ # the differences (like IDE selection tags) are just logging artifacts
+ with log_timing(
+ lambda: f"Deduplication ({len(deduplicated_messages)} messages)", t_start
+ ):
+ # Track seen (message_type, timestamp) pairs
+ seen: set[tuple[str, str]] = set()
+ deduplicated_messages: List[TranscriptEntry] = []
- if unique_id:
- message_groups[unique_id].append((idx, version_str, message))
+ for message in messages:
+ # Get basic message type
+ message_type = getattr(message, "type", "unknown")
- # Determine which indices to keep
- indices_to_keep: set[int] = set()
+ # For system messages, include level to differentiate info/warning/error
+ if isinstance(message, SystemTranscriptEntry):
+ level = getattr(message, "level", "info")
+ message_type = f"system-{level}"
- for unique_id, group in message_groups.items():
- if len(group) == 1:
- # Single message, always keep
- indices_to_keep.add(group[0][0])
- else:
- # Multiple messages with same ID - check if they have different versions
- versions = {version_str for _, version_str, _ in group}
+ # Get timestamp
+ timestamp = getattr(message, "timestamp", "")
- if len(versions) == 1:
- # All same version = streaming fragments, keep ALL of them
- for idx, _, _ in group:
- indices_to_keep.add(idx)
- else:
- # Different versions = version duplicates, keep only highest version
- try:
- # Sort by semantic version, keep highest
- sorted_group = sorted(
- group, key=lambda x: parse_version(x[1]), reverse=True
- )
- indices_to_keep.add(sorted_group[0][0])
- except Exception:
- # If version parsing fails, keep first occurrence
- indices_to_keep.add(group[0][0])
-
- # Build deduplicated list
- deduplicated_messages: List[TranscriptEntry] = []
-
- for idx, message in enumerate(messages):
- # Check if this message has a unique ID
- has_unique_id = False
- if isinstance(message, AssistantTranscriptEntry):
- has_unique_id = hasattr(message.message, "id")
- elif isinstance(message, UserTranscriptEntry):
- if hasattr(message, "message") and message.message.content:
- has_unique_id = any(
- hasattr(item, "tool_use_id") for item in message.message.content
- )
+ # Create deduplication key
+ dedup_key = (message_type, timestamp)
- # Keep message if: no unique ID (e.g., queue-operation) OR in keep set
- if not has_unique_id or idx in indices_to_keep:
- deduplicated_messages.append(message)
+ # Keep only first occurrence
+ if dedup_key not in seen:
+ seen.add(dedup_key)
+ deduplicated_messages.append(message)
- messages = deduplicated_messages
+ messages = deduplicated_messages
# Pre-process to find and attach session summaries
- session_summaries: Dict[str, str] = {}
- uuid_to_session: Dict[str, str] = {}
- uuid_to_session_backup: Dict[str, str] = {}
+ with log_timing("Session summary processing", t_start):
+ session_summaries: Dict[str, str] = {}
+ uuid_to_session: Dict[str, str] = {}
+ uuid_to_session_backup: Dict[str, str] = {}
+
+ # Build mapping from message UUID to session ID
+ for message in messages:
+ if hasattr(message, "uuid") and hasattr(message, "sessionId"):
+ message_uuid = getattr(message, "uuid", "")
+ session_id = getattr(message, "sessionId", "")
+ if message_uuid and session_id:
+ # There is often duplication, in that case we want to prioritise the assistant
+ # message because summaries are generated from Claude's (last) success message
+ if type(message) is AssistantTranscriptEntry:
+ uuid_to_session[message_uuid] = session_id
+ else:
+ uuid_to_session_backup[message_uuid] = session_id
+
+ # Map summaries to sessions via leafUuid -> message UUID -> session ID
+ for message in messages:
+ if isinstance(message, SummaryTranscriptEntry):
+ leaf_uuid = message.leafUuid
+ if leaf_uuid in uuid_to_session:
+ session_summaries[uuid_to_session[leaf_uuid]] = message.summary
+ elif (
+ leaf_uuid in uuid_to_session_backup
+ and uuid_to_session_backup[leaf_uuid] not in session_summaries
+ ):
+ session_summaries[uuid_to_session_backup[leaf_uuid]] = (
+ message.summary
+ )
- # Build mapping from message UUID to session ID
- for message in messages:
- if hasattr(message, "uuid") and hasattr(message, "sessionId"):
- message_uuid = getattr(message, "uuid", "")
- session_id = getattr(message, "sessionId", "")
- if message_uuid and session_id:
- # There is often duplication, in that case we want to prioritise the assistant
- # message because summaries are generated from Claude's (last) success message
- if type(message) is AssistantTranscriptEntry:
- uuid_to_session[message_uuid] = session_id
+ # Attach summaries to messages
+ for message in messages:
+ if hasattr(message, "sessionId"):
+ session_id = getattr(message, "sessionId", "")
+ if session_id in session_summaries:
+ setattr(message, "_session_summary", session_summaries[session_id])
+
+ # Process messages through the main rendering loop
+ template_messages, sessions, session_order = _process_messages_loop(messages)
+
+ # Prepare session navigation data
+ session_nav: List[Dict[str, Any]] = []
+ with log_timing(
+ lambda: f"Session navigation building ({len(session_nav)} sessions)", t_start
+ ):
+ for session_id in session_order:
+ session_info = sessions[session_id]
+
+ # Format timestamp range
+ first_ts = session_info["first_timestamp"]
+ last_ts = session_info["last_timestamp"]
+ timestamp_range = ""
+ if first_ts and last_ts:
+ if first_ts == last_ts:
+ timestamp_range = format_timestamp(first_ts)
else:
- uuid_to_session_backup[message_uuid] = session_id
+ timestamp_range = (
+ f"{format_timestamp(first_ts)} - {format_timestamp(last_ts)}"
+ )
+ elif first_ts:
+ timestamp_range = format_timestamp(first_ts)
- # Map summaries to sessions via leafUuid -> message UUID -> session ID
- for message in messages:
- if isinstance(message, SummaryTranscriptEntry):
- leaf_uuid = message.leafUuid
- if leaf_uuid in uuid_to_session:
- session_summaries[uuid_to_session[leaf_uuid]] = message.summary
- elif (
- leaf_uuid in uuid_to_session_backup
- and uuid_to_session_backup[leaf_uuid] not in session_summaries
- ):
- session_summaries[uuid_to_session_backup[leaf_uuid]] = message.summary
+ # Format token usage summary
+ token_summary = ""
+ total_input = session_info["total_input_tokens"]
+ total_output = session_info["total_output_tokens"]
+ total_cache_creation = session_info["total_cache_creation_tokens"]
+ total_cache_read = session_info["total_cache_read_tokens"]
+
+ if total_input > 0 or total_output > 0:
+ token_parts: List[str] = []
+ if total_input > 0:
+ token_parts.append(f"Input: {total_input}")
+ if total_output > 0:
+ token_parts.append(f"Output: {total_output}")
+ if total_cache_creation > 0:
+ token_parts.append(f"Cache Creation: {total_cache_creation}")
+ if total_cache_read > 0:
+ token_parts.append(f"Cache Read: {total_cache_read}")
+ token_summary = "Token usage β " + " | ".join(token_parts)
+
+ session_nav.append(
+ {
+ "id": session_id,
+ "summary": session_info["summary"],
+ "timestamp_range": timestamp_range,
+ "first_timestamp": first_ts,
+ "last_timestamp": last_ts,
+ "message_count": session_info["message_count"],
+ "first_user_message": session_info["first_user_message"]
+ if session_info["first_user_message"] != ""
+ else "[No user message found in session.]",
+ "token_summary": token_summary,
+ }
+ )
- # Attach summaries to messages
- for message in messages:
- if hasattr(message, "sessionId"):
- session_id = getattr(message, "sessionId", "")
- if session_id in session_summaries:
- setattr(message, "_session_summary", session_summaries[session_id])
+ # Identify and mark paired messages (command+output, tool_use+tool_result, etc.)
+ with log_timing("Identify message pairs", t_start):
+ _identify_message_pairs(template_messages)
+
+ # Reorder messages so pairs are adjacent while preserving chronological order
+ with log_timing("Reorder paired messages", t_start):
+ template_messages = _reorder_paired_messages(template_messages)
+
+ # Mark messages that have children for fold/unfold controls
+ with log_timing("Mark messages with children", t_start):
+ _mark_messages_with_children(template_messages)
+ # Render template
+ with log_timing("Template environment setup", t_start):
+ env = _get_template_environment()
+ template = env.get_template("transcript.html")
+
+ with log_timing(lambda: f"Template rendering ({len(html_output)} chars)", t_start):
+ html_output = str(
+ template.render(
+ title=title,
+ messages=template_messages,
+ sessions=session_nav,
+ combined_transcript_link=combined_transcript_link,
+ library_version=get_library_version(),
+ )
+ )
+
+ return html_output
+
+
+def _process_messages_loop(
+ messages: List[TranscriptEntry],
+) -> tuple[
+ List[TemplateMessage],
+ Dict[str, Dict[str, Any]], # sessions
+ List[str], # session_order
+]:
+ """Process messages through the main rendering loop.
+
+ This function handles the core message processing logic:
+ - Processes each message into template-friendly format
+ - Tracks sessions and token usage
+ - Handles message deduplication and hierarchy
+ - Collects timing statistics
+
+ Note: Tool use context must be built before calling this function via
+ _define_tool_use_context()
+
+ Args:
+ messages: List of transcript entries to process
+
+ Returns:
+ Tuple containing:
+ - template_messages: Processed messages ready for template rendering
+ - sessions: Session metadata dict mapping session_id to info
+ - session_order: List of session IDs in chronological order
+ """
# Group messages by session and collect session info for navigation
sessions: Dict[str, Dict[str, Any]] = {}
session_order: List[str] = []
@@ -2336,39 +2819,61 @@ def generate_html(
# Track which messages should show token usage (first occurrence of each requestId)
show_tokens_for_message: set[str] = set()
- # Build mapping of tool_use_id to tool info for specialized tool result rendering
- tool_use_context: Dict[str, Dict[str, Any]] = {}
- for message in messages:
- if hasattr(message, "message") and hasattr(message.message, "content"): # type: ignore
- content = message.message.content # type: ignore
- if isinstance(content, list):
- for item in content: # type: ignore[reportUnknownVariableType]
- # Check if it's a tool_use item
- if hasattr(item, "type") and hasattr(item, "id"): # type: ignore[reportUnknownArgumentType]
- item_type = getattr(item, "type", None) # type: ignore[reportUnknownArgumentType]
- if item_type == "tool_use":
- tool_id = getattr(item, "id", "") # type: ignore[reportUnknownArgumentType]
- tool_name = getattr(item, "name", "") # type: ignore[reportUnknownArgumentType]
- tool_input = getattr(item, "input", {}) # type: ignore[reportUnknownArgumentType]
- if tool_id:
- tool_use_context[tool_id] = {
- "name": tool_name,
- "input": tool_input,
- }
+ # Build mapping of tool_use_id to ToolUseContent for specialized tool result rendering
+ # This will be populated inline as we encounter tool_use items during message processing
+ tool_use_context: Dict[str, ToolUseContent] = {}
# Process messages into template-friendly format
template_messages: List[TemplateMessage] = []
- for message in messages:
+ # Hierarchy tracking for message folding
+ # Stack of (level, message_id) tuples representing current nesting
+ hierarchy_stack: List[tuple[int, str]] = []
+ message_id_counter = 0
+
+ # UUID to message ID mapping for parent-child relationships
+ uuid_to_msg_id: Dict[str, str] = {}
+
+ # Track Task results and sidechain assistants for deduplication
+ # Maps raw content -> (template_messages index, message_id, type: "task" or "assistant")
+ content_map: Dict[str, tuple[int, str, str]] = {}
+
+ # Per-message timing tracking
+ message_timings: List[
+ tuple[float, str, int, str]
+ ] = [] # (duration, message_type, index, uuid)
+
+ # Track expensive operations
+ markdown_timings: List[tuple[float, str]] = [] # (duration, context_uuid)
+ pygments_timings: List[tuple[float, str]] = [] # (duration, context_uuid)
+
+ # Initialize timing tracking
+ set_timing_var("_markdown_timings", markdown_timings)
+ set_timing_var("_pygments_timings", pygments_timings)
+ set_timing_var("_current_msg_uuid", "")
+
+ for msg_idx, message in enumerate(messages):
+ msg_start_time = time.time() if DEBUG_TIMING else 0.0
message_type = message.type
+ msg_uuid = getattr(message, "uuid", f"no-uuid-{msg_idx}")
+
+ # Update current message UUID for timing tracking
+ set_timing_var("_current_msg_uuid", msg_uuid)
+
+ # Skip sidechain user messages (Sub-assistant prompts)
+ # These duplicate the Task tool input prompt and are redundant
+ if message_type == "user" and getattr(message, "isSidechain", False):
+ continue
# Skip summary messages - they should already be attached to their sessions
if isinstance(message, SummaryTranscriptEntry):
continue
- # Skip queue-operation messages - they duplicate user messages
+ # Skip most queue operations - only render 'remove' as steering user messages
if isinstance(message, QueueOperationTranscriptEntry):
- continue
+ if message.operation != "remove":
+ continue
+ # 'remove' operations fall through to be rendered as user messages
# Handle system messages separately
if isinstance(message, SystemTranscriptEntry):
@@ -2376,14 +2881,83 @@ def generate_html(
timestamp = getattr(message, "timestamp", "")
formatted_timestamp = format_timestamp(timestamp) if timestamp else ""
+ # Extract command name if present
+ command_name_match = re.search(
+ r"(.*?) ", message.content, re.DOTALL
+ )
+ # Also check for command output (child of user command)
+ command_output_match = re.search(
+ r"(.*?) ",
+ message.content,
+ re.DOTALL,
+ )
+
# Create level-specific styling and icons
level = getattr(message, "level", "info")
level_icon = {"warning": "β οΈ", "error": "β", "info": "βΉοΈ"}.get(level, "βΉοΈ")
- level_css = f"system system-{level}"
- # Process ANSI codes in system messages (they may contain command output)
- html_content = _convert_ansi_to_html(message.content)
- content_html = f"{level_icon} {html_content}"
+ # Determine CSS class:
+ # - Command name (user-initiated): "system" only
+ # - Command output (assistant response): "system system-{level}"
+ # - Other system messages: "system system-{level}"
+ if command_name_match:
+ # User-initiated command
+ level_css = "system"
+ else:
+ # Command output or other system message
+ level_css = f"system system-{level}"
+
+ # Process content: extract command name or command output, or use full content
+ if command_name_match:
+ # Show just the command name
+ command_name = command_name_match.group(1).strip()
+ html_content = f"{html.escape(command_name)}"
+ content_html = f"{level_icon} {html_content}"
+ elif command_output_match:
+ # Extract and process command output
+ output = command_output_match.group(1).strip()
+ html_content = _convert_ansi_to_html(output)
+ content_html = f"{level_icon} {html_content}"
+ else:
+ # Process ANSI codes in system messages (they may contain command output)
+ html_content = _convert_ansi_to_html(message.content)
+ content_html = f"{level_icon} {html_content}"
+
+ # Check if this message has a parent (for pairing system-info messages)
+ parent_uuid = getattr(message, "parentUuid", None)
+ is_sidechain = getattr(message, "isSidechain", False)
+
+ # Determine hierarchy: use parentUuid if available, otherwise use stack
+ if parent_uuid and parent_uuid in uuid_to_msg_id:
+ # This is a child message (e.g., command output following command invocation)
+ parent_msg_id = uuid_to_msg_id[parent_uuid]
+ # Find the parent's level in the stack
+ current_level: int
+ for idx, (stack_level, stack_msg_id) in enumerate(hierarchy_stack):
+ if stack_msg_id == parent_msg_id:
+ # Child is one level deeper than parent
+ current_level = stack_level + 1
+ # Update stack: keep parent, add child
+ hierarchy_stack = hierarchy_stack[: idx + 1]
+ break
+ else:
+ # Parent not found in stack, use default
+ current_level = _get_message_hierarchy_level(
+ level_css, is_sidechain
+ )
+
+ msg_id, ancestry, message_id_counter = _update_hierarchy_stack(
+ hierarchy_stack, current_level, message_id_counter
+ )
+ else:
+ # No parent, use normal hierarchy determination
+ current_level = _get_message_hierarchy_level(level_css, is_sidechain)
+ msg_id, ancestry, message_id_counter = _update_hierarchy_stack(
+ hierarchy_stack, current_level, message_id_counter
+ )
+
+ # Track this message's UUID for potential children
+ uuid_to_msg_id[message.uuid] = msg_id
system_template_message = TemplateMessage(
message_type="system",
@@ -2393,16 +2967,29 @@ def generate_html(
raw_timestamp=timestamp,
session_id=session_id,
message_title=f"System {level.title()}",
+ message_id=msg_id,
+ ancestry=ancestry,
+ uuid=message.uuid, # Store UUID for pairing
+ parent_uuid=parent_uuid, # Store parent UUID for pairing
)
template_messages.append(system_template_message)
continue
- # Extract message content first to check for duplicates
- # Must be UserTranscriptEntry or AssistantTranscriptEntry
- message_content = message.message.content # type: ignore
+ # Handle queue-operation 'remove' messages as user messages
+ if isinstance(message, QueueOperationTranscriptEntry):
+ # Queue operations have content directly, not in message.message
+ message_content = message.content if message.content else []
+ # Treat as user message type
+ message_type = "queue-operation"
+ else:
+ # Extract message content first to check for duplicates
+ # Must be UserTranscriptEntry or AssistantTranscriptEntry
+ message_content = message.message.content # type: ignore
+
text_content = extract_text_content(message_content)
# Separate tool/thinking/image content from text content
+ # Images in user messages stay inline, images in assistant messages are separate
tool_items: List[ContentItem] = []
text_only_content: List[ContentItem] = []
@@ -2411,12 +2998,20 @@ def generate_html(
for item in message_content:
# Check for both custom types and Anthropic types
item_type = getattr(item, "type", None)
+ is_image = isinstance(item, ImageContent) or item_type == "image"
is_tool_item = isinstance(
item,
- (ToolUseContent, ToolResultContent, ThinkingContent, ImageContent),
- ) or item_type in ("tool_use", "tool_result", "thinking", "image")
-
- if is_tool_item:
+ (ToolUseContent, ToolResultContent, ThinkingContent),
+ ) or item_type in ("tool_use", "tool_result", "thinking")
+
+ # Keep images inline for user messages and queue operations (steering),
+ # extract for assistant messages
+ if is_image and (
+ message_type == "user"
+ or isinstance(message, QueueOperationTranscriptEntry)
+ ):
+ text_only_items.append(item)
+ elif is_tool_item or is_image:
tool_items.append(item)
else:
text_only_items.append(item)
@@ -2454,6 +3049,7 @@ def generate_html(
first_user_message = ""
if (
message_type == "user"
+ and not isinstance(message, QueueOperationTranscriptEntry)
and hasattr(message, "message")
and should_use_as_session_starter(text_content)
):
@@ -2484,6 +3080,11 @@ def generate_html(
else session_id[:8]
)
+ # Reset hierarchy stack for new session
+ hierarchy_stack.clear()
+
+ # Create session header with unique message ID so it can be a fold parent
+ session_message_id = f"session-{session_id}"
session_header = TemplateMessage(
message_type="session_header",
content_html=session_title,
@@ -2493,12 +3094,19 @@ def generate_html(
session_summary=current_session_summary,
session_id=session_id,
is_session_header=True,
+ message_id=session_message_id,
+ ancestry=[], # Session headers are top-level
)
template_messages.append(session_header)
+ # Session header becomes the parent for all messages in this session
+ hierarchy_stack.append((0, session_message_id))
+
# Update first user message if this is a user message and we don't have one yet
elif message_type == "user" and not sessions[session_id]["first_user_message"]:
- if hasattr(message, "message"):
+ if not isinstance(message, QueueOperationTranscriptEntry) and hasattr(
+ message, "message"
+ ):
first_user_content = extract_text_content(message.message.content)
if should_use_as_session_starter(first_user_content):
sessions[session_id]["first_user_message"] = create_session_preview(
@@ -2592,16 +3200,40 @@ def generate_html(
text_content
)
else:
- css_class, content_html, message_type, message_title = (
+ # For queue-operation messages, treat them as user messages
+ if isinstance(message, QueueOperationTranscriptEntry):
+ effective_type = "user"
+ else:
+ effective_type = message_type
+
+ css_class, content_html, message_type_result, message_title = (
_process_regular_message(
text_only_content,
- message_type,
+ effective_type,
getattr(message, "isSidechain", False),
)
)
+ message_type = message_type_result # Update message_type with result
+
+ # Add 'steering' CSS class for queue-operation 'remove' messages
+ if (
+ isinstance(message, QueueOperationTranscriptEntry)
+ and message.operation == "remove"
+ ):
+ css_class = f"{css_class} steering"
+ message_title = "User (steering)"
- # Create main message (if it has text content)
+ # Only create main message if it has text content
+ # For assistant/thinking with only tools (no text), we don't create a container message
+ # The tools will be direct children of the current hierarchy level
if text_only_content:
+ # Determine hierarchy level and update stack
+ is_sidechain = getattr(message, "isSidechain", False)
+ current_level = _get_message_hierarchy_level(css_class, is_sidechain)
+ msg_id, ancestry, message_id_counter = _update_hierarchy_stack(
+ hierarchy_stack, current_level, message_id_counter
+ )
+
template_message = TemplateMessage(
message_type=message_type,
content_html=content_html,
@@ -2612,9 +3244,31 @@ def generate_html(
session_id=session_id,
token_usage=token_usage_str,
message_title=message_title,
+ message_id=msg_id,
+ ancestry=ancestry,
)
template_messages.append(template_message)
+ # Track sidechain assistant messages for deduplication
+ if message_type == "assistant" and is_sidechain and text_content.strip():
+ template_msg_index = len(template_messages) - 1
+ content_key = text_content.strip()
+
+ # Check if we already have a Task result with this content
+ if content_key in content_map:
+ existing_index, existing_id, existing_type = content_map[
+ content_key
+ ]
+ if existing_type == "task":
+ # Found matching Task result - deduplicate this assistant message
+ forward_link_html = f'(Task summary β already displayed in Task tool result above)
'
+ template_messages[
+ template_msg_index
+ ].content_html = forward_link_html
+ else:
+ # Track this assistant in case we see a matching Task result later
+ content_map[content_key] = (template_msg_index, msg_id, "assistant")
+
# Create separate messages for each tool/thinking/image item
for tool_item in tool_items:
tool_timestamp = getattr(message, "timestamp", "")
@@ -2626,40 +3280,64 @@ def generate_html(
item_type = getattr(tool_item, "type", None)
item_tool_use_id: Optional[str] = None
tool_title_hint: Optional[str] = None
+ pending_dedup: Optional[str] = (
+ None # Holds task result content for deduplication
+ )
if isinstance(tool_item, ToolUseContent) or item_type == "tool_use":
# Convert Anthropic type to our format if necessary
if not isinstance(tool_item, ToolUseContent):
- tool_use_converted = ToolUseContent(
+ tool_use = ToolUseContent(
type="tool_use",
id=getattr(tool_item, "id", ""),
name=getattr(tool_item, "name", ""),
input=getattr(tool_item, "input", {}),
)
else:
- tool_use_converted = tool_item
+ tool_use = tool_item
- tool_content_html = format_tool_use_content(tool_use_converted)
- escaped_name = escape_html(tool_use_converted.name)
- escaped_id = escape_html(tool_use_converted.id)
- item_tool_use_id = tool_use_converted.id
+ tool_content_html = format_tool_use_content(tool_use)
+ escaped_name = escape_html(tool_use.name)
+ escaped_id = escape_html(tool_use.id)
+ item_tool_use_id = tool_use.id
tool_title_hint = f"ID: {escaped_id}"
+ # Populate tool_use_context for later use when processing tool results
+ tool_use_context[item_tool_use_id] = tool_use
+
# Get summary for header (description or filepath)
- summary = get_tool_summary(tool_use_converted)
+ summary = get_tool_summary(tool_use)
# Set message_type (for CSS/logic) and message_title (for display)
tool_message_type = "tool_use"
- if tool_use_converted.name == "TodoWrite":
+ if tool_use.name == "TodoWrite":
tool_message_title = "π Todo List"
- elif tool_use_converted.name in ("Edit", "Write"):
+ elif tool_use.name == "Task":
+ # Special handling for Task tool: show subagent_type and description
+ subagent_type = tool_use.input.get("subagent_type", "")
+ description = tool_use.input.get("description", "")
+ escaped_subagent = (
+ escape_html(subagent_type) if subagent_type else ""
+ )
+
+ if description and subagent_type:
+ escaped_desc = escape_html(description)
+ tool_message_title = f"π§ {escaped_name} {escaped_desc} ({escaped_subagent})"
+ elif description:
+ escaped_desc = escape_html(description)
+ tool_message_title = f"π§ {escaped_name} {escaped_desc}"
+ elif subagent_type:
+ tool_message_title = f"π§ {escaped_name} ({escaped_subagent})"
+ else:
+ tool_message_title = f"π§ {escaped_name}"
+ elif tool_use.name in ("Edit", "Write"):
# Use π icon for Edit/Write
if summary:
escaped_summary = escape_html(summary)
tool_message_title = f"π {escaped_name} {escaped_summary}"
else:
tool_message_title = f"π {escaped_name}"
- elif tool_use_converted.name == "Read":
+ elif tool_use.name == "Read":
# Use π icon for Read
if summary:
escaped_summary = escape_html(summary)
@@ -2689,18 +3367,48 @@ def generate_html(
result_file_path: Optional[str] = None
result_tool_name: Optional[str] = None
if tool_result_converted.tool_use_id in tool_use_context:
- tool_ctx = tool_use_context[tool_result_converted.tool_use_id]
- result_tool_name = tool_ctx.get("name")
- if result_tool_name in (
- "Read",
- "Edit",
- "Write",
- ) and "file_path" in tool_ctx.get("input", {}):
- result_file_path = tool_ctx["input"]["file_path"]
+ tool_use_from_ctx = tool_use_context[
+ tool_result_converted.tool_use_id
+ ]
+ result_tool_name = tool_use_from_ctx.name
+ if (
+ result_tool_name
+ in (
+ "Read",
+ "Edit",
+ "Write",
+ )
+ and "file_path" in tool_use_from_ctx.input
+ ):
+ result_file_path = tool_use_from_ctx.input["file_path"]
tool_content_html = format_tool_result_content(
- tool_result_converted, result_file_path, result_tool_name
+ tool_result_converted,
+ result_file_path,
+ result_tool_name,
)
+
+ # Retroactive deduplication: if Task result matches a sidechain assistant, replace that assistant with a forward link
+ if result_tool_name == "Task":
+ # Extract text content from tool result
+ # Note: tool_result.content can be str or List[Dict[str, Any]] (not List[ContentItem])
+ if isinstance(tool_result_converted.content, str):
+ task_result_content = tool_result_converted.content.strip()
+ else:
+ # Handle list of dicts (tool result format)
+ content_parts: list[str] = []
+ for item in tool_result_converted.content:
+ # tool_result_converted.content is List[Dict[str, Any]]
+ text_val = item.get("text", "")
+ if isinstance(text_val, str):
+ content_parts.append(text_val)
+ task_result_content = "\n".join(content_parts).strip()
+
+ # Store for deduplication - we'll check/update after we have the message_id
+ pending_dedup = task_result_content if task_result_content else None
+ else:
+ pending_dedup = None
+
escaped_id = escape_html(tool_result_converted.tool_use_id)
item_tool_use_id = tool_result_converted.tool_use_id
tool_title_hint = f"ID: {escaped_id}"
@@ -2748,9 +3456,17 @@ def generate_html(
tool_css_class = "unknown"
# Preserve sidechain context for tool/thinking/image content within sidechain messages
- if getattr(message, "isSidechain", False):
+ tool_is_sidechain = getattr(message, "isSidechain", False)
+ if tool_is_sidechain:
tool_css_class += " sidechain"
+ # Determine hierarchy level and generate unique message ID
+ # Note: Pairing logic is handled later by _identify_message_pairs()
+ tool_level = _get_message_hierarchy_level(tool_css_class, tool_is_sidechain)
+ tool_msg_id, tool_ancestry, message_id_counter = _update_hierarchy_stack(
+ hierarchy_stack, tool_level, message_id_counter
+ )
+
tool_template_message = TemplateMessage(
message_type=tool_message_type,
content_html=tool_content_html,
@@ -2762,79 +3478,54 @@ def generate_html(
tool_use_id=item_tool_use_id,
title_hint=tool_title_hint,
message_title=tool_message_title,
+ message_id=tool_msg_id,
+ ancestry=tool_ancestry,
)
template_messages.append(tool_template_message)
- # Prepare session navigation data
- session_nav: List[Dict[str, Any]] = []
- for session_id in session_order:
- session_info = sessions[session_id]
-
- # Format timestamp range
- first_ts = session_info["first_timestamp"]
- last_ts = session_info["last_timestamp"]
- timestamp_range = ""
- if first_ts and last_ts:
- if first_ts == last_ts:
- timestamp_range = format_timestamp(first_ts)
- else:
- timestamp_range = (
- f"{format_timestamp(first_ts)} - {format_timestamp(last_ts)}"
- )
- elif first_ts:
- timestamp_range = format_timestamp(first_ts)
-
- # Format token usage summary
- token_summary = ""
- total_input = session_info["total_input_tokens"]
- total_output = session_info["total_output_tokens"]
- total_cache_creation = session_info["total_cache_creation_tokens"]
- total_cache_read = session_info["total_cache_read_tokens"]
-
- if total_input > 0 or total_output > 0:
- token_parts: List[str] = []
- if total_input > 0:
- token_parts.append(f"Input: {total_input}")
- if total_output > 0:
- token_parts.append(f"Output: {total_output}")
- if total_cache_creation > 0:
- token_parts.append(f"Cache Creation: {total_cache_creation}")
- if total_cache_read > 0:
- token_parts.append(f"Cache Read: {total_cache_read}")
- token_summary = "Token usage β " + " | ".join(token_parts)
-
- session_nav.append(
- {
- "id": session_id,
- "summary": session_info["summary"],
- "timestamp_range": timestamp_range,
- "first_timestamp": first_ts,
- "last_timestamp": last_ts,
- "message_count": session_info["message_count"],
- "first_user_message": session_info["first_user_message"]
- if session_info["first_user_message"] != ""
- else "[No user message found in session.]",
- "token_summary": token_summary,
- }
- )
+ # Track Task results and check for matching assistants
+ if pending_dedup is not None:
+ # pending_dedup contains the task result content
+ task_result_content = pending_dedup
+ template_msg_index = len(template_messages) - 1
+
+ # Check if we already have a sidechain assistant with this content
+ if task_result_content in content_map:
+ existing_index, existing_id, existing_type = content_map[
+ task_result_content
+ ]
+ if existing_type == "assistant":
+ # Found matching assistant - deduplicate it by replacing with forward link
+ forward_link_html = f'(Task summary β already displayed in Task tool result below)
'
+ template_messages[
+ existing_index
+ ].content_html = forward_link_html
+ else:
+ # Track this Task result in case we see a matching assistant later
+ content_map[task_result_content] = (
+ template_msg_index,
+ tool_msg_id,
+ "task",
+ )
- # Identify and mark paired messages (command+output, tool_use+tool_result, etc.)
- _identify_message_pairs(template_messages)
+ pending_dedup = None # Reset for next iteration
- # Reorder messages so pairs are adjacent while preserving chronological order
- template_messages = _reorder_paired_messages(template_messages)
+ # Track message timing
+ if DEBUG_TIMING:
+ msg_duration = time.time() - msg_start_time
+ message_timings.append((msg_duration, message_type, msg_idx, msg_uuid))
- # Render template
- env = _get_template_environment()
- template = env.get_template("transcript.html")
- return str(
- template.render(
- title=title,
- messages=template_messages,
- sessions=session_nav,
- combined_transcript_link=combined_transcript_link,
- library_version=get_library_version(),
+ # Report loop statistics
+ if DEBUG_TIMING:
+ report_timing_statistics(
+ message_timings,
+ [("Markdown", markdown_timings), ("Pygments", pygments_timings)],
)
+
+ return (
+ template_messages,
+ sessions,
+ session_order,
)
diff --git a/claude_code_log/renderer_timings.py b/claude_code_log/renderer_timings.py
new file mode 100644
index 00000000..fb5111cb
--- /dev/null
+++ b/claude_code_log/renderer_timings.py
@@ -0,0 +1,158 @@
+"""Timing utilities for renderer performance profiling.
+
+This module provides timing and performance profiling utilities for the renderer.
+All timing-related configuration and functionality is centralized here.
+"""
+
+import os
+import time
+from contextlib import contextmanager
+from typing import List, Tuple, Iterator, Any, Dict, Callable, Union, Optional
+
+# Performance debugging - enabled via CLAUDE_CODE_LOG_DEBUG_TIMING environment variable
+# Set to "1", "true", or "yes" to enable timing output
+DEBUG_TIMING = os.getenv("CLAUDE_CODE_LOG_DEBUG_TIMING", "").lower() in (
+ "1",
+ "true",
+ "yes",
+)
+
+# Global timing data storage
+_timing_data: Dict[str, Any] = {}
+
+
def set_timing_var(name: str, value: Any) -> None:
    """Store *value* under *name* in the module-level timing dict.

    A no-op unless DEBUG_TIMING is enabled, so callers may invoke this
    unconditionally without paying any bookkeeping cost in normal runs.

    Args:
        name: Variable name (e.g., "_markdown_timings", "_pygments_timings",
            "_current_msg_uuid")
        value: Value to set
    """
    if not DEBUG_TIMING:
        return
    _timing_data[name] = value
+
+
@contextmanager
def log_timing(
    phase: Union[str, Callable[[], str]],
    t_start: Optional[float] = None,
) -> Iterator[None]:
    """Context manager that prints how long the wrapped phase took.

    Args:
        phase: Phase name (static string) or a zero-arg callable returning the
            name. A callable is evaluated only after the block finishes, so it
            may reference values produced inside the block.
        t_start: Optional overall start time; when given, the cumulative
            elapsed time since t_start is printed alongside the phase time.

    Example:
        # Static phase name
        with log_timing("Initialization", t_start):
            setup_code()

        # Dynamic phase name (evaluated at end)
        with log_timing(lambda: f"Processing ({len(items)} items)", t_start):
            items = process()
    """
    if not DEBUG_TIMING:
        yield
        return

    phase_begin = time.time()
    try:
        yield
    finally:
        now = time.time()
        phase_time = now - phase_begin

        # A plain string is used as-is; anything else is the deferred callable.
        phase_name = phase if isinstance(phase, str) else phase()

        if t_start is None:
            line = f"[TIMING] {phase_name:40s} {phase_time:8.3f}s"
        else:
            total_time = now - t_start
            line = f"[TIMING] {phase_name:40s} {phase_time:8.3f}s (total: {total_time:8.3f}s)"
        print(line, flush=True)

        # Record the checkpoint so later phases can measure from here.
        _timing_data["_t_last"] = now
+
+
@contextmanager
def timing_stat(list_name: str) -> Iterator[None]:
    """Context manager that appends the wrapped block's duration to a timing list.

    The duration is recorded as a ``(duration, current_message_uuid)`` tuple in
    ``_timing_data[list_name]``. Nothing is recorded when DEBUG_TIMING is off
    or when the list was never registered via set_timing_var.

    Args:
        list_name: Name of the timing list to append to
            (e.g., "_markdown_timings", "_pygments_timings")

    Example:
        with timing_stat("_pygments_timings"):
            result = expensive_operation()
    """
    if not DEBUG_TIMING:
        yield
        return

    begin = time.time()
    try:
        yield
    finally:
        elapsed = time.time() - begin
        if list_name in _timing_data:
            uuid_now = _timing_data.get("_current_msg_uuid", "")
            _timing_data[list_name].append((elapsed, uuid_now))
+
+
def report_timing_statistics(
    message_timings: List[Tuple[float, str, int, str]],
    operation_timings: List[Tuple[str, List[Tuple[float, str]]]],
) -> None:
    """Report timing statistics for message rendering and sub-operations.

    Fix: operation statistics are now reported even when ``message_timings``
    is empty — the previous early return silently dropped Markdown/Pygments
    reports whenever no per-message timings had been collected, although the
    two data sets are independent.

    Args:
        message_timings: List of (duration, message_type, index, uuid) tuples
        operation_timings: List of (name, timings) tuples where timings is a list of (duration, uuid)
            e.g., [("Markdown", markdown_timings), ("Pygments", pygments_timings)]
    """
    if message_timings:
        # Sort by duration descending to surface the slowest messages.
        sorted_timings = sorted(message_timings, key=lambda x: x[0], reverse=True)

        total_msg_time = sum(t[0] for t in message_timings)
        avg_time = total_msg_time / len(message_timings)

        print("\n[TIMING] Loop statistics:", flush=True)
        print(f"[TIMING] Total messages: {len(message_timings)}", flush=True)
        print(f"[TIMING] Average time per message: {avg_time * 1000:.1f}ms", flush=True)
        print("[TIMING] Slowest 10 messages:", flush=True)
        for duration, msg_type, idx, msg_uuid in sorted_timings[:10]:
            print(
                f"[TIMING] Message {msg_uuid} (#{idx}, {msg_type}): {duration * 1000:.1f}ms",
                flush=True,
            )

    # Report operation-specific statistics (independent of message timings).
    for operation_name, timings in operation_timings:
        if not timings:
            continue
        sorted_ops = sorted(timings, key=lambda x: x[0], reverse=True)
        total_time = sum(t[0] for t in timings)
        print(f"\n[TIMING] {operation_name} rendering:", flush=True)
        print(f"[TIMING] Total operations: {len(timings)}", flush=True)
        print(f"[TIMING] Total time: {total_time:.3f}s", flush=True)
        print("[TIMING] Slowest 10 operations:", flush=True)
        for duration, op_uuid in sorted_ops[:10]:
            print(
                f"[TIMING] {op_uuid}: {duration * 1000:.1f}ms",
                flush=True,
            )
diff --git a/claude_code_log/templates/components/filter_styles.css b/claude_code_log/templates/components/filter_styles.css
index e6bfaa36..570f107c 100644
--- a/claude_code_log/templates/components/filter_styles.css
+++ b/claude_code_log/templates/components/filter_styles.css
@@ -114,12 +114,12 @@
}
.filter-toggle[data-type="system"] {
- border-color: #d98100;
+ border-color: var(--system-color);
border-width: 2px;
}
.filter-toggle[data-type="tool"] {
- border-color: #4caf50;
+ border-color: var(--tool-use-color);
border-width: 2px;
}
diff --git a/claude_code_log/templates/components/global_styles.css b/claude_code_log/templates/components/global_styles.css
index 5ee38644..e46b10e5 100644
--- a/claude_code_log/templates/components/global_styles.css
+++ b/claude_code_log/templates/components/global_styles.css
@@ -29,17 +29,29 @@
/* Slightly transparent variants (55 = ~33% opacity) */
--highlight-light: #e3f2fd55;
+ /* Solid colors for message types */
+ --user-color: #ff9800;
+ --user-dimmed: #ff980066;
+ --assistant-color: #9c27b0;
+ --system-color: #d98100;
+ --system-warning-color: #2196f3;
+ --system-error-color: #f44336;
+ --tool-use-color: #4caf50;
+
/* Solid colors for text and accents */
--text-muted: #666;
--text-secondary: #495057;
+ /* Layout spacing */
+ --message-padding: 1em;
+
/* Font families */
--font-monospace: 'Fira Code', 'Monaco', 'Consolas', 'SF Mono', 'Inconsolata', 'Droid Sans Mono', 'Source Code Pro', 'Ubuntu Mono', 'Cascadia Code', 'Menlo', monospace;
--font-ui: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
}
body {
- font-family: var(--font-monospace);
+ font-family: var(--font-ui);
line-height: 1.5;
max-width: 1200px;
margin: 0 auto;
diff --git a/claude_code_log/templates/components/message_styles.css b/claude_code_log/templates/components/message_styles.css
index e0bfd672..7f44886e 100644
--- a/claude_code_log/templates/components/message_styles.css
+++ b/claude_code_log/templates/components/message_styles.css
@@ -1,8 +1,7 @@
/* Message and content styles */
.message {
margin-bottom: 1em;
- margin-left: 1em;
- padding: 1em;
+ padding: var(--message-padding);
border-radius: 8px;
border-left: var(--white-dimmed) 2px solid;
background-color: var(--highlight-light);
@@ -10,8 +9,230 @@
border-top: var(--white-dimmed) 1px solid;
border-bottom: #00000017 1px solid;
border-right: #00000017 1px solid;
+ position: relative;
+}
+
+/* Message with fold bar: remove bottom padding */
+.message:has(.fold-bar) {
+ padding-bottom: 0;
+}
+
+/* Horizontal Fold Bar - integrated into message box */
+.fold-bar {
+ display: flex;
+ margin: 1em calc(-1 * var(--message-padding)) 0;
+ height: 28px;
+ border-radius: 0 0 8px 8px;
+ overflow: hidden;
+ transition: all 0.2s ease;
+}
+
+.fold-bar-section {
+ flex: 1;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap: 0.4em;
+ cursor: pointer;
+ user-select: none;
+ font-size: 0.9em;
+ font-weight: 500;
+ padding: 0.4em;
+ transition: all 0.2s ease;
+ border-bottom: 2px solid;
+ background: linear-gradient(to bottom, #f8f8f844, #f0f0f0);
+}
+
+/* Double-line effect when folded */
+.fold-bar-section.folded {
+ border-bottom-style: double;
+ border-bottom-width: 4px;
+}
+
+.fold-bar-section:hover {
+ background: linear-gradient(to bottom, #fff, #f5f5f5);
+ transform: translateY(1px);
+}
+
+.fold-bar-section:active {
+ transform: translateY(0);
+}
+
+/* Left section: fold one level */
+.fold-one-level {
+ border-right: 1px solid rgba(0, 0, 0, 0.1);
+}
+
+/* Full-width single button when counts are equal */
+.fold-bar-section.full-width {
+ border-right: none;
+}
+
+/* Icon styling */
+.fold-icon {
+ font-size: 1.1em;
+ line-height: 1;
+}
+
+.fold-count {
+ font-weight: 600;
+ min-width: 1.5em;
+ text-align: center;
+}
+
+.fold-label {
+ color: var(--text-muted);
+ font-size: 0.9em;
+}
+
+/* Border colors matching message types */
+.fold-bar[data-border-color="user"] .fold-bar-section,
+.fold-bar[data-border-color="user compacted"] .fold-bar-section,
+.fold-bar[data-border-color="user sidechain"] .fold-bar-section,
+.fold-bar[data-border-color="user compacted sidechain"] .fold-bar-section {
+ border-bottom-color: var(--user-color);
+}
+
+.fold-bar[data-border-color="assistant"] .fold-bar-section,
+.fold-bar[data-border-color="assistant sidechain"] .fold-bar-section {
+ border-bottom-color: var(--assistant-color);
+}
+
+.fold-bar[data-border-color="system"] .fold-bar-section,
+.fold-bar[data-border-color="system command-output"] .fold-bar-section {
+ border-bottom-color: var(--system-color);
+}
+
+.fold-bar[data-border-color="system-warning"] .fold-bar-section {
+ border-bottom-color: var(--system-warning-color);
+}
+
+.fold-bar[data-border-color="system-error"] .fold-bar-section {
+ border-bottom-color: var(--system-error-color);
+}
+
+.fold-bar[data-border-color="system-info"] .fold-bar-section {
+ border-bottom-color: var(--info-dimmed);
+}
+
+.fold-bar[data-border-color="tool_use"] .fold-bar-section,
+.fold-bar[data-border-color="tool_use sidechain"] .fold-bar-section {
+ border-bottom-color: var(--tool-use-color);
+}
+
+.fold-bar[data-border-color="tool_result"] .fold-bar-section,
+.fold-bar[data-border-color="tool_result sidechain"] .fold-bar-section {
+ border-bottom-color: var(--success-dimmed);
+}
+
+.fold-bar[data-border-color="tool_result error"] .fold-bar-section,
+.fold-bar[data-border-color="tool_result error sidechain"] .fold-bar-section {
+ border-bottom-color: var(--error-dimmed);
+}
+
+.fold-bar[data-border-color="thinking"] .fold-bar-section,
+.fold-bar[data-border-color="thinking sidechain"] .fold-bar-section {
+ border-bottom-color: var(--assistant-dimmed);
+}
+
+.fold-bar[data-border-color="image"] .fold-bar-section,
+.fold-bar[data-border-color="image sidechain"] .fold-bar-section {
+ border-bottom-color: var(--info-dimmed);
}
+.fold-bar[data-border-color="unknown"] .fold-bar-section,
+.fold-bar[data-border-color="unknown sidechain"] .fold-bar-section {
+ border-bottom-color: var(--neutral-dimmed);
+}
+
+.fold-bar[data-border-color="bash-input"] .fold-bar-section {
+ border-bottom-color: var(--tool-use-color);
+}
+
+.fold-bar[data-border-color="bash-output"] .fold-bar-section {
+ border-bottom-color: var(--success-dimmed);
+}
+
+.fold-bar[data-border-color="session-header"] .fold-bar-section {
+ border-bottom-color: #2196f3;
+}
+
+/* Sidechain (sub-assistant) fold-bar styling */
+.sidechain .fold-bar-section {
+ border-bottom-style: dashed;
+ border-bottom-width: 2px;
+}
+
+.sidechain .fold-bar-section.folded {
+ border-bottom-style: dashed;
+ border-bottom-width: 4px;
+}
+
+/* ========================================
+ CONVERSATION STRUCTURE - Margin Hierarchy
+ ======================================== */
+
+/* Right-aligned messages (user-initiated, right margin 0, left margin 33%) */
+.user:not(.compacted),
+.system {
+ margin-left: 33%;
+ margin-right: 0;
+}
+
+/* System error messages (assistant-generated) */
+.system-error {
+ margin-left: 0;
+ margin-right: 8em;
+}
+
+/* Left-aligned messages (assistant-generated) with progressive indentation */
+/* Base assistant messages */
+.assistant,
+.thinking {
+ margin-left: 0;
+ margin-right: 8em;
+}
+
+/* Tool messages (nested under assistant) */
+.tool_use,
+.tool_result {
+ margin-left: 2em;
+ margin-right: 6em;
+}
+
+/* System warnings/info (assistant-initiated) */
+.system-warning,
+.system-info {
+ margin-left: 0;
+ margin-right: 10em;
+}
+
+/* Exception: paired system-info messages align right (like user commands) */
+.system.system-info.paired-message {
+ margin-left: 33%;
+ margin-right: 0;
+}
+
+/* Sidechain messages (sub-assistant hierarchy) */
+/* Note: .sidechain.user (Sub-assistant prompt) is no longer produced
+ since it duplicates the Task tool input prompt */
+
+/* Sub-assistant response and thinking (nested under Task tool result) */
+.sidechain.assistant,
+.sidechain.thinking {
+ margin-left: 4em;
+ margin-right: 4em;
+}
+
+/* Sub-assistant tools (nested below sub-assistant) */
+.sidechain.tool_use,
+.sidechain.tool_result {
+ margin-left: 6em;
+ margin-right: 2em;
+}
+
+/* ======================================== */
+
/* Message header info styling */
.header-info {
display: flex;
@@ -72,7 +293,12 @@
/* Message type styling */
.user {
border-left-color: #ff9800;
- margin-left: 0;
+}
+
+/* Steering user messages (out-of-band input while agent is working) */
+.user.steering {
+ border-left-color: var(--user-dimmed);
+ opacity: 0.7;
}
.assistant {
@@ -85,26 +311,22 @@
}
.system {
- border-left-color: #d98100;
- margin-left: 0;
+ border-left-color: var(--system-color);
}
.system-warning {
- border-left-color: #2196f3;
+ border-left-color: var(--system-warning-color);
background-color: var(--highlight-semi);
- margin-left: 2em; /* Extra indent - assistant-initiated */
}
.system-error {
- border-left-color: #f44336;
+ border-left-color: var(--system-error-color);
background-color: var(--error-semi);
- margin-left: 0;
}
.system-info {
border-left-color: var(--info-dimmed);
background-color: var(--highlight-dimmed);
- margin-left: 2em; /* Extra indent - assistant-initiated */
font-size: 80%;
}
@@ -218,12 +440,10 @@
.tool_use {
border-left-color: #4caf50;
- margin-left: 2em; /* Extra indent - assistant-initiated */
}
.tool_result {
border-left-color: var(--success-dimmed);
- margin-left: 2em; /* Extra indent - assistant-initiated */
}
.tool_result.error {
@@ -243,20 +463,6 @@
border-left-style: dashed;
}
-/* Sidechain indentation hierarchy */
-.sidechain.user {
- margin-left: 3em; /* Sub-assistant Prompt - nested below Task tool use (2em) */
-}
-
-.sidechain.assistant {
- margin-left: 4em; /* Sub-assistant - nested below prompt (3em) */
-}
-
-.sidechain.tool_use,
-.sidechain.tool_result {
- margin-left: 5em; /* Sub-assistant tools - nested below assistant (4em) */
-}
-
.sidechain .sidechain-indicator {
color: var(--text-muted);
font-size: 0.9em;
@@ -356,7 +562,8 @@
/* Assistant and Thinking content styling */
.assistant .content,
.thinking-text,
-.user.compacted .content {
+.user.compacted .content,
+.markdown {
font-family: var(--font-ui);
}
diff --git a/claude_code_log/templates/components/timeline.html b/claude_code_log/templates/components/timeline.html
index 602508c0..7931efeb 100644
--- a/claude_code_log/templates/components/timeline.html
+++ b/claude_code_log/templates/components/timeline.html
@@ -133,14 +133,14 @@
let displayContent = content ?? messageTypeGroups[messageType].content;
// Check for sidechain context regardless of primary message type
+ // Note: Sidechain user messages (Sub-assistant prompts) are now skipped
+ // since they duplicate the Task tool input prompt
if (classList.includes('sidechain')) {
// Override group for sidechain messages, but preserve the content
messageType = 'sidechain';
// For sidechain messages, prefix with appropriate icon based on original type
- if (classList.includes('user')) {
- displayContent = 'π ' + (content ?? 'Sub-assistant prompt');
- } else if (classList.includes('assistant')) {
+ if (classList.includes('assistant')) {
displayContent = 'π ' + (content ?? 'Sub-assistant response');
} else if (classList.includes('tool_use')) {
displayContent = 'π ' + (content ?? 'Sub-assistant tool use');
diff --git a/claude_code_log/templates/transcript.html b/claude_code_log/templates/transcript.html
index 31186794..c72f4866 100644
--- a/claude_code_log/templates/transcript.html
+++ b/claude_code_log/templates/transcript.html
@@ -72,25 +72,46 @@ π Search & Filter
{% for message in messages %}
{% if message.is_session_header %}
-
@@ -1631,7 +1643,7 @@ Claude Code Projects (from last week to today)
π 5 transcript files
π¬ 127 messages
- π 2023-11-14 22:13:20
+ π 2023-11-14 23:13:20
diff --git a/test/test_command_handling.py b/test/test_command_handling.py
index dbea27e7..b77ef07e 100644
--- a/test/test_command_handling.py
+++ b/test/test_command_handling.py
@@ -59,11 +59,11 @@ def test_system_message_command_handling():
assert "Command: init" in html, (
"Should show command name in summary"
)
- assert "class='message system'" in html, "Should have system CSS class"
+ # Check for system CSS class (may have ancestor IDs appended)
+ assert "class='message system" in html, "Should have system CSS class"
- print(
- "β Test passed: System messages with commands are shown in expandable details"
- )
+ # Test passed successfully
+ pass
finally:
test_file_path.unlink()
diff --git a/test/test_data/dedup_agent.jsonl b/test/test_data/dedup_agent.jsonl
new file mode 100644
index 00000000..2ec4a232
--- /dev/null
+++ b/test/test_data/dedup_agent.jsonl
@@ -0,0 +1,4 @@
+{"parentUuid": null, "isSidechain": true, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "test-dedup-session", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "agentId": "e1c84ba5", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_agent_start", "type": "message", "role": "assistant", "content": [{"type": "text", "text": "I'll research how data-border-color is used throughout the codebase."}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 3, "cache_creation_input_tokens": 1000, "cache_read_input_tokens": 0, "output_tokens": 50, "service_tier": "standard"}}, "requestId": "req_agent_1", "type": "assistant", "uuid": "agent-dedup-1", "timestamp": "2025-11-19T22:53:39.112Z"}
+{"parentUuid": "agent-dedup-1", "isSidechain": true, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "test-dedup-session", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "agentId": "e1c84ba5", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_agent_grep", "type": "message", "role": "assistant", "content": [{"type": "tool_use", "id": "toolu_grep", "name": "Grep", "input": {"pattern": "data-border-color", "output_mode": "files_with_matches"}}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 3, "cache_creation_input_tokens": 500, "cache_read_input_tokens": 0, "output_tokens": 100, "service_tier": "standard"}}, "requestId": "req_agent_2", "type": "assistant", "uuid": "agent-dedup-2", "timestamp": "2025-11-19T22:53:39.868Z"}
+{"parentUuid": "agent-dedup-2", "isSidechain": true, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "test-dedup-session", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "agentId": "e1c84ba5", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_grep", "type": "tool_result", "content": "transcript.html\nmessage_styles.css"}]}, "uuid": "agent-dedup-3", "timestamp": "2025-11-19T22:54:57.555Z", "toolUseResult": {"type": "text", "file": {"filePath": "e:\\Workspace\\src\\github\\claude-code-log\\renderer.py", "content": "# File content here...", "numLines": 50, "startLine": 3130, "totalLines": 3379}}}
+{"parentUuid": "agent-dedup-3", "isSidechain": true, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "test-dedup-session", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "agentId": "e1c84ba5", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_agent_final", "type": "message", "role": "assistant", "content": [{"type": "text", "text": "I created the test file successfully. The data-border-color attribute is set in the template and used by CSS selectors to determine fold-bar border colors."}], "stop_reason": "end_turn", "stop_sequence": null, "usage": {"input_tokens": 2, "cache_creation_input_tokens": 1001, "cache_read_input_tokens": 5000, "output_tokens": 200, "service_tier": "standard"}}, "requestId": "req_agent_final", "type": "assistant", "uuid": "agent-dedup-4", "timestamp": "2025-11-19T22:55:34.309Z"}
diff --git a/test/test_data/dedup_main.jsonl b/test/test_data/dedup_main.jsonl
new file mode 100644
index 00000000..f3c49a7b
--- /dev/null
+++ b/test/test_data/dedup_main.jsonl
@@ -0,0 +1,3 @@
+{"parentUuid": "8ae40e91-fa63-40a9-969d-70c0c1d6175e", "isSidechain": false, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "test-dedup-session", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_01H4wg8bFy2psdmQvJMU6kpD", "type": "message", "role": "assistant", "content": [{"type": "tool_use", "id": "toolu_dedup_task", "name": "Task", "input": {"subagent_type": "general-purpose", "description": "Research data-border-color usage", "prompt": "Research how data-border-color is used in the codebase"}}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 3, "cache_creation_input_tokens": 1000, "cache_read_input_tokens": 0, "output_tokens": 100, "service_tier": "standard"}}, "requestId": "req_dedup_main_1", "type": "assistant", "uuid": "dedup-main-1", "timestamp": "2025-11-19T22:53:34.908Z"}
+{"parentUuid": "dedup-main-1", "isSidechain": false, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "test-dedup-session", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_dedup_task", "type": "tool_result", "content": [{"type": "text", "text": "I created the test file successfully. The data-border-color attribute is set in the template and used by CSS selectors to determine fold-bar border colors."}]}]}, "uuid": "dedup-main-2", "timestamp": "2025-11-19T22:55:34.463Z", "toolUseResult": {"status": "completed", "prompt": "Research how data-border-color is used in the codebase", "agentId": "e1c84ba5", "content": [{"type": "text", "text": "I created the test file successfully. The data-border-color attribute is set in the template and used by CSS selectors to determine fold-bar border colors."}], "totalDurationMs": 119249, "totalTokens": 59967, "totalToolUseCount": 12, "usage": {"input_tokens": 2, "cache_creation_input_tokens": 1001, "cache_read_input_tokens": 5000, "output_tokens": 200, "service_tier": "standard"}}}
+{"parentUuid": "dedup-main-2", "isSidechain": false, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "test-dedup-session", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_dedup_final", "type": "message", "role": "assistant", "content": [{"type": "text", "text": "Great! The research is complete."}], "stop_reason": "end_turn", "stop_sequence": null, "usage": {"input_tokens": 3, "cache_creation_input_tokens": 100, "cache_read_input_tokens": 1000, "output_tokens": 50, "service_tier": "standard"}}, "requestId": "req_dedup_main_3", "type": "assistant", "uuid": "dedup-main-3", "timestamp": "2025-11-19T22:55:40.790Z"}
diff --git a/test/test_data/edit_tool.jsonl b/test/test_data/edit_tool.jsonl
new file mode 100644
index 00000000..896e9d44
--- /dev/null
+++ b/test/test_data/edit_tool.jsonl
@@ -0,0 +1,2 @@
+{"parentUuid":"49306c7f-62f4-41ce-b0e7-b67a891148b7","isSidechain":false,"userType":"external","cwd":"e:\\Workspace\\src\\github\\claude-code-log","sessionId":"88a8d761-7b9a-4bf1-a8ca-c1febe6bf358","version":"2.0.46","gitBranch":"dev/fold-ui-horizontal","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_01A76saVFjpeVZQmepohx5gy","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_01PhNkTBr17qvahqhCdkUkvM","name":"Edit","input":{"file_path":"e:\\Workspace\\src\\github\\claude-code-log\\claude_code_log\\renderer_timings.py","old_string":"def set_timing_var(name: str, value: Any) -> None:\n \"\"\"Set a timing variable in the global timing data dict.\n\n Args:\n name: Variable name (e.g., \"_markdown_timings\", \"_pygments_timings\")\n value: Value to set\n \"\"\"\n if DEBUG_TIMING:\n _timing_data[name] = value\n\n\ndef set_current_msg_uuid(uuid: str) -> None:\n \"\"\"Set the current message UUID for timing tracking.\n\n Args:\n uuid: Message UUID\n \"\"\"\n if DEBUG_TIMING:\n _timing_data[\"_current_msg_uuid\"] = uuid","new_string":"def set_timing_var(name: str, value: Any) -> None:\n \"\"\"Set a timing variable in the global timing data dict.\n\n Args:\n name: Variable name (e.g., \"_markdown_timings\", \"_pygments_timings\", \"_current_msg_uuid\")\n value: Value to set\n \"\"\"\n if DEBUG_TIMING:\n _timing_data[name] = value"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":0,"cache_creation_input_tokens":10391,"cache_read_input_tokens":102802,"cache_creation":{"ephemeral_5m_input_tokens":10391,"ephemeral_1h_input_tokens":0},"output_tokens":526,"service_tier":"standard"}},"requestId":"req_011CVRRU3KTqNrUhZ9K35Zd2","type":"assistant","uuid":"e450d9e3-ccb9-4595-a7cb-e13f13bab5a0","timestamp":"2025-11-23T20:06:04.851Z"}
+{"parentUuid":"e450d9e3-ccb9-4595-a7cb-e13f13bab5a0","isSidechain":false,"userType":"external","cwd":"e:\\Workspace\\src\\github\\claude-code-log","sessionId":"88a8d761-7b9a-4bf1-a8ca-c1febe6bf358","version":"2.0.46","gitBranch":"dev/fold-ui-horizontal","type":"user","message":{"role":"user","content":[{"tool_use_id":"toolu_01PhNkTBr17qvahqhCdkUkvM","type":"tool_result","content":"The file e:\\Workspace\\src\\github\\claude-code-log\\claude_code_log\\renderer_timings.py has been updated. Here's the result of running `cat -n` on a snippet of the edited file:\n 15β# Global timing data storage\n 16β_timing_data: Dict[str, Any] = {}\n 17β\n 18β\n 19βdef set_timing_var(name: str, value: Any) -> None:\n 20β \"\"\"Set a timing variable in the global timing data dict.\n 21β\n 22β Args:\n 23β name: Variable name (e.g., \"_markdown_timings\", \"_pygments_timings\", \"_current_msg_uuid\")\n 24β value: Value to set\n 25β \"\"\"\n 26β if DEBUG_TIMING:\n 27β _timing_data[name] = value\n 28β\n 29β\n 30β@contextmanager\n 31βdef timing_stat(list_name: str) -> Iterator[None]:"}]},"uuid":"46b5acca-1860-4ae3-be78-ed3537a81d8b","timestamp":"2025-11-23T20:06:14.160Z","toolUseResult":{"filePath":"e:\\Workspace\\src\\github\\claude-code-log\\claude_code_log\\renderer_timings.py","oldString":"def set_timing_var(name: str, value: Any) -> None:\n \"\"\"Set a timing variable in the global timing data dict.\n\n Args:\n name: Variable name (e.g., \"_markdown_timings\", \"_pygments_timings\")\n value: Value to set\n \"\"\"\n if DEBUG_TIMING:\n _timing_data[name] = value\n\n\ndef set_current_msg_uuid(uuid: str) -> None:\n \"\"\"Set the current message UUID for timing tracking.\n\n Args:\n uuid: Message UUID\n \"\"\"\n if DEBUG_TIMING:\n _timing_data[\"_current_msg_uuid\"] = uuid","newString":"def set_timing_var(name: str, value: Any) -> None:\n \"\"\"Set a timing variable in the global timing data dict.\n\n Args:\n name: Variable name (e.g., \"_markdown_timings\", \"_pygments_timings\", 
\"_current_msg_uuid\")\n value: Value to set\n \"\"\"\n if DEBUG_TIMING:\n _timing_data[name] = value","originalFile":"\"\"\"Timing utilities for renderer performance profiling.\"\"\"\n\nimport os\nimport time\nfrom contextlib import contextmanager\nfrom typing import List, Tuple, Iterator, Any, Dict\n\n# Performance debugging\nDEBUG_TIMING = os.getenv(\"CLAUDE_CODE_LOG_DEBUG_TIMING\", \"\").lower() in (\n \"1\",\n \"true\",\n \"yes\",\n)\n\n# Global timing data storage\n_timing_data: Dict[str, Any] = {}\n\n\ndef set_timing_var(name: str, value: Any) -> None:\n \"\"\"Set a timing variable in the global timing data dict.\n\n Args:\n name: Variable name (e.g., \"_markdown_timings\", \"_pygments_timings\")\n value: Value to set\n \"\"\"\n if DEBUG_TIMING:\n _timing_data[name] = value\n\n\ndef set_current_msg_uuid(uuid: str) -> None:\n \"\"\"Set the current message UUID for timing tracking.\n\n Args:\n uuid: Message UUID\n \"\"\"\n if DEBUG_TIMING:\n _timing_data[\"_current_msg_uuid\"] = uuid\n\n\n@contextmanager\ndef timing_stat(list_name: str) -> Iterator[None]:\n \"\"\"Context manager for tracking timing statistics.\n\n Args:\n list_name: Name of the timing list to append to\n (e.g., \"_markdown_timings\", \"_pygments_timings\")\n\n Example:\n with timing_stat(\"_pygments_timings\"):\n result = expensive_operation()\n \"\"\"\n if not DEBUG_TIMING:\n yield\n return\n\n t_start = time.time()\n try:\n yield\n finally:\n duration = time.time() - t_start\n if list_name in _timing_data:\n msg_uuid = _timing_data.get(\"_current_msg_uuid\", \"\")\n _timing_data[list_name].append((duration, msg_uuid))\n\n\ndef report_timing_statistics(\n message_timings: List[Tuple[float, str, int, str]],\n markdown_timings: List[Tuple[float, str]],\n pygments_timings: List[Tuple[float, str]],\n) -> None:\n \"\"\"Report timing statistics for message rendering.\n\n Args:\n message_timings: List of (duration, message_type, index, uuid) tuples\n markdown_timings: List of (duration, uuid) tuples 
for markdown rendering\n pygments_timings: List of (duration, uuid) tuples for Pygments highlighting\n \"\"\"\n if not message_timings:\n return\n\n # Sort by duration descending\n sorted_timings = sorted(message_timings, key=lambda x: x[0], reverse=True)\n\n # Calculate statistics\n total_msg_time = sum(t[0] for t in message_timings)\n avg_time = total_msg_time / len(message_timings)\n\n # Report slowest messages\n print(\"\\n[TIMING] Loop statistics:\", flush=True)\n print(f\"[TIMING] Total messages: {len(message_timings)}\", flush=True)\n print(f\"[TIMING] Average time per message: {avg_time * 1000:.1f}ms\", flush=True)\n print(\"[TIMING] Slowest 10 messages:\", flush=True)\n for duration, msg_type, idx, uuid in sorted_timings[:10]:\n print(\n f\"[TIMING] Message {uuid} (#{idx}, {msg_type}): {duration * 1000:.1f}ms\",\n flush=True,\n )\n\n # Report markdown rendering statistics\n if markdown_timings:\n sorted_markdown = sorted(markdown_timings, key=lambda x: x[0], reverse=True)\n total_markdown_time = sum(t[0] for t in markdown_timings)\n print(f\"\\n[TIMING] Markdown rendering:\", flush=True)\n print(f\"[TIMING] Total operations: {len(markdown_timings)}\", flush=True)\n print(f\"[TIMING] Total time: {total_markdown_time:.3f}s\", flush=True)\n print(f\"[TIMING] Slowest 10 operations:\", flush=True)\n for duration, uuid in sorted_markdown[:10]:\n print(\n f\"[TIMING] {uuid}: {duration * 1000:.1f}ms\",\n flush=True,\n )\n\n # Report Pygments highlighting statistics\n if pygments_timings:\n sorted_pygments = sorted(pygments_timings, key=lambda x: x[0], reverse=True)\n total_pygments_time = sum(t[0] for t in pygments_timings)\n print(f\"\\n[TIMING] Pygments highlighting:\", flush=True)\n print(f\"[TIMING] Total operations: {len(pygments_timings)}\", flush=True)\n print(f\"[TIMING] Total time: {total_pygments_time:.3f}s\", flush=True)\n print(f\"[TIMING] Slowest 10 operations:\", flush=True)\n for duration, uuid in sorted_pygments[:10]:\n print(\n f\"[TIMING] {uuid}: 
{duration * 1000:.1f}ms\",\n flush=True,\n )\n","structuredPatch":[{"oldStart":20,"oldLines":23,"newStart":20,"newLines":13,"lines":[" \"\"\"Set a timing variable in the global timing data dict."," "," Args:","- name: Variable name (e.g., \"_markdown_timings\", \"_pygments_timings\")","+ name: Variable name (e.g., \"_markdown_timings\", \"_pygments_timings\", \"_current_msg_uuid\")"," value: Value to set"," \"\"\""," if DEBUG_TIMING:"," _timing_data[name] = value"," "," ","-def set_current_msg_uuid(uuid: str) -> None:","- \"\"\"Set the current message UUID for timing tracking.","-","- Args:","- uuid: Message UUID","- \"\"\"","- if DEBUG_TIMING:","- _timing_data[\"_current_msg_uuid\"] = uuid","-","-"," @contextmanager"," def timing_stat(list_name: str) -> Iterator[None]:"," \"\"\"Context manager for tracking timing statistics."]}],"userModified":false,"replaceAll":false}}
\ No newline at end of file
diff --git a/test/test_data/sidechain_agent.jsonl b/test/test_data/sidechain_agent.jsonl
new file mode 100644
index 00000000..3255f484
--- /dev/null
+++ b/test/test_data/sidechain_agent.jsonl
@@ -0,0 +1,3 @@
+{"parentUuid": null, "isSidechain": true, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "88a8d761-7b9a-4bf1-a8ca-c1febe6bf358", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "agentId": "e1c84ba5", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_017zj75dqWUNqVxCUrhqbTkf", "type": "message", "role": "assistant", "content": [{"type": "text", "text": "I'll research how `data-border-color` is used thro..."}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 3, "cache_creation_input_tokens": 19830, "cache_read_input_tokens": 0, "cache_creation": {"ephemeral_5m_input_tokens": 19830, "ephemeral_1h_input_tokens": 0}, "output_tokens": 183, "service_tier": "standard"}}, "requestId": "req_011CVJ51ytkcBEjrQhRNswqR", "type": "assistant", "uuid": "b674eedd-0d9d-49e7-8116-1f092cd750ed", "timestamp": "2025-11-19T22:53:39.112Z"}
+{"parentUuid": "b674eedd-0d9d-49e7-8116-1f092cd750ed", "isSidechain": true, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "88a8d761-7b9a-4bf1-a8ca-c1febe6bf358", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "agentId": "e1c84ba5", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_017zj75dqWUNqVxCUrhqbTkf", "type": "message", "role": "assistant", "content": [{"type": "tool_use", "id": "toolu_01BWgwmyj8gGuqFNHMPqqtN2", "name": "Grep", "input": {"pattern": "data-border-color", "output_mode": "files_with_matches"}}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 3, "cache_creation_input_tokens": 19830, "cache_read_input_tokens": 0, "cache_creation": {"ephemeral_5m_input_tokens": 19830, "ephemeral_1h_input_tokens": 0}, "output_tokens": 183, "service_tier": "standard"}}, "requestId": "req_011CVJ51ytkcBEjrQhRNswqR", "type": "assistant", "uuid": "3c78114e-3de3-471e-b80f-0c86adb361c4", "timestamp": "2025-11-19T22:53:39.868Z"}
+{"parentUuid": "718670f5-3e6b-4b3e-9a70-bc3e5ac90f92", "isSidechain": true, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "88a8d761-7b9a-4bf1-a8ca-c1febe6bf358", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "agentId": "e1c84ba5", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_011V1hetiJ3pC79EoLTY53Ni", "type": "tool_result", "content": " 3130\u00e2\u2020\u2019 # Simplified: no \"Tool Re..."}]}, "uuid": "4899826e-499f-4f24-ae6e-f2ac31cddfd4", "timestamp": "2025-11-19T22:54:57.555Z", "toolUseResult": {"type": "text", "file": {"filePath": "e:\\Workspace\\src\\github\\claude-code-log\\claude_cod...", "content": " # Simplified: no \"Tool Result\" hea...", "numLines": 50, "startLine": 3130, "totalLines": 3379}}}
diff --git a/test/test_data/sidechain_main.jsonl b/test/test_data/sidechain_main.jsonl
new file mode 100644
index 00000000..d397bb7a
--- /dev/null
+++ b/test/test_data/sidechain_main.jsonl
@@ -0,0 +1,3 @@
+{"parentUuid": "8ae40e91-fa63-40a9-969d-70c0c1d6175e", "isSidechain": false, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "88a8d761-7b9a-4bf1-a8ca-c1febe6bf358", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_01H4wg8bFy2psdmQvJMU6kpD", "type": "message", "role": "assistant", "content": [{"type": "tool_use", "id": "toolu_01JA2mLseQCAW3MdwPE4qWEk", "name": "Task", "input": {"subagent_type": "general-purpose", "description": "Research data-border-color usage and CSS class mat...", "prompt": "Research how data-border-color is used in the code..."}}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 3, "cache_creation_input_tokens": 141272, "cache_read_input_tokens": 0, "cache_creation": {"ephemeral_5m_input_tokens": 141272, "ephemeral_1h_input_tokens": 0}, "output_tokens": 365, "service_tier": "standard"}}, "requestId": "req_011CVJ517iGrQqjVFtrWsUgP", "type": "assistant", "uuid": "83a7cabd-3642-45d6-ba73-9b9f843b9ab1", "timestamp": "2025-11-19T22:53:34.908Z"}
+{"parentUuid": "83a7cabd-3642-45d6-ba73-9b9f843b9ab1", "isSidechain": false, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "88a8d761-7b9a-4bf1-a8ca-c1febe6bf358", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_01JA2mLseQCAW3MdwPE4qWEk", "type": "tool_result", "content": [{"type": "text", "text": "Perfect! Now I have a complete understanding. Let ..."}]}]}, "uuid": "97965533-18f3-41b7-aa07-0c438f2a5893", "timestamp": "2025-11-19T22:55:34.463Z", "toolUseResult": {"status": "completed", "prompt": "Research how data-border-color is used in the code...", "agentId": "e1c84ba5", "content": [{"type": "text", "text": "Perfect! Now I have a complete understanding. Let ..."}], "totalDurationMs": 119249, "totalTokens": 59967, "totalToolUseCount": 12, "usage": {"input_tokens": 2, "cache_creation_input_tokens": 1001, "cache_read_input_tokens": 56513, "cache_creation": {"ephemeral_5m_input_tokens": 1001, "ephemeral_1h_input_tokens": 0}, "output_tokens": 2451, "service_tier": "standard"}}}
+{"parentUuid": "97965533-18f3-41b7-aa07-0c438f2a5893", "isSidechain": false, "userType": "external", "cwd": "e:\\Workspace\\src\\github\\claude-code-log", "sessionId": "88a8d761-7b9a-4bf1-a8ca-c1febe6bf358", "version": "2.0.46", "gitBranch": "dev/fold-ui-horizontal", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_011b5A5rZCXR7eBYR4VbohVZ", "type": "message", "role": "assistant", "content": [{"type": "text", "text": "Excellent research! The sub-assistant has identifi..."}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 3, "cache_creation_input_tokens": 2952, "cache_read_input_tokens": 141272, "cache_creation": {"ephemeral_5m_input_tokens": 2952, "ephemeral_1h_input_tokens": 0}, "output_tokens": 190, "service_tier": "standard"}}, "requestId": "req_011CVJ5Au8m6JgC6Ly7SbVPv", "type": "assistant", "uuid": "91c82c1c-29b0-4149-a261-e5582d944ac6", "timestamp": "2025-11-19T22:55:40.790Z"}
diff --git a/test/test_ide_tags.py b/test/test_ide_tags.py
index 932a53e2..723e4885 100644
--- a/test/test_ide_tags.py
+++ b/test/test_ide_tags.py
@@ -129,7 +129,9 @@ def test_render_user_message_with_multi_item_content():
image_item,
]
- content_html, is_compacted = render_user_message_content(content_list)
+ content_html, is_compacted, is_memory_input = render_user_message_content(
+ content_list
+ )
# Should extract IDE notification
assert "π€" in content_html
@@ -145,6 +147,8 @@ def test_render_user_message_with_multi_item_content():
# Should not be compacted
assert is_compacted is False
+ # Should not be memory input
+ assert is_memory_input is False
def test_render_message_content_single_text_item():
diff --git a/test/test_preview_truncation.py b/test/test_preview_truncation.py
new file mode 100644
index 00000000..0ccb45c4
--- /dev/null
+++ b/test/test_preview_truncation.py
@@ -0,0 +1,129 @@
+"""Tests for code preview truncation in tool results.
+
+Regression test for the bug where Pygments highlighted code previews
+weren't truncated because the code assumed multiple <tr> rows per line,
+but HtmlFormatter(linenos="table") produces a single <tr> with two <td>s.
+"""
+
+from pathlib import Path
+
+from claude_code_log.parser import load_transcript
+from claude_code_log.renderer import generate_html, _truncate_highlighted_preview
+
+
+class TestPreviewTruncation:
+ """Tests for preview truncation in collapsible code blocks."""
+
+ def test_truncate_highlighted_preview_function(self):
+ """Test the _truncate_highlighted_preview helper directly."""
+ # Simulate Pygments output with 10 lines
+ html = """ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
line 1
+line 2
+line 3
+line 4
+line 5
+line 6
+line 7
+line 8
+line 9
+line 10
+
"""
+
+ # Truncate to 5 lines
+ result = _truncate_highlighted_preview(html, 5)
+
+ # Should have lines 1-5 in linenos
+ assert ' 1' in result
+ assert ' 5' in result
+ # Should NOT have lines 6-10
+ assert ' 6' not in result
+ assert '10' not in result
+
+ # Should have lines 1-5 in code
+ assert "line 1" in result
+ assert "line 5" in result
+ # Should NOT have lines 6-10
+ assert "line 6" not in result
+ assert "line 10" not in result
+
+ def test_edit_tool_result_preview_truncation(self):
+ """Test that Edit tool results have truncated previews in collapsible blocks.
+
+ Regression test for: Preview extraction was looking for multiple <tr> tags,
+ but Pygments produces a single <tr> with two <td>s, so the fallback showed
+ full content instead of truncated preview.
+ """
+ test_data_path = Path(__file__).parent / "test_data" / "edit_tool.jsonl"
+
+ messages = load_transcript(test_data_path)
+ html = generate_html(messages, "Edit Tool Test")
+
+ # The Edit tool result has 17 lines (>12), so should be collapsible
+ assert "collapsible-code" in html, "Should have collapsible code block"
+
+ # Find the preview content section
+ assert "preview-content" in html, "Should have preview content"
+
+ # The preview should only show first 5 lines, not all 17
+ # Line 15 is "_timing_data: Dict[str, Any] = {}" - should be in preview
+ # Line 31 is 'def timing_stat(list_name: str) -> Iterator[None]:' - should NOT be in preview
+
+ # Extract preview content (between preview-content div tags)
+ import re
+
+ preview_match = re.search(
+ r"(.*?)\s*",
+ html,
+ re.DOTALL,
+ )
+ assert preview_match, "Should find preview-content div"
+ preview_html = preview_match.group(1)
+
+ # Preview should have early lines (within first 5)
+ # Line 15 (line 1 of snippet): "_timing_data"
+ assert "_timing_data" in preview_html, "Preview should contain line 15 content"
+
+ # Preview should NOT have later lines (beyond first 5)
+ # Line 26 (line 12 of snippet): "if DEBUG_TIMING:"
+ # Note: Pygments wraps tokens in <span> tags, so check for identifier
+ assert "DEBUG_TIMING" not in preview_html, (
+ "Preview should NOT contain line 26 content (beyond 5 lines)"
+ )
+
+ # Line 30-31 (line 16-17 of snippet): "@contextmanager"
+ assert "contextmanager" not in preview_html, (
+ "Preview should NOT contain line 30 content (beyond 5 lines)"
+ )
+
+ def test_full_content_still_available(self):
+ """Test that full content is still available in the expanded section."""
+ test_data_path = Path(__file__).parent / "test_data" / "edit_tool.jsonl"
+
+ messages = load_transcript(test_data_path)
+ html = generate_html(messages, "Edit Tool Test")
+
+ # The full content section should have all lines
+ import re
+
+ full_match = re.search(
+ r"(.*?)\s*",
+ html,
+ re.DOTALL,
+ )
+ assert full_match, "Should find code-full div"
+ full_html = full_match.group(1)
+
+ # Full content should have both early and late lines
+ # Note: Pygments wraps tokens in <span> tags, so we check for the identifier
+ assert "_timing_data" in full_html, "Full content should contain line 15"
+ assert "DEBUG_TIMING" in full_html, "Full content should contain line 26"
+ assert "contextmanager" in full_html, "Full content should contain line 30"
diff --git a/test/test_query_params_browser.py b/test/test_query_params_browser.py
index aa181e63..d3bb13aa 100644
--- a/test/test_query_params_browser.py
+++ b/test/test_query_params_browser.py
@@ -86,33 +86,36 @@ def test_filter_query_param_sets_active_toggles(self, page: Page):
@pytest.mark.browser
def test_filter_query_param_filters_messages(self, page: Page):
- """Test that filter query parameter actually hides/shows messages."""
+ """Test that filter query parameter actually hides/shows messages.
+
+ Note: sidechain.jsonl only has sidechain user messages which are now skipped
+ (they duplicate Task tool input). Test with sidechain+assistant filters instead.
+ """
sidechain_file = Path("test/test_data/sidechain.jsonl")
messages = load_transcript(sidechain_file)
temp_file = self._create_temp_html(messages, "Query Param Filtering Test")
- # Load page with user and sidechain filters active
- # (sidechain messages require both sidechain AND their type filter)
- page.goto(f"file://{temp_file}?filter=user,sidechain")
+ # Load page with sidechain and assistant filters active
+ page.goto(f"file://{temp_file}?filter=sidechain,assistant")
# Wait for page to load and filters to apply
page.wait_for_load_state("networkidle")
# Wait for filter application by checking for filtered-hidden class to be applied
page.wait_for_selector(
- ".message.assistant.filtered-hidden", state="attached", timeout=5000
+ ".message.tool_use.filtered-hidden", state="attached", timeout=5000
)
- # Only user messages should be visible
- visible_user_messages = page.locator(".message.user:not(.filtered-hidden)")
- user_count = visible_user_messages.count()
- assert user_count > 0, "User messages should be visible"
-
- # Assistant messages should be hidden
- visible_assistant_messages = page.locator(
- ".message.assistant:not(.filtered-hidden)"
+ # Sidechain assistant messages should be visible
+ visible_sidechain_messages = page.locator(
+ ".message.sidechain.assistant:not(.filtered-hidden)"
)
- assistant_count = visible_assistant_messages.count()
- assert assistant_count == 0, "Assistant messages should be hidden"
+ sidechain_count = visible_sidechain_messages.count()
+ assert sidechain_count > 0, "Sidechain assistant messages should be visible"
+
+ # Tool use messages should be hidden (not in filter)
+ visible_tool_messages = page.locator(".message.tool_use:not(.filtered-hidden)")
+ tool_count = visible_tool_messages.count()
+ assert tool_count == 0, "Tool use messages should be hidden"
@pytest.mark.browser
def test_no_query_params_toolbar_hidden(self, page: Page):
diff --git a/test/test_sidechain_agents.py b/test/test_sidechain_agents.py
new file mode 100644
index 00000000..f9b665bc
--- /dev/null
+++ b/test/test_sidechain_agents.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python3
+"""Tests for sidechain agent insertion and deduplication functionality."""
+
+import tempfile
+from pathlib import Path
+
+
+from claude_code_log.parser import load_transcript
+from claude_code_log.renderer import generate_html
+
+
+def test_agent_insertion():
+ """Test that agent messages are inserted after their referencing tool result."""
+ # Create test data files
+ with tempfile.TemporaryDirectory() as tmpdir:
+ tmpdir_path = Path(tmpdir)
+
+ # Write main transcript
+ main_file = tmpdir_path / "main.jsonl"
+ main_file.write_text(
+ (Path(__file__).parent / "test_data" / "sidechain_main.jsonl").read_text()
+ )
+
+ # Write agent transcript (must match agentId in main file)
+ agent_file = tmpdir_path / "agent-e1c84ba5.jsonl"
+ agent_file.write_text(
+ (Path(__file__).parent / "test_data" / "sidechain_agent.jsonl").read_text()
+ )
+
+ # Load transcript with agent insertion (agent files discovered automatically)
+ messages = load_transcript(main_file)
+
+ # Verify agent messages were inserted (3 main + 3 agent = 6 total)
+ assert len(messages) == 6, f"Expected 6 messages, got {len(messages)}"
+
+ # Find the tool result message (uuid 97965533-18f3-41b7-aa07-0c438f2a5893)
+ tool_result_idx = next(
+ i
+ for i, msg in enumerate(messages)
+ if msg.uuid == "97965533-18f3-41b7-aa07-0c438f2a5893"
+ )
+
+ # Verify agent messages come right after tool result
+ # The agent messages should be inserted between tool_result and next main message
+ assert tool_result_idx + 3 < len(messages), "Agent messages should be inserted"
+ # Verify at least one message after tool result is from sidechain
+ assert any(
+ getattr(messages[i], "isSidechain", False)
+ for i in range(tool_result_idx + 1, len(messages))
+ )
+
+
+def test_deduplication_task_result_vs_sidechain():
+ """Test that sidechain assistant final message is deduplicated when it matches Task result."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ tmpdir_path = Path(tmpdir)
+
+ # Write deduplication test data
+ main_file = tmpdir_path / "main.jsonl"
+ main_file.write_text(
+ (Path(__file__).parent / "test_data" / "dedup_main.jsonl").read_text()
+ )
+
+ agent_file = tmpdir_path / "agent-e1c84ba5.jsonl"
+ agent_file.write_text(
+ (Path(__file__).parent / "test_data" / "dedup_agent.jsonl").read_text()
+ )
+
+ # Load and render (agent files discovered automatically)
+ messages = load_transcript(main_file)
+ html = generate_html(messages, title="Test")
+
+ # Verify deduplication occurred:
+ # The sidechain assistant's final message should be replaced with a forward link
+ assert "(Task summary" in html
+ assert "already displayed in" in html
+ assert "Task tool result above" in html
+
+ # The actual content "I created the test file successfully" should only appear once
+ # in the Task result, not in the sidechain assistant
+ content_count = html.count("I created the test file successfully")
+ assert content_count == 1, (
+ f"Expected content to appear once, found {content_count} times"
+ )
+
+
+def test_no_deduplication_when_content_different():
+ """Test that deduplication doesn't occur when Task result and sidechain differ."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ tmpdir_path = Path(tmpdir)
+
+ # Create test data with different content
+ # Note: agentId is just "ghi789", the filename is "agent-ghi789.jsonl"
+ main_file = tmpdir_path / "main.jsonl"
+ main_file.write_text(
+ '{"parentUuid":null,"isSidechain":false,"userType":"external","cwd":"e:\\\\test","sessionId":"test-3","version":"2.0.46","gitBranch":"main","type":"user","message":{"role":"user","content":[{"type":"text","text":"Do something"}]},"uuid":"d-0","timestamp":"2025-01-15T12:00:00.000Z"}\n'
+ '{"parentUuid":"d-0","isSidechain":false,"userType":"external","cwd":"e:\\\\test","sessionId":"test-3","version":"2.0.46","gitBranch":"main","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_01test1","type":"message","role":"assistant","content":[{"type":"tool_use","id":"task-3","name":"Task","input":{"prompt":"Do it"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":10,"output_tokens":20}},"requestId":"req_01test1","type":"assistant","uuid":"d-1","timestamp":"2025-01-15T12:00:05.000Z"}\n'
+ '{"parentUuid":"d-1","isSidechain":false,"userType":"external","cwd":"e:\\\\test","sessionId":"test-3","version":"2.0.46","gitBranch":"main","type":"user","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"task-3","content":"Done A"}]},"uuid":"d-2","timestamp":"2025-01-15T12:00:15.000Z","toolUseResult":{"agentId":"ghi789","content":"Done A"},"agentId":"ghi789"}\n'
+ )
+
+ agent_file = tmpdir_path / "agent-ghi789.jsonl"
+ agent_file.write_text(
+ '{"parentUuid":null,"isSidechain":true,"userType":"external","cwd":"e:\\\\test","sessionId":"test-3","version":"2.0.46","gitBranch":"main","agentId":"ghi789","type":"user","message":{"role":"user","content":[{"type":"text","text":"Do it"}]},"uuid":"agent-d-0","timestamp":"2025-01-15T12:00:06.000Z"}\n'
+ '{"parentUuid":"agent-d-0","isSidechain":true,"userType":"external","cwd":"e:\\\\test","sessionId":"test-3","version":"2.0.46","gitBranch":"main","agentId":"ghi789","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_01testagent1","type":"message","role":"assistant","content":[{"type":"text","text":"Done B"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":5,"output_tokens":10}},"requestId":"req_01testagent1","type":"assistant","uuid":"agent-d-1","timestamp":"2025-01-15T12:00:14.000Z"}\n'
+ )
+
+ messages = load_transcript(main_file)
+ html = generate_html(messages, title="Test")
+
+ # No deduplication should occur - both "Done A" and "Done B" should appear
+ assert "Done A" in html
+ assert "Done B" in html
+ assert "(Task summary" not in html
+
+
+def test_agent_messages_marked_as_sidechain():
+ """Test that agent messages are properly marked with sidechain class."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ tmpdir_path = Path(tmpdir)
+
+ main_file = tmpdir_path / "main.jsonl"
+ main_file.write_text(
+ (Path(__file__).parent / "test_data" / "sidechain_main.jsonl").read_text()
+ )
+
+ agent_file = tmpdir_path / "agent-e1c84ba5.jsonl"
+ agent_file.write_text(
+ (Path(__file__).parent / "test_data" / "sidechain_agent.jsonl").read_text()
+ )
+
+ messages = load_transcript(main_file)
+ html = generate_html(messages, title="Test")
+
+ # Agent messages should have sidechain class
+ assert (
+ "class='message assistant sidechain" in html
+ or "class='message user sidechain" in html
+ )
+
+ # Verify sidechain messages are indented (have ancestry classes)
+ assert (
+ "d-2" in html
+ ) # Tool result ID should appear in ancestry for sidechain messages
+
+
+def test_multiple_agent_invocations():
+ """Test handling of multiple Task invocations in same session."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ tmpdir_path = Path(tmpdir)
+
+ # Create scenario with two Task invocations
+ main_file = tmpdir_path / "main.jsonl"
+ main_file.write_text(
+ '{"parentUuid":null,"isSidechain":false,"userType":"external","cwd":"/workspace/test","sessionId":"test-4","version":"2.0.46","gitBranch":"main","type":"user","message":{"role":"user","content":[{"type":"text","text":"Do two things"}]},"uuid":"d-0","timestamp":"2025-01-15T13:00:00.000Z"}\n'
+ '{"parentUuid":"d-0","isSidechain":false,"userType":"external","cwd":"/workspace/test","sessionId":"test-4","version":"2.0.46","gitBranch":"main","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_01","type":"message","role":"assistant","content":[{"type":"tool_use","id":"task-4a","name":"Task","input":{"prompt":"First task"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":100,"output_tokens":50}},"requestId":"req_01","type":"assistant","uuid":"d-1","timestamp":"2025-01-15T13:00:05.000Z"}\n'
+ '{"parentUuid":"d-1","isSidechain":false,"userType":"external","cwd":"/workspace/test","sessionId":"test-4","version":"2.0.46","gitBranch":"main","type":"user","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"task-4a","content":"First done"}]},"uuid":"d-2","timestamp":"2025-01-15T13:00:15.000Z","toolUseResult":{"status":"completed","agentId":"first","content":[{"type":"text","text":"First done"}]},"agentId":"first"}\n'
+ '{"parentUuid":"d-2","isSidechain":false,"userType":"external","cwd":"/workspace/test","sessionId":"test-4","version":"2.0.46","gitBranch":"main","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_02","type":"message","role":"assistant","content":[{"type":"tool_use","id":"task-4b","name":"Task","input":{"prompt":"Second task"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":150,"output_tokens":60}},"requestId":"req_02","type":"assistant","uuid":"d-3","timestamp":"2025-01-15T13:00:20.000Z"}\n'
+ '{"parentUuid":"d-3","isSidechain":false,"userType":"external","cwd":"/workspace/test","sessionId":"test-4","version":"2.0.46","gitBranch":"main","type":"user","message":{"role":"user","content":[{"type":"tool_result","tool_use_id":"task-4b","content":"Second done"}]},"uuid":"d-4","timestamp":"2025-01-15T13:00:30.000Z","toolUseResult":{"status":"completed","agentId":"second","content":[{"type":"text","text":"Second done"}]},"agentId":"second"}\n'
+ )
+
+ (tmpdir_path / "agent-first.jsonl").write_text(
+ '{"parentUuid":null,"isSidechain":true,"userType":"external","cwd":"/workspace/test","sessionId":"test-4","version":"2.0.46","gitBranch":"main","agentId":"first","type":"user","message":{"role":"user","content":[{"type":"text","text":"First task"}]},"uuid":"agent-d-0","timestamp":"2025-01-15T13:00:06.000Z"}\n'
+ '{"parentUuid":"agent-d-0","isSidechain":true,"userType":"external","cwd":"/workspace/test","sessionId":"test-4","version":"2.0.46","gitBranch":"main","agentId":"first","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_agent_01","type":"message","role":"assistant","content":[{"type":"text","text":"First done"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":50,"output_tokens":25}},"requestId":"req_agent_01","type":"assistant","uuid":"agent-d-1","timestamp":"2025-01-15T13:00:14.000Z"}\n'
+ )
+
+ (tmpdir_path / "agent-second.jsonl").write_text(
+ '{"parentUuid":null,"isSidechain":true,"userType":"external","cwd":"/workspace/test","sessionId":"test-4","version":"2.0.46","gitBranch":"main","agentId":"second","type":"user","message":{"role":"user","content":[{"type":"text","text":"Second task"}]},"uuid":"agent2-d-0","timestamp":"2025-01-15T13:00:21.000Z"}\n'
+ '{"parentUuid":"agent2-d-0","isSidechain":true,"userType":"external","cwd":"/workspace/test","sessionId":"test-4","version":"2.0.46","gitBranch":"main","agentId":"second","message":{"model":"claude-sonnet-4-5-20250929","id":"msg_agent_02","type":"message","role":"assistant","content":[{"type":"text","text":"Second done"}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":55,"output_tokens":30}},"requestId":"req_agent_02","type":"assistant","uuid":"agent2-d-1","timestamp":"2025-01-15T13:00:29.000Z"}\n'
+ )
+
+ messages = load_transcript(main_file)
+
+ # Should have 5 main + 2 + 2 agent messages = 9 total
+ assert len(messages) == 9
+
+ # Verify both agent sequences are inserted correctly
+ html = generate_html(messages, title="Test")
+ assert "First done" in html
+ assert "Second done" in html
diff --git a/test/test_template_rendering.py b/test/test_template_rendering.py
index 3e0803a0..0a10afbd 100644
--- a/test/test_template_rendering.py
+++ b/test/test_template_rendering.py
@@ -40,8 +40,8 @@ def test_representative_messages_render(self):
)
# Check that all message types are present
- assert "class='message user'" in html_content
- assert "class='message assistant'" in html_content
+ assert "class='message user" in html_content
+ assert "class='message assistant" in html_content
# Summary messages are now integrated into session headers
assert "session-summary" in html_content or "Summary:" in html_content
@@ -249,8 +249,8 @@ def test_css_classes_applied(self):
html_content = html_file.read_text(encoding="utf-8")
# Check message type classes
- assert "class='message user'" in html_content
- assert "class='message assistant'" in html_content
+ assert "class='message user" in html_content
+ assert "class='message assistant" in html_content
# Summary messages are now integrated into session headers
assert "session-summary" in html_content or "Summary:" in html_content
diff --git a/test/test_timeline_browser.py b/test/test_timeline_browser.py
index 4063b173..2321e45c 100644
--- a/test/test_timeline_browser.py
+++ b/test/test_timeline_browser.py
@@ -38,16 +38,23 @@ def _create_temp_html(self, messages: List[TranscriptEntry], title: str) -> Path
return temp_file
- def _wait_for_timeline_loaded(self, page: Page):
- """Wait for timeline to be fully loaded and initialized."""
+ def _wait_for_timeline_loaded(self, page: Page, expect_items: bool = True):
+ """Wait for timeline to be fully loaded and initialized.
+
+ Args:
+ page: The Playwright page object
+ expect_items: Whether to wait for timeline items (default True).
+ Set to False when filters might hide all messages.
+ """
# Wait for timeline container to be visible
page.wait_for_selector("#timeline-container", state="attached")
# Wait for vis-timeline to create its DOM elements
page.wait_for_selector(".vis-timeline", timeout=10000)
- # Wait for timeline items to be rendered
- page.wait_for_selector(".vis-item", timeout=5000)
+ # Wait for timeline items to be rendered (if expected)
+ if expect_items:
+ page.wait_for_selector(".vis-item", timeout=5000)
@pytest.mark.browser
def test_timeline_toggle_button_exists(self, page: Page):
@@ -287,18 +294,23 @@ def test_sidechain_message_filtering_integration(self, page: Page):
@pytest.mark.browser
def test_sidechain_messages_html_css_classes(self, page: Page):
- """Test that sidechain messages in the main content have correct CSS classes."""
+ """Test that sidechain messages in the main content have correct CSS classes.
+
+ Note: User sidechain messages (Sub-assistant prompts) are now skipped
+ since they duplicate the Task tool input prompt.
+ """
sidechain_file = Path("test/test_data/sidechain.jsonl")
messages = load_transcript(sidechain_file)
temp_file = self._create_temp_html(messages, "Sidechain CSS Classes Test")
page.goto(f"file://{temp_file}")
- # Check for sub-assistant user messages in main content
+ # User sidechain messages should no longer be produced
+ # (they duplicate the Task tool input prompt)
user_sidechain_messages = page.locator(".message.user.sidechain")
user_count = user_sidechain_messages.count()
- assert user_count > 0, (
- "Should have user sidechain messages with 'user sidechain' classes"
+ assert user_count == 0, (
+ "User sidechain messages should no longer be produced (duplicates Task tool input)"
)
# Check for sub-assistant assistant messages in main content
@@ -308,19 +320,6 @@ def test_sidechain_messages_html_css_classes(self, page: Page):
"Should have assistant sidechain messages with 'assistant sidechain' classes"
)
- # Verify that we found the expected sidechain message types
- assert user_count > 0 and assistant_count > 0, (
- f"Should have both user ({user_count}) and assistant ({assistant_count}) sidechain messages"
- )
-
- # Check that the specific failing test message has the right classes
- failing_test_message = page.locator(
- '.message.user.sidechain:has-text("failing test")'
- )
- assert failing_test_message.count() > 0, (
- "Sub-assistant prompt about failing test should have 'user sidechain' classes"
- )
-
@pytest.mark.browser
def test_sidechain_filter_complete_integration(self, page: Page):
"""Test complete integration of sidechain filtering between main content and timeline."""
@@ -611,18 +610,21 @@ def test_timeline_filter_synchronization(self, page: Page):
# Open filter panel
page.locator("#filterMessages").click()
+ filter_toolbar = page.locator(".filter-toolbar")
+ expect(filter_toolbar).to_be_visible()
# Test multiple filter combinations
+ # Note: Filter buttons with 0 count are hidden, so we skip them
test_cases = [
- ("user", '.filter-toggle[data-type="user"]'),
("assistant", '.filter-toggle[data-type="assistant"]'),
("sidechain", '.filter-toggle[data-type="sidechain"]'),
]
for filter_type, selector in test_cases:
- if page.locator(selector).count() > 0:
+ filter_toggle = page.locator(selector)
+ if filter_toggle.count() > 0 and filter_toggle.is_visible():
# Deselect the filter
- page.locator(selector).click()
+ filter_toggle.click()
page.wait_for_timeout(100) # Allow filters to apply
# Check that main messages are filtered
@@ -637,7 +639,7 @@ def test_timeline_filter_synchronization(self, page: Page):
# because timeline groups messages differently)
# Re-enable the filter
- page.locator(selector).click()
+ filter_toggle.click()
page.wait_for_timeout(100)
# Check that messages are visible again
@@ -777,18 +779,21 @@ def test_timeline_filter_edge_cases(self, page: Page):
# Test rapid filter toggling
page.locator("#filterMessages").click()
+ filter_toolbar = page.locator(".filter-toolbar")
+ expect(filter_toolbar).to_be_visible()
- user_filter = page.locator('.filter-toggle[data-type="user"]')
+ # Note: Filter buttons with 0 count are hidden (e.g., user filter when sidechain.jsonl has no regular users)
+ sidechain_filter = page.locator('.filter-toggle[data-type="sidechain"]')
assistant_filter = page.locator('.filter-toggle[data-type="assistant"]')
- if user_filter.count() > 0 and assistant_filter.count() > 0:
+ if sidechain_filter.is_visible() and assistant_filter.is_visible():
# Rapidly toggle filters
for _ in range(3):
- user_filter.click()
+ sidechain_filter.click()
page.wait_for_timeout(50)
assistant_filter.click()
page.wait_for_timeout(50)
- user_filter.click()
+ sidechain_filter.click()
page.wait_for_timeout(50)
assistant_filter.click()
page.wait_for_timeout(50)
@@ -803,13 +808,13 @@ def test_timeline_filter_edge_cases(self, page: Page):
page.wait_for_timeout(200)
# Change filters while timeline is hidden
- if user_filter.count() > 0:
- user_filter.click()
+ if sidechain_filter.is_visible():
+ sidechain_filter.click()
page.wait_for_timeout(100)
- # Show timeline again
+ # Show timeline again (may have no items if filters hide all messages)
page.locator("#toggleTimeline").click()
- self._wait_for_timeline_loaded(page)
+ self._wait_for_timeline_loaded(page, expect_items=False)
# Timeline should reflect current filter state
timeline_items = page.locator(".vis-item")
@@ -835,22 +840,25 @@ def test_timeline_filter_performance(self, page: Page):
# Open filter panel
page.locator("#filterMessages").click()
+ filter_toolbar = page.locator(".filter-toolbar")
+ expect(filter_toolbar).to_be_visible()
# Perform multiple filter operations in sequence
start_time = page.evaluate("() => performance.now()")
# Test sequence of filter operations
+ # Note: Filter buttons with 0 count are hidden, so check visibility
operations = [
"#selectNone",
"#selectAll",
- '.filter-toggle[data-type="user"]',
'.filter-toggle[data-type="assistant"]',
'.filter-toggle[data-type="sidechain"]',
]
for operation in operations:
- if page.locator(operation).count() > 0:
- page.locator(operation).click()
+ locator = page.locator(operation)
+ if locator.count() > 0 and locator.is_visible():
+ locator.click()
page.wait_for_timeout(100)
end_time = page.evaluate("() => performance.now()")
@@ -965,55 +973,62 @@ def test_timeline_synchronizes_with_message_filtering(self, page: Page):
# Open filter panel and turn off user messages
page.locator("#filterMessages").click()
- user_filter = page.locator('.filter-toggle[data-type="user"]')
- if user_filter.count() > 0:
- user_filter.click() # Turn off user messages
+ filter_toolbar = page.locator(".filter-toolbar")
+ expect(filter_toolbar).to_be_visible()
+
+ # Note: User filter may be hidden if sidechain.jsonl has no regular user messages
+ # Use sidechain filter instead for this test
+ sidechain_filter = page.locator('.filter-toggle[data-type="sidechain"]')
+ if sidechain_filter.is_visible():
+ sidechain_filter.click() # Turn off sidechain messages
page.wait_for_timeout(100)
- # Check that user messages are hidden in main content
- visible_user_messages = page.locator(
- ".message.user:not(.filtered-hidden)"
+ # Check that sidechain messages are hidden in main content
+ visible_sidechain_messages = page.locator(
+ ".message.sidechain:not(.filtered-hidden)"
).count()
- assert visible_user_messages == 0, (
- "User messages should be hidden by main filter"
+ assert visible_sidechain_messages == 0, (
+ "Sidechain messages should be hidden by main filter"
)
- # Now activate timeline
+ # Now activate timeline (may have no items if filters hide all messages)
page.locator("#toggleTimeline").click()
- self._wait_for_timeline_loaded(page)
+ self._wait_for_timeline_loaded(page, expect_items=False)
- # Timeline should NOT contain user messages since they're filtered out
+ # Timeline should NOT contain sidechain messages since they're filtered out
# This is the core issue - timeline might be building from all messages, not just visible ones
- # Check timeline items - if the bug exists, we'll see user messages in timeline
+ # Check timeline items - if the bug exists, we'll see sidechain messages in timeline
# even though they're filtered out in main view
timeline_items = page.locator(".vis-item")
timeline_count = timeline_items.count()
- # Let's check if any timeline items contain user content that should be filtered
+ # Let's check if any timeline items contain sidechain content that should be filtered
# This is tricky because we need to check the timeline's internal representation
# For now, let's just verify that timeline filtering matches main filtering
- # by checking if timeline shows fewer items when user filter is off
+ # by checking if timeline shows fewer items when sidechain filter is off
- # Turn user filter back on
- user_filter.click()
+ # Turn sidechain filter back on
+ sidechain_filter.click()
page.wait_for_timeout(100)
- # Timeline should now show more items (or same if no user messages were in timeline)
- timeline_items_with_user = page.locator(".vis-item")
- timeline_count_with_user = timeline_items_with_user.count()
+ # Timeline should now show more items (or same if no sidechain messages were in timeline)
+ timeline_items_with_sidechain = page.locator(".vis-item")
+ timeline_count_with_sidechain = timeline_items_with_sidechain.count()
- # The counts should be different if user messages are properly filtered
+ # The counts should be different if sidechain messages are properly filtered
# But this test documents the expected behavior even if it's currently broken
- print(f"Timeline items without user filter: {timeline_count}")
- print(f"Timeline items with user filter: {timeline_count_with_user}")
+ print(f"Timeline items without sidechain filter: {timeline_count}")
+ print(
+ f"Timeline items with sidechain filter: {timeline_count_with_sidechain}"
+ )
# The assertion here documents what SHOULD happen
- # If timeline filtering works correctly, timeline_count_with_user should be >= timeline_count
- # because enabling user filter should show same or more items
- assert timeline_count_with_user >= timeline_count, (
- "Timeline should show same or more items when user filter is enabled"
+ # If timeline filtering works correctly, timeline_count_with_sidechain should be >= timeline_count
+ # because enabling sidechain filter should show same or more items
+ assert timeline_count_with_sidechain >= timeline_count, (
+ "Timeline should show same or more items when sidechain filter is enabled"
)
@pytest.mark.browser
diff --git a/test/test_todowrite_rendering.py b/test/test_todowrite_rendering.py
index 933a7f5f..80942ab7 100644
--- a/test/test_todowrite_rendering.py
+++ b/test/test_todowrite_rendering.py
@@ -211,8 +211,9 @@ def test_todowrite_integration_with_full_message(self):
assert "Write tests" in html_content
assert "π" in html_content # in_progress emoji
assert "β³" in html_content # pending emoji
+ # Check tool_use class is present (may have ancestor IDs appended)
assert (
- "class='message tool_use'" in html_content
+ "class='message tool_use" in html_content
) # tool as top-level message
# Check CSS classes are applied