diff --git a/PLAN_PHASE12.md b/PLAN_PHASE12.md new file mode 100644 index 00000000..1dd2836d --- /dev/null +++ b/PLAN_PHASE12.md @@ -0,0 +1,345 @@ +# Phase 12: Format-Neutral Decomposition Plan + +## Overview + +This plan separates format-neutral logic from HTML-specific generation in renderer.py. The goal is to: +1. Create a `TemplateMessage` that stores logical attributes instead of CSS classes +2. Move HTML-specific rendering to a new `html_renderer.py` module +3. Keep format-neutral processing in `renderer.py` (to be renamed later) + +## Key Design Decisions + +### 1. Replace `css_class` with Typed Attributes + +Instead of encoding traits as space-separated CSS classes (e.g., `"user sidechain slash-command"`), we'll use explicit fields: + +```python +# In models.py - add MessageModifiers dataclass +@dataclass +class MessageModifiers: + """Semantic modifiers for message rendering.""" + is_sidechain: bool = False + is_slash_command: bool = False + is_command_output: bool = False + is_compacted: bool = False + is_error: bool = False + is_steering: bool = False + system_level: Optional[str] = None # "info", "warning", "error", "hook" +``` + +The `TemplateMessage` will have: +- `type: MessageType` (already have the enum) +- `modifiers: MessageModifiers` (new) +- Remove `css_class` field + +### 2. 
HTML Renderer Module (`html_renderer.py`) + +New module containing HTML-specific functions: + +```python +# html_renderer.py + +def css_class_from_message(msg: TemplateMessage) -> str: + """Generate CSS class string from message type and modifiers.""" + parts = [msg.type.value] + if msg.modifiers.is_sidechain: + parts.append("sidechain") + if msg.modifiers.is_slash_command: + parts.append("slash-command") + if msg.modifiers.is_command_output: + parts.append("command-output") + if msg.modifiers.is_compacted: + parts.append("compacted") + if msg.modifiers.is_error: + parts.append("error") + if msg.modifiers.is_steering: + parts.append("steering") + if msg.modifiers.system_level: + parts.append(f"system-{msg.modifiers.system_level}") + return " ".join(parts) + +def get_message_emoji(msg: TemplateMessage) -> str: + """Return emoji for message type.""" + # Move emoji logic from template to here + +def render_content_html(msg: TemplateMessage) -> str: + """Render message content to HTML.""" + # Delegates to format_* functions +``` + +### 3. Keep Format-Neutral Processing in renderer.py + +Functions that stay in renderer.py (format-neutral): +- `_process_messages_loop()` - but sets `modifiers` instead of `css_class` +- `_identify_message_pairs()` - pairing logic +- `_build_message_hierarchy()` - but uses `type` and `modifiers` instead of `css_class` +- `_reorder_paired_messages()` - reordering logic +- Deduplication logic +- Token aggregation + +### 4. 
Migration Strategy + +The migration will be done in phases to minimize disruption: + +**Phase 12a: Add MessageModifiers** +- Add `MessageModifiers` dataclass to `models.py` +- Add `modifiers` field to `TemplateMessage` +- Keep `css_class` field for backward compatibility + +**Phase 12b: Populate Modifiers** +- Update all TemplateMessage creation sites to set `modifiers` +- Replace `"x" in css_class` checks with `modifiers.is_x` + +**Phase 12c: Create html_renderer.py** +- Move `escape_html()`, `render_markdown()` to html_renderer.py +- Create `css_class_from_message()` function +- Move tool formatters to html_renderer.py + +**Phase 12d: Update Templates** +- Modify template to call `css_class_from_message(message)` +- Update emoji logic to use modifiers + +**Phase 12e: Remove css_class** +- Remove `css_class` parameter from TemplateMessage +- Clean up any remaining references + +## Detailed Implementation + +### Phase 12a: Add MessageModifiers (models.py) + +```python +from dataclasses import dataclass, field +from typing import Optional + +@dataclass +class MessageModifiers: + """Semantic modifiers that affect message display. + + These are format-neutral flags that renderers can use to determine + how to display a message. HTML renderer converts these to CSS classes, + text renderer might use them for indentation or formatting. + """ + is_sidechain: bool = False + is_slash_command: bool = False + is_command_output: bool = False + is_compacted: bool = False + is_error: bool = False + is_steering: bool = False + # System message level (mutually exclusive) + system_level: Optional[str] = None # "info", "warning", "error", "hook" +``` + +Add to TemplateMessage.__init__: +```python +def __init__( + self, + message_type: str, # Will become MessageType + content_html: str, + formatted_timestamp: str, + css_class: str, # Keep for now, will remove in 12e + modifiers: Optional[MessageModifiers] = None, # New + # ... 
other params +): + self.type = message_type + self.modifiers = modifiers or MessageModifiers() + # ... rest +``` + +### Phase 12b: Populate Modifiers + +Update each TemplateMessage creation site. Example from `_process_system_message`: + +```python +# Before +css_class = f"{message_type}" +if is_sidechain: + css_class = f"{css_class} sidechain" + +# After +modifiers = MessageModifiers(is_sidechain=is_sidechain) +css_class = f"{message_type}" # Keep for backward compat +if is_sidechain: + css_class = f"{css_class} sidechain" +``` + +Update `_get_message_hierarchy_level()`: +```python +# Before +if "sidechain" in css_class: + ... + +# After +def _get_message_hierarchy_level(msg: TemplateMessage) -> int: + is_sidechain = msg.modifiers.is_sidechain + msg_type = msg.type + + if msg_type == MessageType.USER and not is_sidechain: + return 1 + # ... +``` + +### Phase 12c: Create html_renderer.py + +```python +"""HTML-specific rendering utilities. + +This module contains all HTML generation code: +- CSS class computation +- HTML escaping +- Markdown rendering +- Tool-specific formatters +""" + +from html import escape +from typing import Optional, List +import mistune + +from .models import MessageType, MessageModifiers, TemplateMessage + + +def escape_html(text: str) -> str: + """Escape HTML special characters.""" + return escape(text, quote=True) + + +def render_markdown(text: str) -> str: + """Convert markdown to HTML.""" + return mistune.html(text) + + +def css_class_from_message(msg: TemplateMessage) -> str: + """Generate CSS class string from message type and modifiers. + + This reconstructs the original css_class format for backward + compatibility with existing CSS and JavaScript. 
+ """ + parts: List[str] = [msg.type.value if isinstance(msg.type, MessageType) else msg.type] + + mods = msg.modifiers + if mods.is_slash_command: + parts.append("slash-command") + if mods.is_command_output: + parts.append("command-output") + if mods.is_compacted: + parts.append("compacted") + if mods.is_error: + parts.append("error") + if mods.is_steering: + parts.append("steering") + if mods.is_sidechain: + parts.append("sidechain") + if mods.system_level: + parts.append(f"system-{mods.system_level}") + + return " ".join(parts) + + +def get_message_emoji(msg: TemplateMessage) -> str: + """Return appropriate emoji for message type.""" + msg_type = msg.type if isinstance(msg.type, MessageType) else msg.type + + if msg_type == MessageType.SESSION_HEADER: + return "📋" + elif msg_type == MessageType.USER: + return "🤷" + elif msg_type == MessageType.ASSISTANT: + return "🤖" + elif msg_type == MessageType.SYSTEM: + return "⚙️" + elif msg_type == MessageType.TOOL_USE: + return "🛠️" + elif msg_type == MessageType.TOOL_RESULT: + if msg.modifiers.is_error: + return "🚨" + return "🧰" + elif msg_type == MessageType.THINKING: + return "💭" + elif msg_type == MessageType.IMAGE: + return "🖼️" + return "" + + +# Move format_* tool functions here: +# - format_ask_user_question_tool_content +# - format_todo_write_tool_content +# - format_bash_tool_content +# etc. +``` + +### Phase 12d: Update Templates + +Update transcript.html to use the new functions. Register them as Jinja filters or pass as context: + +```python +# In renderer.py when rendering template +from .html_renderer import css_class_from_message, get_message_emoji + +template = env.get_template("transcript.html") +html = template.render( + messages=messages, + css_class_from_message=css_class_from_message, + get_message_emoji=get_message_emoji, + # ... +) +``` + +Template changes: +```jinja +{# Before #} +
<div class="message {{ message.css_class }}">
+
+{# After #}
+<div class="message {{ css_class_from_message(message) }}">
+``` + +### Phase 12e: Remove css_class + +Once all references use modifiers: +1. Remove `css_class` parameter from `TemplateMessage.__init__` +2. Remove `self.css_class = css_class` +3. Clean up all `css_class=...` at creation sites +4. Update tests to use modifiers + +## Files Changed + +| File | Changes | +|------|---------| +| `models.py` | Add `MessageModifiers` dataclass | +| `renderer.py` | Update TemplateMessage, populate modifiers, update hierarchy logic | +| `html_renderer.py` | New file with HTML utilities and css_class_from_message | +| `templates/transcript.html` | Use css_class_from_message filter | +| `test_*.py` | Update tests to use modifiers | + +## Testing Strategy + +1. **Snapshot tests**: Run after each phase to verify HTML output unchanged +2. **Unit tests for css_class_from_message**: Verify it produces same strings +3. **Unit tests for modifiers**: Test each modifier flag +4. **Integration tests**: Full render with real transcripts + +## Commit Plan + +1. `Add MessageModifiers dataclass to models.py` (12a) +2. `Add modifiers field to TemplateMessage` (12a) +3. `Populate modifiers in message processing` (12b part 1) +4. `Update hierarchy logic to use modifiers` (12b part 2) +5. `Create html_renderer.py with css_class_from_message` (12c) +6. `Move escape_html and render_markdown to html_renderer` (12c) +7. `Update template to use css_class_from_message` (12d) +8. 
`Remove css_class field from TemplateMessage` (12e) + +## Risk Assessment + +- **Low risk**: MessageModifiers is additive, doesn't break existing code +- **Medium risk**: Moving functions to html_renderer.py requires import updates +- **High risk**: Template changes and css_class removal need careful testing + +## Estimated Scope + +- Phase 12a: ~30 lines added to models.py, ~10 lines to renderer.py +- Phase 12b: ~50 modifications across renderer.py +- Phase 12c: ~200 lines new file, ~200 lines moved from renderer.py +- Phase 12d: ~10 lines template changes +- Phase 12e: ~20 lines removed + +Total: Moderate refactoring, ~5-8 commits diff --git a/claude_code_log/cache.py b/claude_code_log/cache.py index a8fb43b8..7213da9b 100644 --- a/claude_code_log/cache.py +++ b/claude_code_log/cache.py @@ -172,7 +172,7 @@ def load_cached_entries(self, jsonl_path: Path) -> Optional[List[TranscriptEntry entries_data.extend(cast(List[Dict[str, Any]], timestamp_entries)) # Deserialize back to TranscriptEntry objects - from .models import parse_transcript_entry + from .parser import parse_transcript_entry entries = [ parse_transcript_entry(entry_dict) for entry_dict in entries_data @@ -257,7 +257,7 @@ def load_cached_entries_filtered( ) # Deserialize filtered entries - from .models import parse_transcript_entry + from .parser import parse_transcript_entry entries = [ parse_transcript_entry(entry_dict) diff --git a/claude_code_log/cli.py b/claude_code_log/cli.py index f2a66ddc..13ab85c9 100644 --- a/claude_code_log/cli.py +++ b/claude_code_log/cli.py @@ -10,7 +10,11 @@ import click from git import Repo, InvalidGitRepositoryError -from .converter import convert_jsonl_to_html, process_projects_hierarchy +from .converter import ( + convert_jsonl_to, + convert_jsonl_to_html, + process_projects_hierarchy, +) from .cache import CacheManager, get_library_version @@ -411,6 +415,13 @@ def _clear_html_files(input_path: Path, all_projects: bool) -> None: default=None, help="Custom projects 
directory (default: ~/.claude/projects/). Useful for testing.", ) +@click.option( + "--format", + "output_format", + type=click.Choice(["html"]), + default="html", + help="Output format (default: html). Currently only html is supported.", +) def main( input_path: Optional[Path], output: Optional[Path], @@ -424,6 +435,7 @@ def main( clear_html: bool, tui: bool, projects_dir: Optional[Path], + output_format: str, ) -> None: """Convert Claude transcript JSONL files to HTML. @@ -602,7 +614,8 @@ def main( f"Neither {input_path} nor {claude_path} exists" ) - output_path = convert_jsonl_to_html( + output_path = convert_jsonl_to( + output_format, input_path, output, from_date, diff --git a/claude_code_log/converter.py b/claude_code_log/converter.py index c5bc698a..83e58ee6 100644 --- a/claude_code_log/converter.py +++ b/claude_code_log/converter.py @@ -1,39 +1,374 @@ #!/usr/bin/env python3 """Convert Claude transcript JSONL files to HTML.""" +import json +import re from pathlib import Path import traceback from typing import List, Optional, Dict, Any, TYPE_CHECKING +import dateparser + if TYPE_CHECKING: from .cache import CacheManager from .utils import ( + format_timestamp_range, + get_project_display_name, should_use_as_session_starter, create_session_preview, extract_working_directories, get_warmup_session_ids, ) from .cache import CacheManager, SessionCacheData, get_library_version -from .parser import ( - load_transcript, - load_directory_transcripts, - filter_messages_by_date, -) +from .parser import parse_timestamp, parse_transcript_entry from .models import ( TranscriptEntry, AssistantTranscriptEntry, SummaryTranscriptEntry, + SystemTranscriptEntry, UserTranscriptEntry, + ToolResultContent, ) -from .renderer import ( - deduplicate_messages, - generate_html, - generate_session_html, - generate_projects_index_html, - is_html_outdated, - get_project_display_name, -) +from .renderer import get_renderer + + +# 
============================================================================= +# Transcript Loading Functions +# ============================================================================= + + +def filter_messages_by_date( + messages: List[TranscriptEntry], from_date: Optional[str], to_date: Optional[str] +) -> List[TranscriptEntry]: + """Filter messages based on date range.""" + if not from_date and not to_date: + return messages + + # Parse the date strings using dateparser + from_dt = None + to_dt = None + + if from_date: + from_dt = dateparser.parse(from_date) + if not from_dt: + raise ValueError(f"Could not parse from-date: {from_date}") + # If parsing relative dates like "today", start from beginning of day + if from_date in ["today", "yesterday"] or "days ago" in from_date: + from_dt = from_dt.replace(hour=0, minute=0, second=0, microsecond=0) + + if to_date: + to_dt = dateparser.parse(to_date) + if not to_dt: + raise ValueError(f"Could not parse to-date: {to_date}") + # If parsing relative dates like "today", end at end of day + if to_date in ["today", "yesterday"] or "days ago" in to_date: + to_dt = to_dt.replace(hour=23, minute=59, second=59, microsecond=999999) + + filtered_messages: List[TranscriptEntry] = [] + for message in messages: + # Handle SummaryTranscriptEntry which doesn't have timestamp + if isinstance(message, SummaryTranscriptEntry): + filtered_messages.append(message) + continue + + timestamp_str = message.timestamp + if not timestamp_str: + continue + + message_dt = parse_timestamp(timestamp_str) + if not message_dt: + continue + + # Convert to naive datetime for comparison (dateparser returns naive datetimes) + if message_dt.tzinfo: + message_dt = message_dt.replace(tzinfo=None) + + # Check if message falls within date range + if from_dt and message_dt < from_dt: + continue + if to_dt and message_dt > to_dt: + continue + + filtered_messages.append(message) + + return filtered_messages + + +def load_transcript( + jsonl_path: Path, + 
cache_manager: Optional["CacheManager"] = None, + from_date: Optional[str] = None, + to_date: Optional[str] = None, + silent: bool = False, + _loaded_files: Optional[set[Path]] = None, +) -> List[TranscriptEntry]: + """Load and parse JSONL transcript file, using cache if available. + + Args: + _loaded_files: Internal parameter to track loaded files and prevent infinite recursion. + """ + # Initialize loaded files set on first call + if _loaded_files is None: + _loaded_files = set() + + # Prevent infinite recursion by checking if this file is already being loaded + if jsonl_path in _loaded_files: + return [] + + _loaded_files.add(jsonl_path) + # Try to load from cache first + if cache_manager is not None: + # Use filtered loading if date parameters are provided + if from_date or to_date: + cached_entries = cache_manager.load_cached_entries_filtered( + jsonl_path, from_date, to_date + ) + else: + cached_entries = cache_manager.load_cached_entries(jsonl_path) + + if cached_entries is not None: + if not silent: + print(f"Loading {jsonl_path} from cache...") + return cached_entries + + # Parse from source file + messages: List[TranscriptEntry] = [] + agent_ids: set[str] = set() # Collect agentId references while parsing + + with open(jsonl_path, "r", encoding="utf-8", errors="replace") as f: + if not silent: + print(f"Processing {jsonl_path}...") + for line_no, line in enumerate(f, 1): # Start counting from 1 + line = line.strip() + if line: + try: + entry_dict: dict[str, Any] | str = json.loads(line) + if not isinstance(entry_dict, dict): + print( + f"Line {line_no} of {jsonl_path} is not a JSON object: {line}" + ) + continue + + # Check for agentId BEFORE Pydantic parsing + # agentId can be at top level OR nested in toolUseResult + # For UserTranscriptEntry, we need to copy it to top level so Pydantic preserves it + if "agentId" in entry_dict: + agent_id = entry_dict.get("agentId") + if agent_id: + agent_ids.add(agent_id) + elif "toolUseResult" in entry_dict: + 
tool_use_result = entry_dict.get("toolUseResult")
+                        if (
+                            isinstance(tool_use_result, dict)
+                            and "agentId" in tool_use_result
+                        ):
+                            agent_id_value = tool_use_result.get("agentId")  # type: ignore[reportUnknownVariableType, reportUnknownMemberType]
+                            if isinstance(agent_id_value, str):
+                                agent_ids.add(agent_id_value)
+                                # Copy agentId to top level for Pydantic to preserve
+                                entry_dict["agentId"] = agent_id_value
+
+                    entry_type: str | None = entry_dict.get("type")
+
+                    if entry_type in [
+                        "user",
+                        "assistant",
+                        "summary",
+                        "system",
+                        "queue-operation",
+                    ]:
+                        # Parse using Pydantic models
+                        entry = parse_transcript_entry(entry_dict)
+                        messages.append(entry)
+                    elif (
+                        entry_type
+                        in [
+                            "file-history-snapshot",  # Internal Claude Code file backup metadata
+                        ]
+                    ):
+                        # Silently skip internal message types we don't render
+                        pass
+                    else:
+                        print(
+                            f"Line {line_no} of {jsonl_path} is not a recognised message type: {line}"
+                        )
+                except json.JSONDecodeError as e:
+                    print(
+                        f"Line {line_no} of {jsonl_path} | JSON decode error: {str(e)}"
+                    )
+                except ValueError as e:
+                    # Extract a more descriptive error message
+                    error_msg = str(e)
+                    if "validation error" in error_msg.lower():
+                        err_no_url = re.sub(
+                            r" For further information visit https://errors.pydantic(.*)\n?",
+                            "",
+                            error_msg,
+                        )
+                        print(f"Line {line_no} of {jsonl_path} | {err_no_url}")
+                    else:
+                        print(
+                            f"Line {line_no} of {jsonl_path} | ValueError: {error_msg}"
+                            f"\n{traceback.format_exc()}"
+                        )
+                except Exception as e:
+                    print(
+                        f"Line {line_no} of {jsonl_path} | Unexpected error: {str(e)}"
+                        f"\n{traceback.format_exc()}"
+                    )
+
+    # Load agent files if any were referenced
+    # Build a map of agentId -> agent messages
+    agent_messages_map: dict[str, List[TranscriptEntry]] = {}
+    if agent_ids:
+        parent_dir = jsonl_path.parent
+        for agent_id in agent_ids:
+            agent_file = parent_dir / f"agent-{agent_id}.jsonl"
+            # Skip if the agent file is the same as the current file (self-reference)
+            if agent_file == 
jsonl_path: + continue + if agent_file.exists(): + if not silent: + print(f"Loading agent file {agent_file}...") + # Recursively load the agent file (it might reference other agents) + agent_messages = load_transcript( + agent_file, + cache_manager, + from_date, + to_date, + silent=True, + _loaded_files=_loaded_files, + ) + agent_messages_map[agent_id] = agent_messages + + # Insert agent messages at their point of use + if agent_messages_map: + # Iterate through messages and insert agent messages after the message + # that references them (via UserTranscriptEntry.agentId) + result_messages: List[TranscriptEntry] = [] + for message in messages: + result_messages.append(message) + + # Check if this is a UserTranscriptEntry with agentId + if isinstance(message, UserTranscriptEntry) and message.agentId: + agent_id = message.agentId + if agent_id in agent_messages_map: + # Insert agent messages right after this message + result_messages.extend(agent_messages_map[agent_id]) + + messages = result_messages + + # Save to cache if cache manager is available + if cache_manager is not None: + cache_manager.save_cached_entries(jsonl_path, messages) + + return messages + + +def load_directory_transcripts( + directory_path: Path, + cache_manager: Optional["CacheManager"] = None, + from_date: Optional[str] = None, + to_date: Optional[str] = None, + silent: bool = False, +) -> List[TranscriptEntry]: + """Load all JSONL transcript files from a directory and combine them.""" + all_messages: List[TranscriptEntry] = [] + + # Find all .jsonl files + jsonl_files = list(directory_path.glob("*.jsonl")) + + for jsonl_file in jsonl_files: + messages = load_transcript( + jsonl_file, cache_manager, from_date, to_date, silent + ) + all_messages.extend(messages) + + # Sort all messages chronologically + def get_timestamp(entry: TranscriptEntry) -> str: + if hasattr(entry, "timestamp"): + return entry.timestamp # type: ignore + return "" + + all_messages.sort(key=get_timestamp) + return 
all_messages + + +# ============================================================================= +# Deduplication +# ============================================================================= + + +def deduplicate_messages(messages: List[TranscriptEntry]) -> List[TranscriptEntry]: + """Remove duplicate messages based on (type, timestamp, sessionId, content_key). + + Messages with the exact same timestamp are duplicates by definition - + the differences (like IDE selection tags) are just logging artifacts. + + We need a content-based key to handle two cases: + 1. Version stutter: Same message logged twice during Claude Code upgrade + -> Same timestamp, same message.id or tool_use_id -> SHOULD deduplicate + 2. Concurrent tool results: Multiple tool results with same timestamp + -> Same timestamp, different tool_use_ids -> should NOT deduplicate + + Args: + messages: List of transcript entries to deduplicate + + Returns: + List of deduplicated messages, preserving order (first occurrence kept) + """ + # Track seen (message_type, timestamp, is_meta, session_id, content_key) tuples + seen: set[tuple[str, str, bool, str, str]] = set() + deduplicated: List[TranscriptEntry] = [] + + for message in messages: + # Get basic message type + message_type = getattr(message, "type", "unknown") + + # For system messages, include level to differentiate info/warning/error + if isinstance(message, SystemTranscriptEntry): + level = getattr(message, "level", "info") + message_type = f"system-{level}" + + # Get timestamp + timestamp = getattr(message, "timestamp", "") + + # Get isMeta flag (slash command prompts have isMeta=True with same timestamp as parent) + is_meta = getattr(message, "isMeta", False) + + # Get sessionId for multi-session report deduplication + session_id = getattr(message, "sessionId", "") + + # Get content key for differentiating concurrent messages + # - For assistant messages: use message.id (same for stutters, different for different msgs) + # - For user 
messages with tool results: use first tool_use_id + # - For other messages: use uuid as fallback + content_key = "" + if isinstance(message, AssistantTranscriptEntry): + # For assistant messages, use the message id + content_key = message.message.id + elif isinstance(message, UserTranscriptEntry): + # For user messages, check for tool results + if isinstance(message.message.content, list): + for item in message.message.content: + if isinstance(item, ToolResultContent): + content_key = item.tool_use_id + break + # Fallback to uuid if no content key found + if not content_key: + content_key = getattr(message, "uuid", "") + + # Create deduplication key - include content_key for proper handling + # of both version stutters and concurrent tool results + dedup_key = (message_type, timestamp, is_meta, session_id, content_key) + + # Keep only first occurrence + if dedup_key not in seen: + seen.add(dedup_key) + deduplicated.append(message) + + return deduplicated def convert_jsonl_to_html( @@ -45,7 +380,33 @@ def convert_jsonl_to_html( use_cache: bool = True, silent: bool = False, ) -> Path: - """Convert JSONL transcript(s) to HTML file(s).""" + """Convert JSONL transcript(s) to HTML file(s). + + Convenience wrapper around convert_jsonl_to() for HTML format. 
+ """ + return convert_jsonl_to( + "html", + input_path, + output_path, + from_date, + to_date, + generate_individual_sessions, + use_cache, + silent, + ) + + +def convert_jsonl_to( + format: str, + input_path: Path, + output_path: Optional[Path] = None, + from_date: Optional[str] = None, + to_date: Optional[str] = None, + generate_individual_sessions: bool = True, + use_cache: bool = True, + silent: bool = False, +) -> Path: + """Convert JSONL transcript(s) to the specified format.""" if not input_path.exists(): raise FileNotFoundError(f"Input path not found: {input_path}") @@ -61,14 +422,14 @@ def convert_jsonl_to_html( if input_path.is_file(): # Single file mode - cache only available for directory mode if output_path is None: - output_path = input_path.with_suffix(".html") + output_path = input_path.with_suffix(f".{format}") messages = load_transcript(input_path, silent=silent) title = f"Claude Transcript - {input_path.stem}" cache_was_updated = False # No cache in single file mode else: # Directory mode - Cache-First Approach if output_path is None: - output_path = input_path / "combined_transcripts.html" + output_path = input_path / f"combined_transcripts.{format}" # Phase 1: Ensure cache is fresh and populated cache_was_updated = ensure_fresh_cache( @@ -102,10 +463,11 @@ def convert_jsonl_to_html( date_range_str = " ".join(date_range_parts) title += f" ({date_range_str})" - # Generate combined HTML file (check if regeneration needed) + # Generate combined output file (check if regeneration needed) assert output_path is not None + renderer = get_renderer(format) should_regenerate = ( - is_html_outdated(output_path) + renderer.is_outdated(output_path) or from_date is not None or to_date is not None or not output_path.exists() @@ -115,15 +477,24 @@ def convert_jsonl_to_html( ) if should_regenerate: - html_content = generate_html(messages, title) - output_path.write_text(html_content, encoding="utf-8") + content = renderer.generate(messages, title) + assert 
content is not None + output_path.write_text(content, encoding="utf-8") else: - print(f"HTML file {output_path.name} is current, skipping regeneration") + print( + f"{format.upper()} file {output_path.name} is current, skipping regeneration" + ) # Generate individual session files if requested and in directory mode if generate_individual_sessions and input_path.is_dir(): _generate_individual_session_files( - messages, input_path, from_date, to_date, cache_manager, cache_was_updated + format, + messages, + input_path, + from_date, + to_date, + cache_manager, + cache_was_updated, ) return output_path @@ -333,21 +704,6 @@ def _update_cache_with_session_data( ) -def _format_session_timestamp_range(first_timestamp: str, last_timestamp: str) -> str: - """Format session timestamp range for display.""" - from .renderer import format_timestamp - - if first_timestamp and last_timestamp: - if first_timestamp == last_timestamp: - return format_timestamp(first_timestamp) - else: - return f"{format_timestamp(first_timestamp)} - {format_timestamp(last_timestamp)}" - elif first_timestamp: - return format_timestamp(first_timestamp) - else: - return "" - - def _collect_project_sessions(messages: List[TranscriptEntry]) -> List[Dict[str, Any]]: """Collect session data for project index navigation.""" from .parser import extract_text_content @@ -428,21 +784,10 @@ def _collect_project_sessions(messages: List[TranscriptEntry]) -> List[Dict[str, # Convert to list format with formatted timestamps session_list: List[Dict[str, Any]] = [] for session_data in sessions.values(): - from .renderer import format_timestamp - - first_ts = session_data["first_timestamp"] - last_ts = session_data["last_timestamp"] - timestamp_range = "" - if first_ts and last_ts: - if first_ts == last_ts: - timestamp_range = format_timestamp(first_ts) - else: - timestamp_range = ( - f"{format_timestamp(first_ts)} - {format_timestamp(last_ts)}" - ) - elif first_ts: - timestamp_range = format_timestamp(first_ts) - + 
timestamp_range = format_timestamp_range( + session_data["first_timestamp"], + session_data["last_timestamp"], + ) session_dict: Dict[str, Any] = { "id": session_data["id"], "summary": session_data["summary"], @@ -464,6 +809,7 @@ def _collect_project_sessions(messages: List[TranscriptEntry]) -> List[Dict[str, def _generate_individual_session_files( + format: str, messages: List[TranscriptEntry], output_dir: Path, from_date: Optional[str] = None, @@ -471,7 +817,7 @@ def _generate_individual_session_files( cache_manager: Optional["CacheManager"] = None, cache_was_updated: bool = False, ) -> None: - """Generate individual HTML files for each session.""" + """Generate individual files for each session in the specified format.""" # Pre-compute warmup sessions to exclude them warmup_session_ids = get_warmup_session_ids(messages) @@ -528,11 +874,12 @@ def _generate_individual_session_files( session_title += f" ({date_range_str})" # Check if session file needs regeneration - session_file_path = output_dir / f"session-{session_id}.html" + session_file_path = output_dir / f"session-{session_id}.{format}" + renderer = get_renderer(format) # Only regenerate if outdated, doesn't exist, or date filtering is active should_regenerate_session = ( - is_html_outdated(session_file_path) + renderer.is_outdated(session_file_path) or from_date is not None or to_date is not None or not session_file_path.exists() @@ -540,12 +887,13 @@ def _generate_individual_session_files( ) if should_regenerate_session: - # Generate session HTML - session_html = generate_session_html( + # Generate session content + session_content = renderer.generate_session( messages, session_id, session_title, cache_manager ) + assert session_content is not None # Write session file - session_file_path.write_text(session_html, encoding="utf-8") + session_file_path.write_text(session_content, encoding="utf-8") else: print( f"Session file {session_file_path.name} is current, skipping regeneration" @@ -638,7 +986,7 @@ def 
process_projects_hierarchy( { "id": session_data.session_id, "summary": session_data.summary, - "timestamp_range": _format_session_timestamp_range( + "timestamp_range": format_timestamp_range( session_data.first_timestamp, session_data.last_timestamp, ), @@ -749,8 +1097,12 @@ def process_projects_hierarchy( # Generate index HTML (always regenerate if outdated) index_path = projects_path / "index.html" - if is_html_outdated(index_path) or from_date or to_date or any_cache_updated: - index_html = generate_projects_index_html(project_summaries, from_date, to_date) + renderer = get_renderer("html") + if renderer.is_outdated(index_path) or from_date or to_date or any_cache_updated: + index_html = renderer.generate_projects_index( + project_summaries, from_date, to_date + ) + assert index_html is not None index_path.write_text(index_html, encoding="utf-8") else: print("Index HTML is current, skipping regeneration") diff --git a/claude_code_log/html/__init__.py b/claude_code_log/html/__init__.py new file mode 100644 index 00000000..ef847ef9 --- /dev/null +++ b/claude_code_log/html/__init__.py @@ -0,0 +1,162 @@ +"""HTML-specific rendering utilities package. + +Re-exports all functions from utils and formatter modules for backward compatibility. 
+""" + +from .utils import ( + css_class_from_message, + escape_html, + get_message_emoji, + get_template_environment, + render_collapsible_code, + render_file_content_collapsible, + render_markdown, + render_markdown_collapsible, + starts_with_emoji, +) +from .tool_formatters import ( + format_askuserquestion_content, + format_askuserquestion_result, + format_bash_tool_content, + format_edit_tool_content, + format_edit_tool_result, + format_exitplanmode_content, + format_exitplanmode_result, + format_multiedit_tool_content, + format_read_tool_content, + format_read_tool_result, + format_task_tool_content, + format_todowrite_content, + format_tool_result_content, + format_tool_use_content, + format_tool_use_title, + format_write_tool_content, + get_tool_summary, + parse_edit_output, + parse_read_output, + render_params_table, +) +from .system_formatters import ( + format_dedup_notice_content, + format_hook_summary_content, + format_session_header_content, + format_system_content, +) +from ..models import ( + AssistantTextContent, + BashInputContent, + BashOutputContent, + CommandOutputContent, + CompactedSummaryContent, + DedupNoticeContent, + IdeDiagnostic, + IdeNotificationContent, + IdeOpenedFile, + IdeSelection, + SessionHeaderContent, + SlashCommandContent, + ThinkingContentModel, + UserMemoryContent, + UserTextContent, +) +from ..parser import ( + parse_bash_input, + parse_bash_output, + parse_command_output, + parse_ide_notifications, + parse_slash_command, +) +from .user_formatters import ( + format_bash_input_content, + format_bash_output_content, + format_command_output_content, + format_compacted_summary_content, + format_ide_notification_content, + format_slash_command_content, + format_user_memory_content, + format_user_text_content, + format_user_text_model_content, +) +from .assistant_formatters import ( + format_assistant_text_content, + format_image_content, + format_thinking_content, +) + +__all__ = [ + # utils + "css_class_from_message", + 
"escape_html", + "get_message_emoji", + "get_template_environment", + "render_collapsible_code", + "render_file_content_collapsible", + "render_markdown", + "render_markdown_collapsible", + "starts_with_emoji", + # tool_formatters (input) + "format_askuserquestion_content", + "format_askuserquestion_result", + "format_bash_tool_content", + "format_edit_tool_content", + "format_exitplanmode_content", + "format_exitplanmode_result", + "format_multiedit_tool_content", + "format_read_tool_content", + "format_task_tool_content", + "format_todowrite_content", + "format_tool_use_content", + "format_tool_use_title", + "format_write_tool_content", + "get_tool_summary", + "render_params_table", + # tool_formatters (output/result) + "parse_read_output", + "format_read_tool_result", + "parse_edit_output", + "format_edit_tool_result", + "format_tool_result_content", + # system_formatters + "format_dedup_notice_content", + "format_hook_summary_content", + "format_session_header_content", + "format_system_content", + # system content models + "DedupNoticeContent", + "SessionHeaderContent", + # user_formatters (content models) + "SlashCommandContent", + "CommandOutputContent", + "BashInputContent", + "BashOutputContent", + "CompactedSummaryContent", + "UserMemoryContent", + "UserTextContent", + "IdeNotificationContent", + "IdeOpenedFile", + "IdeSelection", + "IdeDiagnostic", + # user_formatters (formatting) + "format_slash_command_content", + "format_command_output_content", + "format_bash_input_content", + "format_bash_output_content", + "format_compacted_summary_content", + "format_user_memory_content", + "format_user_text_content", + "format_user_text_model_content", + "format_ide_notification_content", + # user_formatters (parsing) + "parse_slash_command", + "parse_command_output", + "parse_bash_input", + "parse_bash_output", + "parse_ide_notifications", + # assistant_formatters (content models) + "AssistantTextContent", + "ThinkingContentModel", + # assistant_formatters 
(formatting) + "format_assistant_text_content", + "format_thinking_content", + "format_image_content", +] diff --git a/claude_code_log/html/ansi_colors.py b/claude_code_log/html/ansi_colors.py new file mode 100644 index 00000000..cd14f989 --- /dev/null +++ b/claude_code_log/html/ansi_colors.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python3 +"""ANSI escape code to HTML conversion. + +This module provides utilities for converting terminal ANSI escape codes +to HTML with appropriate CSS classes for styling. +""" + +import html +import re +from typing import Any, Dict, List + + +def _escape_html(text: str) -> str: + """Escape HTML special characters in text. + + Also normalizes line endings (CRLF -> LF) to prevent double spacing in
 blocks.
+    """
+    # Normalize CRLF to LF to prevent double line breaks in HTML
+    normalized = text.replace("\r\n", "\n").replace("\r", "\n")
+    return html.escape(normalized)
+
+
def convert_ansi_to_html(text: str) -> str:
    """Convert ANSI escape codes to HTML spans with CSS classes.

    Supports:
    - Colors (30-37, 90-97 for foreground; 40-47, 100-107 for background)
    - RGB colors (38;2;r;g;b for foreground; 48;2;r;g;b for background)
    - Bold (1), Dim (2), Italic (3), Underline (4)
    - Reset (0, 39, 49, 22, 23, 24)
    - Strips cursor movement and screen manipulation codes

    Args:
        text: Raw terminal output, possibly containing escape sequences.

    Returns:
        HTML-escaped text in which each styled run is wrapped in a
        ``<span class="..." style="...">`` element; unstyled runs are
        emitted as plain escaped text.
    """
    # Strip cursor movement and screen manipulation codes first: they carry
    # no styling information and would otherwise leak into the output.
    cursor_patterns = [
        r"\x1b\[[0-9]*[ABCD]",  # Cursor movement (up, down, forward, back)
        r"\x1b\[[0-9]*[EF]",  # Cursor next/previous line
        r"\x1b\[[0-9]*[GH]",  # Cursor horizontal/home position
        r"\x1b\[[0-9;]*[Hf]",  # Cursor position
        r"\x1b\[[0-9]*[JK]",  # Erase display/line
        r"\x1b\[[0-9]*[ST]",  # Scroll up/down
        r"\x1b\[\?[0-9]*[hl]",  # Private mode set/reset (show/hide cursor, etc.)
        r"\x1b\[[0-9]*[PXYZ@]",  # Insert/delete operations
        r"\x1b\[=[0-9]*[A-Za-z]",  # Alternate character set
        r"\x1b\][0-9];[^\x07]*\x07",  # Operating System Command (OSC), BEL-terminated
        r"\x1b\][0-9];[^\x1b]*\x1b\\",  # OSC with ST (string terminator)
    ]
    for pattern in cursor_patterns:
        text = re.sub(pattern, "", text)

    # Drop any remaining non-SGR escape sequences; the negative lookahead
    # preserves \x1b[...m color/style codes for the parser below.
    text = re.sub(r"\x1b\[(?![0-9;]*m)[0-9;]*[A-Za-z]", "", text)

    # SGR code -> CSS class tables, hoisted out of the parsing loop so they
    # are built once per call instead of once per escape code.
    names = ["black", "red", "green", "yellow", "blue", "magenta", "cyan", "white"]
    fg_map = {str(30 + i): f"ansi-{n}" for i, n in enumerate(names)}
    fg_map.update({str(90 + i): f"ansi-bright-{n}" for i, n in enumerate(names)})
    bg_map = {str(40 + i): f"ansi-bg-{n}" for i, n in enumerate(names)}
    bg_map.update({str(100 + i): f"ansi-bg-bright-{n}" for i, n in enumerate(names)})

    # First pass: split text into segments, each carrying the style state
    # that was active when it was emitted.
    segments: List[Dict[str, Any]] = []
    state: Dict[str, Any] = {
        "fg": None,
        "bg": None,
        "bold": False,
        "dim": False,
        "italic": False,
        "underline": False,
        "rgb_fg": None,
        "rgb_bg": None,
    }
    last_end = 0

    for match in re.finditer(r"\x1b\[([0-9;]+)m", text):
        # Text preceding this escape code belongs to the previous state.
        if match.start() > last_end:
            segments.append({"text": text[last_end : match.start()], **state})

        codes = match.group(1).split(";")
        i = 0
        while i < len(codes):
            code = codes[i]

            # Reset codes
            if code == "0":
                state.update(
                    fg=None,
                    bg=None,
                    bold=False,
                    dim=False,
                    italic=False,
                    underline=False,
                    rgb_fg=None,
                    rgb_bg=None,
                )
            elif code == "39":
                state.update(fg=None, rgb_fg=None)
            elif code == "49":
                state.update(bg=None, rgb_bg=None)
            elif code == "22":
                state.update(bold=False, dim=False)
            elif code == "23":
                state["italic"] = False
            elif code == "24":
                state["underline"] = False

            # Style codes
            elif code == "1":
                state["bold"] = True
            elif code == "2":
                state["dim"] = True
            elif code == "3":
                state["italic"] = True
            elif code == "4":
                state["underline"] = True

            # Standard and bright foreground/background colors
            elif code in fg_map:
                state.update(fg=fg_map[code], rgb_fg=None)
            elif code in bg_map:
                state.update(bg=bg_map[code], rgb_bg=None)

            # 24-bit RGB foreground color (38;2;r;g;b)
            elif code == "38" and i + 4 < len(codes) and codes[i + 1] == "2":
                r, g, b = codes[i + 2], codes[i + 3], codes[i + 4]
                state.update(rgb_fg=f"color: rgb({r}, {g}, {b})", fg=None)
                i += 4

            # 24-bit RGB background color (48;2;r;g;b)
            elif code == "48" and i + 4 < len(codes) and codes[i + 1] == "2":
                r, g, b = codes[i + 2], codes[i + 3], codes[i + 4]
                state.update(rgb_bg=f"background-color: rgb({r}, {g}, {b})", bg=None)
                i += 4

            i += 1

        last_end = match.end()

    # Trailing text after the final escape code.
    if last_end < len(text):
        segments.append({"text": text[last_end:], **state})

    # Second pass: build HTML, wrapping styled segments in <span> elements.
    result: List[str] = []
    for segment in segments:
        if not segment["text"]:
            continue

        classes = [c for c in (segment["fg"], segment["bg"]) if c]
        for flag in ("bold", "dim", "italic", "underline"):
            if segment[flag]:
                classes.append(f"ansi-{flag}")
        styles = [s for s in (segment["rgb_fg"], segment["rgb_bg"]) if s]

        # Normalize CRLF -> LF before escaping to prevent double line breaks
        # in HTML (same normalization as the module-level _escape_html).
        normalized = segment["text"].replace("\r\n", "\n").replace("\r", "\n")
        escaped_text = html.escape(normalized)

        if classes or styles:
            attrs: List[str] = []
            if classes:
                class_value = " ".join(classes)
                attrs.append(f'class="{class_value}"')
            if styles:
                style_value = "; ".join(styles)
                attrs.append(f'style="{style_value}"')
            attr_str = " ".join(attrs)
            # Fix: the attributes were previously built but never emitted,
            # producing unstyled output. Wrap the segment in a <span>.
            result.append(f"<span {attr_str}>{escaped_text}</span>")
        else:
            result.append(escaped_text)

    return "".join(result)
diff --git a/claude_code_log/html/assistant_formatters.py b/claude_code_log/html/assistant_formatters.py
new file mode 100644
index 00000000..63f6f61f
--- /dev/null
+++ b/claude_code_log/html/assistant_formatters.py
@@ -0,0 +1,109 @@
+"""HTML formatters for assistant message content.
+
+This module formats assistant message content types to HTML.
+Part of the thematic formatter organization:
+- system_formatters.py: SystemContent, HookSummaryContent
+- user_formatters.py: SlashCommandContent, CommandOutputContent, BashInputContent
+- assistant_formatters.py: AssistantTextContent, ThinkingContentModel, ImageContent
+- tool_formatters.py: tool use/result content
+
+Content models are defined in models.py, this module only handles formatting.
+"""
+
+from ..models import (
+    AssistantTextContent,
+    ImageContent,
+    ThinkingContentModel,
+    UnknownContent,
+)
+from .utils import escape_html, render_markdown_collapsible
+
+
+# =============================================================================
+# Formatting Functions
+# =============================================================================
+
+
def format_assistant_text_content(
    content: AssistantTextContent,
    line_threshold: int = 30,
    preview_line_count: int = 10,
) -> str:
    """Render an assistant text block to HTML.

    The text is treated as markdown; content longer than ``line_threshold``
    lines is wrapped in a collapsible element that initially shows only
    ``preview_line_count`` lines.

    Args:
        content: AssistantTextContent carrying the text to render.
        line_threshold: Line count above which the block becomes collapsible.
        preview_line_count: Lines shown in the collapsed preview.

    Returns:
        HTML string with markdown-rendered, optionally collapsible content.
    """
    rendered = render_markdown_collapsible(
        content.text,
        "assistant-text",
        line_threshold=line_threshold,
        preview_line_count=preview_line_count,
    )
    return rendered
+
+
def format_thinking_content(
    content: ThinkingContentModel,
    line_threshold: int = 20,
    preview_line_count: int = 5,
) -> str:
    """Render an assistant "thinking" block to HTML.

    Thinking text is rendered as markdown; blocks longer than
    ``line_threshold`` lines collapse to a ``preview_line_count``-line
    preview.

    Args:
        content: ThinkingContentModel carrying the thinking text.
        line_threshold: Line count above which the block becomes collapsible.
        preview_line_count: Lines shown in the collapsed preview.

    Returns:
        HTML string with markdown-rendered, optionally collapsible content.
    """
    rendered = render_markdown_collapsible(
        content.thinking,
        "thinking-content",
        line_threshold=line_threshold,
        preview_line_count=preview_line_count,
    )
    return rendered
+
+
def format_image_content(image: "ImageContent") -> str:
    """Format image content as an inline HTML image.

    Args:
        image: ImageContent with base64-encoded image data and media type.

    Returns:
        HTML ``<img>`` tag whose ``src`` is a base64 data URL, so the
        generated page is self-contained (no external image files).
    """
    data_url = f"data:{image.source.media_type};base64,{image.source.data}"
    # NOTE(review): the original return statement lost its markup in transit;
    # reconstructed as an <img> tag with "Uploaded image" as the alt text —
    # confirm the exact attributes against the transcript template/CSS.
    return f'<img src="{data_url}" alt="Uploaded image" />'
+
+
+def format_unknown_content(content: UnknownContent) -> str:
+    """Format unknown content type as HTML.
+
+    Args:
+        content: UnknownContent with the type name
+
+    Returns:
+        HTML paragraph with escaped type name
+    """
+    escaped_type = escape_html(content.type_name)
+    return f"

Unknown content type: {escaped_type}

" + + +# ============================================================================= +# Public Exports +# ============================================================================= + +__all__ = [ + # Formatting functions + "format_assistant_text_content", + "format_thinking_content", + "format_image_content", + "format_unknown_content", +] diff --git a/claude_code_log/html/renderer.py b/claude_code_log/html/renderer.py new file mode 100644 index 00000000..c2efdc8f --- /dev/null +++ b/claude_code_log/html/renderer.py @@ -0,0 +1,336 @@ +"""HTML renderer implementation for Claude Code transcripts.""" + +from pathlib import Path +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple + +from ..cache import get_library_version +from ..models import ( + AssistantTextContent, + BashInputContent, + BashOutputContent, + CommandOutputContent, + CompactedSummaryContent, + DedupNoticeContent, + HookSummaryContent, + ImageContent, + SessionHeaderContent, + SlashCommandContent, + SystemContent, + ThinkingContentModel, + ToolResultContent, + ToolResultContentModel, + ToolUseContent, + TranscriptEntry, + UnknownContent, + UserMemoryContent, + UserTextContent, +) +from ..renderer import ( + Renderer, + TemplateMessage, + generate_template_messages, + prepare_projects_index, + title_for_projects_index, +) +from ..renderer_timings import log_timing +from .system_formatters import ( + format_dedup_notice_content, + format_hook_summary_content, + format_session_header_content, + format_system_content, +) +from .user_formatters import ( + format_bash_input_content, + format_bash_output_content, + format_command_output_content, + format_compacted_summary_content, + format_slash_command_content, + format_user_memory_content, + format_user_text_model_content, +) +from .assistant_formatters import ( + format_assistant_text_content, + format_image_content, + format_thinking_content, + format_unknown_content, +) +from .tool_formatters import format_tool_result_content, 
format_tool_use_content +from .utils import css_class_from_message, get_message_emoji, get_template_environment + +if TYPE_CHECKING: + from ..cache import CacheManager + + +def check_html_version(html_file_path: Path) -> Optional[str]: + """Check the version of an existing HTML file from its comment. + + Returns: + The version string if found, None if no version comment or file doesn't exist. + """ + if not html_file_path.exists(): + return None + + try: + with open(html_file_path, "r", encoding="utf-8") as f: + # Read only the first few lines to find the version comment + for _ in range(5): # Check first 5 lines + line = f.readline() + if not line: + break + # Look for comment like: + if "' + start = line.find("v") + 1 + end = line.find(" -->") + if start > 0 and end > start: + return line[start:end] + except (IOError, UnicodeDecodeError): + pass + + return None + + +class HtmlRenderer(Renderer): + """HTML renderer for Claude Code transcripts.""" + + def _format_message_content(self, message: TemplateMessage) -> str: + """Format structured content to HTML for a single message. 
+ + Args: + message: TemplateMessage with content model to format + + Returns: + HTML string for the message content, or empty string if no content + """ + if message.content is None: + return "" + + # Dispatch to appropriate formatter based on content type + if isinstance(message.content, SystemContent): + return format_system_content(message.content) + elif isinstance(message.content, HookSummaryContent): + return format_hook_summary_content(message.content) + elif isinstance(message.content, SessionHeaderContent): + return format_session_header_content(message.content) + elif isinstance(message.content, DedupNoticeContent): + return format_dedup_notice_content(message.content) + elif isinstance(message.content, SlashCommandContent): + return format_slash_command_content(message.content) + elif isinstance(message.content, CommandOutputContent): + return format_command_output_content(message.content) + elif isinstance(message.content, BashInputContent): + return format_bash_input_content(message.content) + elif isinstance(message.content, BashOutputContent): + return format_bash_output_content(message.content) + elif isinstance(message.content, ThinkingContentModel): + return format_thinking_content(message.content, line_threshold=10) + elif isinstance(message.content, AssistantTextContent): + return format_assistant_text_content(message.content) + elif isinstance(message.content, ImageContent): + return format_image_content(message.content) + elif isinstance(message.content, ToolUseContent): + return format_tool_use_content(message.content) + elif isinstance(message.content, ToolResultContentModel): + # Create ToolResultContent from the model for formatting + tool_result = ToolResultContent( + type="tool_result", + tool_use_id=message.content.tool_use_id, + content=message.content.content, + is_error=message.content.is_error, + ) + return format_tool_result_content( + tool_result, + message.content.file_path, + message.content.tool_name, + ) + # User message 
content types + elif isinstance(message.content, CompactedSummaryContent): + return format_compacted_summary_content(message.content) + elif isinstance(message.content, UserMemoryContent): + return format_user_memory_content(message.content) + elif isinstance(message.content, UserTextContent): + # Check if this is a slash command expanded prompt (via modifiers) + if message.modifiers and message.modifiers.is_slash_command: + # Slash command expanded prompts are markdown (LLM-generated) + from .utils import render_markdown_collapsible + + return render_markdown_collapsible( + message.content.text, + "slash-command-content", + line_threshold=20, + preview_line_count=5, + ) + else: + return format_user_text_model_content(message.content) + elif isinstance(message.content, UnknownContent): + return format_unknown_content(message.content) + # Future content types will be added here as they are migrated + return "" + + def _flatten_preorder( + self, roots: List[TemplateMessage] + ) -> List[Tuple[TemplateMessage, str]]: + """Flatten message tree via pre-order traversal, formatting each message. + + Traverses the tree depth-first (pre-order), formats each message's + content to HTML, and builds a flat list of (message, html) pairs. 
+ + Args: + roots: Root messages (typically session headers) with children populated + + Returns: + Flat list of (message, html_content) tuples in pre-order + """ + flat: List[Tuple[TemplateMessage, str]] = [] + + def visit(msg: TemplateMessage) -> None: + html = self._format_message_content(msg) + flat.append((msg, html)) + for child in msg.children: + visit(child) + + for root in roots: + visit(root) + + return flat + + def generate( + self, + messages: List[TranscriptEntry], + title: Optional[str] = None, + combined_transcript_link: Optional[str] = None, + ) -> str: + """Generate HTML from transcript messages.""" + import time + + t_start = time.time() + + if not title: + title = "Claude Transcript" + + # Get root messages (tree) and session navigation from format-neutral renderer + root_messages, session_nav = generate_template_messages(messages) + + # Flatten tree via pre-order traversal, formatting content along the way + with log_timing("Content formatting (pre-order)", t_start): + template_messages = self._flatten_preorder(root_messages) + + # Render template + with log_timing("Template environment setup", t_start): + env = get_template_environment() + template = env.get_template("transcript.html") + + with log_timing( + lambda: f"Template rendering ({len(html_output)} chars)", t_start + ): + html_output = str( + template.render( + title=title, + messages=template_messages, + sessions=session_nav, + combined_transcript_link=combined_transcript_link, + library_version=get_library_version(), + css_class_from_message=css_class_from_message, + get_message_emoji=get_message_emoji, + ) + ) + + return html_output + + def generate_session( + self, + messages: List[TranscriptEntry], + session_id: str, + title: Optional[str] = None, + cache_manager: Optional["CacheManager"] = None, + ) -> str: + """Generate HTML for a single session.""" + # Filter messages for this session (SummaryTranscriptEntry.sessionId is always None) + session_messages = [msg for msg in messages 
if msg.sessionId == session_id] + + # Get combined transcript link if cache manager is available + combined_link = None + if cache_manager is not None: + try: + project_cache = cache_manager.get_cached_project_data() + if project_cache and project_cache.sessions: + combined_link = "combined_transcripts.html" + except Exception: + pass + + return self.generate( + session_messages, + title or f"Session {session_id[:8]}", + combined_transcript_link=combined_link, + ) + + def generate_projects_index( + self, + project_summaries: List[Dict[str, Any]], + from_date: Optional[str] = None, + to_date: Optional[str] = None, + ) -> str: + """Generate an HTML projects index page.""" + title = title_for_projects_index(project_summaries, from_date, to_date) + template_projects, template_summary = prepare_projects_index(project_summaries) + + env = get_template_environment() + template = env.get_template("index.html") + return str( + template.render( + title=title, + projects=template_projects, + summary=template_summary, + library_version=get_library_version(), + ) + ) + + def is_outdated(self, file_path: Path) -> bool: + """Check if an HTML file is outdated based on version. + + Returns: + True if the file should be regenerated (missing version, + different version, or file doesn't exist). + False if the file is current. + """ + html_version = check_html_version(file_path) + current_version = get_library_version() + # If no version found or different version, it's outdated + return html_version != current_version + + +# -- Convenience Functions ---------------------------------------------------- + + +def generate_html( + messages: List[TranscriptEntry], + title: Optional[str] = None, + combined_transcript_link: Optional[str] = None, +) -> str: + """Generate HTML from transcript messages using Jinja2 templates. + + This is a convenience function that delegates to HtmlRenderer.generate. 
+ """ + return HtmlRenderer().generate(messages, title, combined_transcript_link) + + +def generate_session_html( + messages: List[TranscriptEntry], + session_id: str, + title: Optional[str] = None, + cache_manager: Optional["CacheManager"] = None, +) -> str: + """Generate HTML for a single session using Jinja2 templates.""" + return HtmlRenderer().generate_session(messages, session_id, title, cache_manager) + + +def generate_projects_index_html( + project_summaries: List[Dict[str, Any]], + from_date: Optional[str] = None, + to_date: Optional[str] = None, +) -> str: + """Generate an index HTML page listing all projects using Jinja2 templates. + + This is a convenience function that delegates to HtmlRenderer.generate_projects_index. + """ + return HtmlRenderer().generate_projects_index(project_summaries, from_date, to_date) diff --git a/claude_code_log/html/renderer_code.py b/claude_code_log/html/renderer_code.py new file mode 100644 index 00000000..ab93f039 --- /dev/null +++ b/claude_code_log/html/renderer_code.py @@ -0,0 +1,330 @@ +#!/usr/bin/env python3 +"""Code rendering utilities for syntax highlighting and diffs. + +This module provides utilities for rendering source code with syntax highlighting +(using Pygments) and rendering diffs with intra-line highlighting. +""" + +import difflib +import fnmatch +import html +import os +import re +from typing import Callable, List, Optional + +from pygments import highlight # type: ignore[reportUnknownVariableType] +from pygments.lexers import TextLexer, get_lexer_by_name, get_all_lexers # type: ignore[reportUnknownVariableType] +from pygments.formatters import HtmlFormatter # type: ignore[reportUnknownVariableType] +from pygments.util import ClassNotFound # type: ignore[reportUnknownVariableType] + +from ..renderer_timings import timing_stat + + +def _escape_html(text: str) -> str: + """Escape HTML special characters in text. + + Also normalizes line endings (CRLF -> LF) to prevent double spacing in
 blocks.
+    """
+    normalized = text.replace("\r\n", "\n").replace("\r", "\n")
+    return html.escape(normalized)
+
+
+# Cache for Pygments lexer pattern matching
+_pattern_cache: Optional[dict[str, str]] = None
+_extension_cache: Optional[dict[str, str]] = None
+
+
def _init_lexer_caches() -> tuple[dict[str, str], dict[str, str]]:
    """Build (and memoize) filename-pattern -> lexer-alias lookup tables.

    Returns:
        Tuple of (pattern_cache, extension_cache): pattern_cache maps
        lowercase filename glob patterns to a Pygments lexer alias, and
        extension_cache maps bare lowercase extensions (from simple
        ``*.ext`` globs) to the same aliases for O(1) lookup.
    """
    global _pattern_cache, _extension_cache

    # Already built — reuse the module-level caches.
    if _pattern_cache is not None and _extension_cache is not None:
        return _pattern_cache, _extension_cache

    glob_to_alias: dict[str, str] = {}
    ext_to_alias: dict[str, str] = {}

    # Public API: get_all_lexers() yields (name, aliases, patterns, mimetypes).
    for _name, aliases, patterns, _mimetypes in get_all_lexers():  # type: ignore[reportUnknownVariableType]
        if not aliases or not patterns:
            continue
        # The first alias is used as the canonical lexer name.
        alias = aliases[0]
        for raw_pattern in patterns:
            glob = raw_pattern.lower()
            glob_to_alias[glob] = alias
            # Simple "*.ext" globs (no further wildcards) permit a fast
            # dict lookup; keep the first alias seen for each extension.
            tail = glob[2:]
            if glob.startswith("*.") and "*" not in tail and "?" not in tail:
                ext_to_alias.setdefault(tail, alias)

    _pattern_cache = glob_to_alias
    _extension_cache = ext_to_alias
    return glob_to_alias, ext_to_alias
+
+
def highlight_code_with_pygments(
    code: str, file_path: str, show_linenos: bool = True, linenostart: int = 1
) -> str:
    """Highlight code using Pygments with appropriate lexer based on file path.

    Lexer selection degrades gracefully: fast extension lookup, then glob
    pattern matching, then a plain-text lexer — this function does not raise
    on unknown file types.

    Args:
        code: The source code to highlight
        file_path: Path to determine the appropriate lexer
        show_linenos: Whether to show line numbers (default: True)
        linenostart: Starting line number for display (default: 1); useful
            when rendering an excerpt that starts mid-file

    Returns:
        HTML string with syntax-highlighted code
    """
    # Get caches (initialized lazily on first call; see _init_lexer_caches)
    pattern_cache, extension_cache = _init_lexer_caches()

    # Get basename for matching (patterns are like "*.py"); lowercased to
    # match the lowercase keys stored in the caches
    basename = os.path.basename(file_path).lower()

    try:
        # OPTIMIZATION: Try fast extension lookup first (O(1) dict lookup)
        lexer_alias = None
        if "." in basename:
            ext = basename.split(".")[-1]  # Get last extension (handles .tar.gz, etc.)
            lexer_alias = extension_cache.get(ext)

        # Fall back to O(n) glob matching only if extension lookup failed
        # (covers patterns like "Makefile" or "*.txt.j2")
        if lexer_alias is None:
            for pattern, lex_alias in pattern_cache.items():
                if fnmatch.fnmatch(basename, pattern):
                    lexer_alias = lex_alias
                    break

        # Get lexer or use TextLexer as fallback
        # Note: stripall=False preserves leading whitespace (important for code indentation)
        if lexer_alias:
            lexer = get_lexer_by_name(lexer_alias, stripall=False)  # type: ignore[reportUnknownVariableType]
        else:
            lexer = TextLexer()  # type: ignore[reportUnknownVariableType]
    except ClassNotFound:
        # get_lexer_by_name can raise if the cached alias is unknown to this
        # Pygments version; fall back to plain text rather than failing
        lexer = TextLexer()  # type: ignore[reportUnknownVariableType]

    # Create formatter; "table" puts line numbers in a separate column so
    # they are not selected when copying code
    formatter = HtmlFormatter(  # type: ignore[reportUnknownVariableType]
        linenos="table" if show_linenos else False,
        cssclass="highlight",
        wrapcode=True,
        linenostart=linenostart,
    )

    # Highlight the code with timing if enabled (timing_stat aggregates
    # durations under the "_pygments_timings" key)
    with timing_stat("_pygments_timings"):
        return str(highlight(code, lexer, formatter))  # type: ignore[reportUnknownArgumentType]
+
+
+def truncate_highlighted_preview(highlighted_html: str, max_lines: int) -> str:
+    """Truncate Pygments highlighted HTML to first N lines.
+
+    HtmlFormatter(linenos="table") produces a single  with two s:
+      
LINE_NUMS
+
CODE
+ + We truncate content within each
 tag to the first max_lines lines.
+
+    Args:
+        highlighted_html: Full Pygments-highlighted HTML
+        max_lines: Maximum number of lines to include in preview
+
+    Returns:
+        Truncated HTML with same structure but fewer lines
+    """
+
+    def truncate_pre_content(match: re.Match[str]) -> str:
+        """Truncate content inside a 
 tag to max_lines."""
+        prefix, content, suffix = match.groups()
+        lines = content.split("\n")
+        truncated = "\n".join(lines[:max_lines])
+        return prefix + truncated + suffix
+
+    # Truncate linenos 
 content (line numbers separated by newlines)
+    result = re.sub(
+        r'(
)(.*?)(
)', + truncate_pre_content, + highlighted_html, + flags=re.DOTALL, + ) + + # Truncate code
 content
+    result = re.sub(
+        r'(
]*>)(.*?)(
)', + truncate_pre_content, + result, + flags=re.DOTALL, + ) + + return result + + +def render_line_diff( + old_line: str, new_line: str, escape_fn: Optional[Callable[[str], str]] = None +) -> str: + """Render a pair of changed lines with character-level highlighting. + + Args: + old_line: The original line + new_line: The new line + escape_fn: Optional HTML escape function (defaults to internal _escape_html) + + Returns: + HTML string with both lines and character-level diff highlighting + """ + if escape_fn is None: + escape_fn = _escape_html + + # Use SequenceMatcher for character-level diff + sm = difflib.SequenceMatcher(None, old_line.rstrip("\n"), new_line.rstrip("\n")) + + # Build old line with highlighting + old_parts: List[str] = [] + old_parts.append( + "
-" + ) + for tag, i1, i2, j1, j2 in sm.get_opcodes(): + chunk = old_line[i1:i2] + if tag == "equal": + old_parts.append(escape_fn(chunk)) + elif tag in ("delete", "replace"): + old_parts.append( + f"{escape_fn(chunk)}" + ) + old_parts.append("
") + + # Build new line with highlighting + new_parts: List[str] = [] + new_parts.append( + "
+" + ) + for tag, i1, i2, j1, j2 in sm.get_opcodes(): + chunk = new_line[j1:j2] + if tag == "equal": + new_parts.append(escape_fn(chunk)) + elif tag in ("insert", "replace"): + new_parts.append(f"{escape_fn(chunk)}") + new_parts.append("
") + + return "".join(old_parts) + "".join(new_parts) + + +def render_single_diff( + old_string: str, new_string: str, escape_fn: Optional[Callable[[str], str]] = None +) -> str: + """Render a single diff between old_string and new_string. + + Args: + old_string: The original content + new_string: The new content + escape_fn: Optional HTML escape function (defaults to internal _escape_html) + + Returns: + HTML string with diff view and intra-line highlighting + """ + if escape_fn is None: + escape_fn = _escape_html + + # Split into lines for diff + old_lines = old_string.splitlines(keepends=True) + new_lines = new_string.splitlines(keepends=True) + + # Generate unified diff to identify changed lines + differ = difflib.Differ() + diff: List[str] = list(differ.compare(old_lines, new_lines)) + + html_parts = ["
"] + + i = 0 + while i < len(diff): + line = diff[i] + prefix = line[0:2] + content = line[2:] + + if prefix == "- ": + # Removed line - look ahead for corresponding addition + removed_lines: List[str] = [content] + j = i + 1 + + # Collect consecutive removed lines + while j < len(diff) and diff[j].startswith("- "): + removed_lines.append(diff[j][2:]) + j += 1 + + # Skip '? ' hint lines + while j < len(diff) and diff[j].startswith("? "): + j += 1 + + # Collect consecutive added lines + added_lines: List[str] = [] + while j < len(diff) and diff[j].startswith("+ "): + added_lines.append(diff[j][2:]) + j += 1 + + # Skip '? ' hint lines + while j < len(diff) and diff[j].startswith("? "): + j += 1 + + # Generate character-level diff for paired lines + if added_lines: + for old_line, new_line in zip(removed_lines, added_lines): + html_parts.append(render_line_diff(old_line, new_line, escape_fn)) + + # Handle any unpaired lines + for old_line in removed_lines[len(added_lines) :]: + escaped = escape_fn(old_line.rstrip("\n")) + html_parts.append( + f"
-{escaped}
" + ) + + for new_line in added_lines[len(removed_lines) :]: + escaped = escape_fn(new_line.rstrip("\n")) + html_parts.append( + f"
+{escaped}
" + ) + else: + # No corresponding addition - just removed + for old_line in removed_lines: + escaped = escape_fn(old_line.rstrip("\n")) + html_parts.append( + f"
-{escaped}
" + ) + + i = j + + elif prefix == "+ ": + # Added line without corresponding removal + escaped = escape_fn(content.rstrip("\n")) + html_parts.append( + f"
+{escaped}
" + ) + i += 1 + + elif prefix == "? ": + # Skip hint lines (already processed) + i += 1 + + else: + # Unchanged line - show for context + escaped = escape_fn(content.rstrip("\n")) + html_parts.append( + f"
{escaped}
" + ) + i += 1 + + html_parts.append("
") + return "".join(html_parts) diff --git a/claude_code_log/html/system_formatters.py b/claude_code_log/html/system_formatters.py new file mode 100644 index 00000000..14182a25 --- /dev/null +++ b/claude_code_log/html/system_formatters.py @@ -0,0 +1,113 @@ +"""HTML formatters for system message content. + +This module formats SystemTranscriptEntry-derived content types to HTML. +Part of the thematic formatter organization: +- system_formatters.py: SystemContent, HookSummaryContent +- user_formatters.py: (future) user message variants +- assistant_formatters.py: (future) assistant message variants +- tool_renderers.py: tool use/result content +""" + +import html + +from .ansi_colors import convert_ansi_to_html +from ..models import ( + DedupNoticeContent, + HookSummaryContent, + SessionHeaderContent, + SystemContent, +) + + +def format_system_content(content: SystemContent) -> str: + """Format a system message with level-specific icon. + + Args: + content: SystemContent with level and text + + Returns: + HTML with icon and ANSI-converted text + """ + level_icon = {"warning": "⚠️", "error": "❌", "info": "ℹ️"}.get(content.level, "ℹ️") + html_content = convert_ansi_to_html(content.text) + return f"{level_icon} {html_content}" + + +def format_hook_summary_content(content: HookSummaryContent) -> str: + """Format a hook summary as collapsible details. + + Shows a compact summary with expandable hook commands and error output. + + Args: + content: HookSummaryContent with execution details + + Returns: + HTML with collapsible details section + """ + # Determine if this is a failure or just output + has_errors = bool(content.hook_errors) + summary_icon = "🪝" + summary_text = "Hook failed" if has_errors else "Hook output" + + # Build the command section + command_html = "" + if content.hook_infos: + command_html = '
' + for info in content.hook_infos: + # Truncate very long commands + cmd = info.command + display_cmd = cmd if len(cmd) <= 100 else cmd[:97] + "..." + command_html += f"{html.escape(display_cmd)}" + command_html += "
" + + # Build the error output section + error_html = "" + if content.hook_errors: + error_html = '
' + for err in content.hook_errors: + # Convert ANSI codes in error output + formatted_err = convert_ansi_to_html(err) + error_html += f'
{formatted_err}
' + error_html += "
" + + return f"""
+{summary_icon} {summary_text} +
+{command_html} +{error_html} +
+
""" + + +def format_session_header_content(content: SessionHeaderContent) -> str: + """Format a session header as HTML. + + Args: + content: SessionHeaderContent with title, session_id, and optional summary + + Returns: + HTML for the session header display + """ + escaped_title = html.escape(content.title) + return escaped_title + + +def format_dedup_notice_content(content: DedupNoticeContent) -> str: + """Format a deduplication notice as HTML. + + Args: + content: DedupNoticeContent with notice text + + Returns: + HTML for the dedup notice display + """ + escaped_notice = html.escape(content.notice_text) + return f"

{escaped_notice}

" + + +__all__ = [ + "format_system_content", + "format_hook_summary_content", + "format_session_header_content", + "format_dedup_notice_content", +] diff --git a/claude_code_log/templates/components/edit_diff_styles.css b/claude_code_log/html/templates/components/edit_diff_styles.css similarity index 100% rename from claude_code_log/templates/components/edit_diff_styles.css rename to claude_code_log/html/templates/components/edit_diff_styles.css diff --git a/claude_code_log/templates/components/filter_styles.css b/claude_code_log/html/templates/components/filter_styles.css similarity index 100% rename from claude_code_log/templates/components/filter_styles.css rename to claude_code_log/html/templates/components/filter_styles.css diff --git a/claude_code_log/templates/components/global_styles.css b/claude_code_log/html/templates/components/global_styles.css similarity index 100% rename from claude_code_log/templates/components/global_styles.css rename to claude_code_log/html/templates/components/global_styles.css diff --git a/claude_code_log/templates/components/message_styles.css b/claude_code_log/html/templates/components/message_styles.css similarity index 90% rename from claude_code_log/templates/components/message_styles.css rename to claude_code_log/html/templates/components/message_styles.css index 2bc67bf0..e7a4f30d 100644 --- a/claude_code_log/templates/components/message_styles.css +++ b/claude_code_log/html/templates/components/message_styles.css @@ -39,14 +39,14 @@ font-weight: 500; padding: 0.4em; transition: all 0.2s ease; - border-bottom: 2px solid; + border-bottom: 2px solid transparent; background: linear-gradient(to bottom, #f8f8f844, #f0f0f0); } -/* Double-line effect when folded */ +/* Show border only when folded (content is hidden) */ .fold-bar-section.folded { - border-bottom-style: double; - border-bottom-width: 4px; + border-bottom-style: solid; + border-bottom-width: 2px; } .fold-bar-section:hover { @@ -85,92 +85,86 @@ font-size: 0.9em; } 
-/* Border colors matching message types */ -.fold-bar[data-border-color="user"] .fold-bar-section, -.fold-bar[data-border-color="user compacted"] .fold-bar-section, -.fold-bar[data-border-color="user sidechain"] .fold-bar-section, -.fold-bar[data-border-color="user compacted sidechain"] .fold-bar-section { +/* Border colors matching message types - only shown when folded */ +.fold-bar[data-border-color="user"] .fold-bar-section.folded, +.fold-bar[data-border-color="user compacted"] .fold-bar-section.folded, +.fold-bar[data-border-color="user sidechain"] .fold-bar-section.folded, +.fold-bar[data-border-color="user compacted sidechain"] .fold-bar-section.folded { border-bottom-color: var(--user-color); } -.fold-bar[data-border-color="user slash-command"] .fold-bar-section, -.fold-bar[data-border-color="user slash-command sidechain"] .fold-bar-section { +.fold-bar[data-border-color="user slash-command"] .fold-bar-section.folded, +.fold-bar[data-border-color="user slash-command sidechain"] .fold-bar-section.folded, +.fold-bar[data-border-color="user command-output"] .fold-bar-section.folded { border-bottom-color: var(--user-dimmed); } -.fold-bar[data-border-color="assistant"] .fold-bar-section, -.fold-bar[data-border-color="assistant sidechain"] .fold-bar-section { +.fold-bar[data-border-color="assistant"] .fold-bar-section.folded, +.fold-bar[data-border-color="assistant sidechain"] .fold-bar-section.folded { border-bottom-color: var(--assistant-color); } -.fold-bar[data-border-color="system"] .fold-bar-section, -.fold-bar[data-border-color="system command-output"] .fold-bar-section { +.fold-bar[data-border-color="system"] .fold-bar-section.folded { border-bottom-color: var(--system-color); } -.fold-bar[data-border-color="system-warning"] .fold-bar-section { +.fold-bar[data-border-color="system-warning"] .fold-bar-section.folded { border-bottom-color: var(--system-warning-color); } -.fold-bar[data-border-color="system-error"] .fold-bar-section { 
+.fold-bar[data-border-color="system-error"] .fold-bar-section.folded { border-bottom-color: var(--system-error-color); } -.fold-bar[data-border-color="system-info"] .fold-bar-section { +.fold-bar[data-border-color="system-info"] .fold-bar-section.folded { border-bottom-color: var(--info-dimmed); } -.fold-bar[data-border-color="tool_use"] .fold-bar-section, -.fold-bar[data-border-color="tool_use sidechain"] .fold-bar-section { +.fold-bar[data-border-color="tool_use"] .fold-bar-section.folded, +.fold-bar[data-border-color="tool_use sidechain"] .fold-bar-section.folded { border-bottom-color: var(--tool-use-color); } -.fold-bar[data-border-color="tool_result"] .fold-bar-section, -.fold-bar[data-border-color="tool_result sidechain"] .fold-bar-section { +.fold-bar[data-border-color="tool_result"] .fold-bar-section.folded, +.fold-bar[data-border-color="tool_result sidechain"] .fold-bar-section.folded { border-bottom-color: var(--success-dimmed); } -.fold-bar[data-border-color="tool_result error"] .fold-bar-section, -.fold-bar[data-border-color="tool_result error sidechain"] .fold-bar-section { +.fold-bar[data-border-color="tool_result error"] .fold-bar-section.folded, +.fold-bar[data-border-color="tool_result error sidechain"] .fold-bar-section.folded { border-bottom-color: var(--error-dimmed); } -.fold-bar[data-border-color="thinking"] .fold-bar-section, -.fold-bar[data-border-color="thinking sidechain"] .fold-bar-section { +.fold-bar[data-border-color="thinking"] .fold-bar-section.folded, +.fold-bar[data-border-color="thinking sidechain"] .fold-bar-section.folded { border-bottom-color: var(--assistant-dimmed); } -.fold-bar[data-border-color="image"] .fold-bar-section, -.fold-bar[data-border-color="image sidechain"] .fold-bar-section { +.fold-bar[data-border-color="image"] .fold-bar-section.folded, +.fold-bar[data-border-color="image sidechain"] .fold-bar-section.folded { border-bottom-color: var(--info-dimmed); } -.fold-bar[data-border-color="unknown"] 
.fold-bar-section, -.fold-bar[data-border-color="unknown sidechain"] .fold-bar-section { +.fold-bar[data-border-color="unknown"] .fold-bar-section.folded, +.fold-bar[data-border-color="unknown sidechain"] .fold-bar-section.folded { border-bottom-color: var(--neutral-dimmed); } -.fold-bar[data-border-color="bash-input"] .fold-bar-section { +.fold-bar[data-border-color="bash-input"] .fold-bar-section.folded { border-bottom-color: var(--user-color); } -.fold-bar[data-border-color="bash-output"] .fold-bar-section { +.fold-bar[data-border-color="bash-output"] .fold-bar-section.folded { border-bottom-color: var(--user-dimmed); } -.fold-bar[data-border-color="session-header"] .fold-bar-section { +.fold-bar[data-border-color="session-header"] .fold-bar-section.folded { border-bottom-color: var(--system-warning-color); } -/* Sidechain (sub-assistant) fold-bar styling */ -.sidechain .fold-bar-section { - border-bottom-style: dashed; - border-bottom-width: 2px; -} - +/* Sidechain (sub-assistant) fold-bar styling - dashed border when folded */ .sidechain .fold-bar-section.folded { border-bottom-style: dashed; - border-bottom-width: 4px; } /* ======================================== @@ -261,17 +255,14 @@ } /* Paired message styling */ -.message.paired-message { +.message.pair_first { margin-bottom: 0; -} - -.message.paired-message.pair_first { border-bottom-left-radius: 0; border-bottom-right-radius: 0; border-bottom: none; } -.message.paired-message.pair_last { +.message.pair_last { margin-top: 0; margin-bottom: 1em; border-top-left-radius: 0; @@ -279,8 +270,9 @@ border-top: 1px solid #00000011; } -.message.paired-message.pair_middle { +.message.pair_middle { margin-top: 0; + margin-bottom: 0; border-radius: 0; border-top: 1px solid #00000011; border-bottom: none; @@ -313,7 +305,7 @@ } /* Dimmed assistant when paired with thinking */ -.assistant.paired-message { +.assistant.pair_last { border-left-color: var(--assistant-dimmed); } @@ -547,7 +539,7 @@ } /* Full purple when 
thinking is paired (as pair_first) */ -.thinking.paired-message.pair_first { +.thinking.pair_first { border-left-color: var(--assistant-color); } @@ -642,6 +634,20 @@ font-family: var(--font-ui); } +.markdown table { + border-collapse: collapse; +} + +.markdown td { + padding: .5em; + border-top: 1px solid #bbb; +} + +.markdown th { + padding: .5em; + border-bottom: 1px solid #888; +} + /* Code block styling */ pre > code { display: block; diff --git a/claude_code_log/templates/components/project_card_styles.css b/claude_code_log/html/templates/components/project_card_styles.css similarity index 100% rename from claude_code_log/templates/components/project_card_styles.css rename to claude_code_log/html/templates/components/project_card_styles.css diff --git a/claude_code_log/templates/components/pygments_styles.css b/claude_code_log/html/templates/components/pygments_styles.css similarity index 100% rename from claude_code_log/templates/components/pygments_styles.css rename to claude_code_log/html/templates/components/pygments_styles.css diff --git a/claude_code_log/templates/components/search.html b/claude_code_log/html/templates/components/search.html similarity index 100% rename from claude_code_log/templates/components/search.html rename to claude_code_log/html/templates/components/search.html diff --git a/claude_code_log/templates/components/search_inline.html b/claude_code_log/html/templates/components/search_inline.html similarity index 100% rename from claude_code_log/templates/components/search_inline.html rename to claude_code_log/html/templates/components/search_inline.html diff --git a/claude_code_log/templates/components/search_inline_script.html b/claude_code_log/html/templates/components/search_inline_script.html similarity index 100% rename from claude_code_log/templates/components/search_inline_script.html rename to claude_code_log/html/templates/components/search_inline_script.html diff --git 
a/claude_code_log/templates/components/search_results_panel.html b/claude_code_log/html/templates/components/search_results_panel.html similarity index 100% rename from claude_code_log/templates/components/search_results_panel.html rename to claude_code_log/html/templates/components/search_results_panel.html diff --git a/claude_code_log/templates/components/search_styles.css b/claude_code_log/html/templates/components/search_styles.css similarity index 100% rename from claude_code_log/templates/components/search_styles.css rename to claude_code_log/html/templates/components/search_styles.css diff --git a/claude_code_log/templates/components/session_nav.html b/claude_code_log/html/templates/components/session_nav.html similarity index 100% rename from claude_code_log/templates/components/session_nav.html rename to claude_code_log/html/templates/components/session_nav.html diff --git a/claude_code_log/templates/components/session_nav_styles.css b/claude_code_log/html/templates/components/session_nav_styles.css similarity index 100% rename from claude_code_log/templates/components/session_nav_styles.css rename to claude_code_log/html/templates/components/session_nav_styles.css diff --git a/claude_code_log/templates/components/timeline.html b/claude_code_log/html/templates/components/timeline.html similarity index 100% rename from claude_code_log/templates/components/timeline.html rename to claude_code_log/html/templates/components/timeline.html diff --git a/claude_code_log/templates/components/timeline_styles.css b/claude_code_log/html/templates/components/timeline_styles.css similarity index 100% rename from claude_code_log/templates/components/timeline_styles.css rename to claude_code_log/html/templates/components/timeline_styles.css diff --git a/claude_code_log/templates/components/timezone_converter.js b/claude_code_log/html/templates/components/timezone_converter.js similarity index 100% rename from claude_code_log/templates/components/timezone_converter.js 
rename to claude_code_log/html/templates/components/timezone_converter.js diff --git a/claude_code_log/templates/components/todo_styles.css b/claude_code_log/html/templates/components/todo_styles.css similarity index 100% rename from claude_code_log/templates/components/todo_styles.css rename to claude_code_log/html/templates/components/todo_styles.css diff --git a/claude_code_log/templates/index.html b/claude_code_log/html/templates/index.html similarity index 100% rename from claude_code_log/templates/index.html rename to claude_code_log/html/templates/index.html diff --git a/claude_code_log/templates/transcript.html b/claude_code_log/html/templates/transcript.html similarity index 96% rename from claude_code_log/templates/transcript.html rename to claude_code_log/html/templates/transcript.html index e01e2300..8a256853 100644 --- a/claude_code_log/templates/transcript.html +++ b/claude_code_log/html/templates/transcript.html @@ -69,11 +69,11 @@

🔍 Search & Filter

{{ render_session_nav(sessions, "toc") }} {% endif %} - {% for message in messages %} + {% for message, html_content in messages %} {% if message.is_session_header %}
-
Session: {{ message.content_html }}
+
Session: {{ html_content }}
{% if message.session_subtitle %}
{{ message.session_subtitle }} ({{message.session_id}})
@@ -101,19 +101,14 @@

🔍 Search & Filter

{% endif %}
{% else %} - {% set markdown = message.css_class in ['assistant', 'thinking', 'sidechain'] or (message.css_class and 'compacted' in message.css_class) %} -
+ {%- set msg_css_class = css_class_from_message(message) %} + {% set markdown = message.type in ['assistant', 'thinking'] or message.modifiers.is_compacted %} +
+ {% set msg_emoji = get_message_emoji(message) -%} {% if message.message_title %}{% if message.message_title == 'Memory' %}💭 {% - elif message.css_class.startswith('user') %}🤷 {% - elif message.css_class.startswith('assistant') %}🤖 {% - elif message.css_class == 'system' %}⚙️ {% - elif message.css_class.startswith('tool_use') and not starts_with_emoji(message.message_title) %}🛠️ {% - elif message.css_class == 'tool_result error' %}🚨 {% - elif message.css_class.startswith('tool_result') %}🧰 {% - elif message.css_class.startswith('thinking') %}💭 {% - elif message.css_class == 'image' %}🖼️ {% endif %}{{ message.message_title | safe }}{% endif %} + elif msg_emoji and (message.type != 'tool_use' or not starts_with_emoji(message.message_title)) %}{{ msg_emoji }} {% endif %}{{ message.message_title | safe }}{% endif %}
{{ message.formatted_timestamp }} @@ -123,9 +118,9 @@

🔍 Search & Filter

{% endif %}
-
{{ message.content_html | safe }}
+
{{ html_content | safe }}
{% if message.has_children %} -
+
{% if message.immediate_children_count == message.total_descendants_count %} {# Same count = only one level, show single full-width button #}
diff --git a/claude_code_log/html/tool_formatters.py b/claude_code_log/html/tool_formatters.py new file mode 100644 index 00000000..35053562 --- /dev/null +++ b/claude_code_log/html/tool_formatters.py @@ -0,0 +1,950 @@ +"""HTML rendering functions for tool use and tool result content. + +This module contains all HTML formatters for specific tools: +- AskUserQuestion tool (input + result) +- ExitPlanMode tool (input + result) +- TodoWrite tool +- Read/Write/Edit/Multiedit tools +- Bash tool +- Task tool +- Generic parameter table rendering +- Tool use content dispatcher + +These formatters take tool-specific input/output data and generate +HTML for display in transcripts. +""" + +import json +import re +from typing import Any, Dict, List, Optional, cast + +from .utils import ( + escape_html, + render_file_content_collapsible, + render_markdown_collapsible, +) +from ..models import ( + AskUserQuestionInput, + AskUserQuestionItem, + BashInput, + EditInput, + EditOutput, + ExitPlanModeInput, + MultiEditInput, + ReadInput, + ReadOutput, + TaskInput, + TodoWriteInput, + ToolResultContent, + ToolUseContent, + WriteInput, +) +from .ansi_colors import convert_ansi_to_html +from .renderer_code import render_single_diff + + +# -- AskUserQuestion Tool ----------------------------------------------------- + + +def _render_question_item(q: AskUserQuestionItem) -> str: + """Render a single question item to HTML.""" + html_parts: List[str] = ['
'] + + # Header (if present) + if q.header: + escaped_header = escape_html(q.header) + html_parts.append(f'
{escaped_header}
') + + # Question text with icon + question_text = escape_html(q.question) + html_parts.append(f'
❓ {question_text}
') + + # Options (if present) + if q.options: + select_hint = "(select multiple)" if q.multiSelect else "(select one)" + html_parts.append(f'
{select_hint}
') + html_parts.append('
    ') + for opt in q.options: + label = escape_html(opt.label) + if opt.description: + desc_html = f' — {escape_html(opt.description)}' + else: + desc_html = "" + html_parts.append( + f'
  • {label}{desc_html}
  • ' + ) + html_parts.append("
") + + html_parts.append("
") # Close question-block + return "".join(html_parts) + + +def format_askuserquestion_content(ask_input: AskUserQuestionInput) -> str: + """Format AskUserQuestion tool use content with prominent question display. + + Args: + ask_input: Typed AskUserQuestionInput with questions list and/or single question. + + Handles multiple questions in a single tool use, each with optional header, + options (with label and description), and multiSelect flag. + """ + # Build list of questions from both formats + questions: List[AskUserQuestionItem] = list(ask_input.questions) + + # Handle single question format (legacy) + if not questions and ask_input.question: + questions.append(AskUserQuestionItem(question=ask_input.question)) + + if not questions: + return '
No question
' + + # Build HTML for all questions + html_parts: List[str] = ['
'] + for q in questions: + html_parts.append(_render_question_item(q)) + html_parts.append("
") # Close askuserquestion-content + return "".join(html_parts) + + +def format_askuserquestion_result(content: str) -> str: + """Format AskUserQuestion tool result with styled question/answer pairs. + + Parses the result format: + 'User has answered your questions: "Q1"="A1", "Q2"="A2". You can now continue...' + + Returns HTML with styled Q&A blocks matching the input styling. + """ + # Check if this is a successful answer + if not content.startswith("User has answered your question"): + # Return as-is for errors or unexpected format + return "" + + # Extract the Q&A portion between the colon and the final sentence + # Pattern: 'User has answered your questions: "Q"="A", "Q"="A". You can now...' + match = re.match( + r"User has answered your questions?: (.+)\. You can now continue", + content, + re.DOTALL, + ) + if not match: + return "" + + qa_portion = match.group(1) + + # Parse "Question"="Answer" pairs + # Pattern: "question text"="answer text" + qa_pattern = re.compile(r'"([^"]+)"="([^"]+)"') + pairs = qa_pattern.findall(qa_portion) + + if not pairs: + return "" + + # Build styled HTML + html_parts: List[str] = [ + '
' + ] + + for question, answer in pairs: + escaped_q = escape_html(question) + escaped_a = escape_html(answer) + html_parts.append('
') + html_parts.append(f'
❓ {escaped_q}
') + html_parts.append(f'
✅ {escaped_a}
') + html_parts.append("
") + + html_parts.append("
") + return "".join(html_parts) + + +# -- ExitPlanMode Tool -------------------------------------------------------- + + +def format_exitplanmode_content(exit_input: ExitPlanModeInput) -> str: + """Format ExitPlanMode tool use content with collapsible plan markdown. + + Args: + exit_input: Typed ExitPlanModeInput with plan content. + + Renders the plan markdown in a collapsible section, similar to Task tool results. + """ + if not exit_input.plan: + return '
No plan
' + + return render_markdown_collapsible(exit_input.plan, "plan-content") + + +def format_exitplanmode_result(content: str) -> str: + """Format ExitPlanMode tool result, truncating the redundant plan echo. + + When a plan is approved, the result contains: + 1. A confirmation message + 2. Path to saved plan file + 3. "## Approved Plan:" followed by full plan text (redundant) + + We truncate everything after "## Approved Plan:" to avoid duplication. + For error results (plan not approved), we keep the full content. + """ + # Check if this is a successful approval + if "User has approved your plan" in content: + # Truncate at "## Approved Plan:" + marker = "## Approved Plan:" + marker_pos = content.find(marker) + if marker_pos > 0: + # Keep everything before the marker, strip trailing whitespace + return content[:marker_pos].rstrip() + + # For errors or other cases, return as-is + return content + + +# -- TodoWrite Tool ----------------------------------------------------------- + + +def format_todowrite_content(todo_input: TodoWriteInput) -> str: + """Format TodoWrite tool use content as a todo list. + + Args: + todo_input: Typed TodoWriteInput with list of todo items. + """ + if not todo_input.todos: + return """ +
+

No todos found

+
+ """ + + # Status emojis + status_emojis = {"pending": "⏳", "in_progress": "🔄", "completed": "✅"} + + # Build todo list HTML - todos are typed TodoWriteItem objects + todo_items: List[str] = [] + for todo in todo_input.todos: + todo_id = escape_html(todo.id) if todo.id else "" + content = escape_html(todo.content) if todo.content else "" + status = todo.status or "pending" + priority = todo.priority or "medium" + status_emoji = status_emojis.get(status, "⏳") + + # CSS class for styling + item_class = f"todo-item {status} {priority}" + + id_html = f'#{todo_id}' if todo.id else "" + todo_items.append(f""" +
+ {status_emoji} + {content} + {id_html} +
+ """) + + todos_html = "".join(todo_items) + + return f""" +
+ {todos_html} +
+ """ + + +# -- File Tools (Read/Write) -------------------------------------------------- + + +def format_read_tool_content(read_input: ReadInput) -> str: # noqa: ARG001 + """Format Read tool use content showing file path. + + Args: + read_input: Typed ReadInput with file_path, offset, and limit. + + Note: File path is now shown in the header, so we skip content here. + """ + # File path is now shown in header, so no content needed + # Don't show offset/limit parameters as they'll be visible in the result + return "" + + +# -- Tool Result Parsing (cat-n format) --------------------------------------- + + +def _parse_cat_n_snippet( + lines: List[str], start_idx: int = 0 +) -> Optional[tuple[str, Optional[str], int]]: + """Parse cat-n formatted snippet from lines. + + Args: + lines: List of lines to parse + start_idx: Index to start parsing from (default: 0) + + Returns: + Tuple of (code_content, system_reminder, line_offset) or None if not parseable + """ + code_lines: List[str] = [] + system_reminder: Optional[str] = None + in_system_reminder = False + line_offset = 1 # Default offset + + for line in lines[start_idx:]: + # Check for system-reminder start + if "" in line: + in_system_reminder = True + system_reminder = "" + continue + + # Check for system-reminder end + if "" in line: + in_system_reminder = False + continue + + # If in system reminder, accumulate reminder text + if in_system_reminder: + if system_reminder is not None: + system_reminder += line + "\n" + continue + + # Parse regular code line (format: " 123→content") + match = re.match(r"\s+(\d+)→(.*)$", line) + if match: + line_num = int(match.group(1)) + # Capture the first line number as offset + if not code_lines: + line_offset = line_num + code_lines.append(match.group(2)) + elif line.strip() == "": # Allow empty lines between cat-n lines + continue + else: # Non-matching non-empty line, stop parsing + break + + if not code_lines: + return None + + # Join code lines and trim trailing reminder 
text + code_content = "\n".join(code_lines) + if system_reminder: + system_reminder = system_reminder.strip() + + return (code_content, system_reminder, line_offset) + + +def parse_read_output(content: str, file_path: str) -> Optional[ReadOutput]: + """Parse Read tool result into structured content. + + Args: + content: Raw tool result string + file_path: Path to the file that was read + + Returns: + ReadOutput if parsing succeeds, None otherwise + """ + # Check if content matches the cat-n format pattern (line_number → content) + lines = content.split("\n") + if not lines or not re.match(r"\s+\d+→", lines[0]): + return None + + result = _parse_cat_n_snippet(lines) + if result is None: + return None + + code_content, system_reminder, line_offset = result + num_lines = len(code_content.split("\n")) + + return ReadOutput( + file_path=file_path, + content=code_content, + start_line=line_offset, + num_lines=num_lines, + total_lines=num_lines, # We don't know total from result + is_truncated=False, # Can't determine from result + system_reminder=system_reminder, + ) + + +def format_read_tool_result(output: ReadOutput) -> str: + """Format Read tool result as HTML with syntax highlighting. + + Args: + output: Parsed ReadOutput + + Returns: + HTML string with syntax-highlighted, collapsible file content + """ + # Build system reminder suffix if present + suffix_html = "" + if output.system_reminder: + escaped_reminder = escape_html(output.system_reminder) + suffix_html = ( + f"
🤖 {escaped_reminder}
" + ) + + return render_file_content_collapsible( + output.content, + output.file_path, + "read-tool-result", + linenostart=output.start_line, + suffix_html=suffix_html, + ) + + +def parse_edit_output(content: str, file_path: str) -> Optional[EditOutput]: + """Parse Edit tool result into structured content. + + Edit tool results typically have format: + "The file ... has been updated. Here's the result of running `cat -n` on a snippet..." + followed by cat-n formatted lines. + + Args: + content: Raw tool result string + file_path: Path to the file that was edited + + Returns: + EditOutput if parsing succeeds, None otherwise + """ + # Look for the cat-n snippet after the preamble + # Pattern: look for first line that matches the cat-n format + lines = content.split("\n") + code_start_idx = None + + for i, line in enumerate(lines): + if re.match(r"\s+\d+→", line): + code_start_idx = i + break + + if code_start_idx is None: + return None + + result = _parse_cat_n_snippet(lines, code_start_idx) + if result is None: + return None + + code_content, _system_reminder, line_offset = result + # Edit tool doesn't use system_reminder + + return EditOutput( + file_path=file_path, + success=True, # If we got here, edit succeeded + diffs=[], # We don't have diff info from result + message=code_content, + start_line=line_offset, + ) + + +def format_edit_tool_result(output: EditOutput) -> str: + """Format Edit tool result as HTML with syntax highlighting. + + Args: + output: Parsed EditOutput + + Returns: + HTML string with syntax-highlighted, collapsible file content + """ + return render_file_content_collapsible( + output.message, # message contains the code snippet + output.file_path, + "edit-tool-result", + linenostart=output.start_line, + ) + + +def format_write_tool_content(write_input: WriteInput) -> str: + """Format Write tool use content with Pygments syntax highlighting. + + Args: + write_input: Typed WriteInput with file_path and content. 
+ Note: File path is now shown in the header, so we skip it here. + """ + return render_file_content_collapsible( + write_input.content, write_input.file_path, "write-tool-content" + ) + + +# -- Edit Tools (Edit/Multiedit) ---------------------------------------------- + + +def format_edit_tool_content(edit_input: EditInput) -> str: + """Format Edit tool use content as a diff view with intra-line highlighting. + + Args: + edit_input: Typed EditInput with old_string, new_string, replace_all. + Note: File path is now shown in the header, so we skip it here. + """ + html_parts = ["
"] + + if edit_input.replace_all: + html_parts.append( + "
🔄 Replace all occurrences
" + ) + + # Use shared diff rendering helper + html_parts.append(render_single_diff(edit_input.old_string, edit_input.new_string)) + html_parts.append("
") + + return "".join(html_parts) + + +def format_multiedit_tool_content(multiedit_input: MultiEditInput) -> str: + """Format Multiedit tool use content showing multiple diffs. + + Args: + multiedit_input: Typed MultiEditInput with file_path and list of edits. + """ + escaped_path = escape_html(multiedit_input.file_path) + + html_parts = ["
"] + + # File path header + html_parts.append(f"
📝 {escaped_path}
") + html_parts.append( + f"
Applying {len(multiedit_input.edits)} edits
" + ) + + # Render each edit as a diff - edits are typed EditItem objects + for idx, edit in enumerate(multiedit_input.edits, 1): + html_parts.append( + f"
Edit #{idx}
" + ) + html_parts.append(render_single_diff(edit.old_string, edit.new_string)) + html_parts.append("
") + + html_parts.append("
") + return "".join(html_parts) + + +# -- Bash Tool ---------------------------------------------------------------- + + +def format_bash_tool_content(bash_input: BashInput) -> str: + """Format Bash tool use content in VS Code extension style. + + Args: + bash_input: Typed BashInput with command, description, timeout, etc. + Note: Description is now shown in the header, so we skip it here. + """ + escaped_command = escape_html(bash_input.command) + + html_parts = ["
"] + html_parts.append(f"
{escaped_command}
") + html_parts.append("
") + + return "".join(html_parts) + + +# -- Task Tool ---------------------------------------------------------------- + + +def format_task_tool_content(task_input: TaskInput) -> str: + """Format Task tool content with markdown-rendered prompt. + + Args: + task_input: Typed TaskInput with prompt, subagent_type, etc. + + Task tool spawns sub-agents. We render the prompt as the main content. + The sidechain user message (which would duplicate this prompt) is skipped. + + For long prompts (>20 lines), the content is made collapsible with a + preview of the first few lines to keep the transcript vertically compact. + """ + return render_markdown_collapsible(task_input.prompt, "task-prompt") + + +# -- Tool Summary and Title --------------------------------------------------- + + +def get_tool_summary(tool_use: ToolUseContent) -> Optional[str]: + """Extract a one-line summary from tool parameters for display in header. + + Returns a brief description or filename that can be shown in the message header + to save vertical space. Uses parsed_input for type-safe access. + """ + parsed = tool_use.parsed_input + + if isinstance(parsed, BashInput): + return parsed.description + + if isinstance(parsed, (ReadInput, EditInput, WriteInput)): + return parsed.file_path if parsed.file_path else None + + if isinstance(parsed, TaskInput): + return parsed.description if parsed.description else None + + # No summary for other tools + return None + + +def format_tool_use_title(tool_use: ToolUseContent) -> str: + """Generate the title HTML for a tool use message. + + Returns HTML string for the message header, with tool name, icon, + and optional summary/metadata. Uses parsed_input for type-safe access. 
+ """ + escaped_name = escape_html(tool_use.name) + parsed = tool_use.parsed_input + summary = get_tool_summary(tool_use) + + # TodoWrite: fixed title + if tool_use.name == "TodoWrite": + return "📝 Todo List" + + # Task: show subagent_type and description + if isinstance(parsed, TaskInput): + escaped_subagent = ( + escape_html(parsed.subagent_type) if parsed.subagent_type else "" + ) + description = parsed.description + + if description and parsed.subagent_type: + escaped_desc = escape_html(description) + return f"🔧 {escaped_name} {escaped_desc} ({escaped_subagent})" + elif description: + escaped_desc = escape_html(description) + return f"🔧 {escaped_name} {escaped_desc}" + elif parsed.subagent_type: + return f"🔧 {escaped_name} ({escaped_subagent})" + else: + return f"🔧 {escaped_name}" + + # Edit/Write: use 📝 icon + if isinstance(parsed, (EditInput, WriteInput)): + if summary: + escaped_summary = escape_html(summary) + return ( + f"📝 {escaped_name} {escaped_summary}" + ) + else: + return f"📝 {escaped_name}" + + # Read: use 📄 icon + if isinstance(parsed, ReadInput): + if summary: + escaped_summary = escape_html(summary) + return ( + f"📄 {escaped_name} {escaped_summary}" + ) + else: + return f"📄 {escaped_name}" + + # Other tools: append summary if present + if summary: + escaped_summary = escape_html(summary) + return f"{escaped_name} {escaped_summary}" + + return escaped_name + + +# -- Generic Parameter Table -------------------------------------------------- + + +def render_params_table(params: Dict[str, Any]) -> str: + """Render a dictionary of parameters as an HTML table. + + Reusable for tool parameters, diagnostic objects, etc. + """ + if not params: + return "
No parameters
" + + html_parts = [""] + + for key, value in params.items(): + escaped_key = escape_html(str(key)) + + # If value is structured (dict/list), render as JSON + if isinstance(value, (dict, list)): + try: + formatted_value = json.dumps(value, indent=2, ensure_ascii=False) # type: ignore[arg-type] + escaped_value = escape_html(formatted_value) + + # Make long structured values collapsible + if len(formatted_value) > 200: + preview = escape_html(formatted_value[:100]) + "..." + value_html = f""" +
+ {preview} +
{escaped_value}
+
+ """ + else: + value_html = ( + f"
{escaped_value}
" + ) + except (TypeError, ValueError): + escaped_value = escape_html(str(value)) # type: ignore[arg-type] + value_html = escaped_value + else: + # Simple value, render as-is (or collapsible if long) + escaped_value = escape_html(str(value)) + + # Make long string values collapsible + if len(str(value)) > 100: + preview = escape_html(str(value)[:80]) + "..." + value_html = f""" +
+ {preview} +
{escaped_value}
+
+ """ + else: + value_html = escaped_value + + html_parts.append(f""" + + + + + """) + + html_parts.append("
{escaped_key}{value_html}
") + return "".join(html_parts) + + +# -- Tool Use Dispatcher ------------------------------------------------------ + + +def format_tool_use_content(tool_use: ToolUseContent) -> str: + """Format tool use content as HTML. + + Uses parsed_input which handles lenient parsing at the model layer, + then dispatches to specialized formatters based on type. + """ + parsed = tool_use.parsed_input + + # Dispatch based on parsed type (lenient parsing happens in parsed_input) + if isinstance(parsed, TodoWriteInput): + return format_todowrite_content(parsed) + + if isinstance(parsed, BashInput): + return format_bash_tool_content(parsed) + + if isinstance(parsed, EditInput): + return format_edit_tool_content(parsed) + + if isinstance(parsed, MultiEditInput): + return format_multiedit_tool_content(parsed) + + if isinstance(parsed, WriteInput): + return format_write_tool_content(parsed) + + if isinstance(parsed, TaskInput): + return format_task_tool_content(parsed) + + if isinstance(parsed, ReadInput): + return format_read_tool_content(parsed) + + if isinstance(parsed, AskUserQuestionInput): + return format_askuserquestion_content(parsed) + + if isinstance(parsed, ExitPlanModeInput): + return format_exitplanmode_content(parsed) + + # Default: render as key/value table using shared renderer + return render_params_table(tool_use.input) + + +# -- Tool Result Content Formatter ------------------------------------------- + + +def _looks_like_bash_output(content: str) -> bool: + """Check if content looks like it's from a Bash tool based on common patterns.""" + if not content: + return False + + # Check for ANSI escape sequences + if "\x1b[" in content: + return True + + # Check for common bash/terminal patterns + bash_indicators = [ + "$ ", # Shell prompt + "❯ ", # Modern shell prompt + "> ", # Shell continuation + "\n+ ", # Bash -x output + "bash: ", # Bash error messages + "/bin/bash", # Bash path + "command not found", # Common bash error + "Permission denied", # Common bash error 
+ "No such file or directory", # Common bash error + ] + + # Check for file path patterns that suggest command output + if re.search(r"/[a-zA-Z0-9_-]+(/[a-zA-Z0-9_.-]+)*", content): # Unix-style paths + return True + + # Check for common command output patterns + if any(indicator in content for indicator in bash_indicators): + return True + + return False + + +def format_tool_result_content( + tool_result: ToolResultContent, + file_path: Optional[str] = None, + tool_name: Optional[str] = None, +) -> str: + """Format tool result content as HTML, including images. + + Args: + tool_result: The tool result content + file_path: Optional file path for context (used for Read/Edit/Write tool rendering) + tool_name: Optional tool name for specialized rendering (e.g., "Write", "Read", "Edit", "Task") + """ + # Handle both string and structured content + if isinstance(tool_result.content, str): + raw_content = tool_result.content + has_images = False + image_html_parts: List[str] = [] + else: + # Content is a list of structured items, extract text and images + content_parts: List[str] = [] + image_html_parts: List[str] = [] + for item in tool_result.content: + item_type = item.get("type") + if item_type == "text": + text_value = item.get("text") + if isinstance(text_value, str): + content_parts.append(text_value) + elif item_type == "image": + # Handle image content within tool results + source = cast(Dict[str, Any], item.get("source", {})) + if source: + media_type: str = str(source.get("media_type", "image/png")) + data: str = str(source.get("data", "")) + if data: + data_url = f"data:{media_type};base64,{data}" + image_html_parts.append( + f'Tool result image' + ) + raw_content = "\n".join(content_parts) + has_images = len(image_html_parts) > 0 + + # Strip XML tags but keep the content inside + # Also strip redundant "String: ..." portions that echo the input + if raw_content: + # Remove ... 
tags but keep inner content + raw_content = re.sub( + r"(.*?)", + r"\1", + raw_content, + flags=re.DOTALL, + ) + # Remove "String: ..." portions that echo the input (everything after "String:" to end) + raw_content = re.sub(r"\nString:.*$", "", raw_content, flags=re.DOTALL) + + # Special handling for Write tool: only show first line (acknowledgment) on success + if tool_name == "Write" and not tool_result.is_error and not has_images: + lines = raw_content.split("\n") + if lines: + # Keep only the first acknowledgment line and add ellipsis + first_line = lines[0] + escaped_html = escape_html(first_line) + return f"
{escaped_html} ...
" + + # Try to parse as Read tool result if file_path is provided + if file_path and tool_name == "Read" and not has_images: + read_output = parse_read_output(raw_content, file_path) + if read_output: + return format_read_tool_result(read_output) + + # Try to parse as Edit tool result if file_path is provided + if file_path and tool_name == "Edit" and not has_images: + edit_output = parse_edit_output(raw_content, file_path) + if edit_output: + return format_edit_tool_result(edit_output) + + # Special handling for Task tool: render result as markdown with Pygments (agent's final message) + # Deduplication is now handled retroactively by replacing the sub-assistant content + if tool_name == "Task" and not has_images: + return render_markdown_collapsible(raw_content, "task-result") + + # Special handling for ExitPlanMode tool: truncate redundant plan echo on success + if tool_name == "ExitPlanMode" and not has_images: + processed_content = format_exitplanmode_result(raw_content) + escaped_content = escape_html(processed_content) + return f"
{escaped_content}
" + + # Special handling for AskUserQuestion tool: render Q&A pairs with styling + if tool_name == "AskUserQuestion" and not has_images: + styled_result = format_askuserquestion_result(raw_content) + if styled_result: + return styled_result + # Fall through to default handling if parsing fails + + # Check if this looks like Bash tool output and process ANSI codes + # Bash tool results often contain ANSI escape sequences and terminal output + if _looks_like_bash_output(raw_content): + escaped_content = convert_ansi_to_html(raw_content) + else: + escaped_content = escape_html(raw_content) + + # Build final HTML based on content length and presence of images + if has_images: + # Combine text and images + text_html = f"
{escaped_content}
" if escaped_content else "" + images_html = "".join(image_html_parts) + combined_content = f"{text_html}{images_html}" + + # Always make collapsible when images are present + preview_text = "Text and image content" + return f""" +
+ + {preview_text} + +
+ {combined_content} +
+
+ """ + else: + # Text-only content (existing behavior) + # For simple content, show directly without collapsible wrapper + if len(escaped_content) <= 200: + return f"
{escaped_content}
" + + # For longer content, use collapsible details but no extra wrapper + preview_text = escaped_content[:200] + "..." + return f""" +
+ +
{preview_text}
+
+
+
{escaped_content}
+
+
+ """ + + +# -- Public Exports ----------------------------------------------------------- + +__all__ = [ + # AskUserQuestion + "format_askuserquestion_content", + "format_askuserquestion_result", + # ExitPlanMode + "format_exitplanmode_content", + "format_exitplanmode_result", + # TodoWrite + "format_todowrite_content", + # File tools (input) + "format_read_tool_content", + "format_write_tool_content", + # File tools (output/result) + "parse_read_output", + "format_read_tool_result", + "parse_edit_output", + "format_edit_tool_result", + # Edit tools + "format_edit_tool_content", + "format_multiedit_tool_content", + # Bash + "format_bash_tool_content", + # Task + "format_task_tool_content", + # Tool summary and title + "get_tool_summary", + "format_tool_use_title", + # Generic + "render_params_table", + # Dispatcher + "format_tool_use_content", + # Tool result + "format_tool_result_content", +] diff --git a/claude_code_log/html/user_formatters.py b/claude_code_log/html/user_formatters.py new file mode 100644 index 00000000..21528208 --- /dev/null +++ b/claude_code_log/html/user_formatters.py @@ -0,0 +1,365 @@ +"""HTML formatters for user message content. + +This module formats non-tool user message content types to HTML. +Part of the thematic formatter organization: +- system_formatters.py: SystemContent, HookSummaryContent +- user_formatters.py: SlashCommandContent, CommandOutputContent, etc. 
+- assistant_formatters.py: (future) assistant message variants +- tool_formatters.py: tool use/result content +""" + +from typing import List + +import mistune + +from .ansi_colors import convert_ansi_to_html +from ..models import ( + BashInputContent, + BashOutputContent, + CommandOutputContent, + CompactedSummaryContent, + IdeDiagnostic, + IdeNotificationContent, + IdeOpenedFile, + IdeSelection, + SlashCommandContent, + UserMemoryContent, + UserTextContent, +) +from .tool_formatters import render_params_table +from .utils import escape_html, render_collapsible_code, render_markdown_collapsible + + +# ============================================================================= +# Formatting Functions +# ============================================================================= + + +def format_slash_command_content(content: SlashCommandContent) -> str: + """Format slash command content as HTML. + + Args: + content: SlashCommandContent with command name, args, and contents + + Returns: + HTML string for the slash command display + """ + escaped_command_name = escape_html(content.command_name) + escaped_command_args = escape_html(content.command_args) + + # Format the command contents with proper line breaks + formatted_contents = content.command_contents.replace("\\n", "\n") + escaped_command_contents = escape_html(formatted_contents) + + # Build the content HTML - command name is the primary content + content_parts: List[str] = [f"{escaped_command_name}"] + if content.command_args: + content_parts.append(f"Args: {escaped_command_args}") + if content.command_contents: + lines = escaped_command_contents.splitlines() + line_count = len(lines) + if line_count <= 12: + # Short content, show inline + details_html = ( + f"Content:
{escaped_command_contents}
" + ) + else: + # Long content, make collapsible + preview = "\n".join(lines[:5]) + collapsible = render_collapsible_code( + f"
{preview}
", + f"
{escaped_command_contents}
", + line_count, + ) + details_html = f"Content:{collapsible}" + content_parts.append(details_html) + + return "
".join(content_parts) + + +def format_command_output_content(content: CommandOutputContent) -> str: + """Format command output content as HTML. + + Args: + content: CommandOutputContent with stdout and is_markdown flag + + Returns: + HTML string for the command output display + """ + if content.is_markdown: + # Render as markdown + markdown_html = mistune.html(content.stdout) + return f"
{markdown_html}
" + else: + # Convert ANSI codes to HTML for colored display + html_content = convert_ansi_to_html(content.stdout) + # Use
 to preserve formatting and line breaks
+        return f"
{html_content}
" + + +def format_bash_input_content(content: BashInputContent) -> str: + """Format bash input content as HTML. + + Args: + content: BashInputContent with the bash command + + Returns: + HTML string for the bash input display + """ + escaped_command = escape_html(content.command) + return ( + f" " + f"{escaped_command}" + ) + + +def format_bash_output_content( + content: BashOutputContent, + collapse_threshold: int = 10, + preview_lines: int = 3, +) -> str: + """Format bash output content as HTML. + + Args: + content: BashOutputContent with stdout and/or stderr + collapse_threshold: Number of lines before output becomes collapsible + preview_lines: Number of preview lines to show when collapsed + + Returns: + HTML string for the bash output display + """ + output_parts: List[tuple[str, str, int, str]] = [] + total_lines = 0 + + if content.stdout: + escaped_stdout = convert_ansi_to_html(content.stdout) + stdout_lines = content.stdout.count("\n") + 1 + total_lines += stdout_lines + output_parts.append(("stdout", escaped_stdout, stdout_lines, content.stdout)) + + if content.stderr: + escaped_stderr = convert_ansi_to_html(content.stderr) + stderr_lines = content.stderr.count("\n") + 1 + total_lines += stderr_lines + output_parts.append(("stderr", escaped_stderr, stderr_lines, content.stderr)) + + if not output_parts: + # Empty output + return ( + "
(no output)
" + ) + + # Build the HTML parts + html_parts: List[str] = [] + for output_type, escaped_content, _, _ in output_parts: + css_name = f"bash-{output_type}" + html_parts.append(f"
{escaped_content}
") + + full_html = "".join(html_parts) + + # Wrap in collapsible if output is large + if total_lines > collapse_threshold: + # Create preview (first few lines) + first_output = output_parts[0] + raw_preview = "\n".join(first_output[3].split("\n")[:preview_lines]) + preview_html = escape_html(raw_preview) + if total_lines > preview_lines: + preview_html += "\n..." + + return f"""
+ + {total_lines} lines +
{preview_html}
+
+
{full_html}
+
""" + + return full_html + + +def format_user_text_content(text: str) -> str: + """Format plain user text content as HTML. + + User text is displayed as-is in preformatted blocks to preserve + formatting and whitespace. + + Args: + text: The raw user message text + + Returns: + HTML string with escaped text in a pre tag + """ + escaped_text = escape_html(text) + return f"
{escaped_text}
" + + +def format_user_text_model_content(content: UserTextContent) -> str: + """Format UserTextContent model as HTML. + + Handles user text with optional IDE notifications, compacted summaries, + and memory input markers. + + Args: + content: UserTextContent with text and optional flags/notifications + + Returns: + HTML string combining IDE notifications and main text content + """ + parts: List[str] = [] + + # Add IDE notifications first if present + if content.ide_notifications: + notifications = format_ide_notification_content(content.ide_notifications) + parts.extend(notifications) + + # Format main text content based on type + if content.is_compacted: + # Render compacted summaries as markdown + text_html = render_markdown_collapsible( + content.text, "compacted-summary", line_threshold=20 + ) + elif content.is_memory_input: + # Render memory input as markdown + text_html = render_markdown_collapsible( + content.text, "user-memory", line_threshold=20 + ) + else: + # Regular user text as preformatted + text_html = format_user_text_content(content.text) + + parts.append(text_html) + return "\n".join(parts) + + +def format_compacted_summary_content(content: CompactedSummaryContent) -> str: + """Format compacted session summary content as HTML. + + Compacted summaries are rendered as collapsible markdown since they + contain structured summary text generated by Claude. + + Args: + content: CompactedSummaryContent with summary text + + Returns: + HTML string with collapsible markdown rendering + """ + return render_markdown_collapsible( + content.summary_text, + "compacted-summary", + line_threshold=30, + preview_line_count=10, + ) + + +def format_user_memory_content(content: UserMemoryContent) -> str: + """Format user memory input content as HTML. + + User memory content (from CLAUDE.md etc.) is rendered as preformatted text + to preserve the original formatting. 
+ + Args: + content: UserMemoryContent with memory text + + Returns: + HTML string with escaped text in a pre tag + """ + escaped_text = escape_html(content.memory_text) + return f"
{escaped_text}
" + + +def _format_opened_file(opened_file: IdeOpenedFile) -> str: + """Format a single IDE opened file notification as HTML.""" + escaped_content = escape_html(opened_file.content) + return f"
🤖 {escaped_content}
" + + +def _format_selection(selection: IdeSelection) -> str: + """Format a single IDE selection notification as HTML.""" + escaped_content = escape_html(selection.content) + + # For large selections, make them collapsible + if len(selection.content) > 200: + preview = escape_html(selection.content[:150]) + "..." + return f""" +
+
+ 📝 {preview} +
{escaped_content}
+
+
+ """ + else: + return f"
📝 {escaped_content}
" + + +def _format_diagnostic(diagnostic: IdeDiagnostic) -> List[str]: + """Format a single IDE diagnostic as HTML (may produce multiple notifications).""" + notifications: List[str] = [] + + if diagnostic.diagnostics: + # Parsed JSON diagnostics - render each as a table + for diag_item in diagnostic.diagnostics: + table_html = render_params_table(diag_item) + notification_html = ( + f"
" + f"⚠️ IDE Diagnostic
{table_html}" + f"
" + ) + notifications.append(notification_html) + elif diagnostic.raw_content: + # JSON parsing failed, render as plain text + escaped_content = escape_html(diagnostic.raw_content[:200]) + notification_html = ( + f"
🤖 IDE Diagnostics (parse error)
" + f"
{escaped_content}...
" + ) + notifications.append(notification_html) + + return notifications + + +def format_ide_notification_content(content: IdeNotificationContent) -> List[str]: + """Format IDE notification content as HTML. + + Takes structured IdeNotificationContent and returns a list of HTML + notification strings. + + Args: + content: IdeNotificationContent with opened_files, selections, diagnostics + + Returns: + List of HTML notification strings + """ + notifications: List[str] = [] + + # Format opened files + for opened_file in content.opened_files: + notifications.append(_format_opened_file(opened_file)) + + # Format selections + for selection in content.selections: + notifications.append(_format_selection(selection)) + + # Format diagnostics (may produce multiple notifications per diagnostic) + for diagnostic in content.diagnostics: + notifications.extend(_format_diagnostic(diagnostic)) + + return notifications + + +# ============================================================================= +# Public Exports +# ============================================================================= + +__all__ = [ + # Formatting functions + "format_slash_command_content", + "format_command_output_content", + "format_bash_input_content", + "format_bash_output_content", + "format_user_text_content", + "format_user_text_model_content", + "format_compacted_summary_content", + "format_user_memory_content", + "format_ide_notification_content", +] diff --git a/claude_code_log/html/utils.py b/claude_code_log/html/utils.py new file mode 100644 index 00000000..fdd8c379 --- /dev/null +++ b/claude_code_log/html/utils.py @@ -0,0 +1,352 @@ +"""HTML-specific rendering utilities. 
+ +This module contains all HTML generation code: +- CSS class computation from message type and modifiers +- Message emoji generation +- HTML escaping and markdown rendering +- Collapsible content rendering +- Tool-specific HTML formatters +- Message content HTML rendering +- Template environment management + +The functions here transform format-neutral TemplateMessage data into +HTML-specific output. +""" + +import html +from pathlib import Path +from typing import Any, Optional, TYPE_CHECKING + +import mistune +from jinja2 import Environment, FileSystemLoader, select_autoescape + +from .renderer_code import highlight_code_with_pygments, truncate_highlighted_preview +from ..renderer_timings import timing_stat + +if TYPE_CHECKING: + from ..renderer import TemplateMessage + + +# -- CSS and Message Display -------------------------------------------------- + + +def css_class_from_message(msg: "TemplateMessage") -> str: + """Generate CSS class string from message type and modifiers. + + This reconstructs the original css_class format for backward + compatibility with existing CSS and JavaScript. + + The order of classes follows the original pattern: + 1. Message type (required) + 2. Modifier flags in order: slash-command, command-output, compacted, + error, steering, sidechain + 3. 
System level suffix (e.g., "system-info", "system-warning") + + Args: + msg: The template message to generate CSS classes for + + Returns: + Space-separated CSS class string (e.g., "user slash-command sidechain") + """ + parts = [msg.type] + + mods = msg.modifiers + if mods.is_slash_command: + parts.append("slash-command") + if mods.is_command_output: + parts.append("command-output") + if mods.is_compacted: + parts.append("compacted") + if mods.is_error: + parts.append("error") + if mods.is_steering: + parts.append("steering") + if mods.is_sidechain: + parts.append("sidechain") + if mods.system_level: + parts.append(f"system-{mods.system_level}") + + return " ".join(parts) + + +def get_message_emoji(msg: "TemplateMessage") -> str: + """Return appropriate emoji for message type. + + Args: + msg: The template message to get emoji for + + Returns: + Emoji string for the message type, or empty string if no emoji + """ + msg_type = msg.type + + if msg_type == "session_header": + return "📋" + elif msg_type == "user": + return "🤷" + elif msg_type == "assistant": + return "🤖" + elif msg_type == "system": + return "⚙️" + elif msg_type == "tool_use": + return "🛠️" + elif msg_type == "tool_result": + if msg.modifiers.is_error: + return "🚨" + return "🧰" + elif msg_type == "thinking": + return "💭" + elif msg_type == "image": + return "🖼️" + return "" + + +# -- HTML Utilities ----------------------------------------------------------- + + +def escape_html(text: str) -> str: + """Escape HTML special characters in text. + + Also normalizes line endings (CRLF -> LF) to prevent double spacing in
 blocks.
+    """
+    # Normalize CRLF to LF to prevent double line breaks in HTML
+    normalized = text.replace("\r\n", "\n").replace("\r", "\n")
+    return html.escape(normalized)
+
+
def _create_pygments_plugin() -> Any:
    """Create a mistune plugin that uses Pygments for code block syntax highlighting.

    Returns:
        A mistune plugin callable that wraps the renderer's ``block_code``
        method, routing fenced code blocks with a language hint through
        Pygments and leaving all other blocks on the default renderer.
    """
    from pygments import highlight  # type: ignore[reportUnknownVariableType]
    from pygments.lexers import get_lexer_by_name, TextLexer  # type: ignore[reportUnknownVariableType]
    from pygments.formatters import HtmlFormatter  # type: ignore[reportUnknownVariableType]
    from pygments.util import ClassNotFound  # type: ignore[reportUnknownVariableType]

    def plugin_pygments(md: Any) -> None:
        """Plugin to add Pygments syntax highlighting to code blocks."""
        original_render = md.renderer.block_code

        def block_code(code: str, info: Optional[str] = None) -> str:
            """Render a code block, using Pygments when a language hint is given.

            Note: ``info`` may be truthy yet contain only whitespace, in which
            case ``info.split()`` is empty — fall back to the default renderer
            instead of indexing into an empty list (previously an IndexError).
            """
            info_tokens = info.split() if info else []
            if not info_tokens:
                # No usable language hint: use default rendering.
                return original_render(code, info)

            # First token of the info string is the language name.
            try:
                lexer = get_lexer_by_name(info_tokens[0], stripall=True)  # type: ignore[reportUnknownVariableType]
            except ClassNotFound:
                # Unknown language: highlight as plain text.
                lexer = TextLexer()  # type: ignore[reportUnknownVariableType]

            formatter = HtmlFormatter(  # type: ignore[reportUnknownVariableType]
                linenos=False,  # No line numbers in markdown code blocks
                cssclass="highlight",
                wrapcode=True,
            )
            # Track Pygments timing if enabled
            with timing_stat("_pygments_timings"):
                return str(highlight(code, lexer, formatter))  # type: ignore[reportUnknownArgumentType]

        md.renderer.block_code = block_code

    return plugin_pygments
+
+
def render_markdown(text: str) -> str:
    """Convert markdown text to HTML using mistune with Pygments syntax highlighting.

    The configured mistune renderer is built once and cached on the function
    object: its plugin list and options are call-invariant, so rebuilding it
    for every message is pure overhead. Mistune markdown instances are
    designed to be reused across calls.

    Args:
        text: Raw markdown text to convert.

    Returns:
        Rendered HTML string.
    """
    # Track markdown rendering time if enabled
    with timing_stat("_markdown_timings"):
        renderer = getattr(render_markdown, "_cached_renderer", None)
        if renderer is None:
            # Configure mistune with GitHub-flavored markdown features
            renderer = mistune.create_markdown(
                plugins=[
                    "strikethrough",
                    "footnotes",
                    "table",
                    "url",
                    "task_lists",
                    "def_list",
                    _create_pygments_plugin(),
                ],
                escape=False,  # Don't escape HTML since we want to render markdown properly
                hard_wrap=True,  # Line break for newlines (checklists in Assistant messages)
            )
            setattr(render_markdown, "_cached_renderer", renderer)
        return str(renderer(text))
+
+
+# -- Collapsible Content Rendering --------------------------------------------
+
+
+def render_collapsible_code(
+    preview_html: str,
+    full_html: str,
+    line_count: int,
+    is_markdown: bool = False,
+) -> str:
+    """Render a collapsible code/content block with preview.
+
+    Creates a details element with a line count badge and preview content
+    that expands to show the full content.
+
+    Args:
+        preview_html: HTML content to show in the collapsed summary
+        full_html: HTML content to show when expanded
+        line_count: Number of lines (shown in the badge)
+        is_markdown: If True, adds 'markdown' class to preview and full content divs
+
+    Returns:
+        HTML string with collapsible details element
+    """
+    markdown_class = " markdown" if is_markdown else ""
+    return f"""
+ + {line_count} lines +
{preview_html}
+
+
{full_html}
+
""" + + +def render_markdown_collapsible( + raw_content: str, + css_class: str, + line_threshold: int = 20, + preview_line_count: int = 5, +) -> str: + """Render markdown content, making it collapsible if it exceeds a line threshold. + + For long content, creates a collapsible details element with a preview. + For short content, renders inline with the specified CSS class. + + Args: + raw_content: The raw text content to render as markdown + css_class: CSS class for the wrapper div (e.g., "task-prompt", "task-result") + line_threshold: Number of lines above which content becomes collapsible (default 20) + preview_line_count: Number of lines to show in the preview (default 5) + + Returns: + HTML string with rendered markdown, optionally wrapped in collapsible details + """ + rendered_html = render_markdown(raw_content) + + lines = raw_content.splitlines() + if len(lines) <= line_threshold: + # Short content, show inline + return f'
{rendered_html}
' + + # Long content - make collapsible with rendered preview + preview_lines = lines[:preview_line_count] + preview_text = "\n".join(preview_lines) + if len(lines) > preview_line_count: + preview_text += "\n\n..." + # Render truncated markdown (produces valid HTML with proper tag closure) + preview_html = render_markdown(preview_text) + + collapsible = render_collapsible_code( + preview_html, rendered_html, len(lines), is_markdown=True + ) + return f'
{collapsible}
' + + +def render_file_content_collapsible( + code_content: str, + file_path: str, + css_class: str, + linenostart: int = 1, + line_threshold: int = 12, + preview_line_count: int = 5, + suffix_html: str = "", +) -> str: + """Render file content with syntax highlighting, collapsible if long. + + Highlights code using Pygments and wraps in a collapsible details element + if the content exceeds the line threshold. Uses preview truncation from + already-highlighted HTML to avoid double Pygments calls. + + Args: + code_content: The raw code content to highlight + file_path: File path for syntax detection (extension-based) + css_class: CSS class for the wrapper div (e.g., 'write-tool-content') + linenostart: Starting line number for Pygments (default 1) + line_threshold: Number of lines above which content becomes collapsible + preview_line_count: Number of lines to show in the preview + suffix_html: Optional HTML to append after the code (inside wrapper div) + + Returns: + HTML string with highlighted code, collapsible if >line_threshold lines + """ + # Highlight code with Pygments (single call) + highlighted_html = highlight_code_with_pygments( + code_content, file_path, linenostart=linenostart + ) + + html_parts = [f"
"] + + lines = code_content.split("\n") + if len(lines) > line_threshold: + # Extract preview from already-highlighted HTML (avoids double highlighting) + preview_html = truncate_highlighted_preview( + highlighted_html, preview_line_count + ) + html_parts.append( + render_collapsible_code(preview_html, highlighted_html, len(lines)) + ) + else: + # Show directly without collapsible + html_parts.append(highlighted_html) + + if suffix_html: + html_parts.append(suffix_html) + + html_parts.append("
") + return "".join(html_parts) + + +# -- Template Environment ----------------------------------------------------- + + +def starts_with_emoji(text: str) -> bool: + """Check if a string starts with an emoji character. + + Checks common emoji Unicode ranges: + - Emoticons: U+1F600 - U+1F64F + - Misc Symbols and Pictographs: U+1F300 - U+1F5FF + - Transport and Map Symbols: U+1F680 - U+1F6FF + - Supplemental Symbols: U+1F900 - U+1F9FF + - Misc Symbols: U+2600 - U+26FF + - Dingbats: U+2700 - U+27BF + """ + if not text: + return False + + first_char = text[0] + code_point = ord(first_char) + + return ( + 0x1F600 <= code_point <= 0x1F64F # Emoticons + or 0x1F300 <= code_point <= 0x1F5FF # Misc Symbols and Pictographs + or 0x1F680 <= code_point <= 0x1F6FF # Transport and Map Symbols + or 0x1F900 <= code_point <= 0x1F9FF # Supplemental Symbols + or 0x2600 <= code_point <= 0x26FF # Misc Symbols + or 0x2700 <= code_point <= 0x27BF # Dingbats + ) + + +def get_template_environment() -> Environment: + """Get Jinja2 template environment for HTML rendering. + + Creates a Jinja2 environment configured with: + - Template loading from the templates directory + - HTML auto-escaping + - Custom template filters/functions (starts_with_emoji) + + Returns: + Configured Jinja2 Environment + """ + templates_dir = Path(__file__).parent / "templates" + env = Environment( + loader=FileSystemLoader(templates_dir), + autoescape=select_autoescape(["html", "xml"]), + ) + # Add custom filters/functions + env.globals["starts_with_emoji"] = starts_with_emoji # type: ignore[index] + return env diff --git a/claude_code_log/models.py b/claude_code_log/models.py index 8b40c10a..135983e1 100644 --- a/claude_code_log/models.py +++ b/claude_code_log/models.py @@ -3,20 +3,642 @@ Enhanced to leverage official Anthropic types where beneficial. 
""" -from typing import Any, List, Union, Optional, Dict, Literal, cast -from pydantic import BaseModel +from dataclasses import dataclass +from enum import Enum +from typing import Any, List, Union, Optional, Dict, Literal from anthropic.types import Message as AnthropicMessage from anthropic.types import StopReason from anthropic.types import Usage as AnthropicUsage from anthropic.types.content_block import ContentBlock +from pydantic import BaseModel -class TodoItem(BaseModel): - id: str +class MessageType(str, Enum): + """Primary message type classification. + + This enum covers both JSONL entry types and rendering types. + Using str as base class maintains backward compatibility with string comparisons. + + JSONL Entry Types (from transcript files): + - USER, ASSISTANT, SYSTEM, SUMMARY, QUEUE_OPERATION + + Rendering Types (derived during processing): + - TOOL_USE, TOOL_RESULT, THINKING, IMAGE + - BASH_INPUT, BASH_OUTPUT + - SESSION_HEADER, UNKNOWN + """ + + # JSONL entry types + USER = "user" + ASSISTANT = "assistant" + SYSTEM = "system" + SUMMARY = "summary" + QUEUE_OPERATION = "queue-operation" + + # Rendering/display types (derived from content) + TOOL_USE = "tool_use" + TOOL_RESULT = "tool_result" + THINKING = "thinking" + IMAGE = "image" + BASH_INPUT = "bash-input" + BASH_OUTPUT = "bash-output" + SESSION_HEADER = "session-header" + UNKNOWN = "unknown" + + # System subtypes (for css_class) + SYSTEM_INFO = "system-info" + SYSTEM_WARNING = "system-warning" + SYSTEM_ERROR = "system-error" + + +@dataclass +class MessageModifiers: + """Semantic modifiers that affect message display. + + These are format-neutral flags that renderers can use to determine + how to display a message. HTML renderer converts these to CSS classes, + text renderer might use them for indentation or formatting. + + The modifiers capture traits that were previously encoded in the + css_class string (e.g., "user sidechain slash-command"). 
+ """ + + is_sidechain: bool = False + is_slash_command: bool = False + is_command_output: bool = False + is_compacted: bool = False + is_error: bool = False + is_steering: bool = False + # System message level (mutually exclusive: info, warning, error, hook) + system_level: Optional[str] = None + + +# ============================================================================= +# Message Content Models +# ============================================================================= +# Structured content models for format-neutral message representation. +# These replace the direct HTML generation in renderer.py, allowing different +# renderers (HTML, text, etc.) to format the content appropriately. + + +class MessageContent: + """Base class for structured message content. + + Subclasses represent specific content types that renderers can format + appropriately for their output format. + + Note: This is a plain class (not dataclass) to allow Pydantic BaseModel + subclasses like ToolUseContent and ImageContent to inherit from it. + """ + + pass + + +@dataclass +class SystemContent(MessageContent): + """System message with level indicator. + + Used for info, warning, and error system messages. + """ + + level: str # "info", "warning", "error" + text: str # Raw text content (may contain ANSI codes) + + +@dataclass +class HookInfo: + """Information about a single hook execution.""" + + command: str + # Could add more fields like exit_code, duration, etc. + + +@dataclass +class HookSummaryContent(MessageContent): + """Hook execution summary. + + Used for subtype="stop_hook_summary" system messages. 
+ """ + + has_output: bool + hook_errors: List[str] # Error messages from hooks + hook_infos: List[HookInfo] # Info about each hook executed + + +# ============================================================================= +# User Message Content Models +# ============================================================================= +# Structured content models for user message variants. +# These classify user text based on flags and tag patterns. + + +@dataclass +class SlashCommandContent(MessageContent): + """Content for slash command invocations (e.g., /context, /model). + + These are user messages containing command-name, command-args, and + command-contents tags parsed from the text. + """ + + command_name: str + command_args: str + command_contents: str + + +@dataclass +class CommandOutputContent(MessageContent): + """Content for local command output (e.g., output from /context). + + These are user messages containing local-command-stdout tags. + """ + + stdout: str + is_markdown: bool # True if content appears to be markdown + + +@dataclass +class BashInputContent(MessageContent): + """Content for inline bash commands in user messages. + + These are user messages containing bash-input tags. + """ + + command: str + + +@dataclass +class BashOutputContent(MessageContent): + """Content for bash command output. + + These are user messages containing bash-stdout and/or bash-stderr tags. + """ + + stdout: Optional[str] = None # Raw stdout content (may contain ANSI codes) + stderr: Optional[str] = None # Raw stderr content (may contain ANSI codes) + + +@dataclass +class ToolResultContentModel(MessageContent): + """Content model for tool results with rendering context. + + Wraps ToolResultContent with additional context needed for rendering, + such as the associated tool name and file path. 
+ """ + + tool_use_id: str + content: Any # Union[str, List[Dict[str, Any]]] + is_error: bool = False + tool_name: Optional[str] = None # Name of the tool that produced this result + file_path: Optional[str] = None # File path for Read/Edit/Write tools + + +@dataclass +class CompactedSummaryContent(MessageContent): + """Content for compacted session summaries. + + These are user messages that contain previous conversation context + in a compacted format when sessions run out of context. + Parsed by parse_compacted_summary() in parser.py, formatted by + format_compacted_summary_content() in html/user_formatters.py. + """ + + summary_text: str + + +@dataclass +class UserMemoryContent(MessageContent): + """Content for user memory input. + + These are user messages containing user-memory-input tags. + Parsed by parse_user_memory() in parser.py, formatted by + format_user_memory_content() in html/user_formatters.py. + """ + + memory_text: str + + +@dataclass +class IdeOpenedFile: + """IDE notification for an opened file.""" + + content: str # Raw content from the tag + + +@dataclass +class IdeSelection: + """IDE notification for a code selection.""" + + content: str # Raw selection content + + +@dataclass +class IdeDiagnostic: + """IDE diagnostic notification. + + Contains either parsed JSON diagnostics or raw content if parsing failed. + """ + + diagnostics: Optional[List[Dict[str, Any]]] = None # Parsed diagnostic objects + raw_content: Optional[str] = None # Fallback if JSON parsing failed + + +@dataclass +class IdeNotificationContent(MessageContent): + """Content for IDE notification tags. + + These are user messages containing IDE notification tags like: + - : File open notifications + - : Code selection notifications + - : Diagnostic JSON arrays + + Format-neutral: stores structured data, not HTML. 
+ """ + + opened_files: List[IdeOpenedFile] + selections: List[IdeSelection] + diagnostics: List[IdeDiagnostic] + remaining_text: str # Text after notifications extracted + + +@dataclass +class UserTextContent(MessageContent): + """Content for plain user text with optional IDE notifications. + + Wraps user text that may have been preprocessed to extract + IDE notifications, compacted summaries, or memory input markers. + + TODO: Not currently instantiated - formatter exists but pipeline uses + separate IdeNotificationContent and plain text instead. + """ + + text: str + ide_notifications: Optional[IdeNotificationContent] = None + is_compacted: bool = False + is_memory_input: bool = False + + +# ============================================================================= +# Assistant Message Content Models +# ============================================================================= +# Structured content models for assistant message variants. +# These classify assistant message parts for format-neutral rendering. + + +@dataclass +class AssistantTextContent(MessageContent): + """Content for assistant text messages. + + These are the text portions of assistant messages that get + rendered as markdown with syntax highlighting. + """ + + text: str + + +@dataclass +class ThinkingContentModel(MessageContent): + """Content for assistant thinking/reasoning blocks. + + These are the blocks that show the assistant's + internal reasoning process. + + Note: This is distinct from ThinkingContent (the Pydantic model + for parsing JSONL). This dataclass is for rendering purposes. + """ + + thinking: str + signature: Optional[str] = None + + +@dataclass +class UnknownContent(MessageContent): + """Content for unknown/unrecognized content types. + + Used as a fallback when encountering content types that don't have + specific handlers. Stores the type name for display purposes. 
+ """ + + type_name: str # The name/description of the unknown type + + +# ============================================================================= +# Tool Output Content Models +# ============================================================================= +# Structured content models for tool results (symmetric with Tool Input Models). +# These provide format-neutral representation of tool outputs that renderers +# can format appropriately. + + +@dataclass +class ReadOutput(MessageContent): + """Parsed Read tool output. + + Represents the result of reading a file with optional line range. + Symmetric with ReadInput for tool_use → tool_result pairing. + """ + + file_path: str + content: str # File content (may be truncated) + start_line: int # 1-based starting line number + num_lines: int # Number of lines in content + total_lines: int # Total lines in file + is_truncated: bool # Whether content was truncated + system_reminder: Optional[str] = None # Embedded system reminder text + + +@dataclass +class WriteOutput(MessageContent): + """Parsed Write tool output. + + Symmetric with WriteInput for tool_use → tool_result pairing. + + TODO: Not currently used - tool results handled as raw strings. + """ + + file_path: str + success: bool + message: str # Success or error message + + +@dataclass +class EditDiff: + """Single diff hunk for edit operations.""" + + old_text: str + new_text: str + + +@dataclass +class EditOutput(MessageContent): + """Parsed Edit tool output. + + Contains diff information for file edits. + Symmetric with EditInput for tool_use → tool_result pairing. + """ + + file_path: str + success: bool + diffs: List[EditDiff] # Changes made + message: str # Result message or code snippet + start_line: int = 1 # Starting line number for code display + + +@dataclass +class BashOutput(MessageContent): + """Parsed Bash tool output. + + Symmetric with BashInput for tool_use → tool_result pairing. 
+ + TODO: Not currently used - tool results handled as raw strings. + """ + + stdout: str + stderr: str + exit_code: Optional[int] + interrupted: bool + is_image: bool # True if output contains image data + + +@dataclass +class TaskOutput(MessageContent): + """Parsed Task (sub-agent) tool output. + + Symmetric with TaskInput for tool_use → tool_result pairing. + + TODO: Not currently used - tool results handled as raw strings. + """ + + agent_id: Optional[str] + result: str # Agent's response + is_background: bool + + +@dataclass +class GlobOutput(MessageContent): + """Parsed Glob tool output. + + Symmetric with GlobInput for tool_use → tool_result pairing. + + TODO: Not currently used - tool results handled as raw strings. + """ + + pattern: str + files: List[str] # Matching file paths + truncated: bool # Whether list was truncated + + +@dataclass +class GrepOutput(MessageContent): + """Parsed Grep tool output. + + Symmetric with GrepInput for tool_use → tool_result pairing. + + TODO: Not currently used - tool results handled as raw strings. + """ + + pattern: str + matches: List[str] # Matching lines/files + output_mode: str # "content", "files_with_matches", or "count" + truncated: bool + + +# ============================================================================= +# Renderer Content Models +# ============================================================================= +# Structured content models for renderer-specific elements. +# These are used by the HTML renderer but represent format-neutral data. + + +@dataclass +class SessionHeaderContent(MessageContent): + """Content for session headers in transcript rendering. + + Represents the header displayed at the start of each session + with session title and optional summary. + """ + + title: str + session_id: str + summary: Optional[str] = None + + +@dataclass +class DedupNoticeContent(MessageContent): + """Content for deduplication notices. 
+ + Displayed when content is deduplicated (e.g., sidechain assistant + text that duplicates the Task tool result). + """ + + notice_text: str + + +# ============================================================================= +# Tool Input Models +# ============================================================================= +# Typed models for tool inputs (Phase 11 of MESSAGE_REFACTORING.md) +# These provide type safety and IDE autocompletion for tool parameters. + + +class BashInput(BaseModel): + """Input parameters for the Bash tool.""" + + command: str + description: Optional[str] = None + timeout: Optional[int] = None + run_in_background: Optional[bool] = None + dangerouslyDisableSandbox: Optional[bool] = None + + +class ReadInput(BaseModel): + """Input parameters for the Read tool.""" + + file_path: str + offset: Optional[int] = None + limit: Optional[int] = None + + +class WriteInput(BaseModel): + """Input parameters for the Write tool.""" + + file_path: str content: str - status: Literal["pending", "in_progress", "completed"] - priority: Literal["high", "medium", "low"] + + +class EditInput(BaseModel): + """Input parameters for the Edit tool.""" + + file_path: str + old_string: str + new_string: str + replace_all: Optional[bool] = None + + +class EditItem(BaseModel): + """Single edit item for MultiEdit tool.""" + + old_string: str + new_string: str + + +class MultiEditInput(BaseModel): + """Input parameters for the MultiEdit tool.""" + + file_path: str + edits: List[EditItem] + + +class GlobInput(BaseModel): + """Input parameters for the Glob tool.""" + + pattern: str + path: Optional[str] = None + + +class GrepInput(BaseModel): + """Input parameters for the Grep tool. + + Note: Extra fields like -A, -B, -C are allowed for flexibility. 
+ """ + + pattern: str + path: Optional[str] = None + glob: Optional[str] = None + type: Optional[str] = None + output_mode: Optional[Literal["content", "files_with_matches", "count"]] = None + multiline: Optional[bool] = None + head_limit: Optional[int] = None + offset: Optional[int] = None + + model_config = {"extra": "allow"} # Allow -A, -B, -C, -i, -n fields + + +class TaskInput(BaseModel): + """Input parameters for the Task tool.""" + + prompt: str + subagent_type: str + description: str + model: Optional[Literal["sonnet", "opus", "haiku"]] = None + run_in_background: Optional[bool] = None + resume: Optional[str] = None + + +class TodoWriteItem(BaseModel): + """Single todo item for TodoWrite tool (input format). + + All fields have defaults for lenient parsing of legacy/malformed data. + """ + + content: str = "" + status: str = "pending" # Allow any string, not just Literal, for flexibility + activeForm: str = "" + id: Optional[str] = None + priority: Optional[str] = None # Allow any string for flexibility + + +class TodoWriteInput(BaseModel): + """Input parameters for the TodoWrite tool.""" + + todos: List[TodoWriteItem] + + +class AskUserQuestionOption(BaseModel): + """Option for an AskUserQuestion question. + + All fields have defaults for lenient parsing. + """ + + label: str = "" + description: Optional[str] = None + + +class AskUserQuestionItem(BaseModel): + """Single question in AskUserQuestion input. + + All fields have defaults for lenient parsing. + """ + + question: str = "" + header: Optional[str] = None + options: List[AskUserQuestionOption] = [] + multiSelect: bool = False + + +class AskUserQuestionInput(BaseModel): + """Input parameters for the AskUserQuestion tool. + + Supports both modern format (questions list) and legacy format (single question). 
+ """ + + questions: List[AskUserQuestionItem] = [] + question: Optional[str] = None # Legacy single question format + + +class ExitPlanModeInput(BaseModel): + """Input parameters for the ExitPlanMode tool.""" + + plan: str = "" + launchSwarm: Optional[bool] = None + teammateCount: Optional[int] = None + + +# Union of all typed tool inputs +ToolInput = Union[ + BashInput, + ReadInput, + WriteInput, + EditInput, + MultiEditInput, + GlobInput, + GrepInput, + TaskInput, + TodoWriteInput, + AskUserQuestionInput, + ExitPlanModeInput, + Dict[str, Any], # Fallback for unknown tools +] class UsageInfo(BaseModel): @@ -62,11 +684,28 @@ class TextContent(BaseModel): text: str -class ToolUseContent(BaseModel): +class ToolUseContent(BaseModel, MessageContent): type: Literal["tool_use"] id: str name: str input: Dict[str, Any] + _parsed_input: Optional["ToolInput"] = None # Cached parsed input + + @property + def parsed_input(self) -> "ToolInput": + """Get typed input model if available, otherwise return raw dict. + + Lazily parses the input dict into a typed model. + Uses strict validation first, then lenient parsing if available. + Result is cached for subsequent accesses. 
+ """ + if self._parsed_input is None: + from .parser import parse_tool_input + + object.__setattr__( + self, "_parsed_input", parse_tool_input(self.name, self.input) + ) + return self._parsed_input # type: ignore[return-value] class ToolResultContent(BaseModel): @@ -89,7 +728,7 @@ class ImageSource(BaseModel): data: str -class ImageContent(BaseModel): +class ImageContent(BaseModel, MessageContent): type: Literal["image"] source: ImageSource @@ -108,6 +747,7 @@ class ImageContent(BaseModel): class UserMessage(BaseModel): role: Literal["user"] content: Union[str, List[ContentItem]] + usage: Optional["UsageInfo"] = None # For type compatibility with AssistantMessage class AssistantMessage(BaseModel): @@ -127,6 +767,8 @@ def from_anthropic_message( cls, anthropic_msg: AnthropicMessage ) -> "AssistantMessage": """Create AssistantMessage from official Anthropic Message.""" + from .parser import normalize_usage_info + # Convert Anthropic Message to our format, preserving official types where possible return cls( id=anthropic_msg.id, @@ -142,48 +784,13 @@ def from_anthropic_message( ) -class FileInfo(BaseModel): - filePath: str - content: str - numLines: int - startLine: int - totalLines: int - - -class FileReadResult(BaseModel): - type: Literal["text"] - file: FileInfo - - -class CommandResult(BaseModel): - stdout: str - stderr: str - interrupted: bool - isImage: bool - - -class TodoResult(BaseModel): - oldTodos: List[TodoItem] - newTodos: List[TodoItem] - - -class EditResult(BaseModel): - oldString: Optional[str] = None - newString: Optional[str] = None - replaceAll: Optional[bool] = None - originalFile: Optional[str] = None - structuredPatch: Optional[Any] = None - userModified: Optional[bool] = None - - +# Tool result type - flexible to accept various result formats from JSONL +# The specific parsing/formatting happens in tool_formatters.py using +# ReadOutput, EditOutput, etc. 
(see Tool Output Content Models section) ToolUseResult = Union[ str, - List[TodoItem], - FileReadResult, - CommandResult, - TodoResult, - EditResult, - List[ContentItem], + List[Any], # Covers List[TodoWriteItem], List[ContentItem], etc. + Dict[str, Any], # Covers structured results ] @@ -218,6 +825,7 @@ class SummaryTranscriptEntry(BaseModel): summary: str leafUuid: str cwd: Optional[str] = None + sessionId: None = None # Summaries don't have a sessionId class SystemTranscriptEntry(BaseTranscriptEntry): @@ -261,179 +869,3 @@ class QueueOperationTranscriptEntry(BaseModel): SystemTranscriptEntry, QueueOperationTranscriptEntry, ] - - -def normalize_usage_info(usage_data: Any) -> Optional[UsageInfo]: - """Normalize usage data to be compatible with both custom and Anthropic formats.""" - if usage_data is None: - return None - - # If it's already a UsageInfo instance, return as-is - if isinstance(usage_data, UsageInfo): - return usage_data - - # If it's an Anthropic Usage instance, convert using our method - if isinstance(usage_data, AnthropicUsage): - return UsageInfo.from_anthropic_usage(usage_data) - - # If it has the shape of an Anthropic Usage, try to construct it first - if hasattr(usage_data, "input_tokens") and hasattr(usage_data, "output_tokens"): - try: - # Try to create an Anthropic Usage first - anthropic_usage = AnthropicUsage.model_validate(usage_data) - return UsageInfo.from_anthropic_usage(anthropic_usage) - except Exception: - # Fall back to direct conversion - return UsageInfo( - input_tokens=getattr(usage_data, "input_tokens", None), - cache_creation_input_tokens=getattr( - usage_data, "cache_creation_input_tokens", None - ), - cache_read_input_tokens=getattr( - usage_data, "cache_read_input_tokens", None - ), - output_tokens=getattr(usage_data, "output_tokens", None), - service_tier=getattr(usage_data, "service_tier", None), - server_tool_use=getattr(usage_data, "server_tool_use", None), - ) - - # If it's a dict, validate and convert to our format - 
if isinstance(usage_data, dict): - return UsageInfo.model_validate(usage_data) - - return None - - -def parse_content_item(item_data: Dict[str, Any]) -> ContentItem: - """Parse a content item using enhanced approach with Anthropic types.""" - try: - content_type = item_data.get("type", "") - - # Try official Anthropic types first for better future compatibility - if content_type == "text": - try: - from anthropic.types.text_block import TextBlock - - return TextBlock.model_validate(item_data) - except Exception: - return TextContent.model_validate(item_data) - elif content_type == "tool_use": - try: - from anthropic.types.tool_use_block import ToolUseBlock - - return ToolUseBlock.model_validate(item_data) - except Exception: - return ToolUseContent.model_validate(item_data) - elif content_type == "thinking": - try: - from anthropic.types.thinking_block import ThinkingBlock - - return ThinkingBlock.model_validate(item_data) - except Exception: - return ThinkingContent.model_validate(item_data) - elif content_type == "tool_result": - return ToolResultContent.model_validate(item_data) - elif content_type == "image": - return ImageContent.model_validate(item_data) - else: - # Fallback to text content for unknown types - return TextContent(type="text", text=str(item_data)) - except Exception: - return TextContent(type="text", text=str(item_data)) - - -def parse_message_content(content_data: Any) -> Union[str, List[ContentItem]]: - """Parse message content, handling both string and list formats.""" - if isinstance(content_data, str): - return content_data - elif isinstance(content_data, list): - content_list = cast(List[Dict[str, Any]], content_data) - return [parse_content_item(item) for item in content_list] - else: - return str(content_data) - - -def parse_transcript_entry(data: Dict[str, Any]) -> TranscriptEntry: - """ - Parse a JSON dictionary into the appropriate TranscriptEntry type. - - Enhanced to optionally use official Anthropic types for assistant messages. 
- - Args: - data: Dictionary parsed from JSON - - Returns: - The appropriate TranscriptEntry subclass - - Raises: - ValueError: If the data doesn't match any known transcript entry type - """ - entry_type = data.get("type") - - if entry_type == "user": - # Parse message content if present - data_copy = data.copy() - if "message" in data_copy and "content" in data_copy["message"]: - data_copy["message"] = data_copy["message"].copy() - data_copy["message"]["content"] = parse_message_content( - data_copy["message"]["content"] - ) - # Parse toolUseResult if present and it's a list of content items - if "toolUseResult" in data_copy and isinstance( - data_copy["toolUseResult"], list - ): - # Check if it's a list of content items (MCP tool results) - tool_use_result = cast(List[Any], data_copy["toolUseResult"]) - if ( - tool_use_result - and isinstance(tool_use_result[0], dict) - and "type" in tool_use_result[0] - ): - data_copy["toolUseResult"] = [ - parse_content_item(cast(Dict[str, Any], item)) - for item in tool_use_result - if isinstance(item, dict) - ] - return UserTranscriptEntry.model_validate(data_copy) - - elif entry_type == "assistant": - # Enhanced assistant message parsing with optional Anthropic types - data_copy = data.copy() - - # Validate compatibility with official Anthropic Message type - if "message" in data_copy: - try: - message_data = data_copy["message"] - AnthropicMessage.model_validate(message_data) - # Successfully validated - our data is compatible with official Anthropic types - except Exception: - # Validation failed - continue with standard parsing - pass - - # Standard parsing path (works for all cases) - if "message" in data_copy and "content" in data_copy["message"]: - message_copy = data_copy["message"].copy() - message_copy["content"] = parse_message_content(message_copy["content"]) - - # Normalize usage data to support both Anthropic and custom formats - if "usage" in message_copy: - message_copy["usage"] = 
normalize_usage_info(message_copy["usage"]) - - data_copy["message"] = message_copy - return AssistantTranscriptEntry.model_validate(data_copy) - - elif entry_type == "summary": - return SummaryTranscriptEntry.model_validate(data) - - elif entry_type == "system": - return SystemTranscriptEntry.model_validate(data) - - elif entry_type == "queue-operation": - # Parse content if present (in enqueue and remove operations) - data_copy = data.copy() - if "content" in data_copy and isinstance(data_copy["content"], list): - data_copy["content"] = parse_message_content(data_copy["content"]) - return QueueOperationTranscriptEntry.model_validate(data_copy) - - else: - raise ValueError(f"Unknown transcript entry type: {entry_type}") diff --git a/claude_code_log/parser.py b/claude_code_log/parser.py index a34ca5af..23266d3d 100644 --- a/claude_code_log/parser.py +++ b/claude_code_log/parser.py @@ -2,48 +2,81 @@ """Parse and extract data from Claude transcript JSONL files.""" import json -from pathlib import Path import re -from typing import Any, List, Optional, Union, TYPE_CHECKING +from typing import Any, Callable, Dict, List, Optional, Union, cast, TypeGuard from datetime import datetime -import dateparser + +from anthropic.types import Message as AnthropicMessage +from anthropic.types import Usage as AnthropicUsage +from anthropic.types.text_block import TextBlock +from anthropic.types.thinking_block import ThinkingBlock +from pydantic import BaseModel from .models import ( - TranscriptEntry, - UserTranscriptEntry, - SummaryTranscriptEntry, - parse_transcript_entry, + # Content types ContentItem, TextContent, ThinkingContent, + ToolUseContent, + ToolResultContent, + ImageContent, + # User message content models + SlashCommandContent, + CommandOutputContent, + BashInputContent, + BashOutputContent, + CompactedSummaryContent, + UserMemoryContent, + UserTextContent, + IdeNotificationContent, + IdeOpenedFile, + IdeSelection, + IdeDiagnostic, + # Assistant message content models 
+ BashInput, + ReadInput, + WriteInput, + EditInput, + EditItem, + MultiEditInput, + GlobInput, + GrepInput, + TaskInput, + TodoWriteInput, + TodoWriteItem, + AskUserQuestionInput, + AskUserQuestionItem, + AskUserQuestionOption, + ExitPlanModeInput, + ToolInput, + # Usage and transcript entry types + UsageInfo, + MessageType, + TranscriptEntry, + UserTranscriptEntry, + AssistantTranscriptEntry, + SummaryTranscriptEntry, + SystemTranscriptEntry, + QueueOperationTranscriptEntry, ) -if TYPE_CHECKING: - from .cache import CacheManager - def extract_text_content(content: Union[str, List[ContentItem], None]) -> str: - """Extract text content from Claude message content structure (supports both custom and Anthropic types).""" + """Extract text content from Claude message content structure. + + Supports both custom models (TextContent, ThinkingContent) and official + Anthropic SDK types (TextBlock, ThinkingBlock). + """ if content is None: return "" if isinstance(content, list): text_parts: List[str] = [] for item in content: - # Handle both custom TextContent and official Anthropic TextBlock - if isinstance(item, TextContent): + # Handle text content (custom TextContent or Anthropic TextBlock) + if isinstance(item, (TextContent, TextBlock)): text_parts.append(item.text) - elif ( - hasattr(item, "type") - and hasattr(item, "text") - and getattr(item, "type") == "text" - ): - # Official Anthropic TextBlock - text_parts.append(getattr(item, "text")) - elif isinstance(item, ThinkingContent): - # Skip thinking content in main text extraction - continue - elif hasattr(item, "type") and getattr(item, "type") == "thinking": - # Skip official Anthropic thinking content too + # Skip thinking content (custom ThinkingContent or Anthropic ThinkingBlock) + elif isinstance(item, (ThinkingContent, ThinkingBlock)): continue return "\n".join(text_parts) else: @@ -58,258 +91,866 @@ def parse_timestamp(timestamp_str: str) -> Optional[datetime]: return None -def filter_messages_by_date( - 
messages: List[TranscriptEntry], from_date: Optional[str], to_date: Optional[str] -) -> List[TranscriptEntry]: - """Filter messages based on date range.""" - if not from_date and not to_date: - return messages - - # Parse the date strings using dateparser - from_dt = None - to_dt = None - - if from_date: - from_dt = dateparser.parse(from_date) - if not from_dt: - raise ValueError(f"Could not parse from-date: {from_date}") - # If parsing relative dates like "today", start from beginning of day - if from_date in ["today", "yesterday"] or "days ago" in from_date: - from_dt = from_dt.replace(hour=0, minute=0, second=0, microsecond=0) - - if to_date: - to_dt = dateparser.parse(to_date) - if not to_dt: - raise ValueError(f"Could not parse to-date: {to_date}") - # If parsing relative dates like "today", end at end of day - if to_date in ["today", "yesterday"] or "days ago" in to_date: - to_dt = to_dt.replace(hour=23, minute=59, second=59, microsecond=999999) - - filtered_messages: List[TranscriptEntry] = [] - for message in messages: - # Handle SummaryTranscriptEntry which doesn't have timestamp - if isinstance(message, SummaryTranscriptEntry): - filtered_messages.append(message) - continue +# ============================================================================= +# User Message Content Parsing +# ============================================================================= + + +def parse_slash_command(text: str) -> Optional[SlashCommandContent]: + """Parse slash command tags from text. 
+
+    Args:
+        text: Raw text that may contain command-name, command-args, command-contents tags
+
+    Returns:
+        SlashCommandContent if tags found, None otherwise
+    """
+    command_name_match = re.search(r"<command-name>([^<]+)</command-name>", text)
+    if not command_name_match:
+        return None
+
+    command_name = command_name_match.group(1).strip()
+
+    command_args_match = re.search(r"<command-args>([^<]*)</command-args>", text)
+    command_args = command_args_match.group(1).strip() if command_args_match else ""
+
+    # Parse command contents, handling JSON format
+    command_contents_match = re.search(
+        r"<command-contents>(.+?)</command-contents>", text, re.DOTALL
+    )
+    command_contents = ""
+    if command_contents_match:
+        contents_text = command_contents_match.group(1).strip()
+        # Try to parse as JSON and extract the text field
+        try:
+            contents_json: Any = json.loads(contents_text)
+            if isinstance(contents_json, dict) and "text" in contents_json:
+                text_dict = cast(Dict[str, Any], contents_json)
+                text_value = text_dict["text"]
+                command_contents = str(text_value)
+            else:
+                command_contents = contents_text
+        except json.JSONDecodeError:
+            command_contents = contents_text
+
+    return SlashCommandContent(
+        command_name=command_name,
+        command_args=command_args,
+        command_contents=command_contents,
+    )
+
+
+def parse_command_output(text: str) -> Optional[CommandOutputContent]:
+    """Parse command output tags from text.
+
+    Args:
+        text: Raw text that may contain local-command-stdout tags
-
-        timestamp_str = message.timestamp
-        if not timestamp_str:
-            continue
+    Returns:
+        CommandOutputContent if tags found, None otherwise
+    """
+    stdout_match = re.search(
+        r"<local-command-stdout>(.*?)</local-command-stdout>",
+        text,
+        re.DOTALL,
+    )
+    if not stdout_match:
+        return None
+
+    stdout_content = stdout_match.group(1).strip()
+    # Check if content looks like markdown (starts with markdown headers)
+    is_markdown = bool(re.match(r"^#+\s+", stdout_content, re.MULTILINE))
-        message_dt = parse_timestamp(timestamp_str)
-        if not message_dt:
-            continue
+    return CommandOutputContent(stdout=stdout_content, is_markdown=is_markdown)
-        # Convert to naive datetime for comparison (dateparser returns naive datetimes)
-        if message_dt.tzinfo:
-            message_dt = message_dt.replace(tzinfo=None)
-        # Check if message falls within date range
-        if from_dt and message_dt < from_dt:
-            continue
-        if to_dt and message_dt > to_dt:
-            continue
+def parse_bash_input(text: str) -> Optional[BashInputContent]:
+    """Parse bash input tags from text.
-        filtered_messages.append(message)
+    Args:
+        text: Raw text that may contain bash-input tags
+
+    Returns:
+        BashInputContent if tags found, None otherwise
+    """
+    bash_match = re.search(r"<bash-input>(.*?)</bash-input>", text, re.DOTALL)
+    if not bash_match:
+        return None
-    return filtered_messages
+    return BashInputContent(command=bash_match.group(1).strip())
-def load_transcript(
-    jsonl_path: Path,
-    cache_manager: Optional["CacheManager"] = None,
-    from_date: Optional[str] = None,
-    to_date: Optional[str] = None,
-    silent: bool = False,
-    _loaded_files: Optional[set[Path]] = None,
-) -> List[TranscriptEntry]:
-    """Load and parse JSONL transcript file, using cache if available.
+def parse_bash_output(text: str) -> Optional[BashOutputContent]:
+    """Parse bash output tags from text.

    Args:
-        _loaded_files: Internal parameter to track loaded files and prevent infinite recursion.
+ text: Raw text that may contain bash-stdout/bash-stderr tags + + Returns: + BashOutputContent if tags found, None otherwise """ - # Initialize loaded files set on first call - if _loaded_files is None: - _loaded_files = set() - - # Prevent infinite recursion by checking if this file is already being loaded - if jsonl_path in _loaded_files: - return [] - - _loaded_files.add(jsonl_path) - # Try to load from cache first - if cache_manager is not None: - # Use filtered loading if date parameters are provided - if from_date or to_date: - cached_entries = cache_manager.load_cached_entries_filtered( - jsonl_path, from_date, to_date - ) - else: - cached_entries = cache_manager.load_cached_entries(jsonl_path) - - if cached_entries is not None: - if not silent: - print(f"Loading {jsonl_path} from cache...") - return cached_entries - - # Parse from source file - messages: List[TranscriptEntry] = [] - agent_ids: set[str] = set() # Collect agentId references while parsing - - with open(jsonl_path, "r", encoding="utf-8", errors="replace") as f: - if not silent: - print(f"Processing {jsonl_path}...") - for line_no, line in enumerate(f, 1): # Start counting from 1 - line = line.strip() - if line: - try: - entry_dict: dict[str, Any] | str = json.loads(line) - if not isinstance(entry_dict, dict): - print( - f"Line {line_no} of {jsonl_path} is not a JSON object: {line}" - ) - continue - - # Check for agentId BEFORE Pydantic parsing - # agentId can be at top level OR nested in toolUseResult - # For UserTranscriptEntry, we need to copy it to top level so Pydantic preserves it - if "agentId" in entry_dict: - agent_id = entry_dict.get("agentId") - if agent_id: - agent_ids.add(agent_id) - elif "toolUseResult" in entry_dict: - tool_use_result = entry_dict.get("toolUseResult") - if ( - isinstance(tool_use_result, dict) - and "agentId" in tool_use_result - ): - agent_id_value = tool_use_result.get("agentId") # type: ignore[reportUnknownVariableType, reportUnknownMemberType] - if 
isinstance(agent_id_value, str): - agent_ids.add(agent_id_value) - # Copy agentId to top level for Pydantic to preserve - entry_dict["agentId"] = agent_id_value - - entry_type: str | None = entry_dict.get("type") - - if entry_type in [ - "user", - "assistant", - "summary", - "system", - "queue-operation", - ]: - # Parse using Pydantic models - entry = parse_transcript_entry(entry_dict) - messages.append(entry) - elif ( - entry_type - in [ - "file-history-snapshot", # Internal Claude Code file backup metadata - ] - ): - # Silently skip internal message types we don't render - pass - else: - print( - f"Line {line_no} of {jsonl_path} is not a recognised message type: {line}" - ) - except json.JSONDecodeError as e: - print( - f"Line {line_no} of {jsonl_path} | JSON decode error: {str(e)}" - ) - except ValueError as e: - # Extract a more descriptive error message - error_msg = str(e) - if "validation error" in error_msg.lower(): - err_no_url = re.sub( - r" For further information visit https://errors.pydantic(.*)\n?", - "", - error_msg, - ) - print(f"Line {line_no} of {jsonl_path} | {err_no_url}") - else: - print( - f"Line {line_no} of {jsonl_path} | ValueError: {error_msg}" - "\n{traceback.format_exc()}" - ) - except Exception as e: - print( - f"Line {line_no} of {jsonl_path} | Unexpected error: {str(e)}" - "\n{traceback.format_exc()}" + stdout_match = re.search(r"(.*?)", text, re.DOTALL) + stderr_match = re.search(r"(.*?)", text, re.DOTALL) + + if not stdout_match and not stderr_match: + return None + + stdout = stdout_match.group(1).strip() if stdout_match else None + stderr = stderr_match.group(1).strip() if stderr_match else None + + # Convert empty strings to None for cleaner representation + if stdout == "": + stdout = None + if stderr == "": + stderr = None + + return BashOutputContent(stdout=stdout, stderr=stderr) + + +# Shared regex patterns for IDE notification tags +IDE_OPENED_FILE_PATTERN = re.compile( + r"(.*?)", re.DOTALL +) +IDE_SELECTION_PATTERN = 
re.compile(r"(.*?)", re.DOTALL) +IDE_DIAGNOSTICS_PATTERN = re.compile( + r"\s*(.*?)\s*", + re.DOTALL, +) + + +def parse_ide_notifications(text: str) -> Optional[IdeNotificationContent]: + """Parse IDE notification tags from text. + + Handles: + - : Simple file open notifications + - : Code selection notifications + - : JSON diagnostic arrays + + Args: + text: Raw text that may contain IDE notification tags + + Returns: + IdeNotificationContent if any tags found, None otherwise + """ + opened_files: List[IdeOpenedFile] = [] + selections: List[IdeSelection] = [] + diagnostics: List[IdeDiagnostic] = [] + remaining_text = text + + # Pattern 1: content + for match in IDE_OPENED_FILE_PATTERN.finditer(remaining_text): + content = match.group(1).strip() + opened_files.append(IdeOpenedFile(content=content)) + + remaining_text = IDE_OPENED_FILE_PATTERN.sub("", remaining_text) + + # Pattern 2: content + for match in IDE_SELECTION_PATTERN.finditer(remaining_text): + content = match.group(1).strip() + selections.append(IdeSelection(content=content)) + + remaining_text = IDE_SELECTION_PATTERN.sub("", remaining_text) + + # Pattern 3: JSON + for match in IDE_DIAGNOSTICS_PATTERN.finditer(remaining_text): + json_content = match.group(1).strip() + try: + parsed_diagnostics: Any = json.loads(json_content) + if isinstance(parsed_diagnostics, list): + diagnostics.append( + IdeDiagnostic( + diagnostics=cast(List[Dict[str, Any]], parsed_diagnostics) ) + ) + else: + # Not a list, store as raw content + diagnostics.append(IdeDiagnostic(raw_content=json_content)) + except (json.JSONDecodeError, ValueError): + # JSON parsing failed, store raw content + diagnostics.append(IdeDiagnostic(raw_content=json_content)) - # Load agent files if any were referenced - # Build a map of agentId -> agent messages - agent_messages_map: dict[str, List[TranscriptEntry]] = {} - if agent_ids: - parent_dir = jsonl_path.parent - for agent_id in agent_ids: - agent_file = parent_dir / f"agent-{agent_id}.jsonl" - # 
Skip if the agent file is the same as the current file (self-reference) - if agent_file == jsonl_path: - continue - if agent_file.exists(): - if not silent: - print(f"Loading agent file {agent_file}...") - # Recursively load the agent file (it might reference other agents) - agent_messages = load_transcript( - agent_file, - cache_manager, - from_date, - to_date, - silent=True, - _loaded_files=_loaded_files, + remaining_text = IDE_DIAGNOSTICS_PATTERN.sub("", remaining_text) + + # Only return if we found any IDE tags + if not opened_files and not selections and not diagnostics: + return None + + return IdeNotificationContent( + opened_files=opened_files, + selections=selections, + diagnostics=diagnostics, + remaining_text=remaining_text.strip(), + ) + + +# Pattern for compacted session summary detection +COMPACTED_SUMMARY_PREFIX = "This session is being continued from a previous conversation that ran out of context" + + +def parse_compacted_summary( + content_list: List[ContentItem], +) -> Optional[CompactedSummaryContent]: + """Parse compacted session summary from content list. + + Compacted summaries are generated when a session runs out of context and + needs to be continued. They contain a summary of the previous conversation. + + If the first text item starts with the compacted summary prefix, all text + items are combined into a single CompactedSummaryContent. 
+ + Args: + content_list: List of ContentItem from user message + + Returns: + CompactedSummaryContent if first text is a compacted summary, None otherwise + """ + if not content_list or not hasattr(content_list[0], "text"): + return None + + first_text = getattr(content_list[0], "text", "") + if not first_text.startswith(COMPACTED_SUMMARY_PREFIX): + return None + + # Combine all text content for compacted summaries + # Use hasattr check to handle both TextContent models and SDK TextBlock objects + texts = cast( + list[str], + [item.text for item in content_list if hasattr(item, "text")], # type: ignore[union-attr] + ) + all_text = "\n\n".join(texts) + return CompactedSummaryContent(summary_text=all_text) + + +# Pattern for user memory input tag +USER_MEMORY_PATTERN = re.compile( + r"(.*?)", re.DOTALL +) + + +def parse_user_memory(text: str) -> Optional[UserMemoryContent]: + """Parse user memory input tag from text. + + User memory input contains context that the user has provided from + their CLAUDE.md or other memory sources. + + Args: + text: Raw text that may contain user memory input tag + + Returns: + UserMemoryContent if tag found, None otherwise + """ + match = USER_MEMORY_PATTERN.search(text) + if match: + memory_content = match.group(1).strip() + return UserMemoryContent(memory_text=memory_content) + return None + + +# Type alias for content models returned by parse_user_message_content +UserMessageContent = Union[CompactedSummaryContent, UserMemoryContent, UserTextContent] + + +def parse_user_message_content( + content_list: List[ContentItem], +) -> Optional[UserMessageContent]: + """Parse user message content into a structured content model. + + Returns a content model for HtmlRenderer to format. 
The caller can use + isinstance() checks to determine the content type: + - CompactedSummaryContent: Session continuation summaries + - UserMemoryContent: User memory input from CLAUDE.md + - UserTextContent: Normal user text with optional IDE notifications + + Args: + content_list: List of ContentItem from user message + + Returns: + A content model, or None if content_list is empty or has no text. + """ + # Check first text item + if not content_list or not hasattr(content_list[0], "text"): + return None + + # Check for compacted session summary first (handles text combining internally) + compacted = parse_compacted_summary(content_list) + if compacted: + return compacted + + first_text = getattr(content_list[0], "text", "") + + # Check for user memory input + user_memory = parse_user_memory(first_text) + if user_memory: + return user_memory + + # Parse IDE notifications from first text item + ide_content = parse_ide_notifications(first_text) + + # Get remaining text after IDE notifications extracted + if ide_content: + remaining_text = ide_content.remaining_text + else: + remaining_text = first_text + + # Combine remaining text with any other text items + # Use hasattr check to handle both TextContent models and SDK TextBlock objects + other_text: list[str] = [ + item.text # type: ignore[union-attr] + for item in content_list[1:] + if hasattr(item, "text") + ] + all_text = remaining_text + if other_text: + all_text = "\n\n".join([remaining_text] + other_text) + + # Return UserTextContent with optional IDE notifications + return UserTextContent(text=all_text, ide_notifications=ide_content) + + +# ============================================================================= +# Message Type Detection +# ============================================================================= + + +def is_system_message(text_content: str) -> bool: + """Check if a message is a system message that should be filtered out.""" + system_message_patterns = [ + "Caveat: The messages 
below were generated by the user while running local commands. DO NOT respond to these messages or otherwise consider them in your response unless the user explicitly asks you to.",
+        "[Request interrupted by user for tool use]",
+        "<local-command-stdout></local-command-stdout>",
+    ]
+
+    return any(text_content.startswith(pattern) for pattern in system_message_patterns)
+
+
+def is_command_message(text_content: str) -> bool:
+    """Check if a message contains command information that should be displayed."""
+    return "<command-name>" in text_content and "</command-name>" in text_content
+
+
+def is_local_command_output(text_content: str) -> bool:
+    """Check if a message contains local command output."""
+    return "<local-command-stdout>" in text_content
+
+
+def is_bash_input(text_content: str) -> bool:
+    """Check if a message contains bash input command."""
+    return "<bash-input>" in text_content and "</bash-input>" in text_content
+
+
+def is_bash_output(text_content: str) -> bool:
+    """Check if a message contains bash command output."""
+    return "<bash-stdout>" in text_content or "<bash-stderr>" in text_content
+
+
+def is_warmup_only_session(messages: List[TranscriptEntry], session_id: str) -> bool:
+    """Check if a session contains only warmup user messages.
+
+    A warmup session is one where ALL user messages are literally just "Warmup".
+    Sessions with no user messages return False (not considered warmup).
+ + Args: + messages: List of all transcript entries + session_id: The session ID to check + + Returns: + True if ALL user messages in the session are "Warmup", False otherwise + """ + user_messages_in_session: List[str] = [] + + for message in messages: + if ( + isinstance(message, UserTranscriptEntry) + and getattr(message, "sessionId", "") == session_id + and hasattr(message, "message") + ): + text_content = extract_text_content(message.message.content).strip() + user_messages_in_session.append(text_content) + + # No user messages = not a warmup session + if not user_messages_in_session: + return False + + # All user messages must be exactly "Warmup" + return all(msg == "Warmup" for msg in user_messages_in_session) + + +# ============================================================================= +# Type Guards for TranscriptEntry +# ============================================================================= + + +def is_user_entry(entry: TranscriptEntry) -> TypeGuard[UserTranscriptEntry]: + """Check if entry is a user transcript entry.""" + return entry.type == MessageType.USER + + +def is_assistant_entry(entry: TranscriptEntry) -> TypeGuard[AssistantTranscriptEntry]: + """Check if entry is an assistant transcript entry.""" + return entry.type == MessageType.ASSISTANT + + +# ============================================================================= +# Tool Input Parsing +# ============================================================================= + +TOOL_INPUT_MODELS: Dict[str, type[BaseModel]] = { + "Bash": BashInput, + "Read": ReadInput, + "Write": WriteInput, + "Edit": EditInput, + "MultiEdit": MultiEditInput, + "Glob": GlobInput, + "Grep": GrepInput, + "Task": TaskInput, + "TodoWrite": TodoWriteInput, + "AskUserQuestion": AskUserQuestionInput, + "ask_user_question": AskUserQuestionInput, # Legacy tool name + "ExitPlanMode": ExitPlanModeInput, +} + + +# -- Lenient Parsing Helpers -------------------------------------------------- +# These functions 
create typed models even when strict validation fails. +# They use defaults for missing fields and skip invalid nested items. + + +def _parse_todowrite_lenient(data: Dict[str, Any]) -> TodoWriteInput: + """Parse TodoWrite input leniently, handling malformed data.""" + todos_raw = data.get("todos", []) + valid_todos: List[TodoWriteItem] = [] + for item in todos_raw: + if isinstance(item, dict): + try: + valid_todos.append(TodoWriteItem.model_validate(item)) + except Exception: + pass + elif isinstance(item, str): + valid_todos.append(TodoWriteItem(content=item)) + return TodoWriteInput(todos=valid_todos) + + +def _parse_bash_lenient(data: Dict[str, Any]) -> BashInput: + """Parse Bash input leniently.""" + return BashInput( + command=data.get("command", ""), + description=data.get("description"), + timeout=data.get("timeout"), + run_in_background=data.get("run_in_background"), + ) + + +def _parse_write_lenient(data: Dict[str, Any]) -> WriteInput: + """Parse Write input leniently.""" + return WriteInput( + file_path=data.get("file_path", ""), + content=data.get("content", ""), + ) + + +def _parse_edit_lenient(data: Dict[str, Any]) -> EditInput: + """Parse Edit input leniently.""" + return EditInput( + file_path=data.get("file_path", ""), + old_string=data.get("old_string", ""), + new_string=data.get("new_string", ""), + replace_all=data.get("replace_all"), + ) + + +def _parse_multiedit_lenient(data: Dict[str, Any]) -> MultiEditInput: + """Parse Multiedit input leniently.""" + edits_raw = data.get("edits", []) + valid_edits: List[EditItem] = [] + for edit in edits_raw: + if isinstance(edit, dict): + try: + valid_edits.append(EditItem.model_validate(edit)) + except Exception: + pass + return MultiEditInput(file_path=data.get("file_path", ""), edits=valid_edits) + + +def _parse_task_lenient(data: Dict[str, Any]) -> TaskInput: + """Parse Task input leniently.""" + return TaskInput( + prompt=data.get("prompt", ""), + subagent_type=data.get("subagent_type", ""), + 
description=data.get("description", ""), + model=data.get("model"), + run_in_background=data.get("run_in_background"), + resume=data.get("resume"), + ) + + +def _parse_read_lenient(data: Dict[str, Any]) -> ReadInput: + """Parse Read input leniently.""" + return ReadInput( + file_path=data.get("file_path", ""), + offset=data.get("offset"), + limit=data.get("limit"), + ) + + +def _parse_askuserquestion_lenient(data: Dict[str, Any]) -> AskUserQuestionInput: + """Parse AskUserQuestion input leniently, handling malformed data.""" + questions_raw = data.get("questions", []) + valid_questions: List[AskUserQuestionItem] = [] + for q in questions_raw: + if isinstance(q, dict): + q_dict = cast(Dict[str, Any], q) + try: + # Parse options leniently + options_raw = q_dict.get("options", []) + valid_options: List[AskUserQuestionOption] = [] + for opt in options_raw: + if isinstance(opt, dict): + try: + valid_options.append( + AskUserQuestionOption.model_validate(opt) + ) + except Exception: + pass + valid_questions.append( + AskUserQuestionItem( + question=str(q_dict.get("question", "")), + header=q_dict.get("header"), + options=valid_options, + multiSelect=bool(q_dict.get("multiSelect", False)), + ) ) - agent_messages_map[agent_id] = agent_messages - - # Insert agent messages at their point of use - if agent_messages_map: - # Iterate through messages and insert agent messages after the message - # that references them (via UserTranscriptEntry.agentId) - result_messages: List[TranscriptEntry] = [] - for message in messages: - result_messages.append(message) - - # Check if this is a UserTranscriptEntry with agentId - if isinstance(message, UserTranscriptEntry) and message.agentId: - agent_id = message.agentId - if agent_id in agent_messages_map: - # Insert agent messages right after this message - result_messages.extend(agent_messages_map[agent_id]) - - messages = result_messages - - # Save to cache if cache manager is available - if cache_manager is not None: - 
cache_manager.save_cached_entries(jsonl_path, messages) - - return messages - - -def load_directory_transcripts( - directory_path: Path, - cache_manager: Optional["CacheManager"] = None, - from_date: Optional[str] = None, - to_date: Optional[str] = None, - silent: bool = False, -) -> List[TranscriptEntry]: - """Load all JSONL transcript files from a directory and combine them.""" - all_messages: List[TranscriptEntry] = [] - - # Find all .jsonl files - jsonl_files = list(directory_path.glob("*.jsonl")) - - for jsonl_file in jsonl_files: - messages = load_transcript( - jsonl_file, cache_manager, from_date, to_date, silent - ) - all_messages.extend(messages) - - # Sort all messages chronologically - def get_timestamp(entry: TranscriptEntry) -> str: - if hasattr(entry, "timestamp"): - return entry.timestamp # type: ignore - return "" + except Exception: + pass + return AskUserQuestionInput( + questions=valid_questions, + question=data.get("question"), + ) + + +def _parse_exitplanmode_lenient(data: Dict[str, Any]) -> ExitPlanModeInput: + """Parse ExitPlanMode input leniently.""" + return ExitPlanModeInput( + plan=data.get("plan", ""), + launchSwarm=data.get("launchSwarm"), + teammateCount=data.get("teammateCount"), + ) + + +# Mapping of tool names to their lenient parsers +TOOL_LENIENT_PARSERS: Dict[str, Any] = { + "Bash": _parse_bash_lenient, + "Write": _parse_write_lenient, + "Edit": _parse_edit_lenient, + "MultiEdit": _parse_multiedit_lenient, + "Task": _parse_task_lenient, + "TodoWrite": _parse_todowrite_lenient, + "Read": _parse_read_lenient, + "AskUserQuestion": _parse_askuserquestion_lenient, + "ask_user_question": _parse_askuserquestion_lenient, # Legacy tool name + "ExitPlanMode": _parse_exitplanmode_lenient, +} + + +def parse_tool_input(tool_name: str, input_data: Dict[str, Any]) -> ToolInput: + """Parse tool input dictionary into a typed model. + + Uses strict validation first, then lenient parsing if available. 
+ + Args: + tool_name: The name of the tool (e.g., "Bash", "Read") + input_data: The raw input dictionary from the tool_use content + + Returns: + A typed input model if available, otherwise the original dictionary + """ + model_class = TOOL_INPUT_MODELS.get(tool_name) + if model_class is not None: + try: + return cast(ToolInput, model_class.model_validate(input_data)) + except Exception: + # Try lenient parsing if available + lenient_parser = TOOL_LENIENT_PARSERS.get(tool_name) + if lenient_parser is not None: + return cast(ToolInput, lenient_parser(input_data)) + return input_data + return input_data + + +# ============================================================================= +# Usage Info Normalization +# ============================================================================= + + +def normalize_usage_info(usage_data: Any) -> Optional[UsageInfo]: + """Normalize usage data to be compatible with both custom and Anthropic formats.""" + if usage_data is None: + return None + + # If it's already a UsageInfo instance, return as-is + if isinstance(usage_data, UsageInfo): + return usage_data + + # If it's an Anthropic Usage instance, convert using our method + if isinstance(usage_data, AnthropicUsage): + return UsageInfo.from_anthropic_usage(usage_data) + + # If it has the shape of an Anthropic Usage, try to construct it first + if hasattr(usage_data, "input_tokens") and hasattr(usage_data, "output_tokens"): + try: + # Try to create an Anthropic Usage first + anthropic_usage = AnthropicUsage.model_validate(usage_data) + return UsageInfo.from_anthropic_usage(anthropic_usage) + except Exception: + # Fall back to direct conversion + return UsageInfo( + input_tokens=getattr(usage_data, "input_tokens", None), + cache_creation_input_tokens=getattr( + usage_data, "cache_creation_input_tokens", None + ), + cache_read_input_tokens=getattr( + usage_data, "cache_read_input_tokens", None + ), + output_tokens=getattr(usage_data, "output_tokens", None), + 
service_tier=getattr(usage_data, "service_tier", None), + server_tool_use=getattr(usage_data, "server_tool_use", None), + ) + + # If it's a dict, validate and convert to our format + if isinstance(usage_data, dict): + return UsageInfo.model_validate(usage_data) + + return None + + +# ============================================================================= +# Content Item Parsing +# ============================================================================= +# Functions to parse content items from JSONL data. Organized by entry type +# to clarify which content types can appear in which context. + + +def _parse_text_content(item_data: Dict[str, Any]) -> ContentItem: + """Parse text content, trying Anthropic types first. + + Common to both user and assistant messages. + """ + try: + return TextBlock.model_validate(item_data) + except Exception: + return TextContent.model_validate(item_data) + + +def parse_user_content_item(item_data: Dict[str, Any]) -> ContentItem: + """Parse a content item from a UserTranscriptEntry. + + User messages can contain: + - text: User-typed text + - tool_result: Results from tool execution + - image: User-attached images + """ + try: + content_type = item_data.get("type", "") + + if content_type == "text": + return _parse_text_content(item_data) + elif content_type == "tool_result": + return ToolResultContent.model_validate(item_data) + elif content_type == "image": + return ImageContent.model_validate(item_data) + else: + # Fallback to text content for unknown types + return TextContent(type="text", text=str(item_data)) + except Exception: + return TextContent(type="text", text=str(item_data)) + + +def parse_assistant_content_item(item_data: Dict[str, Any]) -> ContentItem: + """Parse a content item from an AssistantTranscriptEntry. 
- all_messages.sort(key=get_timestamp) - return all_messages + Assistant messages can contain: + - text: Assistant's response text + - tool_use: Tool invocations + - thinking: Extended thinking blocks + """ + try: + content_type = item_data.get("type", "") + + if content_type == "text": + return _parse_text_content(item_data) + elif content_type == "tool_use": + try: + from anthropic.types.tool_use_block import ToolUseBlock + + return ToolUseBlock.model_validate(item_data) + except Exception: + return ToolUseContent.model_validate(item_data) + elif content_type == "thinking": + try: + from anthropic.types.thinking_block import ThinkingBlock + + return ThinkingBlock.model_validate(item_data) + except Exception: + return ThinkingContent.model_validate(item_data) + else: + # Fallback to text content for unknown types + return TextContent(type="text", text=str(item_data)) + except Exception: + return TextContent(type="text", text=str(item_data)) + + +def parse_content_item(item_data: Dict[str, Any]) -> ContentItem: + """Parse a content item (generic fallback). + + For cases where the entry type is unknown. Handles all content types. + Prefer parse_user_content_item or parse_assistant_content_item when + the entry type is known. 
+ """ + try: + content_type = item_data.get("type", "") + + # User-specific content types + if content_type == "tool_result": + return ToolResultContent.model_validate(item_data) + elif content_type == "image": + return ImageContent.model_validate(item_data) + + # Assistant-specific content types + elif content_type == "tool_use": + try: + from anthropic.types.tool_use_block import ToolUseBlock + + return ToolUseBlock.model_validate(item_data) + except Exception: + return ToolUseContent.model_validate(item_data) + elif content_type == "thinking": + try: + from anthropic.types.thinking_block import ThinkingBlock + + return ThinkingBlock.model_validate(item_data) + except Exception: + return ThinkingContent.model_validate(item_data) + + # Common content types + elif content_type == "text": + return _parse_text_content(item_data) + else: + # Fallback to text content for unknown types + return TextContent(type="text", text=str(item_data)) + except Exception: + return TextContent(type="text", text=str(item_data)) + + +def parse_message_content( + content_data: Any, + item_parser: Callable[[Dict[str, Any]], ContentItem] = parse_content_item, +) -> Union[str, List[ContentItem]]: + """Parse message content, handling both string and list formats. + + Args: + content_data: Raw content data (string or list of items) + item_parser: Function to parse individual content items. Defaults to + generic parse_content_item, but can be parse_user_content_item or + parse_assistant_content_item for type-specific parsing. 
+ """ + if isinstance(content_data, str): + return content_data + elif isinstance(content_data, list): + content_list = cast(List[Dict[str, Any]], content_data) + return [item_parser(item) for item in content_list] + else: + return str(content_data) + + +# ============================================================================= +# Transcript Entry Parsing +# ============================================================================= + + +def parse_transcript_entry(data: Dict[str, Any]) -> TranscriptEntry: + """ + Parse a JSON dictionary into the appropriate TranscriptEntry type. + + Enhanced to optionally use official Anthropic types for assistant messages. + + Args: + data: Dictionary parsed from JSON + + Returns: + The appropriate TranscriptEntry subclass + + Raises: + ValueError: If the data doesn't match any known transcript entry type + """ + entry_type = data.get("type") + + if entry_type == "user": + # Parse message content if present, using user-specific parser + data_copy = data.copy() + if "message" in data_copy and "content" in data_copy["message"]: + data_copy["message"] = data_copy["message"].copy() + data_copy["message"]["content"] = parse_message_content( + data_copy["message"]["content"], + item_parser=parse_user_content_item, + ) + # Parse toolUseResult if present and it's a list of content items + if "toolUseResult" in data_copy and isinstance( + data_copy["toolUseResult"], list + ): + # Check if it's a list of content items (MCP tool results) + tool_use_result = cast(List[Any], data_copy["toolUseResult"]) + if ( + tool_use_result + and isinstance(tool_use_result[0], dict) + and "type" in tool_use_result[0] + ): + data_copy["toolUseResult"] = [ + parse_content_item(cast(Dict[str, Any], item)) + for item in tool_use_result + if isinstance(item, dict) + ] + return UserTranscriptEntry.model_validate(data_copy) + + elif entry_type == "assistant": + # Enhanced assistant message parsing with optional Anthropic types + data_copy = data.copy() + + 
# Validate compatibility with official Anthropic Message type + if "message" in data_copy: + try: + message_data = data_copy["message"] + AnthropicMessage.model_validate(message_data) + # Successfully validated - our data is compatible with official Anthropic types + except Exception: + # Validation failed - continue with standard parsing + pass + + # Standard parsing path using assistant-specific parser + if "message" in data_copy and "content" in data_copy["message"]: + message_copy = data_copy["message"].copy() + message_copy["content"] = parse_message_content( + message_copy["content"], + item_parser=parse_assistant_content_item, + ) + + # Normalize usage data to support both Anthropic and custom formats + if "usage" in message_copy: + message_copy["usage"] = normalize_usage_info(message_copy["usage"]) + + data_copy["message"] = message_copy + return AssistantTranscriptEntry.model_validate(data_copy) + + elif entry_type == "summary": + return SummaryTranscriptEntry.model_validate(data) + + elif entry_type == "system": + return SystemTranscriptEntry.model_validate(data) + + elif entry_type == "queue-operation": + # Parse content if present (in enqueue and remove operations) + data_copy = data.copy() + if "content" in data_copy and isinstance(data_copy["content"], list): + data_copy["content"] = parse_message_content(data_copy["content"]) + return QueueOperationTranscriptEntry.model_validate(data_copy) + + else: + raise ValueError(f"Unknown transcript entry type: {entry_type}") diff --git a/claude_code_log/renderer.py b/claude_code_log/renderer.py index 6ea537a0..5b375380 100644 --- a/claude_code_log/renderer.py +++ b/claude_code_log/renderer.py @@ -1,1729 +1,88 @@ #!/usr/bin/env python3 """Render Claude transcript data to HTML format.""" -import json -import os -import re import time +from dataclasses import dataclass, replace from pathlib import Path -from typing import List, Optional, Dict, Any, cast, TYPE_CHECKING +from typing import Any, Dict, List, 
Optional, Tuple, TYPE_CHECKING if TYPE_CHECKING: from .cache import CacheManager + from .models import MessageContent from datetime import datetime -import html -import mistune -from jinja2 import Environment, FileSystemLoader, select_autoescape -from pygments import highlight # type: ignore[reportUnknownVariableType] -from pygments.lexers import TextLexer # type: ignore[reportUnknownVariableType] -from pygments.formatters import HtmlFormatter # type: ignore[reportUnknownVariableType] -from pygments.util import ClassNotFound # type: ignore[reportUnknownVariableType] from .models import ( + MessageModifiers, + MessageType, TranscriptEntry, AssistantTranscriptEntry, - UserTranscriptEntry, SystemTranscriptEntry, SummaryTranscriptEntry, QueueOperationTranscriptEntry, ContentItem, TextContent, ToolResultContent, + ToolResultContentModel, ToolUseContent, ThinkingContent, + ThinkingContentModel, ImageContent, + # Structured content types + AssistantTextContent, + CompactedSummaryContent, + DedupNoticeContent, + HookInfo, + HookSummaryContent, + SessionHeaderContent, + SystemContent, + UnknownContent, + UserMemoryContent, + UserTextContent, +) +from .parser import ( + extract_text_content, + is_assistant_entry, + is_bash_input, + is_bash_output, + is_command_message, + is_local_command_output, + is_user_entry, +) +from .utils import ( + format_timestamp, + format_timestamp_range, + get_project_display_name, + should_skip_message, + should_use_as_session_starter, + create_session_preview, +) +from .renderer_timings import ( + DEBUG_TIMING, + report_timing_statistics, + set_timing_var, + log_timing, ) -from .parser import extract_text_content -from .utils import ( - is_command_message, - is_local_command_output, - is_bash_input, - is_bash_output, - should_skip_message, - should_use_as_session_starter, - create_session_preview, -) -from .renderer_timings import ( - DEBUG_TIMING, - report_timing_statistics, - timing_stat, - set_timing_var, - log_timing, -) -from .cache import 
get_library_version - - -def starts_with_emoji(text: str) -> bool: - """Check if a string starts with an emoji character. - - Checks common emoji Unicode ranges: - - Emoticons: U+1F600 - U+1F64F - - Misc Symbols and Pictographs: U+1F300 - U+1F5FF - - Transport and Map Symbols: U+1F680 - U+1F6FF - - Supplemental Symbols: U+1F900 - U+1F9FF - - Misc Symbols: U+2600 - U+26FF - - Dingbats: U+2700 - U+27BF - """ - if not text: - return False - - first_char = text[0] - code_point = ord(first_char) - - return ( - 0x1F600 <= code_point <= 0x1F64F # Emoticons - or 0x1F300 <= code_point <= 0x1F5FF # Misc Symbols and Pictographs - or 0x1F680 <= code_point <= 0x1F6FF # Transport and Map Symbols - or 0x1F900 <= code_point <= 0x1F9FF # Supplemental Symbols - or 0x2600 <= code_point <= 0x26FF # Misc Symbols - or 0x2700 <= code_point <= 0x27BF # Dingbats - ) - - -def get_project_display_name( - project_dir_name: str, working_directories: Optional[List[str]] = None -) -> str: - """Get the display name for a project based on working directories. 
- - Args: - project_dir_name: The Claude project directory name (e.g., "-Users-dain-workspace-claude-code-log") - working_directories: List of working directories from cache data - - Returns: - The project display name (e.g., "claude-code-log") - """ - if working_directories: - # Convert to Path objects with their original indices for tracking recency - paths_with_indices = [(Path(wd), i) for i, wd in enumerate(working_directories)] - - # Sort by: 1) path depth (fewer parts = less nested), 2) recency (lower index = more recent) - # This gives us the least nested path, with ties broken by recency - best_path, _ = min(paths_with_indices, key=lambda p: (len(p[0].parts), p[1])) - return best_path.name - else: - # Fall back to converting project directory name - display_name = project_dir_name - if display_name.startswith("-"): - display_name = display_name[1:].replace("-", "/") - return display_name - - -def check_html_version(html_file_path: Path) -> Optional[str]: - """Check the version of an existing HTML file from its comment. - - Returns: - The version string if found, None if no version comment or file doesn't exist. - """ - if not html_file_path.exists(): - return None - - try: - with open(html_file_path, "r", encoding="utf-8") as f: - # Read only the first few lines to find the version comment - for _ in range(5): # Check first 5 lines - line = f.readline() - if not line: - break - # Look for comment like: - if "' - start = line.find("v") + 1 - end = line.find(" -->") - if start > 0 and end > start: - return line[start:end] - except (IOError, UnicodeDecodeError): - pass - - return None - - -def is_html_outdated(html_file_path: Path) -> bool: - """Check if an HTML file is outdated based on its version comment. - - Returns: - True if the file should be regenerated (missing version, different version, or file doesn't exist). - False if the file is current. 
- """ - html_version = check_html_version(html_file_path) - current_version = get_library_version() - - # If no version found or different version, it's outdated - return html_version != current_version - - -def format_timestamp(timestamp_str: str | None) -> str: - """Format ISO timestamp for display, converting to UTC.""" - if timestamp_str is None: - return "" - try: - dt = datetime.fromisoformat(timestamp_str.replace("Z", "+00:00")) - # Convert to UTC if timezone-aware - if dt.tzinfo is not None: - utc_timetuple = dt.utctimetuple() - dt = datetime( - utc_timetuple.tm_year, - utc_timetuple.tm_mon, - utc_timetuple.tm_mday, - utc_timetuple.tm_hour, - utc_timetuple.tm_min, - utc_timetuple.tm_sec, - ) - return dt.strftime("%Y-%m-%d %H:%M:%S") - except (ValueError, AttributeError): - return timestamp_str - - -def escape_html(text: str) -> str: - """Escape HTML special characters in text. - - Also normalizes line endings (CRLF -> LF) to prevent double spacing in
 blocks.
-    """
-    # Normalize CRLF to LF to prevent double line breaks in HTML
-    normalized = text.replace("\r\n", "\n").replace("\r", "\n")
-    return html.escape(normalized)
-
-
-def _create_pygments_plugin() -> Any:
-    """Create a mistune plugin that uses Pygments for code block syntax highlighting."""
-    from pygments import highlight  # type: ignore[reportUnknownVariableType]
-    from pygments.lexers import get_lexer_by_name, TextLexer  # type: ignore[reportUnknownVariableType]
-    from pygments.formatters import HtmlFormatter  # type: ignore[reportUnknownVariableType]
-    from pygments.util import ClassNotFound  # type: ignore[reportUnknownVariableType]
-
-    def plugin_pygments(md: Any) -> None:
-        """Plugin to add Pygments syntax highlighting to code blocks."""
-        original_render = md.renderer.block_code
-
-        def block_code(code: str, info: Optional[str] = None) -> str:
-            """Render code block with Pygments syntax highlighting if language is specified."""
-            if info:
-                # Language hint provided, use Pygments
-                lang = info.split()[0] if info else ""
-                try:
-                    lexer = get_lexer_by_name(lang, stripall=True)  # type: ignore[reportUnknownVariableType]
-                except ClassNotFound:
-                    lexer = TextLexer()  # type: ignore[reportUnknownVariableType]
-
-                formatter = HtmlFormatter(  # type: ignore[reportUnknownVariableType]
-                    linenos=False,  # No line numbers in markdown code blocks
-                    cssclass="highlight",
-                    wrapcode=True,
-                )
-                # Track Pygments timing if enabled
-                with timing_stat("_pygments_timings"):
-                    return str(highlight(code, lexer, formatter))  # type: ignore[reportUnknownArgumentType]
-            else:
-                # No language hint, use default rendering
-                return original_render(code, info)
-
-        md.renderer.block_code = block_code
-
-    return plugin_pygments
-
-
-def render_markdown(text: str) -> str:
-    """Convert markdown text to HTML using mistune with Pygments syntax highlighting."""
-    # Track markdown rendering time if enabled
-    with timing_stat("_markdown_timings"):
-        # Configure mistune with GitHub-flavored markdown features
-        renderer = mistune.create_markdown(
-            plugins=[
-                "strikethrough",
-                "footnotes",
-                "table",
-                "url",
-                "task_lists",
-                "def_list",
-                _create_pygments_plugin(),
-            ],
-            escape=False,  # Don't escape HTML since we want to render markdown properly
-            hard_wrap=True,  # Line break for newlines (checklists in Assistant messages)
-        )
-        return str(renderer(text))
-
-
-def render_collapsible_code(
-    preview_html: str,
-    full_html: str,
-    line_count: int,
-    is_markdown: bool = False,
-) -> str:
-    """Render a collapsible code/content block with preview.
-
-    Creates a details element with a line count badge and preview content
-    that expands to show the full content.
-
-    Args:
-        preview_html: HTML content to show in the collapsed summary
-        full_html: HTML content to show when expanded
-        line_count: Number of lines (shown in the badge)
-        is_markdown: If True, adds 'markdown' class to preview and full content divs
-
-    Returns:
-        HTML string with collapsible details element
-    """
-    markdown_class = " markdown" if is_markdown else ""
-    return f"""
- - {line_count} lines -
{preview_html}
-
-
{full_html}
-
""" - - -def render_markdown_collapsible( - raw_content: str, - css_class: str, - line_threshold: int = 20, - preview_line_count: int = 5, -) -> str: - """Render markdown content, making it collapsible if it exceeds a line threshold. - - For long content, creates a collapsible details element with a preview. - For short content, renders inline with the specified CSS class. - - Args: - raw_content: The raw text content to render as markdown - css_class: CSS class for the wrapper div (e.g., "task-prompt", "task-result") - line_threshold: Number of lines above which content becomes collapsible (default 20) - preview_line_count: Number of lines to show in the preview (default 5) - - Returns: - HTML string with rendered markdown, optionally wrapped in collapsible details - """ - rendered_html = render_markdown(raw_content) - - lines = raw_content.splitlines() - if len(lines) <= line_threshold: - # Short content, show inline - return f'
{rendered_html}
' - - # Long content - make collapsible with rendered preview - preview_lines = lines[:preview_line_count] - preview_text = "\n".join(preview_lines) - if len(lines) > preview_line_count: - preview_text += "\n\n..." - # Render truncated markdown (produces valid HTML with proper tag closure) - preview_html = render_markdown(preview_text) - - collapsible = render_collapsible_code( - preview_html, rendered_html, len(lines), is_markdown=True - ) - return f'
{collapsible}
' - - -def render_file_content_collapsible( - code_content: str, - file_path: str, - css_class: str, - linenostart: int = 1, - line_threshold: int = 12, - preview_line_count: int = 5, - suffix_html: str = "", -) -> str: - """Render file content with syntax highlighting, collapsible if long. - - Highlights code using Pygments and wraps in a collapsible details element - if the content exceeds the line threshold. Uses preview truncation from - already-highlighted HTML to avoid double Pygments calls. - - Args: - code_content: The raw code content to highlight - file_path: File path for syntax detection (extension-based) - css_class: CSS class for the wrapper div (e.g., 'write-tool-content') - linenostart: Starting line number for Pygments (default 1) - line_threshold: Number of lines above which content becomes collapsible - preview_line_count: Number of lines to show in the preview - suffix_html: Optional HTML to append after the code (inside wrapper div) - - Returns: - HTML string with highlighted code, collapsible if >line_threshold lines - """ - # Highlight code with Pygments (single call) - highlighted_html = _highlight_code_with_pygments( - code_content, file_path, linenostart=linenostart - ) - - html_parts = [f"
"] - - lines = code_content.split("\n") - if len(lines) > line_threshold: - # Extract preview from already-highlighted HTML (avoids double highlighting) - preview_html = _truncate_highlighted_preview( - highlighted_html, preview_line_count - ) - html_parts.append( - render_collapsible_code(preview_html, highlighted_html, len(lines)) - ) - else: - # Show directly without collapsible - html_parts.append(highlighted_html) - - if suffix_html: - html_parts.append(suffix_html) - - html_parts.append("
") - return "".join(html_parts) - - -def extract_command_info(text_content: str) -> tuple[str, str, str]: - """Extract command info from system message with command tags.""" - import re - - # Extract command name - command_name_match = re.search( - r"([^<]+)", text_content - ) - command_name = ( - command_name_match.group(1).strip() if command_name_match else "system" - ) - - # Extract command args - command_args_match = re.search( - r"([^<]*)", text_content - ) - command_args = command_args_match.group(1).strip() if command_args_match else "" - - # Extract command contents - command_contents_match = re.search( - r"(.+?)", text_content, re.DOTALL - ) - command_contents: str = "" - if command_contents_match: - contents_text = command_contents_match.group(1).strip() - # Try to parse as JSON and extract the text field - try: - contents_json: Any = json.loads(contents_text) - if isinstance(contents_json, dict) and "text" in contents_json: - text_dict = cast(Dict[str, Any], contents_json) - text_value = text_dict["text"] - command_contents = str(text_value) - else: - command_contents = contents_text - except json.JSONDecodeError: - command_contents = contents_text - - return command_name, command_args, command_contents - - -def format_askuserquestion_content(tool_use: ToolUseContent) -> str: - """Format AskUserQuestion tool use content with prominent question display. - - Handles multiple questions in a single tool use, each with optional header, - options (with label and description), and multiSelect flag. - """ - questions_data = tool_use.input.get("questions", []) - # Also handle single question format for backwards compatibility - if not questions_data: - single_question = tool_use.input.get("question", "") - if single_question: - questions_data = [{"question": single_question}] - - if not questions_data: - return render_params_table(tool_use.input) - - # Build HTML for all questions - html_parts: List[str] = ['
'] - - for q_data in questions_data: - try: - question_text = escape_html(str(q_data.get("question", ""))) - header = q_data.get("header", "") - options = q_data.get("options", []) - multi_select = q_data.get("multiSelect", False) - - # Question container - html_parts.append('
') - - # Header (if present) - if header: - escaped_header = escape_html(str(header)) - html_parts.append( - f'
{escaped_header}
' - ) - - # Question text with icon - html_parts.append(f'
❓ {question_text}
') - - # Options (if present) - if options: - select_hint = "(select multiple)" if multi_select else "(select one)" - html_parts.append( - f'
{select_hint}
' - ) - html_parts.append('
    ') - for opt in options: - label = escape_html(str(opt.get("label", ""))) - desc = opt.get("description", "") - if desc: - desc_html = f' — {escape_html(str(desc))}' - else: - desc_html = "" - html_parts.append( - f'
  • {label}{desc_html}
  • ' - ) - html_parts.append("
") - - html_parts.append("
") # Close question-block - except (AttributeError, TypeError): - # Fallback for unexpected format - html_parts.append( - f'
❓ {escape_html(str(q_data))}
' - ) - - html_parts.append("
") # Close askuserquestion-content - return "".join(html_parts) - - -def format_askuserquestion_result(content: str) -> str: - """Format AskUserQuestion tool result with styled question/answer pairs. - - Parses the result format: - 'User has answered your questions: "Q1"="A1", "Q2"="A2". You can now continue...' - - Returns HTML with styled Q&A blocks matching the input styling. - """ - import re - - # Check if this is a successful answer - if not content.startswith("User has answered your question"): - # Return as-is for errors or unexpected format - return "" - - # Extract the Q&A portion between the colon and the final sentence - # Pattern: 'User has answered your questions: "Q"="A", "Q"="A". You can now...' - match = re.match( - r"User has answered your questions?: (.+)\. You can now continue", - content, - re.DOTALL, - ) - if not match: - return "" - - qa_portion = match.group(1) - - # Parse "Question"="Answer" pairs - # Pattern: "question text"="answer text" - qa_pattern = re.compile(r'"([^"]+)"="([^"]+)"') - pairs = qa_pattern.findall(qa_portion) - - if not pairs: - return "" - - # Build styled HTML - html_parts: List[str] = [ - '
' - ] - - for question, answer in pairs: - escaped_q = escape_html(question) - escaped_a = escape_html(answer) - html_parts.append('
') - html_parts.append(f'
❓ {escaped_q}
') - html_parts.append(f'
✅ {escaped_a}
') - html_parts.append("
") - - html_parts.append("
") - return "".join(html_parts) - - -def format_exitplanmode_content(tool_use: ToolUseContent) -> str: - """Format ExitPlanMode tool use content with collapsible plan markdown. - - Renders the plan markdown in a collapsible section, similar to Task tool results. - """ - plan = tool_use.input.get("plan", "") - - if not plan: - # No plan, show parameters table as fallback - return render_params_table(tool_use.input) - - return render_markdown_collapsible(plan, "plan-content") - - -def format_exitplanmode_result(content: str) -> str: - """Format ExitPlanMode tool result, truncating the redundant plan echo. - - When a plan is approved, the result contains: - 1. A confirmation message - 2. Path to saved plan file - 3. "## Approved Plan:" followed by full plan text (redundant) - - We truncate everything after "## Approved Plan:" to avoid duplication. - For error results (plan not approved), we keep the full content. - """ - # Check if this is a successful approval - if "User has approved your plan" in content: - # Truncate at "## Approved Plan:" - marker = "## Approved Plan:" - marker_pos = content.find(marker) - if marker_pos > 0: - # Keep everything before the marker, strip trailing whitespace - return content[:marker_pos].rstrip() - - # For errors or other cases, return as-is - return content - - -def format_todowrite_content(tool_use: ToolUseContent) -> str: - """Format TodoWrite tool use content as a todo list.""" - # Parse todos from input - todos_data = tool_use.input.get("todos", []) - if not todos_data: - return """ -
-

No todos found

-
- """ - - # Status emojis - status_emojis = {"pending": "⏳", "in_progress": "🔄", "completed": "✅"} - - # Build todo list HTML - todo_items: List[str] = [] - for todo in todos_data: - try: - todo_id = escape_html(str(todo.get("id", ""))) - content = escape_html(str(todo.get("content", ""))) - status = str(todo.get("status", "pending")).lower() - priority = str(todo.get("priority", "medium")).lower() - status_emoji = status_emojis.get(status, "⏳") - - # CSS class for styling - item_class = f"todo-item {status} {priority}" - - todo_items.append(f""" -
- {status_emoji} - {content} - #{todo_id} -
- """) - except AttributeError: - escaped_fallback = escape_html(str(todo)) - todo_items.append(f""" -
- - {escaped_fallback} -
- """) - - todos_html = "".join(todo_items) - - return f""" -
- {todos_html} -
- """ - - -def _highlight_code_with_pygments( - code: str, file_path: str, show_linenos: bool = True, linenostart: int = 1 -) -> str: - """Highlight code using Pygments with appropriate lexer based on file path. - - Args: - code: The source code to highlight - file_path: Path to determine the appropriate lexer - show_linenos: Whether to show line numbers (default: True) - linenostart: Starting line number for display (default: 1) - - Returns: - HTML string with syntax-highlighted code - """ - # PERFORMANCE FIX: Use Pygments' public API to build filename pattern mapping, avoiding filesystem I/O - # get_lexer_for_filename performs I/O operations (file existence checks, reading bytes) - # which causes severe slowdowns, especially on Windows with antivirus scanning - # Solution: Build a reverse mapping from filename patterns to lexer aliases using get_all_lexers() (done once) - import fnmatch - from pygments.lexers import get_lexer_by_name, get_all_lexers # type: ignore[reportUnknownVariableType] - - # Build pattern->alias mapping on first call (cached as function attribute) - # OPTIMIZATION: Create both direct extension lookup and full pattern cache - if not hasattr(_highlight_code_with_pygments, "_pattern_cache"): - pattern_cache: dict[str, str] = {} - extension_cache: dict[str, str] = {} # Fast lookup for simple *.ext patterns - - # Use public API: get_all_lexers() returns (name, aliases, patterns, mimetypes) tuples - for name, aliases, patterns, mimetypes in get_all_lexers(): # type: ignore[reportUnknownVariableType] - if aliases and patterns: - # Use first alias as the lexer name - lexer_alias = aliases[0] - # Map each filename pattern to this lexer alias - for pattern in patterns: - pattern_lower = pattern.lower() - pattern_cache[pattern_lower] = lexer_alias - # Extract simple extension patterns (*.ext) for fast lookup - if ( - pattern_lower.startswith("*.") - and "*" not in pattern_lower[2:] - and "?" 
not in pattern_lower[2:] - ): - ext = pattern_lower[2:] # Remove "*." - # Prefer first match for each extension - if ext not in extension_cache: - extension_cache[ext] = lexer_alias - - _highlight_code_with_pygments._pattern_cache = pattern_cache # type: ignore[attr-defined] - _highlight_code_with_pygments._extension_cache = extension_cache # type: ignore[attr-defined] - - # Get basename for matching (patterns are like "*.py") - basename = os.path.basename(file_path).lower() - - try: - # Get caches - pattern_cache = _highlight_code_with_pygments._pattern_cache # type: ignore[attr-defined] - extension_cache = _highlight_code_with_pygments._extension_cache # type: ignore[attr-defined] - - # OPTIMIZATION: Try fast extension lookup first (O(1) dict lookup) - lexer_alias = None - if "." in basename: - ext = basename.split(".")[-1] # Get last extension (handles .tar.gz, etc.) - lexer_alias = extension_cache.get(ext) - - # Fall back to pattern matching only if extension lookup failed - if lexer_alias is None: - for pattern, lex_alias in pattern_cache.items(): - if fnmatch.fnmatch(basename, pattern): - lexer_alias = lex_alias - break - - # Get lexer or use TextLexer as fallback - # Note: stripall=False preserves leading whitespace (important for code indentation) - if lexer_alias: - lexer = get_lexer_by_name(lexer_alias, stripall=False) # type: ignore[reportUnknownVariableType] - else: - lexer = TextLexer() # type: ignore[reportUnknownVariableType] - except ClassNotFound: - # Fall back to plain text lexer - lexer = TextLexer() # type: ignore[reportUnknownVariableType] - - # Create formatter with line numbers in table format - formatter = HtmlFormatter( # type: ignore[reportUnknownVariableType] - linenos="table" if show_linenos else False, - cssclass="highlight", - wrapcode=True, - linenostart=linenostart, - ) - - # Highlight the code with timing if enabled - with timing_stat("_pygments_timings"): - return str(highlight(code, lexer, formatter)) # type: 
ignore[reportUnknownArgumentType] - - -def _truncate_highlighted_preview(highlighted_html: str, max_lines: int) -> str: - """Truncate Pygments highlighted HTML to first N lines. - - HtmlFormatter(linenos="table") produces a single with two s: -
LINE_NUMS
-
CODE
- - We truncate content within each
 tag to the first max_lines lines.
-
-    Args:
-        highlighted_html: Full Pygments-highlighted HTML
-        max_lines: Maximum number of lines to include in preview
-
-    Returns:
-        Truncated HTML with same structure but fewer lines
-    """
-
-    def truncate_pre_content(match: re.Match[str]) -> str:
-        """Truncate content inside a 
 tag to max_lines."""
-        prefix, content, suffix = match.groups()
-        lines = content.split("\n")
-        truncated = "\n".join(lines[:max_lines])
-        return prefix + truncated + suffix
-
-    # Truncate linenos 
 content (line numbers separated by newlines)
-    result = re.sub(
-        r'(
)(.*?)(
)', - truncate_pre_content, - highlighted_html, - flags=re.DOTALL, - ) - - # Truncate code
 content
-    result = re.sub(
-        r'(
]*>)(.*?)(
)', - truncate_pre_content, - result, - flags=re.DOTALL, - ) - - return result - - -def format_read_tool_content(tool_use: ToolUseContent) -> str: # noqa: ARG001 - """Format Read tool use content showing file path. - - Note: File path is now shown in the header, so we skip content here. - """ - # File path is now shown in header, so no content needed - # Don't show offset/limit parameters as they'll be visible in the result - return "" - - -def format_write_tool_content(tool_use: ToolUseContent) -> str: - """Format Write tool use content with Pygments syntax highlighting. - - Note: File path is now shown in the header, so we skip it here. - """ - file_path = tool_use.input.get("file_path", "") - content = tool_use.input.get("content", "") - - return render_file_content_collapsible(content, file_path, "write-tool-content") - - -def format_bash_tool_content(tool_use: ToolUseContent) -> str: - """Format Bash tool use content in VS Code extension style. - - Note: Description is now shown in the header, so we skip it here. - """ - command = tool_use.input.get("command", "") - - escaped_command = escape_html(command) - - html_parts = ["
"] - - # Description is now shown in header, so we skip it here - - # Add command in preformatted block - html_parts.append(f"
{escaped_command}
") - html_parts.append("
") - - return "".join(html_parts) - - -def render_params_table(params: Dict[str, Any]) -> str: - """Render a dictionary of parameters as an HTML table. - - Reusable for tool parameters, diagnostic objects, etc. - """ - if not params: - return "
No parameters
" - - html_parts = [""] - - for key, value in params.items(): - escaped_key = escape_html(str(key)) - - # If value is structured (dict/list), render as JSON - if isinstance(value, (dict, list)): - try: - formatted_value = json.dumps(value, indent=2, ensure_ascii=False) # type: ignore[arg-type] - escaped_value = escape_html(formatted_value) - - # Make long structured values collapsible - if len(formatted_value) > 200: - preview = escape_html(formatted_value[:100]) + "..." - value_html = f""" -
- {preview} -
{escaped_value}
-
- """ - else: - value_html = ( - f"
{escaped_value}
" - ) - except (TypeError, ValueError): - escaped_value = escape_html(str(value)) # type: ignore[arg-type] - value_html = escaped_value - else: - # Simple value, render as-is (or collapsible if long) - escaped_value = escape_html(str(value)) - - # Make long string values collapsible - if len(str(value)) > 100: - preview = escape_html(str(value)[:80]) + "..." - value_html = f""" -
- {preview} -
{escaped_value}
-
- """ - else: - value_html = escaped_value - - html_parts.append(f""" - - - - - """) - - html_parts.append("
{escaped_key}{value_html}
") - return "".join(html_parts) - - -def _render_single_diff(old_string: str, new_string: str) -> str: - """Render a single diff between old_string and new_string. - - Returns HTML for the diff view with intra-line highlighting. - """ - import difflib - - # Split into lines for diff - old_lines = old_string.splitlines(keepends=True) - new_lines = new_string.splitlines(keepends=True) - - # Generate unified diff to identify changed lines - differ = difflib.Differ() - diff: list[str] = list(differ.compare(old_lines, new_lines)) - - html_parts = ["
"] - - i = 0 - while i < len(diff): - line = diff[i] - prefix = line[0:2] - content = line[2:] - - if prefix == "- ": - # Removed line - look ahead for corresponding addition - removed_lines: list[str] = [content] - j = i + 1 - - # Collect consecutive removed lines - while j < len(diff) and diff[j].startswith("- "): - removed_lines.append(diff[j][2:]) - j += 1 - - # Skip '? ' hint lines - while j < len(diff) and diff[j].startswith("? "): - j += 1 - - # Collect consecutive added lines - added_lines: list[str] = [] - while j < len(diff) and diff[j].startswith("+ "): - added_lines.append(diff[j][2:]) - j += 1 - - # Skip '? ' hint lines - while j < len(diff) and diff[j].startswith("? "): - j += 1 - - # Generate character-level diff for paired lines - if added_lines: - for old_line, new_line in zip(removed_lines, added_lines): - html_parts.append(_render_line_diff(old_line, new_line)) - - # Handle any unpaired lines - for old_line in removed_lines[len(added_lines) :]: - escaped = escape_html(old_line.rstrip("\n")) - html_parts.append( - f"
-{escaped}
" - ) - - for new_line in added_lines[len(removed_lines) :]: - escaped = escape_html(new_line.rstrip("\n")) - html_parts.append( - f"
+{escaped}
" - ) - else: - # No corresponding addition - just removed - for old_line in removed_lines: - escaped = escape_html(old_line.rstrip("\n")) - html_parts.append( - f"
-{escaped}
" - ) - - i = j - - elif prefix == "+ ": - # Added line without corresponding removal - escaped = escape_html(content.rstrip("\n")) - html_parts.append( - f"
+{escaped}
" - ) - i += 1 - - elif prefix == "? ": - # Skip hint lines (already processed) - i += 1 - - else: - # Unchanged line - show for context - escaped = escape_html(content.rstrip("\n")) - html_parts.append( - f"
{escaped}
" - ) - i += 1 - - html_parts.append("
") - return "".join(html_parts) - - -def format_multiedit_tool_content(tool_use: ToolUseContent) -> str: - """Format Multiedit tool use content showing multiple diffs.""" - file_path = tool_use.input.get("file_path", "") - edits = tool_use.input.get("edits", []) - - escaped_path = escape_html(file_path) - - html_parts = ["
"] - - # File path header - html_parts.append(f"
📝 {escaped_path}
") - html_parts.append(f"
Applying {len(edits)} edits
") - - # Render each edit as a diff - for idx, edit in enumerate(edits, 1): - old_string = edit.get("old_string", "") - new_string = edit.get("new_string", "") - - html_parts.append( - f"
Edit #{idx}
" - ) - html_parts.append(_render_single_diff(old_string, new_string)) - html_parts.append("
") - - html_parts.append("
") - return "".join(html_parts) - - -def format_edit_tool_content(tool_use: ToolUseContent) -> str: - """Format Edit tool use content as a diff view with intra-line highlighting. - - Note: File path is now shown in the header, so we skip it here. - """ - old_string = tool_use.input.get("old_string", "") - new_string = tool_use.input.get("new_string", "") - replace_all = tool_use.input.get("replace_all", False) - - html_parts = ["
"] - - # File path is now shown in header, so we skip it here - - if replace_all: - html_parts.append( - "
🔄 Replace all occurrences
" - ) - - # Use shared diff rendering helper - html_parts.append(_render_single_diff(old_string, new_string)) - html_parts.append("
") - - return "".join(html_parts) - - -def _render_line_diff(old_line: str, new_line: str) -> str: - """Render a pair of changed lines with character-level highlighting.""" - import difflib - - # Use SequenceMatcher for character-level diff - sm = difflib.SequenceMatcher(None, old_line.rstrip("\n"), new_line.rstrip("\n")) - - # Build old line with highlighting - old_parts: list[str] = [] - old_parts.append( - "
-" - ) - for tag, i1, i2, j1, j2 in sm.get_opcodes(): - chunk = old_line[i1:i2] - if tag == "equal": - old_parts.append(escape_html(chunk)) - elif tag in ("delete", "replace"): - old_parts.append( - f"{escape_html(chunk)}" - ) - old_parts.append("
") - - # Build new line with highlighting - new_parts: list[str] = [] - new_parts.append( - "
+" - ) - for tag, i1, i2, j1, j2 in sm.get_opcodes(): - chunk = new_line[j1:j2] - if tag == "equal": - new_parts.append(escape_html(chunk)) - elif tag in ("insert", "replace"): - new_parts.append( - f"{escape_html(chunk)}" - ) - new_parts.append("
") - - return "".join(old_parts) + "".join(new_parts) - - -def format_task_tool_content(tool_use: ToolUseContent) -> str: - """Format Task tool content with markdown-rendered prompt. - - Task tool spawns sub-agents. We render the prompt as the main content. - The sidechain user message (which would duplicate this prompt) is skipped. - - For long prompts (>20 lines), the content is made collapsible with a - preview of the first few lines to keep the transcript vertically compact. - """ - prompt = tool_use.input.get("prompt", "") - - if not prompt: - # No prompt, show parameters table as fallback - return render_params_table(tool_use.input) - - return render_markdown_collapsible(prompt, "task-prompt") - - -def get_tool_summary(tool_use: ToolUseContent) -> Optional[str]: - """Extract a one-line summary from tool parameters for display in header. - - Returns a brief description or filename that can be shown in the message header - to save vertical space. - """ - tool_name = tool_use.name - params = tool_use.input - - if tool_name == "Bash": - # Return description if present - return params.get("description") - - elif tool_name in ("Read", "Edit", "Write"): - # Return file path (without icon - caller adds it) - file_path = params.get("file_path") - if file_path: - return file_path - - elif tool_name == "Task": - # Return description if present - description = params.get("description") - if description: - return description - - # No summary for other tools - return None - - -def format_tool_use_content(tool_use: ToolUseContent) -> str: - """Format tool use content as HTML.""" - # Special handling for TodoWrite - if tool_use.name == "TodoWrite": - return format_todowrite_content(tool_use) - - # Special handling for Bash - if tool_use.name == "Bash": - return format_bash_tool_content(tool_use) - - # Special handling for Edit - if tool_use.name == "Edit": - return format_edit_tool_content(tool_use) - - # Special handling for Multiedit - if tool_use.name == "Multiedit": - 
return format_multiedit_tool_content(tool_use) - - # Special handling for Read - if tool_use.name == "Read": - return format_read_tool_content(tool_use) - - # Special handling for Write - if tool_use.name == "Write": - return format_write_tool_content(tool_use) - - # Special handling for Task (agent spawning) - if tool_use.name == "Task": - return format_task_tool_content(tool_use) - - # Special handling for AskUserQuestion - if tool_use.name == "AskUserQuestion": - return format_askuserquestion_content(tool_use) - - # Special handling for ExitPlanMode - if tool_use.name == "ExitPlanMode": - return format_exitplanmode_content(tool_use) - - # Default: render as key/value table using shared renderer - return render_params_table(tool_use.input) - - -def _parse_cat_n_snippet( - lines: List[str], start_idx: int = 0 -) -> Optional[tuple[str, Optional[str], int]]: - """Parse cat-n formatted snippet from lines. - - Args: - lines: List of lines to parse - start_idx: Index to start parsing from (default: 0) - - Returns: - Tuple of (code_content, system_reminder, line_offset) or None if not parseable - """ - import re - - code_lines: List[str] = [] - system_reminder: Optional[str] = None - in_system_reminder = False - line_offset = 1 # Default offset - - for line in lines[start_idx:]: - # Check for system-reminder start - if "" in line: - in_system_reminder = True - system_reminder = "" - continue - - # Check for system-reminder end - if "" in line: - in_system_reminder = False - continue - - # If in system reminder, accumulate reminder text - if in_system_reminder: - if system_reminder is not None: - system_reminder += line + "\n" - continue - - # Parse regular code line (format: " 123→content") - match = re.match(r"\s+(\d+)→(.*)$", line) - if match: - line_num = int(match.group(1)) - # Capture the first line number as offset - if not code_lines: - line_offset = line_num - code_lines.append(match.group(2)) - elif line.strip() == "": # Allow empty lines between cat-n lines - 
continue - else: # Non-matching non-empty line, stop parsing - break - - if not code_lines: - return None - - return ( - "\n".join(code_lines), - system_reminder.strip() if system_reminder else None, - line_offset, - ) - - -def _parse_read_tool_result(content: str) -> Optional[tuple[str, Optional[str], int]]: - """Parse Read tool result in cat-n format. - - Returns: - Tuple of (code_content, system_reminder, line_offset) or None if not parseable - """ - import re - - # Check if content matches the cat-n format pattern (line_number → content) - lines = content.split("\n") - if not lines or not re.match(r"\s+\d+→", lines[0]): - return None - - return _parse_cat_n_snippet(lines) - - -def _parse_edit_tool_result(content: str) -> Optional[tuple[str, int]]: - """Parse Edit tool result to extract code snippet. - - Edit tool results typically have format: - "The file ... has been updated. Here's the result of running `cat -n` on a snippet..." - followed by cat-n formatted lines. - - Returns: - Tuple of (code_content, line_offset) or None if not parseable - """ - import re - - # Look for the cat-n snippet after the preamble - # Pattern: look for first line that matches the cat-n format - lines = content.split("\n") - code_start_idx = None - - for i, line in enumerate(lines): - if re.match(r"\s+\d+→", line): - code_start_idx = i - break - - if code_start_idx is None: - return None - - result = _parse_cat_n_snippet(lines, code_start_idx) - if result is None: - return None - - code_content, _system_reminder, line_offset = result - # Edit tool doesn't use system_reminder, so we just return code and offset - return (code_content, line_offset) - - -def format_tool_result_content( - tool_result: ToolResultContent, - file_path: Optional[str] = None, - tool_name: Optional[str] = None, -) -> str: - """Format tool result content as HTML, including images. 
- - Args: - tool_result: The tool result content - file_path: Optional file path for context (used for Read/Edit/Write tool rendering) - tool_name: Optional tool name for specialized rendering (e.g., "Write", "Read", "Edit", "Task") - """ - # Handle both string and structured content - if isinstance(tool_result.content, str): - raw_content = tool_result.content - has_images = False - image_html_parts: List[str] = [] - else: - # Content is a list of structured items, extract text and images - content_parts: List[str] = [] - image_html_parts: List[str] = [] - for item in tool_result.content: - item_type = item.get("type") - if item_type == "text": - text_value = item.get("text") - if isinstance(text_value, str): - content_parts.append(text_value) - elif item_type == "image": - # Handle image content within tool results - source = cast(Dict[str, Any], item.get("source", {})) - if source: - media_type: str = str(source.get("media_type", "image/png")) - data: str = str(source.get("data", "")) - if data: - data_url = f"data:{media_type};base64,{data}" - image_html_parts.append( - f'Tool result image' - ) - raw_content = "\n".join(content_parts) - has_images = len(image_html_parts) > 0 - - # Strip XML tags but keep the content inside - # Also strip redundant "String: ..." portions that echo the input - import re - - if raw_content: - # Remove ... tags but keep inner content - raw_content = re.sub( - r"(.*?)", - r"\1", - raw_content, - flags=re.DOTALL, - ) - # Remove "String: ..." portions that echo the input (everything after "String:" to end) - raw_content = re.sub(r"\nString:.*$", "", raw_content, flags=re.DOTALL) - - # Special handling for Write tool: only show first line (acknowledgment) on success - if tool_name == "Write" and not tool_result.is_error and not has_images: - lines = raw_content.split("\n") - if lines: - # Keep only the first acknowledgment line and add ellipsis - first_line = lines[0] - escaped_html = escape_html(first_line) - return f"
{escaped_html} ...
" - - # Try to parse as Read tool result if file_path is provided - if file_path and tool_name == "Read" and not has_images: - parsed_result = _parse_read_tool_result(raw_content) - - if parsed_result: - code_content, system_reminder, line_offset = parsed_result - - # Build system reminder suffix if present - suffix_html = "" - if system_reminder: - escaped_reminder = escape_html(system_reminder) - suffix_html = ( - f"
🤖 {escaped_reminder}
" - ) - - return render_file_content_collapsible( - code_content, - file_path, - "read-tool-result", - linenostart=line_offset, - suffix_html=suffix_html, - ) - - # Try to parse as Edit tool result if file_path is provided - if file_path and tool_name == "Edit" and not has_images: - parsed_result = _parse_edit_tool_result(raw_content) - if parsed_result: - parsed_code, line_offset = parsed_result - return render_file_content_collapsible( - parsed_code, - file_path, - "edit-tool-result", - linenostart=line_offset, - ) - - # Special handling for Task tool: render result as markdown with Pygments (agent's final message) - # Deduplication is now handled retroactively by replacing the sub-assistant content - if tool_name == "Task" and not has_images: - return render_markdown_collapsible(raw_content, "task-result") - - # Special handling for ExitPlanMode tool: truncate redundant plan echo on success - if tool_name == "ExitPlanMode" and not has_images: - processed_content = format_exitplanmode_result(raw_content) - escaped_content = escape_html(processed_content) - return f"
{escaped_content}
" - - # Special handling for AskUserQuestion tool: render Q&A pairs with styling - if tool_name == "AskUserQuestion" and not has_images: - styled_result = format_askuserquestion_result(raw_content) - if styled_result: - return styled_result - # Fall through to default handling if parsing fails - - # Check if this looks like Bash tool output and process ANSI codes - # Bash tool results often contain ANSI escape sequences and terminal output - if _looks_like_bash_output(raw_content): - escaped_content = _convert_ansi_to_html(raw_content) - else: - escaped_content = escape_html(raw_content) - - # Build final HTML based on content length and presence of images - if has_images: - # Combine text and images - text_html = f"
{escaped_content}
" if escaped_content else "" - images_html = "".join(image_html_parts) - combined_content = f"{text_html}{images_html}" - - # Always make collapsible when images are present - preview_text = "Text and image content" - return f""" -
- - {preview_text} - -
- {combined_content} -
-
- """ - else: - # Text-only content (existing behavior) - # For simple content, show directly without collapsible wrapper - if len(escaped_content) <= 200: - return f"
{escaped_content}
" - - # For longer content, use collapsible details but no extra wrapper - preview_text = escaped_content[:200] + "..." - return f""" -
- -
{preview_text}
-
-
-
{escaped_content}
-
-
- """ - - -def _looks_like_bash_output(content: str) -> bool: - """Check if content looks like it's from a Bash tool based on common patterns.""" - if not content: - return False - - # Check for ANSI escape sequences - if "\x1b[" in content: - return True - - # Check for common bash/terminal patterns - bash_indicators = [ - "$ ", # Shell prompt - "❯ ", # Modern shell prompt - "> ", # Shell continuation - "\n+ ", # Bash -x output - "bash: ", # Bash error messages - "/bin/bash", # Bash path - "command not found", # Common bash error - "Permission denied", # Common bash error - "No such file or directory", # Common bash error - ] - - # Check for file path patterns that suggest command output - import re - - if re.search(r"/[a-zA-Z0-9_-]+(/[a-zA-Z0-9_.-]+)*", content): # Unix-style paths - return True - - # Check for common command output patterns - if any(indicator in content for indicator in bash_indicators): - return True - - return False - - -def format_thinking_content(thinking: ThinkingContent) -> str: - """Format thinking content as HTML with markdown rendering.""" - thinking_text = thinking.thinking.strip() - - # Use line-based collapsible rendering (10 lines threshold, 5 preview) - return render_markdown_collapsible( - thinking_text, "thinking-text", line_threshold=10 - ) - - -def format_image_content(image: ImageContent) -> str: - """Format image content as HTML.""" - # Create a data URL from the base64 image data - data_url = f"data:{image.source.media_type};base64,{image.source.data}" - - return f'Uploaded image' - - -def _is_compacted_session_summary(text: str) -> bool: - """Check if text is a compacted session summary (model-generated markdown). - - Compacted summaries are generated when a session runs out of context and - needs to be continued. They are well-formed markdown and should be rendered - as such rather than in preformatted blocks. 
- """ - return text.startswith( - "This session is being continued from a previous conversation that ran out of context" - ) - - -def extract_ide_notifications(text: str) -> tuple[List[str], str]: - """Extract IDE notification tags from user message text. - - Handles: - - : Simple file open notifications - - : Code selection notifications (collapsible for large selections) - - : JSON diagnostic arrays - - Returns: - A tuple of (notifications_html_list, remaining_text) - where notifications are pre-rendered HTML divs and remaining_text - is the message content with IDE tags removed. - """ - import re - - notifications: List[str] = [] - remaining_text = text - - # Pattern 1: content - ide_file_pattern = r"(.*?)" - file_matches = list(re.finditer(ide_file_pattern, remaining_text, flags=re.DOTALL)) - - for match in file_matches: - content = match.group(1).strip() - escaped_content = escape_html(content) - notification_html = f"
🤖 {escaped_content}
" - notifications.append(notification_html) - - # Remove ide_opened_file tags - remaining_text = re.sub(ide_file_pattern, "", remaining_text, flags=re.DOTALL) - - # Pattern 2: content - selection_pattern = r"(.*?)" - selection_matches = list( - re.finditer(selection_pattern, remaining_text, flags=re.DOTALL) - ) - - for match in selection_matches: - content = match.group(1).strip() - escaped_content = escape_html(content) - - # For large selections, make them collapsible - if len(content) > 200: - preview = escape_html(content[:150]) + "..." - notification_html = f""" -
-
- 📝 {preview} -
{escaped_content}
-
-
- """ - else: - notification_html = f"
📝 {escaped_content}
" - - notifications.append(notification_html) - - # Remove ide_selection tags - remaining_text = re.sub(selection_pattern, "", remaining_text, flags=re.DOTALL) - - # Pattern 3: JSON - hook_pattern = r"\s*(.*?)\s*" - hook_matches = list(re.finditer(hook_pattern, remaining_text, flags=re.DOTALL)) - - for match in hook_matches: - json_content = match.group(1).strip() - try: - # Parse JSON array of diagnostic objects - diagnostics: Any = json.loads(json_content) - if isinstance(diagnostics, list): - # Render each diagnostic as a table - for diagnostic in cast(List[Any], diagnostics): - if isinstance(diagnostic, dict): - # Type assertion: we've confirmed it's a dict - diagnostic_dict = cast(Dict[str, Any], diagnostic) - table_html = render_params_table(diagnostic_dict) - notification_html = ( - f"
" - f"⚠️ IDE Diagnostic
{table_html}" - f"
" - ) - notifications.append(notification_html) - except (json.JSONDecodeError, ValueError): - # If JSON parsing fails, render as plain text - escaped_content = escape_html(json_content[:200]) - notification_html = ( - f"
🤖 IDE Diagnostics (parse error)
" - f"
{escaped_content}...
" - ) - notifications.append(notification_html) - - # Remove hook tags - remaining_text = re.sub(hook_pattern, "", remaining_text, flags=re.DOTALL) - - return notifications, remaining_text.strip() - - -def render_user_message_content( - content_list: List[ContentItem], -) -> tuple[str, bool, bool]: - """Render user message content with IDE tag extraction and compacted summary handling. - - Returns: - A tuple of (content_html, is_compacted, is_memory_input) - """ - # Check first text item - if content_list and hasattr(content_list[0], "text"): - first_text = getattr(content_list[0], "text", "") - - # Check for compacted session summary first - if _is_compacted_session_summary(first_text): - # Combine all text content for compacted summaries - all_text = "\n\n".join( - item.text for item in content_list if isinstance(item, TextContent) - ) - # Render as collapsible markdown (threshold=30, preview=10 for large summaries) - content_html = render_markdown_collapsible( - all_text, "compacted-summary", line_threshold=30, preview_line_count=10 - ) - return content_html, True, False - - # Check for user memory input - memory_match = re.search( - r"(.*?)", - first_text, - re.DOTALL, - ) - if memory_match: - memory_content = memory_match.group(1).strip() - # Render the memory content as user message - memory_content_list: List[ContentItem] = [ - TextContent(type="text", text=memory_content) - ] - content_html = render_message_content(memory_content_list, "user") - return content_html, False, True - - # Extract IDE notifications from first text item - ide_notifications_html, remaining_text = extract_ide_notifications(first_text) - modified_content = content_list[1:] - - # Build new content list with remaining text - if remaining_text: - # Replace first item with remaining text - modified_content = [ - TextContent(type="text", text=remaining_text) - ] + modified_content - - # Render the content - content_html = render_message_content(modified_content, "user") - - # Prepend IDE 
notifications - if ide_notifications_html: - content_html = "".join(ide_notifications_html) + content_html - else: - # No text in first item or empty list, render normally - content_html = render_message_content(content_list, "user") - - return content_html, False, False - - -def render_message_content(content: List[ContentItem], message_type: str) -> str: - """Render message content with proper tool use and tool result formatting. - - Note: This does NOT handle user-specific preprocessing like IDE tags or - compacted session summaries. Those should be handled by render_user_message_content. - """ - if len(content) == 1 and isinstance(content[0], TextContent): - if message_type == "user": - # User messages are shown as-is in preformatted blocks - escaped_text = escape_html(content[0].text) - return "
" + escaped_text + "
" - else: - # Assistant messages get markdown rendering with collapsible for long content - return render_markdown_collapsible( - content[0].text, - "assistant-text", - line_threshold=30, - preview_line_count=10, - ) - - # content is a list of ContentItem objects - rendered_parts: List[str] = [] - - for item in content: - # Handle both custom and Anthropic types - item_type = getattr(item, "type", None) - - if type(item) is TextContent or ( - hasattr(item, "type") and hasattr(item, "text") and item_type == "text" - ): - # Handle both TextContent and Anthropic TextBlock - text_value = getattr(item, "text", str(item)) - if message_type == "user": - # User messages are shown as-is in preformatted blocks - escaped_text = escape_html(text_value) - rendered_parts.append("
" + escaped_text + "
") - else: - # Assistant messages get markdown rendering with collapsible for long content - rendered_parts.append( - render_markdown_collapsible( - text_value, - "assistant-text", - line_threshold=30, - preview_line_count=10, - ) - ) - elif type(item) is ToolUseContent or ( - hasattr(item, "type") and item_type == "tool_use" - ): - # Tool use items should not appear here - they are filtered out before this function - print( - "Warning: tool_use content should not be processed in render_message_content", - flush=True, - ) - elif type(item) is ToolResultContent or ( - hasattr(item, "type") and item_type == "tool_result" - ): - # Tool result items should not appear here - they are filtered out before this function - print( - "Warning: tool_result content should not be processed in render_message_content", - flush=True, - ) - elif type(item) is ThinkingContent or ( - hasattr(item, "type") and item_type == "thinking" - ): - # Thinking items should not appear here - they are filtered out before this function - print( - "Warning: thinking content should not be processed in render_message_content", - flush=True, - ) - elif type(item) is ImageContent: - rendered_parts.append(format_image_content(item)) # type: ignore - return "\n".join(rendered_parts) +from .html import ( + escape_html, + format_tool_use_title, + parse_bash_input, + parse_bash_output, + parse_command_output, + parse_slash_command, +) +from .parser import parse_user_message_content -def _get_template_environment() -> Environment: - """Get Jinja2 template environment.""" - templates_dir = Path(__file__).parent / "templates" - env = Environment( - loader=FileSystemLoader(templates_dir), - autoescape=select_autoescape(["html", "xml"]), - ) - # Add custom filters/functions - env.globals["starts_with_emoji"] = starts_with_emoji # type: ignore[index] - return env +# -- Content Formatters ------------------------------------------------------- +# NOTE: Content formatters have been moved to html/ submodules: +# - 
format_thinking_content -> html/assistant_formatters.py +# - format_assistant_text_content -> html/assistant_formatters.py +# - format_tool_result_content -> html/tool_formatters.py +# - format_tool_use_content -> html/tool_formatters.py +# - format_image_content -> html/assistant_formatters.py +# - format_user_text_model_content -> html/user_formatters.py +# - parse_user_message_content -> parser.py def _format_type_counts(type_counts: dict[str, int]) -> str: @@ -1802,15 +161,16 @@ def _format_type_counts(type_counts: dict[str, int]) -> str: return f"{parts[0]}, {parts[1]}, {remaining} more" +# -- Template Classes --------------------------------------------------------- + + class TemplateMessage: """Structured message data for template rendering.""" def __init__( self, message_type: str, - content_html: str, formatted_timestamp: str, - css_class: str, raw_timestamp: Optional[str] = None, session_summary: Optional[str] = None, session_id: Optional[str] = None, @@ -1826,11 +186,14 @@ def __init__( uuid: Optional[str] = None, parent_uuid: Optional[str] = None, agent_id: Optional[str] = None, + modifiers: Optional[MessageModifiers] = None, + content: Optional["MessageContent"] = None, ): self.type = message_type - self.content_html = content_html + # Structured content for rendering + self.content = content self.formatted_timestamp = formatted_timestamp - self.css_class = css_class + self.modifiers = modifiers if modifiers is not None else MessageModifiers() self.raw_timestamp = raw_timestamp # Display title for message header (capitalized, with decorations) self.message_title = ( @@ -1864,6 +227,8 @@ def __init__( self.is_paired = False self.pair_role: Optional[str] = None # "pair_first", "pair_last", "pair_middle" self.pair_duration: Optional[str] = None # Duration for pair_last messages + # Children for tree-based rendering (future use) + self.children: List["TemplateMessage"] = [] def get_immediate_children_label(self) -> str: """Generate human-readable label for 
immediate children.""" @@ -1873,6 +238,30 @@ def get_total_descendants_label(self) -> str: """Generate human-readable label for all descendants.""" return _format_type_counts(self.total_descendants_by_type) + def flatten(self) -> List["TemplateMessage"]: + """Recursively flatten this message and all children into a list. + + Returns a list with this message followed by all descendants in + depth-first order. This provides backward compatibility with the + flat-list template rendering approach. + """ + result: List["TemplateMessage"] = [self] + for child in self.children: + result.extend(child.flatten()) + return result + + @staticmethod + def flatten_all(messages: List["TemplateMessage"]) -> List["TemplateMessage"]: + """Flatten a list of root messages into a single flat list. + + Useful for converting a tree structure back to a flat list for + templates that expect the traditional flat message list. + """ + result: List["TemplateMessage"] = [] + for message in messages: + result.extend(message.flatten()) + return result + class TemplateProject: """Structured project data for template rendering.""" @@ -2025,289 +414,216 @@ def __init__(self, project_summaries: List[Dict[str, Any]]): self.token_summary = " | ".join(token_parts) -def _render_hook_summary(message: "SystemTranscriptEntry") -> str: - """Render a hook summary as collapsible details. +# -- Template Generation ------------------------------------------------------ - Shows a compact summary with expandable hook commands and error output. + +def generate_template_messages( + messages: List[TranscriptEntry], +) -> Tuple[List[TemplateMessage], List[Dict[str, Any]]]: + """Generate template messages and session navigation from transcript messages. + + This is the format-neutral rendering step that produces data structures + ready for template rendering by any format-specific renderer. + + Args: + messages: List of transcript entries to process. 
+ + Returns: + A tuple of (template_messages, session_nav) where: + - template_messages: Processed messages ready for template rendering + - session_nav: Session navigation data with summaries and metadata """ - # Extract command names from hookInfos - commands = [info.get("command", "unknown") for info in (message.hookInfos or [])] - - # Determine if this is a failure or just output - has_errors = bool(message.hookErrors) - summary_icon = "🪝" - summary_text = "Hook failed" if has_errors else "Hook output" - - # Build the command section - command_html = "" - if commands: - command_html = '
' - for cmd in commands: - # Truncate very long commands - display_cmd = cmd if len(cmd) <= 100 else cmd[:97] + "..." - command_html += f"{html.escape(display_cmd)}" - command_html += "
" - - # Build the error output section - error_html = "" - if message.hookErrors: - error_html = '
' - for err in message.hookErrors: - # Convert ANSI codes in error output - formatted_err = _convert_ansi_to_html(err) - error_html += f'
{formatted_err}
' - error_html += "
" - - return f"""
-{summary_icon} {summary_text} -
-{command_html} -{error_html} -
-
""" - - -def _convert_ansi_to_html(text: str) -> str: - """Convert ANSI escape codes to HTML spans with CSS classes. - - Supports: - - Colors (30-37, 90-97 for foreground; 40-47, 100-107 for background) - - RGB colors (38;2;r;g;b for foreground; 48;2;r;g;b for background) - - Bold (1), Dim (2), Italic (3), Underline (4) - - Reset (0, 39, 49, 22, 23, 24) - - Strips cursor movement and screen manipulation codes + from .utils import get_warmup_session_ids + + # Performance timing + t_start = time.time() + + # Filter out warmup-only sessions + with log_timing("Filter warmup sessions", t_start): + warmup_session_ids = get_warmup_session_ids(messages) + if warmup_session_ids: + messages = [ + msg + for msg in messages + if getattr(msg, "sessionId", None) not in warmup_session_ids + ] + + # Pre-process to find and attach session summaries + with log_timing("Session summary processing", t_start): + prepare_session_summaries(messages) + + # Filter messages (removes summaries, warmup, empty, etc.) 
+ with log_timing("Filter messages", t_start): + filtered_messages = _filter_messages(messages) + + # Pass 1: Collect session metadata and token tracking + with log_timing("Collect session info", t_start): + sessions, session_order, show_tokens_for_message = _collect_session_info( + filtered_messages + ) + + # Pass 2: Render messages to TemplateMessage objects + with log_timing( + lambda: f"Render messages ({len(template_messages)} messages)", t_start + ): + template_messages = _render_messages( + filtered_messages, sessions, show_tokens_for_message + ) + + # Prepare session navigation data + with log_timing( + lambda: f"Session navigation building ({len(session_nav)} sessions)", t_start + ): + session_nav = prepare_session_navigation(sessions, session_order) + + # Reorder messages so each session's messages follow their session header + # This fixes interleaving that occurs when sessions are resumed + with log_timing("Reorder session messages", t_start): + template_messages = _reorder_session_template_messages(template_messages) + + # Identify and mark paired messages (command+output, tool_use+tool_result, etc.) 
+ with log_timing("Identify message pairs", t_start): + _identify_message_pairs(template_messages) + + # Reorder messages so pairs are adjacent while preserving chronological order + with log_timing("Reorder paired messages", t_start): + template_messages = _reorder_paired_messages(template_messages) + + # Reorder sidechains to appear after their Task results + # This must happen AFTER pair reordering, since that moves tool_results + with log_timing("Reorder sidechain messages", t_start): + template_messages = _reorder_sidechain_template_messages(template_messages) + + # Build hierarchy (message_id and ancestry) based on final order + # This must happen AFTER all reordering to get correct parent-child relationships + with log_timing("Build message hierarchy", t_start): + _build_message_hierarchy(template_messages) + + # Mark messages that have children for fold/unfold controls + with log_timing("Mark messages with children", t_start): + _mark_messages_with_children(template_messages) + + # Build tree structure by populating children fields + # Returns root messages (typically session headers) with children populated + # HtmlRenderer flattens this via pre-order traversal for template rendering + with log_timing("Build message tree", t_start): + root_messages = _build_message_tree(template_messages) + + return root_messages, session_nav + + +# -- Session Utilities -------------------------------------------------------- + + +def prepare_session_summaries(messages: List[TranscriptEntry]) -> None: + """Pre-process messages to find and attach session summaries. + + Modifies messages in place by attaching _session_summary attribute. """ - import re - - # First, strip cursor movement and screen manipulation codes - # Common patterns: [1A (cursor up), [2K (erase line), [?25l (hide cursor), etc. 
- cursor_patterns = [ - r"\x1b\[[0-9]*[ABCD]", # Cursor movement (up, down, forward, back) - r"\x1b\[[0-9]*[EF]", # Cursor next/previous line - r"\x1b\[[0-9]*[GH]", # Cursor horizontal/home position - r"\x1b\[[0-9;]*[Hf]", # Cursor position - r"\x1b\[[0-9]*[JK]", # Erase display/line - r"\x1b\[[0-9]*[ST]", # Scroll up/down - r"\x1b\[\?[0-9]*[hl]", # Private mode set/reset (show/hide cursor, etc.) - r"\x1b\[[0-9]*[PXYZ@]", # Insert/delete operations - r"\x1b\[=[0-9]*[A-Za-z]", # Alternate character set - r"\x1b\][0-9];[^\x07]*\x07", # Operating System Command (OSC) - r"\x1b\][0-9];[^\x1b]*\x1b\\", # OSC with string terminator - ] - - # Strip all cursor movement and screen manipulation codes - for pattern in cursor_patterns: - text = re.sub(pattern, "", text) - - # Also strip any remaining unhandled escape sequences that aren't color codes - # This catches any we might have missed, but preserves \x1b[...m color codes - text = re.sub(r"\x1b\[(?![0-9;]*m)[0-9;]*[A-Za-z]", "", text) - - result: List[str] = [] - segments: List[Dict[str, Any]] = [] - - # First pass: split text into segments with their styles - last_end = 0 - current_fg = None - current_bg = None - current_bold = False - current_dim = False - current_italic = False - current_underline = False - current_rgb_fg = None - current_rgb_bg = None - - for match in re.finditer(r"\x1b\[([0-9;]+)m", text): - # Add text before this escape code - if match.start() > last_end: - segments.append( - { - "text": text[last_end : match.start()], - "fg": current_fg, - "bg": current_bg, - "bold": current_bold, - "dim": current_dim, - "italic": current_italic, - "underline": current_underline, - "rgb_fg": current_rgb_fg, - "rgb_bg": current_rgb_bg, - } - ) + session_summaries: Dict[str, str] = {} + uuid_to_session: Dict[str, str] = {} + uuid_to_session_backup: Dict[str, str] = {} - # Process escape codes - codes = match.group(1).split(";") - i = 0 - while i < len(codes): - code = codes[i] - - # Reset codes - if code == "0": - 
current_fg = None - current_bg = None - current_bold = False - current_dim = False - current_italic = False - current_underline = False - current_rgb_fg = None - current_rgb_bg = None - elif code == "39": - current_fg = None - current_rgb_fg = None - elif code == "49": - current_bg = None - current_rgb_bg = None - elif code == "22": - current_bold = False - current_dim = False - elif code == "23": - current_italic = False - elif code == "24": - current_underline = False - - # Style codes - elif code == "1": - current_bold = True - elif code == "2": - current_dim = True - elif code == "3": - current_italic = True - elif code == "4": - current_underline = True - - # Standard foreground colors - elif code in ["30", "31", "32", "33", "34", "35", "36", "37"]: - color_map = { - "30": "black", - "31": "red", - "32": "green", - "33": "yellow", - "34": "blue", - "35": "magenta", - "36": "cyan", - "37": "white", - } - current_fg = f"ansi-{color_map[code]}" - current_rgb_fg = None - - # Standard background colors - elif code in ["40", "41", "42", "43", "44", "45", "46", "47"]: - color_map = { - "40": "black", - "41": "red", - "42": "green", - "43": "yellow", - "44": "blue", - "45": "magenta", - "46": "cyan", - "47": "white", - } - current_bg = f"ansi-bg-{color_map[code]}" - current_rgb_bg = None - - # Bright foreground colors - elif code in ["90", "91", "92", "93", "94", "95", "96", "97"]: - color_map = { - "90": "bright-black", - "91": "bright-red", - "92": "bright-green", - "93": "bright-yellow", - "94": "bright-blue", - "95": "bright-magenta", - "96": "bright-cyan", - "97": "bright-white", - } - current_fg = f"ansi-{color_map[code]}" - current_rgb_fg = None - - # Bright background colors - elif code in ["100", "101", "102", "103", "104", "105", "106", "107"]: - color_map = { - "100": "bright-black", - "101": "bright-red", - "102": "bright-green", - "103": "bright-yellow", - "104": "bright-blue", - "105": "bright-magenta", - "106": "bright-cyan", - "107": "bright-white", - 
} - current_bg = f"ansi-bg-{color_map[code]}" - current_rgb_bg = None - - # RGB foreground color - elif code == "38" and i + 1 < len(codes) and codes[i + 1] == "2": - if i + 4 < len(codes): - r, g, b = codes[i + 2], codes[i + 3], codes[i + 4] - current_rgb_fg = f"color: rgb({r}, {g}, {b})" - current_fg = None - i += 4 - - # RGB background color - elif code == "48" and i + 1 < len(codes) and codes[i + 1] == "2": - if i + 4 < len(codes): - r, g, b = codes[i + 2], codes[i + 3], codes[i + 4] - current_rgb_bg = f"background-color: rgb({r}, {g}, {b})" - current_bg = None - i += 4 + # Build mapping from message UUID to session ID + for message in messages: + if hasattr(message, "uuid") and hasattr(message, "sessionId"): + message_uuid = getattr(message, "uuid", "") + session_id = getattr(message, "sessionId", "") + if message_uuid and session_id: + # There is often duplication, in that case we want to prioritise the assistant + # message because summaries are generated from Claude's (last) success message + if type(message) is AssistantTranscriptEntry: + uuid_to_session[message_uuid] = session_id + else: + uuid_to_session_backup[message_uuid] = session_id - i += 1 + # Map summaries to sessions via leafUuid -> message UUID -> session ID + for message in messages: + if isinstance(message, SummaryTranscriptEntry): + leaf_uuid = message.leafUuid + if leaf_uuid in uuid_to_session: + session_summaries[uuid_to_session[leaf_uuid]] = message.summary + elif ( + leaf_uuid in uuid_to_session_backup + and uuid_to_session_backup[leaf_uuid] not in session_summaries + ): + session_summaries[uuid_to_session_backup[leaf_uuid]] = message.summary + + # Attach summaries to messages + for message in messages: + if hasattr(message, "sessionId"): + session_id = getattr(message, "sessionId", "") + if session_id in session_summaries: + setattr(message, "_session_summary", session_summaries[session_id]) + + +def prepare_session_navigation( + sessions: Dict[str, Dict[str, Any]], + session_order: 
List[str], +) -> List[Dict[str, Any]]: + """Prepare session navigation data for template rendering. + + Args: + sessions: Dictionary mapping session_id to session info dict + session_order: List of session IDs in display order + + Returns: + List of session navigation dicts for template rendering + """ + session_nav: List[Dict[str, Any]] = [] + + for session_id in session_order: + session_info = sessions[session_id] + + # Skip empty sessions (agent-only, no user messages) + if not session_info["first_user_message"]: + continue - last_end = match.end() + # Format timestamp range + first_ts = session_info["first_timestamp"] + last_ts = session_info["last_timestamp"] + timestamp_range = format_timestamp_range(first_ts, last_ts) - # Add remaining text - if last_end < len(text): - segments.append( + # Format token usage summary + token_summary = "" + total_input = session_info["total_input_tokens"] + total_output = session_info["total_output_tokens"] + total_cache_creation = session_info["total_cache_creation_tokens"] + total_cache_read = session_info["total_cache_read_tokens"] + + if total_input > 0 or total_output > 0: + token_parts: List[str] = [] + if total_input > 0: + token_parts.append(f"Input: {total_input}") + if total_output > 0: + token_parts.append(f"Output: {total_output}") + if total_cache_creation > 0: + token_parts.append(f"Cache Creation: {total_cache_creation}") + if total_cache_read > 0: + token_parts.append(f"Cache Read: {total_cache_read}") + token_summary = "Token usage – " + " | ".join(token_parts) + + session_nav.append( { - "text": text[last_end:], - "fg": current_fg, - "bg": current_bg, - "bold": current_bold, - "dim": current_dim, - "italic": current_italic, - "underline": current_underline, - "rgb_fg": current_rgb_fg, - "rgb_bg": current_rgb_bg, + "id": session_id, + "summary": session_info["summary"], + "timestamp_range": timestamp_range, + "first_timestamp": first_ts, + "last_timestamp": last_ts, + "message_count": 
session_info["message_count"], + "first_user_message": session_info["first_user_message"] + if session_info["first_user_message"] != "" + else "[No user message found in session.]", + "token_summary": token_summary, } ) - # Second pass: build HTML - for segment in segments: - if not segment["text"]: - continue + return session_nav - classes: List[str] = [] - styles: List[str] = [] - - if segment["fg"]: - classes.append(segment["fg"]) - if segment["bg"]: - classes.append(segment["bg"]) - if segment["bold"]: - classes.append("ansi-bold") - if segment["dim"]: - classes.append("ansi-dim") - if segment["italic"]: - classes.append("ansi-italic") - if segment["underline"]: - classes.append("ansi-underline") - if segment["rgb_fg"]: - styles.append(segment["rgb_fg"]) - if segment["rgb_bg"]: - styles.append(segment["rgb_bg"]) - - escaped_text = escape_html(segment["text"]) - - if classes or styles: - attrs: List[str] = [] - if classes: - attrs.append(f'class="{" ".join(classes)}"') - if styles: - attrs.append(f'style="{"; ".join(styles)}"') - result.append(f"{escaped_text}") - else: - result.append(escaped_text) - return "".join(result) +# -- Message Processing Functions --------------------------------------------- +# Note: HTML formatting logic has been moved to html/content_formatters.py +# as part of the refactoring to support format-neutral content models. 
# def _process_summary_message(message: SummaryTranscriptEntry) -> tuple[str, str, str]: @@ -2318,192 +634,72 @@ def _convert_ansi_to_html(text: str) -> str: # return css_class, content_html, message_type -def _process_command_message(text_content: str) -> tuple[str, str, str, str]: - """Process a command message and return (css_class, content_html, message_type, message_title).""" - css_class = "system" - command_name, command_args, command_contents = extract_command_info(text_content) - escaped_command_name = escape_html(command_name) - escaped_command_args = escape_html(command_args) - - # Format the command contents with proper line breaks - formatted_contents = command_contents.replace("\\n", "\n") - escaped_command_contents = escape_html(formatted_contents) - - # Build the content HTML - content_parts: List[str] = [f"Command: {escaped_command_name}"] - if command_args: - content_parts.append(f"Args: {escaped_command_args}") - if command_contents: - lines = escaped_command_contents.splitlines() - line_count = len(lines) - if line_count <= 12: - # Short content, show inline - details_html = ( - f"Content:
{escaped_command_contents}
" - ) - else: - # Long content, make collapsible - preview = "\n".join(lines[:5]) - collapsible = render_collapsible_code( - f"
{preview}
", - f"
{escaped_command_contents}
", - line_count, - ) - details_html = f"Content:{collapsible}" - content_parts.append(details_html) - - content_html = "
".join(content_parts) - message_type = "system" - message_title = "System" - return css_class, content_html, message_type, message_title - +def _process_command_message( + text_content: str, +) -> tuple[MessageModifiers, Optional["MessageContent"], str, str]: + """Process a slash command message and return (modifiers, content, message_type, message_title). -def _process_local_command_output(text_content: str) -> tuple[str, str, str, str]: - """Process local command output and return (css_class, content_html, message_type, message_title).""" - import re + These are user messages containing slash command invocations (e.g., /context, /model). + The JSONL type is "user", not "system". + """ + modifiers = MessageModifiers(is_slash_command=True) - css_class = "system command-output" + # Parse to content model (formatting happens in HtmlRenderer) + content = parse_slash_command(text_content) + # If parsing fails, content will be None and caller will handle fallback - stdout_match = re.search( - r"(.*?)", - text_content, - re.DOTALL, - ) - if stdout_match: - stdout_content = stdout_match.group(1).strip() + message_type = "user" + message_title = "Slash Command" + return modifiers, content, message_type, message_title - # Check if content looks like markdown (starts with markdown headers) - is_markdown = bool(re.match(r"^#+\s+", stdout_content, re.MULTILINE)) - if is_markdown: - # Render as markdown - import mistune +def _process_local_command_output( + text_content: str, +) -> tuple[MessageModifiers, Optional["MessageContent"], str, str]: + """Process slash command output and return (modifiers, content, message_type, message_title). - markdown_html = mistune.html(stdout_content) - content_html = ( - f"Command Output:
" - f"
{markdown_html}
" - ) - else: - # Convert ANSI codes to HTML for colored display - html_content = _convert_ansi_to_html(stdout_content) - # Use
 to preserve formatting and line breaks
-            content_html = (
-                f"Command Output:
" - f"
{html_content}
" - ) - else: - content_html = escape_html(text_content) + These are user messages containing the output from slash commands (e.g., /context, /model). + The JSONL type is "user", not "system". + """ + modifiers = MessageModifiers(is_command_output=True) - message_type = "system" - message_title = "System" - return css_class, content_html, message_type, message_title + # Parse to content model (formatting happens in HtmlRenderer) + content = parse_command_output(text_content) + # If parsing fails, content will be None and caller will handle fallback + message_type = "user" + message_title = "Command Output" + return modifiers, content, message_type, message_title -def _process_bash_input(text_content: str) -> tuple[str, str, str, str]: - """Process bash input command and return (css_class, content_html, message_type, message_title).""" - import re - css_class = "bash-input" +def _process_bash_input( + text_content: str, +) -> tuple[MessageModifiers, Optional["MessageContent"], str, str]: + """Process bash input command and return (modifiers, content, message_type, message_title).""" + modifiers = MessageModifiers() # bash-input is a message type, not a modifier - bash_match = re.search( - r"(.*?)", - text_content, - re.DOTALL, - ) - if bash_match: - bash_command = bash_match.group(1).strip() - escaped_command = escape_html(bash_command) - content_html = ( - f" " - f"{escaped_command}" - ) - else: - content_html = escape_html(text_content) + # Parse to content model (formatting happens in HtmlRenderer) + content = parse_bash_input(text_content) + # If parsing fails, content will be None and caller will handle fallback - message_type = "bash" + message_type = "bash-input" message_title = "Bash" - return css_class, content_html, message_type, message_title - - -def _process_bash_output(text_content: str) -> tuple[str, str, str, str]: - """Process bash output and return (css_class, content_html, message_type, message_title).""" - import re - - css_class = "bash-output" 
- COLLAPSE_THRESHOLD = 10 # Collapse if more than this many lines - - stdout_match = re.search( - r"(.*?)", - text_content, - re.DOTALL, - ) - stderr_match = re.search( - r"(.*?)", - text_content, - re.DOTALL, - ) + return modifiers, content, message_type, message_title - output_parts: List[tuple[str, str, int, str]] = [] - total_lines = 0 - - if stdout_match: - stdout_content = stdout_match.group(1).strip() - if stdout_content: - escaped_stdout = _convert_ansi_to_html(stdout_content) - stdout_lines = stdout_content.count("\n") + 1 - total_lines += stdout_lines - output_parts.append( - ("stdout", escaped_stdout, stdout_lines, stdout_content) - ) - if stderr_match: - stderr_content = stderr_match.group(1).strip() - if stderr_content: - escaped_stderr = _convert_ansi_to_html(stderr_content) - stderr_lines = stderr_content.count("\n") + 1 - total_lines += stderr_lines - output_parts.append( - ("stderr", escaped_stderr, stderr_lines, stderr_content) - ) +def _process_bash_output( + text_content: str, +) -> tuple[MessageModifiers, Optional["MessageContent"], str, str]: + """Process bash output and return (modifiers, content, message_type, message_title).""" + modifiers = MessageModifiers() # bash-output is a message type, not a modifier - if output_parts: - # Build the HTML parts - html_parts: List[str] = [] - for output_type, escaped_content, _, _ in output_parts: - css_name = f"bash-{output_type}" - html_parts.append(f"
{escaped_content}
") - - full_html = "".join(html_parts) - - # Wrap in collapsible if output is large - if total_lines > COLLAPSE_THRESHOLD: - # Create preview (first few lines) - preview_lines = 3 - first_output = output_parts[0] - raw_preview = "\n".join(first_output[3].split("\n")[:preview_lines]) - preview_html = html.escape(raw_preview) - if total_lines > preview_lines: - preview_html += "\n..." - - content_html = f"""
- - {total_lines} lines -
{preview_html}
-
-
{full_html}
-
""" - else: - content_html = full_html - else: - # Empty output - content_html = ( - "
(no output)
" - ) + # Parse to content model (formatting happens in HtmlRenderer) + content = parse_bash_output(text_content) + # If parsing fails, content will be None - caller/renderer handles empty output message_type = "bash" message_title = "Bash" - return css_class, content_html, message_type, message_title + return modifiers, content, message_type, message_title def _process_regular_message( @@ -2511,8 +707,11 @@ def _process_regular_message( message_type: str, is_sidechain: bool, is_meta: bool = False, -) -> tuple[str, str, str, str]: - """Process regular message and return (css_class, content_html, message_type, message_title). +) -> tuple[MessageModifiers, Optional["MessageContent"], str, str]: + """Process regular message and return (modifiers, content_model, message_type, message_title). + + Returns content_model for user messages, None for non-user messages. + Non-user messages (assistant) are handled by the legacy render_message_content path. Note: Sidechain user messages (Sub-assistant prompts) are now skipped entirely in the main processing loop since they duplicate the Task tool input prompt. 
@@ -2520,17 +719,18 @@ def _process_regular_message( Args: is_meta: True for slash command expanded prompts (isMeta=True in JSONL) """ - css_class = f"{message_type}" message_title = message_type.title() # Default title is_compacted = False + is_slash_command = False + content_model: Optional["MessageContent"] = None # Handle user-specific preprocessing - if message_type == "user": + if message_type == MessageType.USER: # Note: sidechain user messages are skipped before reaching this function if is_meta: - # Slash command expanded prompts - render as collapsible markdown + # Slash command expanded prompts # These contain LLM-generated instruction text (markdown formatted) - css_class = f"{message_type} slash-command" + is_slash_command = True message_title = "User (slash command)" # Combine all text content (items may be TextContent, dicts, or SDK objects) all_text = "\n\n".join( @@ -2538,43 +738,400 @@ def _process_regular_message( for item in text_only_content if hasattr(item, "text") ) - content_html = render_markdown_collapsible( - all_text, - "slash-command-content", - line_threshold=20, - preview_line_count=5, - ) + # Use UserTextContent with is_slash_command flag for HtmlRenderer to format + content_model = UserTextContent(text=all_text) else: - content_html, is_compacted, is_memory_input = render_user_message_content( - text_only_content - ) - if is_compacted: - css_class = f"{message_type} compacted" + content_model = parse_user_message_content(text_only_content) + # Determine message_title and modifiers from content type + if isinstance(content_model, CompactedSummaryContent): + is_compacted = True message_title = "User (compacted conversation)" - elif is_memory_input: + elif isinstance(content_model, UserMemoryContent): message_title = "Memory" + elif message_type == MessageType.ASSISTANT: + # Create AssistantTextContent for assistant messages + all_text = "\n\n".join( + getattr(item, "text", "") + for item in text_only_content + if hasattr(item, "text") 
+ ) + if all_text: + content_model = AssistantTextContent(text=all_text) + + if is_sidechain: + # Update message title for display (only non-user types reach here) + if not is_compacted: + message_title = "🔗 Sub-assistant" + + modifiers = MessageModifiers( + is_sidechain=is_sidechain, + is_slash_command=is_slash_command, + is_compacted=is_compacted, + ) + + return modifiers, content_model, message_type, message_title + + +def _process_system_message( + message: SystemTranscriptEntry, +) -> Optional[TemplateMessage]: + """Process a system message and return a TemplateMessage, or None if it should be skipped. + + Handles: + - Hook summaries (subtype="stop_hook_summary") + - Other system messages with level-specific styling (info, warning, error) + + Note: Slash command messages (, ) are user messages, + not system messages. They are handled by _process_command_message and + _process_local_command_output in the main processing loop. + """ + from .models import MessageContent # Local import to avoid circular dependency + + session_id = getattr(message, "sessionId", "unknown") + timestamp = getattr(message, "timestamp", "") + formatted_timestamp = format_timestamp(timestamp) if timestamp else "" + + # Build structured content based on message subtype + content: MessageContent + if message.subtype == "stop_hook_summary": + # Skip silent hook successes (no output, no errors) + if not message.hasOutput and not message.hookErrors: + return None + # Create structured hook summary content + hook_infos = [ + HookInfo(command=info.get("command", "unknown")) + for info in (message.hookInfos or []) + ] + content = HookSummaryContent( + has_output=bool(message.hasOutput), + hook_errors=message.hookErrors or [], + hook_infos=hook_infos, + ) + level = "hook" + elif not message.content: + # Skip system messages without content (shouldn't happen normally) + return None + else: + # Create structured system content + level = getattr(message, "level", "info") + content = 
SystemContent(level=level, text=message.content) + + # Store parent UUID for hierarchy rebuild (handled by _build_message_hierarchy) + parent_uuid = getattr(message, "parentUuid", None) + + return TemplateMessage( + message_type="system", + formatted_timestamp=formatted_timestamp, + raw_timestamp=timestamp, + session_id=session_id, + message_title=f"System {level.title()}", + message_id=None, # Will be assigned by _build_message_hierarchy + ancestry=[], # Will be assigned by _build_message_hierarchy + uuid=message.uuid, + parent_uuid=parent_uuid, + modifiers=MessageModifiers(system_level=level), + content=content, + ) + + +@dataclass +class ToolItemResult: + """Result of processing a single tool/thinking/image item.""" + + message_type: str + message_title: str + content: Optional["MessageContent"] = None # Structured content for rendering + tool_use_id: Optional[str] = None + title_hint: Optional[str] = None + pending_dedup: Optional[str] = None # For Task result deduplication + is_error: bool = False # For tool_result error state + + +def _process_tool_use_item( + tool_item: ContentItem, + tool_use_context: Dict[str, ToolUseContent], +) -> Optional[ToolItemResult]: + """Process a tool_use content item. 
+ + Args: + tool_item: The tool use content item + tool_use_context: Dict to populate with tool_use_id -> ToolUseContent mapping + + Returns: + ToolItemResult with tool_use content model, or None if item should be skipped + """ + # Convert Anthropic type to our format if necessary + if not isinstance(tool_item, ToolUseContent): + tool_use = ToolUseContent( + type="tool_use", + id=getattr(tool_item, "id", ""), + name=getattr(tool_item, "name", ""), + input=getattr(tool_item, "input", {}), + ) + else: + tool_use = tool_item + + # Title is computed here but content formatting happens in HtmlRenderer + tool_message_title = format_tool_use_title(tool_use) + escaped_id = escape_html(tool_use.id) + item_tool_use_id = tool_use.id + tool_title_hint = f"ID: {escaped_id}" + + # Populate tool_use_context for later use when processing tool results + tool_use_context[item_tool_use_id] = tool_use + + return ToolItemResult( + message_type="tool_use", + message_title=tool_message_title, + content=tool_use, # ToolUseContent is the model + tool_use_id=item_tool_use_id, + title_hint=tool_title_hint, + ) + + +def _process_tool_result_item( + tool_item: ContentItem, + tool_use_context: Dict[str, ToolUseContent], +) -> Optional[ToolItemResult]: + """Process a tool_result content item. 
+ + Args: + tool_item: The tool result content item + tool_use_context: Dict with tool_use_id -> ToolUseContent mapping + + Returns: + ToolItemResult with tool_result content model, or None if item should be skipped + """ + # Convert Anthropic type to our format if necessary + if not isinstance(tool_item, ToolResultContent): + tool_result = ToolResultContent( + type="tool_result", + tool_use_id=getattr(tool_item, "tool_use_id", ""), + content=getattr(tool_item, "content", ""), + is_error=getattr(tool_item, "is_error", False), + ) else: - # Non-user messages: render directly - content_html = render_message_content(text_only_content, message_type) + tool_result = tool_item + + # Get file_path and tool_name from tool_use context for specialized rendering + result_file_path: Optional[str] = None + result_tool_name: Optional[str] = None + if tool_result.tool_use_id in tool_use_context: + tool_use_from_ctx = tool_use_context[tool_result.tool_use_id] + result_tool_name = tool_use_from_ctx.name + if ( + result_tool_name in ("Read", "Edit", "Write") + and "file_path" in tool_use_from_ctx.input + ): + result_file_path = tool_use_from_ctx.input["file_path"] + + # Create content model with rendering context + content_model = ToolResultContentModel( + tool_use_id=tool_result.tool_use_id, + content=tool_result.content, + is_error=tool_result.is_error or False, + tool_name=result_tool_name, + file_path=result_file_path, + ) + + # Retroactive deduplication: if Task result, extract content for later matching + pending_dedup: Optional[str] = None + if result_tool_name == "Task": + # Extract text content from tool result + # Note: tool_result.content can be str or List[Dict[str, Any]] + if isinstance(tool_result.content, str): + task_result_content = tool_result.content.strip() + else: + # Handle list of dicts (tool result format) + content_parts: list[str] = [] + for item in tool_result.content: + text_val = item.get("text", "") + if isinstance(text_val, str): + 
content_parts.append(text_val) + task_result_content = "\n".join(content_parts).strip() + pending_dedup = task_result_content if task_result_content else None + + escaped_id = escape_html(tool_result.tool_use_id) + tool_title_hint = f"ID: {escaped_id}" + tool_message_title = "Error" if tool_result.is_error else "" + + return ToolItemResult( + message_type="tool_result", + message_title=tool_message_title, + content=content_model, + tool_use_id=tool_result.tool_use_id, + title_hint=tool_title_hint, + pending_dedup=pending_dedup, + is_error=tool_result.is_error or False, + ) + + +def _process_thinking_item(tool_item: ContentItem) -> Optional[ToolItemResult]: + """Process a thinking content item. + + Returns: + ToolItemResult with thinking content model + """ + # Extract thinking text from the content item + if isinstance(tool_item, ThinkingContent): + thinking_text = tool_item.thinking.strip() + signature = getattr(tool_item, "signature", None) + else: + thinking_text = getattr(tool_item, "thinking", str(tool_item)).strip() + signature = None + + # Create the content model (formatting happens in HtmlRenderer) + thinking_model = ThinkingContentModel(thinking=thinking_text, signature=signature) + + return ToolItemResult( + message_type="thinking", + message_title="Thinking", + content=thinking_model, + ) + + +def _process_image_item(tool_item: ContentItem) -> Optional[ToolItemResult]: + """Process an image content item. 
+ + Returns: + ToolItemResult with image content model, or None if item should be skipped + """ + # Convert Anthropic type to our format if necessary + if not isinstance(tool_item, ImageContent): + # For now, skip Anthropic image types - we'll handle when we encounter them + return None + + return ToolItemResult( + message_type="image", + message_title="Image", + content=tool_item, # ImageContent is already the model + ) + + +# -- Message Pairing ---------------------------------------------------------- + + +@dataclass +class PairingIndices: + """Indices for efficient message pairing lookups. + + All indices are built in a single pass for efficiency. + """ + + # (session_id, tool_use_id) -> message index for tool_use messages + tool_use: Dict[tuple[str, str], int] + # (session_id, tool_use_id) -> message index for tool_result messages + tool_result: Dict[tuple[str, str], int] + # uuid -> message index for system messages (parent-child pairing) + uuid: Dict[str, int] + # parent_uuid -> message index for slash-command messages + slash_command_by_parent: Dict[str, int] + + +def _build_pairing_indices(messages: List[TemplateMessage]) -> PairingIndices: + """Build indices for efficient message pairing lookups. + + Single pass through messages to build all indices needed for pairing. 
+ """ + tool_use_index: Dict[tuple[str, str], int] = {} + tool_result_index: Dict[tuple[str, str], int] = {} + uuid_index: Dict[str, int] = {} + slash_command_by_parent: Dict[str, int] = {} + + for i, msg in enumerate(messages): + # Index tool_use and tool_result by (session_id, tool_use_id) + if msg.tool_use_id and msg.session_id: + key = (msg.session_id, msg.tool_use_id) + if msg.type == "tool_use": + tool_use_index[key] = i + elif msg.type == "tool_result": + tool_result_index[key] = i + + # Index system messages by UUID for parent-child pairing + if msg.uuid and msg.type == "system": + uuid_index[msg.uuid] = i + + # Index slash-command user messages by parent_uuid + if msg.parent_uuid and msg.modifiers.is_slash_command: + slash_command_by_parent[msg.parent_uuid] = i + + return PairingIndices( + tool_use=tool_use_index, + tool_result=tool_result_index, + uuid=uuid_index, + slash_command_by_parent=slash_command_by_parent, + ) + - if is_sidechain: - css_class = f"{css_class} sidechain" - # Update message title for display (only non-user types reach here) - if not is_compacted: - message_title = "🔗 Sub-assistant" +def _mark_pair(first: TemplateMessage, last: TemplateMessage) -> None: + """Mark two messages as a pair.""" + first.is_paired = True + first.pair_role = "pair_first" + last.is_paired = True + last.pair_role = "pair_last" - return css_class, content_html, message_type, message_title +def _try_pair_adjacent( + current: TemplateMessage, + next_msg: TemplateMessage, +) -> bool: + """Try to pair adjacent messages based on their types. + + Returns True if messages were paired, False otherwise. 
+ + Adjacent pairing rules: + - user slash-command + user command-output + - bash-input + bash-output + - thinking + assistant + """ + # Slash command + command output (both are user messages) + if current.modifiers.is_slash_command and next_msg.modifiers.is_command_output: + _mark_pair(current, next_msg) + return True + + # Bash input + bash output + if current.type == "bash-input" and next_msg.type == "bash-output": + _mark_pair(current, next_msg) + return True + + # Thinking + assistant + if current.type == "thinking" and next_msg.type == "assistant": + _mark_pair(current, next_msg) + return True + + return False -def _get_combined_transcript_link(cache_manager: "CacheManager") -> Optional[str]: - """Get link to combined transcript if available.""" - try: - project_cache = cache_manager.get_cached_project_data() - if project_cache and project_cache.sessions: - return "combined_transcripts.html" - return None - except Exception: - return None + +def _try_pair_by_index( + current: TemplateMessage, + messages: List[TemplateMessage], + indices: PairingIndices, +) -> None: + """Try to pair current message with another using index lookups. 
+ + Index-based pairing rules (can be any distance apart): + - tool_use + tool_result (by tool_use_id within same session) + - system parent + system child (by uuid/parent_uuid) + - system + slash-command (by uuid -> parent_uuid) + """ + # Tool use + tool result (by tool_use_id within same session) + if current.type == "tool_use" and current.tool_use_id and current.session_id: + key = (current.session_id, current.tool_use_id) + if key in indices.tool_result: + result_msg = messages[indices.tool_result[key]] + _mark_pair(current, result_msg) + + # System child message finding its parent (by parent_uuid) + if current.type == "system" and current.parent_uuid: + if current.parent_uuid in indices.uuid: + parent_msg = messages[indices.uuid[current.parent_uuid]] + _mark_pair(parent_msg, current) + + # System command finding its slash-command child (by uuid -> parent_uuid) + if current.type == "system" and current.uuid: + if current.uuid in indices.slash_command_by_parent: + slash_msg = messages[indices.slash_command_by_parent[current.uuid]] + _mark_pair(current, slash_msg) def _identify_message_pairs(messages: List[TemplateMessage]) -> None: @@ -2583,39 +1140,15 @@ def _identify_message_pairs(messages: List[TemplateMessage]) -> None: Modifies messages in-place by setting is_paired and pair_role fields. Uses a two-pass algorithm: - 1. First pass: Build index of (session_id, tool_use_id) -> message index for tool_use - and tool_result. Session ID is included to prevent cross-session pairing - when sessions are resumed (same tool_use_id can appear in multiple sessions). - Build index of uuid -> message index for parent-child system messages - Build index of parent_uuid -> message index for slash-command messages - 2. 
Second pass: Sequential scan for adjacent pairs (system+output, bash, thinking+assistant) - and match tool_use/tool_result and uuid-based pairs using the index - """ - # Pass 1: Build index of tool_use messages and tool_result messages - # Key is (session_id, tool_use_id) to prevent cross-session pairing on resume - tool_use_index: Dict[ - tuple[str, str], int - ] = {} # (session_id, tool_use_id) -> index - tool_result_index: Dict[ - tuple[str, str], int - ] = {} # (session_id, tool_use_id) -> index - uuid_index: Dict[str, int] = {} # uuid -> message index for parent-child pairing - # Index slash-command messages by their parent_uuid for pairing with system commands - slash_command_by_parent: Dict[str, int] = {} # parent_uuid -> message index + 1. First pass: Build indices for efficient lookups (tool_use_id, uuid, parent_uuid) + 2. Second pass: Sequential scan for adjacent pairs and index-based pairs - for i, msg in enumerate(messages): - if msg.tool_use_id and msg.session_id: - key = (msg.session_id, msg.tool_use_id) - if "tool_use" in msg.css_class: - tool_use_index[key] = i - elif "tool_result" in msg.css_class: - tool_result_index[key] = i - # Build UUID index for system messages (both parent and child) - if msg.uuid and "system" in msg.css_class: - uuid_index[msg.uuid] = i - # Index slash-command user messages by parent_uuid - if msg.parent_uuid and "slash-command" in msg.css_class: - slash_command_by_parent[msg.parent_uuid] = i + Pairing types: + - Adjacent: slash-command+command-output, bash-input+bash-output, thinking+assistant + - Indexed: tool_use+result (by ID), system parent+child (by UUID) + """ + # Pass 1: Build all indices for efficient lookups + indices = _build_pairing_indices(messages) # Pass 2: Sequential scan to identify pairs i = 0 @@ -2627,75 +1160,15 @@ def _identify_message_pairs(messages: List[TemplateMessage]) -> None: i += 1 continue - # Check for system command + command output pair (adjacent only) - if current.css_class == "system" and i + 1 < 
len(messages): - next_msg = messages[i + 1] - if "command-output" in next_msg.css_class: - current.is_paired = True - current.pair_role = "pair_first" - next_msg.is_paired = True - next_msg.pair_role = "pair_last" - i += 2 - continue - - # Check for tool_use + tool_result pair using index (no distance limit) - # Key includes session_id to prevent cross-session pairing on resume - if ( - "tool_use" in current.css_class - and current.tool_use_id - and current.session_id - ): - key = (current.session_id, current.tool_use_id) - if key in tool_result_index: - result_idx = tool_result_index[key] - result_msg = messages[result_idx] - current.is_paired = True - current.pair_role = "pair_first" - result_msg.is_paired = True - result_msg.pair_role = "pair_last" - - # Check for UUID-based parent-child system message pair (no distance limit) - if "system" in current.css_class and current.parent_uuid: - if current.parent_uuid in uuid_index: - parent_idx = uuid_index[current.parent_uuid] - parent_msg = messages[parent_idx] - parent_msg.is_paired = True - parent_msg.pair_role = "pair_first" - current.is_paired = True - current.pair_role = "pair_last" - - # Check for system command + user slash-command pair (via parent_uuid) - # The slash-command message's parent_uuid points to the system command's uuid - if "system" in current.css_class and current.uuid: - if current.uuid in slash_command_by_parent: - slash_idx = slash_command_by_parent[current.uuid] - slash_msg = messages[slash_idx] - current.is_paired = True - current.pair_role = "pair_first" - slash_msg.is_paired = True - slash_msg.pair_role = "pair_last" - - # Check for bash-input + bash-output pair (adjacent only) - if current.css_class == "bash-input" and i + 1 < len(messages): + # Try adjacent pairing first (can skip next message if paired) + if i + 1 < len(messages): next_msg = messages[i + 1] - if next_msg.css_class == "bash-output": - current.is_paired = True - current.pair_role = "pair_first" - next_msg.is_paired = 
True - next_msg.pair_role = "pair_last" + if _try_pair_adjacent(current, next_msg): i += 2 continue - # Check for thinking + assistant pair (adjacent only) - if "thinking" in current.css_class and i + 1 < len(messages): - next_msg = messages[i + 1] - if "assistant" in next_msg.css_class: - current.is_paired = True - current.pair_role = "pair_first" - next_msg.is_paired = True - next_msg.pair_role = "pair_last" - i += 2 - continue + # Try index-based pairing (doesn't skip, continues to next message) + _try_pair_by_index(current, messages, indices) i += 1 @@ -2736,7 +1209,7 @@ def _reorder_paired_messages(messages: List[TemplateMessage]) -> List[TemplateMe msg.is_paired and msg.pair_role == "pair_last" and msg.parent_uuid - and "slash-command" in msg.css_class + and msg.modifiers.is_slash_command ): slash_command_pair_index[msg.parent_uuid] = i @@ -2768,7 +1241,13 @@ def _reorder_paired_messages(messages: List[TemplateMessage]) -> List[TemplateMe last_idx = slash_command_pair_index[msg.uuid] pair_last = messages[last_idx] - if pair_last is not None and last_idx is not None: + # Only append if we haven't already added this pair_last + # (handles case where multiple pair_firsts match the same pair_last) + if ( + pair_last is not None + and last_idx is not None + and last_idx not in skip_indices + ): reordered.append(pair_last) skip_indices.add(last_idx) @@ -2803,42 +1282,11 @@ def _reorder_paired_messages(messages: List[TemplateMessage]) -> List[TemplateMe return reordered -def generate_session_html( - messages: List[TranscriptEntry], - session_id: str, - title: Optional[str] = None, - cache_manager: Optional["CacheManager"] = None, -) -> str: - """Generate HTML for a single session using Jinja2 templates.""" - # Filter messages for this session only - session_messages = [ - msg - for msg in messages - if hasattr(msg, "sessionId") and getattr(msg, "sessionId") == session_id - ] - - # Get combined transcript link if cache manager is available - combined_link = None - if 
cache_manager is not None: - combined_link = _get_combined_transcript_link(cache_manager) - - if not session_messages: - return generate_html( - [], - title or f"Session {session_id[:8]}", - combined_transcript_link=combined_link, - ) - - # Use the existing generate_html function but with filtered messages and combined link - return generate_html( - session_messages, - title or f"Session {session_id[:8]}", - combined_transcript_link=combined_link, - ) +# -- Message Hierarchy -------------------------------------------------------- -def _get_message_hierarchy_level(css_class: str, is_sidechain: bool) -> int: - """Determine the hierarchy level for a message based on its type and sidechain status. +def _get_message_hierarchy_level(msg: TemplateMessage) -> int: + """Determine the hierarchy level for a message based on its type and modifiers. Correct hierarchy based on logical nesting: - Level 0: Session headers @@ -2854,35 +1302,41 @@ def _get_message_hierarchy_level(css_class: str, is_sidechain: bool) -> int: Returns: Integer hierarchy level (1-5, session headers are 0) """ + msg_type = msg.type + is_sidechain = msg.modifiers.is_sidechain + system_level = msg.modifiers.system_level + # User messages at level 1 (under session) # Note: sidechain user messages are skipped before reaching this function - if "user" in css_class and not is_sidechain: + if msg_type == "user" and not is_sidechain: return 1 # System info/warning at level 3 (tool-related, e.g., hook notifications) if ( - "system-info" in css_class or "system-warning" in css_class - ) and not is_sidechain: + msg_type == "system" + and system_level in ("info", "warning") + and not is_sidechain + ): return 3 # System commands/errors at level 2 (siblings to assistant) - if "system" in css_class and not is_sidechain: + if msg_type == "system" and not is_sidechain: return 2 # Sidechain assistant/thinking at level 4 (nested under Task tool result) - if is_sidechain and ("assistant" in css_class or "thinking" in 
css_class): + if is_sidechain and msg_type in ("assistant", "thinking"): return 4 # Sidechain tools at level 5 - if is_sidechain and ("tool" in css_class): + if is_sidechain and msg_type in ("tool_use", "tool_result"): return 5 # Main assistant/thinking at level 2 (nested under user) - if "assistant" in css_class or "thinking" in css_class: + if msg_type in ("assistant", "thinking"): return 2 # Main tools at level 3 (nested under assistant) - if "tool" in css_class: + if msg_type in ("tool_use", "tool_result"): return 3 # Default to level 1 @@ -2909,11 +1363,8 @@ def _build_message_hierarchy(messages: List[TemplateMessage]) -> None: if message.is_session_header: current_level = 0 else: - # Determine level from css_class - is_sidechain = "sidechain" in message.css_class - current_level = _get_message_hierarchy_level( - message.css_class, is_sidechain - ) + # Determine level from message type and modifiers + current_level = _get_message_hierarchy_level(message) # Pop stack until we find the appropriate parent level while hierarchy_stack and hierarchy_stack[-1][0] >= current_level: @@ -2971,7 +1422,7 @@ def _mark_messages_with_children(messages: List[TemplateMessage]) -> None: immediate_parent_id = message.ancestry[-1] # Get message type for categorization - msg_type = message.css_class or message.type + msg_type = message.type # Increment immediate parent's child count if immediate_parent_id in message_by_id: @@ -2994,249 +1445,54 @@ def _mark_messages_with_children(messages: List[TemplateMessage]) -> None: ) -def deduplicate_messages(messages: List[TranscriptEntry]) -> List[TranscriptEntry]: - """Remove duplicate messages based on (type, timestamp, sessionId, content_key). +def _build_message_tree(messages: List[TemplateMessage]) -> List[TemplateMessage]: + """Build tree structure by populating children fields based on ancestry. 
- Messages with the exact same timestamp are duplicates by definition - - the differences (like IDE selection tags) are just logging artifacts. + This function takes a flat list of messages (with message_id and ancestry + already set by _build_message_hierarchy) and populates the children field + of each message to form an explicit tree structure. - We need a content-based key to handle two cases: - 1. Version stutter: Same message logged twice during Claude Code upgrade - -> Same timestamp, same message.id or tool_use_id -> SHOULD deduplicate - 2. Concurrent tool results: Multiple tool results with same timestamp - -> Same timestamp, different tool_use_ids -> should NOT deduplicate + The tree structure enables: + - Recursive template rendering with nested DOM elements + - Simpler JavaScript fold/unfold (just hide/show children container) + - More natural parent-child traversal Args: - messages: List of transcript entries to deduplicate + messages: List of template messages with message_id and ancestry set Returns: - List of deduplicated messages, preserving order (first occurrence kept) + List of root messages (those with empty ancestry). Each message's + children field is populated with its direct children. 
""" - # Track seen (message_type, timestamp, is_meta, session_id, content_key) tuples - seen: set[tuple[str, str, bool, str, str]] = set() - deduplicated: List[TranscriptEntry] = [] - + # Build index of messages by ID for O(1) lookup + message_by_id: dict[str, TemplateMessage] = {} for message in messages: - # Get basic message type - message_type = getattr(message, "type", "unknown") - - # For system messages, include level to differentiate info/warning/error - if isinstance(message, SystemTranscriptEntry): - level = getattr(message, "level", "info") - message_type = f"system-{level}" - - # Get timestamp - timestamp = getattr(message, "timestamp", "") - - # Get isMeta flag (slash command prompts have isMeta=True with same timestamp as parent) - is_meta = getattr(message, "isMeta", False) - - # Get sessionId for multi-session report deduplication - session_id = getattr(message, "sessionId", "") - - # Get content key for differentiating concurrent messages - # - For assistant messages: use message.id (same for stutters, different for different msgs) - # - For user messages with tool results: use first tool_use_id - # - For other messages: use uuid as fallback - content_key = "" - if isinstance(message, AssistantTranscriptEntry): - # For assistant messages, use the message id - content_key = message.message.id - elif isinstance(message, UserTranscriptEntry): - # For user messages, check for tool results - if isinstance(message.message.content, list): - for item in message.message.content: - if isinstance(item, ToolResultContent): - content_key = item.tool_use_id - break - # Fallback to uuid if no content key found - if not content_key: - content_key = getattr(message, "uuid", "") - - # Create deduplication key - include content_key for proper handling - # of both version stutters and concurrent tool results - dedup_key = (message_type, timestamp, is_meta, session_id, content_key) - - # Keep only first occurrence - if dedup_key not in seen: - seen.add(dedup_key) - 
deduplicated.append(message) - - return deduplicated - - -def generate_html( - messages: List[TranscriptEntry], - title: Optional[str] = None, - combined_transcript_link: Optional[str] = None, -) -> str: - """Generate HTML from transcript messages using Jinja2 templates.""" - from .utils import get_warmup_session_ids - - # Performance timing - t_start = time.time() - - with log_timing("Initialization", t_start): - if not title: - title = "Claude Transcript" - - # Filter out warmup-only sessions - with log_timing("Filter warmup sessions", t_start): - warmup_session_ids = get_warmup_session_ids(messages) - if warmup_session_ids: - messages = [ - msg - for msg in messages - if getattr(msg, "sessionId", None) not in warmup_session_ids - ] - - # Pre-process to find and attach session summaries - with log_timing("Session summary processing", t_start): - session_summaries: Dict[str, str] = {} - uuid_to_session: Dict[str, str] = {} - uuid_to_session_backup: Dict[str, str] = {} - - # Build mapping from message UUID to session ID - for message in messages: - if hasattr(message, "uuid") and hasattr(message, "sessionId"): - message_uuid = getattr(message, "uuid", "") - session_id = getattr(message, "sessionId", "") - if message_uuid and session_id: - # There is often duplication, in that case we want to prioritise the assistant - # message because summaries are generated from Claude's (last) success message - if type(message) is AssistantTranscriptEntry: - uuid_to_session[message_uuid] = session_id - else: - uuid_to_session_backup[message_uuid] = session_id - - # Map summaries to sessions via leafUuid -> message UUID -> session ID - for message in messages: - if isinstance(message, SummaryTranscriptEntry): - leaf_uuid = message.leafUuid - if leaf_uuid in uuid_to_session: - session_summaries[uuid_to_session[leaf_uuid]] = message.summary - elif ( - leaf_uuid in uuid_to_session_backup - and uuid_to_session_backup[leaf_uuid] not in session_summaries - ): - 
session_summaries[uuid_to_session_backup[leaf_uuid]] = ( - message.summary - ) - - # Attach summaries to messages - for message in messages: - if hasattr(message, "sessionId"): - session_id = getattr(message, "sessionId", "") - if session_id in session_summaries: - setattr(message, "_session_summary", session_summaries[session_id]) - - # Process messages through the main rendering loop - template_messages, sessions, session_order = _process_messages_loop(messages) - - # Prepare session navigation data - session_nav: List[Dict[str, Any]] = [] - with log_timing( - lambda: f"Session navigation building ({len(session_nav)} sessions)", t_start - ): - for session_id in session_order: - session_info = sessions[session_id] - - # Skip empty sessions (agent-only, no user messages) - if not session_info["first_user_message"]: - continue - - # Format timestamp range - first_ts = session_info["first_timestamp"] - last_ts = session_info["last_timestamp"] - timestamp_range = "" - if first_ts and last_ts: - if first_ts == last_ts: - timestamp_range = format_timestamp(first_ts) - else: - timestamp_range = ( - f"{format_timestamp(first_ts)} - {format_timestamp(last_ts)}" - ) - elif first_ts: - timestamp_range = format_timestamp(first_ts) - - # Format token usage summary - token_summary = "" - total_input = session_info["total_input_tokens"] - total_output = session_info["total_output_tokens"] - total_cache_creation = session_info["total_cache_creation_tokens"] - total_cache_read = session_info["total_cache_read_tokens"] - - if total_input > 0 or total_output > 0: - token_parts: List[str] = [] - if total_input > 0: - token_parts.append(f"Input: {total_input}") - if total_output > 0: - token_parts.append(f"Output: {total_output}") - if total_cache_creation > 0: - token_parts.append(f"Cache Creation: {total_cache_creation}") - if total_cache_read > 0: - token_parts.append(f"Cache Read: {total_cache_read}") - token_summary = "Token usage – " + " | ".join(token_parts) - - 
session_nav.append( - { - "id": session_id, - "summary": session_info["summary"], - "timestamp_range": timestamp_range, - "first_timestamp": first_ts, - "last_timestamp": last_ts, - "message_count": session_info["message_count"], - "first_user_message": session_info["first_user_message"] - if session_info["first_user_message"] != "" - else "[No user message found in session.]", - "token_summary": token_summary, - } - ) - - # Reorder messages so each session's messages follow their session header - # This fixes interleaving that occurs when sessions are resumed - with log_timing("Reorder session messages", t_start): - template_messages = _reorder_session_template_messages(template_messages) - - # Identify and mark paired messages (command+output, tool_use+tool_result, etc.) - with log_timing("Identify message pairs", t_start): - _identify_message_pairs(template_messages) + if message.message_id: + message_by_id[message.message_id] = message - # Reorder messages so pairs are adjacent while preserving chronological order - with log_timing("Reorder paired messages", t_start): - template_messages = _reorder_paired_messages(template_messages) + # Clear any existing children (in case of re-processing) + for message in messages: + message.children = [] - # Reorder sidechains to appear after their Task results - # This must happen AFTER pair reordering, since that moves tool_results - with log_timing("Reorder sidechain messages", t_start): - template_messages = _reorder_sidechain_template_messages(template_messages) + # Collect root messages (those with no ancestry) + root_messages: List[TemplateMessage] = [] - # Build hierarchy (message_id and ancestry) based on final order - # This must happen AFTER all reordering to get correct parent-child relationships - with log_timing("Build message hierarchy", t_start): - _build_message_hierarchy(template_messages) + # Populate children based on ancestry + for message in messages: + if not message.ancestry: + # Root message (level 
0, no parent) + root_messages.append(message) + else: + # Has a parent - add to parent's children + immediate_parent_id = message.ancestry[-1] + if immediate_parent_id in message_by_id: + parent = message_by_id[immediate_parent_id] + parent.children.append(message) - # Mark messages that have children for fold/unfold controls - with log_timing("Mark messages with children", t_start): - _mark_messages_with_children(template_messages) + return root_messages - # Render template - with log_timing("Template environment setup", t_start): - env = _get_template_environment() - template = env.get_template("transcript.html") - - with log_timing(lambda: f"Template rendering ({len(html_output)} chars)", t_start): - html_output = str( - template.render( - title=title, - messages=template_messages, - sessions=session_nav, - combined_transcript_link=combined_transcript_link, - library_version=get_library_version(), - ) - ) - return html_output +# -- Message Reordering ------------------------------------------------------- def _reorder_session_template_messages( @@ -3329,7 +1585,7 @@ def _reorder_sidechain_template_messages( sidechain_map: Dict[str, List[TemplateMessage]] = {} for message in messages: - is_sidechain = "sidechain" in message.css_class + is_sidechain = message.modifiers.is_sidechain agent_id = message.agent_id if is_sidechain and agent_id: @@ -3357,7 +1613,14 @@ def _reorder_sidechain_template_messages( # tool_use ever gets agent_id in the future agent_id = message.agent_id - if agent_id and message.type == "tool_result" and agent_id in sidechain_map: + # Only insert sidechain if not already inserted (handles case where + # multiple tool_results have the same agent_id) + if ( + agent_id + and message.type == MessageType.TOOL_RESULT + and agent_id in sidechain_map + and agent_id not in used_agents + ): sidechain_msgs = sidechain_map[agent_id] # Deduplicate: find the last sidechain assistant with text content @@ -3365,7 +1628,7 @@ def 
_reorder_sidechain_template_messages( task_result_content = ( message.raw_text_content.strip() if message.raw_text_content else None ) - if task_result_content and message.type == "tool_result": + if task_result_content and message.type == MessageType.TOOL_RESULT: # Find the last assistant message in this sidechain for sidechain_msg in reversed(sidechain_msgs): sidechain_text = ( @@ -3374,69 +1637,263 @@ def _reorder_sidechain_template_messages( else None ) if ( - sidechain_msg.type == "assistant" + sidechain_msg.type == MessageType.ASSISTANT and sidechain_text and sidechain_text == task_result_content ): # Replace with note pointing to the Task result - forward_link_html = "

(Task summary — already displayed in Task tool result above)

" - sidechain_msg.content_html = forward_link_html + sidechain_msg.content = DedupNoticeContent( + notice_text="(Task summary — already displayed in Task tool result above)" + ) # Mark as deduplicated for potential debugging sidechain_msg.raw_text_content = None break - # Insert the sidechain messages for this agent right after this message - # Note: ancestry will be rebuilt by _build_message_hierarchy() later - result.extend(sidechain_msgs) - used_agents.add(agent_id) + # Insert the sidechain messages for this agent right after this message + # Note: ancestry will be rebuilt by _build_message_hierarchy() later + result.extend(sidechain_msgs) + used_agents.add(agent_id) + + # Append any sidechains that weren't matched (shouldn't happen normally) + for agent_id, sidechain_msgs in sidechain_map.items(): + if agent_id not in used_agents: + result.extend(sidechain_msgs) + + return result + + +def _filter_messages(messages: List[TranscriptEntry]) -> List[TranscriptEntry]: + """Filter messages to those that should be rendered. + + This function filters out: + - Summary messages (already attached to sessions) + - Queue operations except 'remove' (steering messages) + - Messages with no meaningful content (no text and no tool items) + - Messages matching should_skip_message() (warmup, etc.) + - Sidechain user messages without tool results (prompts duplicate Task result) + + System messages are included as they need special processing in _render_messages. 
+ + Args: + messages: List of transcript entries to filter + + Returns: + Filtered list of messages that should be rendered + """ + filtered: List[TranscriptEntry] = [] + + for message in messages: + message_type = message.type + + # Skip summary messages + if isinstance(message, SummaryTranscriptEntry): + continue + + # Skip most queue operations - only process 'remove' for counts + if isinstance(message, QueueOperationTranscriptEntry): + if message.operation != "remove": + continue + + # System messages bypass other checks but are included + if isinstance(message, SystemTranscriptEntry): + filtered.append(message) + continue + + # Get message content for filtering checks + if isinstance(message, QueueOperationTranscriptEntry): + message_content = message.content if message.content else [] + else: + message_content = message.message.content # type: ignore + + text_content = extract_text_content(message_content) + + # Skip if no meaningful content + if not text_content.strip(): + # Check for tool items + if isinstance(message_content, list): + has_tool_items = any( + isinstance( + item, (ToolUseContent, ToolResultContent, ThinkingContent) + ) + or getattr(item, "type", None) + in ("tool_use", "tool_result", "thinking") + for item in message_content + ) + if not has_tool_items: + continue + else: + continue + + # Skip messages that should be filtered out + if should_skip_message(text_content): + continue + + # Skip sidechain user messages that are just prompts (no tool results) + if message_type == MessageType.USER and getattr(message, "isSidechain", False): + if isinstance(message_content, list): + has_tool_results = any( + getattr(item, "type", None) == "tool_result" + or isinstance(item, ToolResultContent) + for item in message_content + ) + if not has_tool_results: + continue + + # Message passes all filters + filtered.append(message) + + return filtered + + +def _collect_session_info( + messages: List[TranscriptEntry], +) -> tuple[ + Dict[str, Dict[str, Any]], 
# sessions + List[str], # session_order + set[str], # show_tokens_for_message +]: + """Collect session metadata and token tracking from pre-filtered messages. + + This function iterates through messages to: + - Build session metadata (timestamps, message counts, first user message) + - Track token usage per session (deduplicating by requestId) + - Determine which messages should display token usage + + Note: Messages should be pre-filtered by _filter_messages. System messages + in the input are skipped for session tracking purposes. + + Args: + messages: Pre-filtered list of transcript entries + + Returns: + Tuple containing: + - sessions: Session metadata dict mapping session_id to info + - session_order: List of session IDs in chronological order + - show_tokens_for_message: Set of message UUIDs that should display tokens + """ + sessions: Dict[str, Dict[str, Any]] = {} + session_order: List[str] = [] + + # Track requestIds to avoid double-counting token usage + seen_request_ids: set[str] = set() + # Track which messages should show token usage (first occurrence of each requestId) + show_tokens_for_message: set[str] = set() + + for message in messages: + # Skip system messages for session tracking + if isinstance(message, SystemTranscriptEntry): + continue + + # Get message content + if isinstance(message, QueueOperationTranscriptEntry): + message_content = message.content if message.content else [] + else: + message_content = message.message.content # type: ignore + + text_content = extract_text_content(message_content) # type: ignore[arg-type] + + # Get session info + session_id = getattr(message, "sessionId", "unknown") + + # Initialize session if new + if session_id not in sessions: + current_session_summary = getattr(message, "_session_summary", None) + + # Get first user message content for preview + first_user_message = "" + if is_user_entry(message) and should_use_as_session_starter(text_content): + content = extract_text_content(message.message.content) 
+ first_user_message = create_session_preview(content) + + sessions[session_id] = { + "id": session_id, + "summary": current_session_summary, + "first_timestamp": getattr(message, "timestamp", ""), + "last_timestamp": getattr(message, "timestamp", ""), + "message_count": 0, + "first_user_message": first_user_message, + "total_input_tokens": 0, + "total_output_tokens": 0, + "total_cache_creation_tokens": 0, + "total_cache_read_tokens": 0, + } + session_order.append(session_id) + + # Update first user message if this is a user message and we don't have one yet + elif is_user_entry(message) and not sessions[session_id]["first_user_message"]: + first_user_content = extract_text_content(message.message.content) + if should_use_as_session_starter(first_user_content): + sessions[session_id]["first_user_message"] = create_session_preview( + first_user_content + ) + + sessions[session_id]["message_count"] += 1 - # Append any sidechains that weren't matched (shouldn't happen normally) - for agent_id, sidechain_msgs in sidechain_map.items(): - if agent_id not in used_agents: - result.extend(sidechain_msgs) + # Update last timestamp for this session + current_timestamp = getattr(message, "timestamp", "") + if current_timestamp: + sessions[session_id]["last_timestamp"] = current_timestamp - return result + # Extract and accumulate token usage for assistant messages + # Only count tokens for the first message with each requestId to avoid duplicates + if is_assistant_entry(message): + assistant_message = message.message + request_id = message.requestId + message_uuid = message.uuid + + if ( + assistant_message.usage + and request_id + and request_id not in seen_request_ids + ): + # Mark this requestId as seen to avoid double-counting + seen_request_ids.add(request_id) + # Mark this specific message UUID as one that should show token usage + show_tokens_for_message.add(message_uuid) + + usage = assistant_message.usage + sessions[session_id]["total_input_tokens"] += 
usage.input_tokens + sessions[session_id]["total_output_tokens"] += usage.output_tokens + if usage.cache_creation_input_tokens: + sessions[session_id]["total_cache_creation_tokens"] += ( + usage.cache_creation_input_tokens + ) + if usage.cache_read_input_tokens: + sessions[session_id]["total_cache_read_tokens"] += ( + usage.cache_read_input_tokens + ) + + return sessions, session_order, show_tokens_for_message -def _process_messages_loop( +def _render_messages( messages: List[TranscriptEntry], -) -> tuple[ - List[TemplateMessage], - Dict[str, Dict[str, Any]], # sessions - List[str], # session_order -]: - """Process messages through the main rendering loop. + sessions: Dict[str, Dict[str, Any]], + show_tokens_for_message: set[str], +) -> List[TemplateMessage]: + """Pass 2: Render pre-filtered messages to TemplateMessage objects. - This function handles the core message processing logic: - - Processes each message into template-friendly format - - Tracks sessions and token usage - - Handles message deduplication and hierarchy + This pass creates the actual TemplateMessage objects for rendering: + - Creates session headers when entering new sessions + - Processes text content into HTML + - Handles tool use, tool result, thinking, and image content - Collects timing statistics - Note: Tool use context must be built before calling this function via - _define_tool_use_context() + Note: Messages are pre-filtered by _collect_session_info, so no additional + filtering is needed here except for system message processing. 
Args: - messages: List of transcript entries to process + messages: Pre-filtered list of transcript entries from _collect_session_info + sessions: Session metadata from _collect_session_info + show_tokens_for_message: Set of message UUIDs that should display tokens Returns: - Tuple containing: - - template_messages: Processed messages ready for template rendering - - sessions: Session metadata dict mapping session_id to info - - session_order: List of session IDs in chronological order + List of TemplateMessage objects ready for template rendering """ - # Group messages by session and collect session info for navigation - sessions: Dict[str, Dict[str, Any]] = {} - session_order: List[str] = [] + # Track which sessions have had headers added seen_sessions: set[str] = set() - # Track requestIds to avoid double-counting token usage - seen_request_ids: set[str] = set() - # Track which messages should show token usage (first occurrence of each requestId) - show_tokens_for_message: set[str] = set() - # Build mapping of tool_use_id to ToolUseContent for specialized tool result rendering - # This will be populated inline as we encounter tool_use items during message processing tool_use_context: Dict[str, ToolUseContent] = {} # Process messages into template-friendly format @@ -3464,169 +1921,54 @@ def _process_messages_loop( # Update current message UUID for timing tracking set_timing_var("_current_msg_uuid", msg_uuid) - # NOTE: Sidechain user messages are handled below after content extraction - # to distinguish prompts (skip) from tool results (render) - - # Skip summary messages - they should already be attached to their sessions - if isinstance(message, SummaryTranscriptEntry): - continue - - # Skip most queue operations - only render 'remove' as steering user messages - if isinstance(message, QueueOperationTranscriptEntry): - if message.operation != "remove": - continue - # 'remove' operations fall through to be rendered as user messages - - # Handle system messages 
separately + # Handle system messages separately (already filtered in pass 1) if isinstance(message, SystemTranscriptEntry): - session_id = getattr(message, "sessionId", "unknown") - timestamp = getattr(message, "timestamp", "") - formatted_timestamp = format_timestamp(timestamp) if timestamp else "" - - # Handle hook summaries (subtype="stop_hook_summary") - if message.subtype == "stop_hook_summary": - # Skip silent hook successes (no output, no errors) - if not message.hasOutput and not message.hookErrors: - continue - # Render hook summary with collapsible details - content_html = _render_hook_summary(message) - level_css = "system system-hook" - level = "hook" - elif not message.content: - # Skip system messages without content (shouldn't happen normally) - continue - else: - # Extract command name if present - command_name_match = re.search( - r"(.*?)", message.content, re.DOTALL - ) - # Also check for command output (child of user command) - command_output_match = re.search( - r"(.*?)", - message.content, - re.DOTALL, - ) - - # Create level-specific styling and icons - level = getattr(message, "level", "info") - level_icon = {"warning": "⚠️", "error": "❌", "info": "ℹ️"}.get( - level, "ℹ️" - ) - - # Determine CSS class: - # - Command name (user-initiated): "system" only - # - Command output (assistant response): "system system-{level}" - # - Other system messages: "system system-{level}" - if command_name_match: - # User-initiated command - level_css = "system" - else: - # Command output or other system message - level_css = f"system system-{level}" - - # Process content: extract command name or command output, or use full content - if command_name_match: - # Show just the command name - command_name = command_name_match.group(1).strip() - html_content = f"{html.escape(command_name)}" - content_html = f"{level_icon} {html_content}" - elif command_output_match: - # Extract and process command output - output = command_output_match.group(1).strip() - 
html_content = _convert_ansi_to_html(output) - content_html = f"{level_icon} {html_content}" - else: - # Process ANSI codes in system messages (they may contain command output) - html_content = _convert_ansi_to_html(message.content) - content_html = f"{level_icon} {html_content}" - - # Store parent UUID for hierarchy rebuild (handled by _build_message_hierarchy) - parent_uuid = getattr(message, "parentUuid", None) - - system_template_message = TemplateMessage( - message_type="system", - content_html=content_html, - formatted_timestamp=formatted_timestamp, - css_class=level_css, - raw_timestamp=timestamp, - session_id=session_id, - message_title=f"System {level.title()}", - message_id=None, # Will be assigned by _build_message_hierarchy - ancestry=[], # Will be assigned by _build_message_hierarchy - uuid=message.uuid, - parent_uuid=parent_uuid, - ) - template_messages.append(system_template_message) + system_template_message = _process_system_message(message) + if system_template_message: + template_messages.append(system_template_message) continue # Handle queue-operation 'remove' messages as user messages if isinstance(message, QueueOperationTranscriptEntry): - # Queue operations have content directly, not in message.message message_content = message.content if message.content else [] - # Treat as user message type - message_type = "queue-operation" + message_type = MessageType.QUEUE_OPERATION else: - # Extract message content first to check for duplicates - # Must be UserTranscriptEntry or AssistantTranscriptEntry message_content = message.message.content # type: ignore - text_content = extract_text_content(message_content) + text_content = extract_text_content(message_content) # type: ignore[arg-type] # Separate tool/thinking/image content from text content - # Images in user messages stay inline, images in assistant messages are separate tool_items: List[ContentItem] = [] text_only_content: List[ContentItem] = [] if isinstance(message_content, list): 
text_only_items: List[ContentItem] = [] - for item in message_content: - # Check for both custom types and Anthropic types - item_type = getattr(item, "type", None) + for item in message_content: # type: ignore[union-attr] + item_type = getattr(item, "type", None) # type: ignore[arg-type] is_image = isinstance(item, ImageContent) or item_type == "image" is_tool_item = isinstance( item, (ToolUseContent, ToolResultContent, ThinkingContent), ) or item_type in ("tool_use", "tool_result", "thinking") - # Keep images inline for user messages and queue operations (steering), - # extract for assistant messages if is_image and ( - message_type == "user" + message_type == MessageType.USER or isinstance(message, QueueOperationTranscriptEntry) ): - text_only_items.append(item) + text_only_items.append(item) # type: ignore[arg-type] elif is_tool_item or is_image: - tool_items.append(item) + tool_items.append(item) # type: ignore[arg-type] else: - text_only_items.append(item) + text_only_items.append(item) # type: ignore[arg-type] text_only_content = text_only_items else: - # Single string content - message_content = message_content.strip() + message_content = message_content.strip() # type: ignore[union-attr] if message_content: - text_only_content = [TextContent(type="text", text=message_content)] - - # Skip if no meaningful content - if not text_content.strip() and not tool_items: - continue - - # Skip messages that should be filtered out - if should_skip_message(text_content): - continue + text_only_content = [TextContent(type="text", text=message_content)] # type: ignore[arg-type] - # Skip sidechain user messages that are just prompts (no tool results) - # Sidechain prompts duplicate the Task tool input and are redundant, - # but tool results from sidechain agents should be rendered - if message_type == "user" and getattr(message, "isSidechain", False): - has_tool_results = any( - getattr(item, "type", None) == "tool_result" - or isinstance(item, ToolResultContent) - for 
item in tool_items - ) - if not has_tool_results: - continue - # For sidechain user messages with tool results, clear text content - # to avoid rendering the redundant prompt text + # For sidechain user messages with tool results, clear text content + # (prompts duplicate Task result; filtering already done in pass 1) + if message_type == MessageType.USER and getattr(message, "isSidechain", False): text_only_content = [] text_content = "" @@ -3636,130 +1978,50 @@ def _process_messages_loop( is_bash_cmd = is_bash_input(text_content) is_bash_result = is_bash_output(text_content) - # Check if we're in a new session + # Get session info session_id = getattr(message, "sessionId", "unknown") session_summary = getattr(message, "_session_summary", None) - # Track sessions for navigation and add session header if new - if session_id not in sessions: - # Get the session summary for this session (may be None) - current_session_summary = getattr(message, "_session_summary", None) - - # Get first user message content for preview - first_user_message = "" - if ( - message_type == "user" - and not isinstance(message, QueueOperationTranscriptEntry) - and hasattr(message, "message") - and should_use_as_session_starter(text_content) - ): - content = extract_text_content(message.message.content) - first_user_message = create_session_preview(content) - - sessions[session_id] = { - "id": session_id, - "summary": current_session_summary, - "first_timestamp": getattr(message, "timestamp", ""), - "last_timestamp": getattr(message, "timestamp", ""), - "message_count": 0, - "first_user_message": first_user_message, - "total_input_tokens": 0, - "total_output_tokens": 0, - "total_cache_creation_tokens": 0, - "total_cache_read_tokens": 0, - } - session_order.append(session_id) - - # Add session header message - if session_id not in seen_sessions: - seen_sessions.add(session_id) - # Create a meaningful session title - session_title = ( - f"{current_session_summary} • {session_id[:8]}" - if 
current_session_summary - else session_id[:8] - ) + # Add session header if this is a new session + if session_id not in seen_sessions: + seen_sessions.add(session_id) + current_session_summary = sessions.get(session_id, {}).get("summary") + session_title = ( + f"{current_session_summary} • {session_id[:8]}" + if current_session_summary + else session_id[:8] + ) - session_header = TemplateMessage( - message_type="session_header", - content_html=session_title, - formatted_timestamp="", - css_class="session-header", - raw_timestamp=None, - session_summary=current_session_summary, + session_header = TemplateMessage( + message_type="session_header", + formatted_timestamp="", + raw_timestamp=None, + session_summary=current_session_summary, + session_id=session_id, + is_session_header=True, + message_id=None, + ancestry=[], + modifiers=MessageModifiers(), + content=SessionHeaderContent( + title=session_title, session_id=session_id, - is_session_header=True, - message_id=None, # Will be assigned by _build_message_hierarchy - ancestry=[], # Session headers are top-level - ) - template_messages.append(session_header) - - # Update first user message if this is a user message and we don't have one yet - elif message_type == "user" and not sessions[session_id]["first_user_message"]: - if not isinstance(message, QueueOperationTranscriptEntry) and hasattr( - message, "message" - ): - first_user_content = extract_text_content(message.message.content) - if should_use_as_session_starter(first_user_content): - sessions[session_id]["first_user_message"] = create_session_preview( - first_user_content - ) - - sessions[session_id]["message_count"] += 1 - - # Update last timestamp for this session - current_timestamp = getattr(message, "timestamp", "") - if current_timestamp: - sessions[session_id]["last_timestamp"] = current_timestamp - - # Extract and accumulate token usage for assistant messages - # Only count tokens for the first message with each requestId to avoid duplicates - if 
message_type == "assistant" and hasattr(message, "message"): - assistant_message = getattr(message, "message") - request_id = getattr(message, "requestId", None) - message_uuid = getattr(message, "uuid", "") - - if ( - hasattr(assistant_message, "usage") - and assistant_message.usage - and request_id - and request_id not in seen_request_ids - ): - # Mark this requestId as seen to avoid double-counting - seen_request_ids.add(request_id) - # Mark this specific message UUID as one that should show token usage - show_tokens_for_message.add(message_uuid) - - usage = assistant_message.usage - sessions[session_id]["total_input_tokens"] += usage.input_tokens - sessions[session_id]["total_output_tokens"] += usage.output_tokens - if usage.cache_creation_input_tokens: - sessions[session_id]["total_cache_creation_tokens"] += ( - usage.cache_creation_input_tokens - ) - if usage.cache_read_input_tokens: - sessions[session_id]["total_cache_read_tokens"] += ( - usage.cache_read_input_tokens - ) + summary=current_session_summary, + ), + ) + template_messages.append(session_header) # Get timestamp (only for non-summary messages) - timestamp = ( - getattr(message, "timestamp", "") if hasattr(message, "timestamp") else "" - ) + timestamp = getattr(message, "timestamp", "") formatted_timestamp = format_timestamp(timestamp) if timestamp else "" # Extract token usage for assistant messages # Only show token usage for the first message with each requestId to avoid duplicates token_usage_str: Optional[str] = None - if message_type == "assistant" and hasattr(message, "message"): - assistant_message = getattr(message, "message") - message_uuid = getattr(message, "uuid", "") + if is_assistant_entry(message): + assistant_message = message.message + message_uuid = message.uuid - if ( - hasattr(assistant_message, "usage") - and assistant_message.usage - and message_uuid in show_tokens_for_message - ): + if assistant_message.usage and message_uuid in show_tokens_for_message: # Only show token 
usage for messages marked as first occurrence of requestId usage = assistant_message.usage token_parts = [ @@ -3774,22 +2036,24 @@ def _process_messages_loop( token_parts.append(f"Cache Read: {usage.cache_read_input_tokens}") token_usage_str = " | ".join(token_parts) - # Determine CSS class and content based on message type and duplicate status + # Determine modifiers and content based on message type + content_model: Optional[MessageContent] = None + if is_command: - css_class, content_html, message_type, message_title = ( + modifiers, content_model, message_type, message_title = ( _process_command_message(text_content) ) elif is_local_output: - css_class, content_html, message_type, message_title = ( + modifiers, content_model, message_type, message_title = ( _process_local_command_output(text_content) ) elif is_bash_cmd: - css_class, content_html, message_type, message_title = _process_bash_input( + modifiers, content_model, message_type, message_title = _process_bash_input( text_content ) elif is_bash_result: - css_class, content_html, message_type, message_title = _process_bash_output( - text_content + modifiers, content_model, message_type, message_title = ( + _process_bash_output(text_content) ) else: # For queue-operation messages, treat them as user messages @@ -3798,7 +2062,7 @@ def _process_messages_loop( else: effective_type = message_type - css_class, content_html, message_type_result, message_title = ( + modifiers, content_model, message_type_result, message_title = ( _process_regular_message( text_only_content, effective_type, @@ -3808,12 +2072,12 @@ def _process_messages_loop( ) message_type = message_type_result # Update message_type with result - # Add 'steering' CSS class for queue-operation 'remove' messages + # Add 'steering' modifier for queue-operation 'remove' messages if ( isinstance(message, QueueOperationTranscriptEntry) and message.operation == "remove" ): - css_class = f"{css_class} steering" + modifiers = replace(modifiers, 
is_steering=True) message_title = "User (steering)" # Only create main message if it has text content @@ -3822,9 +2086,7 @@ def _process_messages_loop( if text_only_content: template_message = TemplateMessage( message_type=message_type, - content_html=content_html, formatted_timestamp=formatted_timestamp, - css_class=css_class, raw_timestamp=timestamp, session_summary=session_summary, session_id=session_id, @@ -3835,6 +2097,8 @@ def _process_messages_loop( agent_id=getattr(message, "agentId", None), uuid=getattr(message, "uuid", None), parent_uuid=getattr(message, "parentUuid", None), + modifiers=modifiers, + content=content_model, ) # Store raw text content for potential future use (e.g., deduplication, @@ -3852,216 +2116,67 @@ def _process_messages_loop( # Handle both custom types and Anthropic types item_type = getattr(tool_item, "type", None) - item_tool_use_id: Optional[str] = None - tool_title_hint: Optional[str] = None - pending_dedup: Optional[str] = ( - None # Holds task result content for deduplication - ) + # Dispatch to appropriate handler based on item type + tool_result: Optional[ToolItemResult] = None if isinstance(tool_item, ToolUseContent) or item_type == "tool_use": - # Convert Anthropic type to our format if necessary - if not isinstance(tool_item, ToolUseContent): - tool_use = ToolUseContent( - type="tool_use", - id=getattr(tool_item, "id", ""), - name=getattr(tool_item, "name", ""), - input=getattr(tool_item, "input", {}), - ) - else: - tool_use = tool_item - - tool_content_html = format_tool_use_content(tool_use) - escaped_name = escape_html(tool_use.name) - escaped_id = escape_html(tool_use.id) - item_tool_use_id = tool_use.id - tool_title_hint = f"ID: {escaped_id}" - - # Populate tool_use_context for later use when processing tool results - tool_use_context[item_tool_use_id] = tool_use - - # Get summary for header (description or filepath) - summary = get_tool_summary(tool_use) - - # Set message_type (for CSS/logic) and message_title (for 
display) - tool_message_type = "tool_use" - if tool_use.name == "TodoWrite": - tool_message_title = "📝 Todo List" - elif tool_use.name == "Task": - # Special handling for Task tool: show subagent_type and description - subagent_type = tool_use.input.get("subagent_type", "") - description = tool_use.input.get("description", "") - escaped_subagent = ( - escape_html(subagent_type) if subagent_type else "" - ) - - if description and subagent_type: - escaped_desc = escape_html(description) - tool_message_title = f"🔧 {escaped_name} {escaped_desc} ({escaped_subagent})" - elif description: - escaped_desc = escape_html(description) - tool_message_title = f"🔧 {escaped_name} {escaped_desc}" - elif subagent_type: - tool_message_title = f"🔧 {escaped_name} ({escaped_subagent})" - else: - tool_message_title = f"🔧 {escaped_name}" - elif tool_use.name in ("Edit", "Write"): - # Use 📝 icon for Edit/Write - if summary: - escaped_summary = escape_html(summary) - tool_message_title = f"📝 {escaped_name} {escaped_summary}" - else: - tool_message_title = f"📝 {escaped_name}" - elif tool_use.name == "Read": - # Use 📄 icon for Read - if summary: - escaped_summary = escape_html(summary) - tool_message_title = f"📄 {escaped_name} {escaped_summary}" - else: - tool_message_title = f"📄 {escaped_name}" - elif summary: - # For other tools (like Bash), append summary - escaped_summary = escape_html(summary) - tool_message_title = f"{escaped_name} {escaped_summary}" - else: - tool_message_title = escaped_name - tool_css_class = "tool_use" + tool_result = _process_tool_use_item(tool_item, tool_use_context) elif isinstance(tool_item, ToolResultContent) or item_type == "tool_result": - # Convert Anthropic type to our format if necessary - if not isinstance(tool_item, ToolResultContent): - tool_result_converted = ToolResultContent( - type="tool_result", - tool_use_id=getattr(tool_item, "tool_use_id", ""), - content=getattr(tool_item, "content", ""), - is_error=getattr(tool_item, "is_error", False), - ) - 
else: - tool_result_converted = tool_item - - # Get file_path and tool_name from tool_use context for specialized rendering - result_file_path: Optional[str] = None - result_tool_name: Optional[str] = None - if tool_result_converted.tool_use_id in tool_use_context: - tool_use_from_ctx = tool_use_context[ - tool_result_converted.tool_use_id - ] - result_tool_name = tool_use_from_ctx.name - if ( - result_tool_name - in ( - "Read", - "Edit", - "Write", - ) - and "file_path" in tool_use_from_ctx.input - ): - result_file_path = tool_use_from_ctx.input["file_path"] - - tool_content_html = format_tool_result_content( - tool_result_converted, - result_file_path, - result_tool_name, - ) - - # Retroactive deduplication: if Task result matches a sidechain assistant, replace that assistant with a forward link - if result_tool_name == "Task": - # Extract text content from tool result - # Note: tool_result.content can be str or List[Dict[str, Any]] (not List[ContentItem]) - if isinstance(tool_result_converted.content, str): - task_result_content = tool_result_converted.content.strip() - else: - # Handle list of dicts (tool result format) - content_parts: list[str] = [] - for item in tool_result_converted.content: - # tool_result_converted.content is List[Dict[str, Any]] - text_val = item.get("text", "") - if isinstance(text_val, str): - content_parts.append(text_val) - task_result_content = "\n".join(content_parts).strip() - - # Store for deduplication - we'll check/update after we have the message_id - pending_dedup = task_result_content if task_result_content else None - else: - pending_dedup = None - - escaped_id = escape_html(tool_result_converted.tool_use_id) - item_tool_use_id = tool_result_converted.tool_use_id - tool_title_hint = f"ID: {escaped_id}" - # Simplified: no "Tool Result" heading, icon is set by template - tool_message_type = "tool_result" - tool_message_title = "Error" if tool_result_converted.is_error else "" - tool_css_class = ( - "tool_result error" - if 
tool_result_converted.is_error - else "tool_result" - ) + tool_result = _process_tool_result_item(tool_item, tool_use_context) elif isinstance(tool_item, ThinkingContent) or item_type == "thinking": - # Convert Anthropic type to our format if necessary - if not isinstance(tool_item, ThinkingContent): - thinking_converted = ThinkingContent( - type="thinking", - thinking=getattr(tool_item, "thinking", str(tool_item)), - ) - else: - thinking_converted = tool_item - - tool_content_html = format_thinking_content(thinking_converted) - tool_message_type = "thinking" - tool_message_title = "Thinking" - tool_css_class = "thinking" + tool_result = _process_thinking_item(tool_item) elif isinstance(tool_item, ImageContent) or item_type == "image": - # Convert Anthropic type to our format if necessary - if not isinstance(tool_item, ImageContent): - # For now, skip Anthropic image types - we'll handle when we encounter them - continue - else: - tool_content_html = format_image_content(tool_item) - tool_message_type = "image" - tool_message_title = "Image" - tool_css_class = "image" + tool_result = _process_image_item(tool_item) else: # Handle unknown content types - tool_content_html = ( - f"

Unknown content type: {escape_html(str(type(tool_item)))}

" + tool_result = ToolItemResult( + message_type="unknown", + content=UnknownContent(type_name=str(type(tool_item))), + message_title="Unknown Content", ) - tool_message_type = "unknown" - tool_message_title = "Unknown Content" - tool_css_class = "unknown" + + # Skip if handler returned None (e.g., unsupported image types) + if tool_result is None: + continue # Preserve sidechain context for tool/thinking/image content within sidechain messages tool_is_sidechain = getattr(message, "isSidechain", False) - if tool_is_sidechain: - tool_css_class += " sidechain" + + # Build modifiers directly from tool_result properties + tool_modifiers = MessageModifiers( + is_sidechain=tool_is_sidechain, + is_error=tool_result.is_error, + ) # Generate unique UUID for this tool message # Use tool_use_id if available, otherwise fall back to message UUID + index tool_uuid = ( - item_tool_use_id - if item_tool_use_id + tool_result.tool_use_id + if tool_result.tool_use_id else f"{msg_uuid}-tool-{len(template_messages)}" ) tool_template_message = TemplateMessage( - message_type=tool_message_type, - content_html=tool_content_html, + message_type=tool_result.message_type, formatted_timestamp=tool_formatted_timestamp, - css_class=tool_css_class, raw_timestamp=tool_timestamp, session_summary=session_summary, session_id=session_id, - tool_use_id=item_tool_use_id, - title_hint=tool_title_hint, - message_title=tool_message_title, + tool_use_id=tool_result.tool_use_id, + title_hint=tool_result.title_hint, + message_title=tool_result.message_title, message_id=None, # Will be assigned by _build_message_hierarchy ancestry=[], # Will be assigned by _build_message_hierarchy agent_id=getattr(message, "agentId", None), uuid=tool_uuid, + modifiers=tool_modifiers, + content=tool_result.content, # Structured content model ) # Store raw text for Task result deduplication # (handled later in _reorder_sidechain_template_messages) - if pending_dedup is not None: - tool_template_message.raw_text_content = 
pending_dedup - pending_dedup = None + if tool_result.pending_dedup is not None: + tool_template_message.raw_text_content = tool_result.pending_dedup template_messages.append(tool_template_message) @@ -4077,21 +2192,55 @@ def _process_messages_loop( [("Markdown", markdown_timings), ("Pygments", pygments_timings)], ) - return ( - template_messages, - sessions, - session_order, + return template_messages + + +# -- Project Index Generation ------------------------------------------------- + + +def prepare_projects_index( + project_summaries: List[Dict[str, Any]], +) -> tuple[List["TemplateProject"], "TemplateSummary"]: + """Prepare project data for rendering in any format. + + Args: + project_summaries: List of project summary dictionaries. + + Returns: + A tuple of (template_projects, template_summary) for use by renderers. + """ + # Sort projects by last modified (most recent first) + sorted_projects = sorted( + project_summaries, key=lambda p: p["last_modified"], reverse=True ) + # Convert to template-friendly format + template_projects = [TemplateProject(project) for project in sorted_projects] + template_summary = TemplateSummary(project_summaries) + + return template_projects, template_summary + -def generate_projects_index_html( +def title_for_projects_index( project_summaries: List[Dict[str, Any]], from_date: Optional[str] = None, to_date: Optional[str] = None, ) -> str: - """Generate an index HTML page listing all projects using Jinja2 templates.""" - # Try to get a better title from working directories in projects + """Generate a title for the projects index page. + + Determines a meaningful title based on working directories from projects, + with optional date range suffix. + + Args: + project_summaries: List of project summary dictionaries. + from_date: Optional start date filter string. + to_date: Optional end date filter string. + + Returns: + A title string for the projects index page. 
+ """ title = "Claude Code Projects" + if project_summaries: # Collect all working directories from all projects all_working_dirs: set[str] = set() @@ -4134,6 +2283,8 @@ def generate_projects_index_html( except Exception: # Fall back to default title if path analysis fails pass + + # Add date range suffix if provided if from_date or to_date: date_range_parts: List[str] = [] if from_date: @@ -4143,23 +2294,77 @@ def generate_projects_index_html( date_range_str = " ".join(date_range_parts) title += f" ({date_range_str})" - # Sort projects by last modified (most recent first) - sorted_projects = sorted( - project_summaries, key=lambda p: p["last_modified"], reverse=True - ) + return title - # Convert to template-friendly format - template_projects = [TemplateProject(project) for project in sorted_projects] - template_summary = TemplateSummary(project_summaries) - # Render template - env = _get_template_environment() - template = env.get_template("index.html") - return str( - template.render( - title=title, - projects=template_projects, - summary=template_summary, - library_version=get_library_version(), - ) - ) +# -- Renderer Classes --------------------------------------------------------- + + +class Renderer: + """Base class for transcript renderers. + + Subclasses implement format-specific rendering (HTML, Markdown, etc.). + """ + + def generate( + self, + messages: List[TranscriptEntry], + title: Optional[str] = None, + combined_transcript_link: Optional[str] = None, + ) -> Optional[str]: + """Generate output from transcript messages. + + Returns None by default; subclasses override to return formatted output. + """ + return None + + def generate_session( + self, + messages: List[TranscriptEntry], + session_id: str, + title: Optional[str] = None, + cache_manager: Optional["CacheManager"] = None, + ) -> Optional[str]: + """Generate output for a single session. + + Returns None by default; subclasses override to return formatted output. 
+ """ + return None + + def generate_projects_index( + self, + project_summaries: List[Dict[str, Any]], + from_date: Optional[str] = None, + to_date: Optional[str] = None, + ) -> Optional[str]: + """Generate a projects index page. + + Returns None by default; subclasses override to return formatted output. + """ + return None + + def is_outdated(self, file_path: Path) -> Optional[bool]: + """Check if a rendered file is outdated. + + Returns None by default; subclasses override to return True/False. + """ + return None + + +def get_renderer(format: str) -> Renderer: + """Get a renderer instance for the specified format. + + Args: + format: The output format (currently only "html" is supported). + + Returns: + A Renderer instance for the specified format. + + Raises: + ValueError: If the format is not supported. + """ + if format == "html": + from .html.renderer import HtmlRenderer + + return HtmlRenderer() + raise ValueError(f"Unsupported format: {format}") diff --git a/claude_code_log/tui.py b/claude_code_log/tui.py index d7f080f1..861a9db0 100644 --- a/claude_code_log/tui.py +++ b/claude_code_log/tui.py @@ -21,7 +21,7 @@ from .cache import CacheManager, SessionCacheData, get_library_version from .converter import ensure_fresh_cache -from .renderer import get_project_display_name +from .utils import get_project_display_name class ProjectSelector(App[Path]): diff --git a/claude_code_log/utils.py b/claude_code_log/utils.py index b146fc3c..82f3a733 100644 --- a/claude_code_log/utils.py +++ b/claude_code_log/utils.py @@ -2,41 +2,92 @@ """Utility functions for message filtering and processing.""" import re -from typing import Dict, List, Union +from datetime import datetime +from pathlib import Path +from typing import Dict, List, Optional, Union from claude_code_log.cache import SessionCacheData from .models import ContentItem, TextContent, TranscriptEntry, UserTranscriptEntry +from .parser import ( + IDE_DIAGNOSTICS_PATTERN, + IDE_OPENED_FILE_PATTERN, + 
IDE_SELECTION_PATTERN, + is_command_message, + is_local_command_output, + is_system_message, +) -def is_system_message(text_content: str) -> bool: - """Check if a message is a system message that should be filtered out.""" - system_message_patterns = [ - "Caveat: The messages below were generated by the user while running local commands. DO NOT respond to these messages or otherwise consider them in your response unless the user explicitly asks you to.", - "[Request interrupted by user for tool use]", - "", - ] - - return any(text_content.startswith(pattern) for pattern in system_message_patterns) - - -def is_command_message(text_content: str) -> bool: - """Check if a message contains command information that should be displayed.""" - return "" in text_content and "" in text_content +def format_timestamp(timestamp_str: str | None) -> str: + """Format ISO timestamp for display, converting to UTC.""" + if timestamp_str is None: + return "" + try: + dt = datetime.fromisoformat(timestamp_str.replace("Z", "+00:00")) + # Convert to UTC if timezone-aware + if dt.tzinfo is not None: + utc_timetuple = dt.utctimetuple() + dt = datetime( + utc_timetuple.tm_year, + utc_timetuple.tm_mon, + utc_timetuple.tm_mday, + utc_timetuple.tm_hour, + utc_timetuple.tm_min, + utc_timetuple.tm_sec, + ) + return dt.strftime("%Y-%m-%d %H:%M:%S") + except (ValueError, AttributeError): + return timestamp_str + + +def format_timestamp_range(first_timestamp: str, last_timestamp: str) -> str: + """Format timestamp range for display. + Args: + first_timestamp: ISO timestamp for range start + last_timestamp: ISO timestamp for range end -def is_local_command_output(text_content: str) -> bool: - """Check if a message contains local command output.""" - return "" in text_content + Returns: + Formatted string like "2025-01-01 10:00:00 - 2025-01-01 11:00:00" + or single timestamp if both are equal, or empty string if neither provided. 
+ """ + if first_timestamp and last_timestamp: + if first_timestamp == last_timestamp: + return format_timestamp(first_timestamp) + else: + return f"{format_timestamp(first_timestamp)} - {format_timestamp(last_timestamp)}" + elif first_timestamp: + return format_timestamp(first_timestamp) + else: + return "" -def is_bash_input(text_content: str) -> bool: - """Check if a message contains bash input command.""" - return "" in text_content and "" in text_content +def get_project_display_name( + project_dir_name: str, working_directories: Optional[List[str]] = None +) -> str: + """Get the display name for a project based on working directories. + Args: + project_dir_name: The Claude project directory name (e.g., "-Users-dain-workspace-claude-code-log") + working_directories: List of working directories from cache data -def is_bash_output(text_content: str) -> bool: - """Check if a message contains bash command output.""" - return "" in text_content or "" in text_content + Returns: + The project display name (e.g., "claude-code-log") + """ + if working_directories: + # Convert to Path objects with their original indices for tracking recency + paths_with_indices = [(Path(wd), i) for i, wd in enumerate(working_directories)] + + # Sort by: 1) path depth (fewer parts = less nested), 2) recency (lower index = more recent) + # This gives us the least nested path, with ties broken by recency + best_path, _ = min(paths_with_indices, key=lambda p: (len(p[0].parts), p[1])) + return best_path.name + else: + # Fall back to converting project directory name + display_name = project_dir_name + if display_name.startswith("-"): + display_name = display_name[1:].replace("-", "/") + return display_name def should_skip_message(text_content: str) -> bool: @@ -163,17 +214,6 @@ def extract_working_directories( return [path for path, _ in sorted_dirs] -# IDE tag patterns for compact preview rendering (same as renderer.py) -IDE_OPENED_FILE_PATTERN = re.compile( - r"(.*?)", re.DOTALL -) 
-IDE_SELECTION_PATTERN = re.compile(r"(.*?)", re.DOTALL) -IDE_DIAGNOSTICS_PATTERN = re.compile( - r"\s*(.*?)\s*", - re.DOTALL, -) - - def _compact_ide_tags_for_preview(text_content: str) -> str: """Replace verbose IDE/system tags with compact emoji indicators for previews. @@ -212,57 +252,52 @@ def _extract_file_path(content: str) -> str | None: # Process only LEADING IDE tags - stop when we hit non-IDE content # This prevents replacing tags inside quoted strings/JSONL content + # Uses shared patterns from parser.py for consistency compact_parts: list[str] = [] remaining = text_content + # Compiled pattern for bash-input (not in parser.py as it's preview-specific) + bash_input_pattern = re.compile(r"(.*?)", re.DOTALL) + while remaining: - # Try to match each IDE tag type at the start of remaining text - # Check for at start - match = re.match( - r"^\s*(.*?)", remaining, re.DOTALL - ) + # Strip leading whitespace for matching + stripped = remaining.lstrip() + + # Try to match each IDE tag type at the start of stripped text + # Check for at start (using shared pattern) + match = IDE_OPENED_FILE_PATTERN.match(stripped) if match: content = match.group(1).strip() filepath = _extract_file_path(content) compact_parts.append(f"📎 {filepath}" if filepath else "📎 file") - remaining = remaining[match.end() :] + remaining = stripped[match.end() :] continue - # Check for at start - match = re.match( - r"^\s*(.*?)", remaining, re.DOTALL - ) + # Check for at start (using shared pattern) + match = IDE_SELECTION_PATTERN.match(stripped) if match: content = match.group(1).strip() filepath = _extract_file_path(content) compact_parts.append(f"✂️ {filepath}" if filepath else "✂️ selection") - remaining = remaining[match.end() :] + remaining = stripped[match.end() :] continue - # Check for ... at start - match = re.match( - r"^\s*\s*.*?\s*", - remaining, - re.DOTALL, - ) + # Check for ... 
(using shared pattern) + match = IDE_DIAGNOSTICS_PATTERN.match(stripped) if match: compact_parts.append("🩺 diagnostics") - remaining = remaining[match.end() :] + remaining = stripped[match.end() :] continue # Check for command at start - match = re.match( - r"^\s*(.*?)", - remaining, - re.DOTALL, - ) + match = bash_input_pattern.match(stripped) if match: command = match.group(1).strip() # Truncate very long commands if len(command) > 50: command = command[:47] + "..." compact_parts.append(f"💻 {command}") - remaining = remaining[match.end() :] + remaining = stripped[match.end() :] continue # No more tags at start - stop processing @@ -279,40 +314,6 @@ def _extract_file_path(content: str) -> str | None: return text_content -def is_warmup_only_session(messages: List[TranscriptEntry], session_id: str) -> bool: - """Check if a session contains only warmup user messages. - - A warmup session is one where ALL user messages are literally just "Warmup". - Sessions with no user messages return False (not considered warmup). - - Args: - messages: List of all transcript entries - session_id: The session ID to check - - Returns: - True if ALL user messages in the session are "Warmup", False otherwise - """ - from .parser import extract_text_content - - user_messages_in_session: List[str] = [] - - for message in messages: - if ( - isinstance(message, UserTranscriptEntry) - and getattr(message, "sessionId", "") == session_id - and hasattr(message, "message") - ): - text_content = extract_text_content(message.message.content).strip() - user_messages_in_session.append(text_content) - - # No user messages = not a warmup session - if not user_messages_in_session: - return False - - # All user messages must be exactly "Warmup" - return all(msg == "Warmup" for msg in user_messages_in_session) - - def get_warmup_session_ids(messages: List[TranscriptEntry]) -> set[str]: """Get set of session IDs that are warmup-only sessions. 
diff --git a/dev-docs/FOLD_STATE_DIAGRAM.md b/dev-docs/FOLD_STATE_DIAGRAM.md index fd3df528..a8af2c2d 100644 --- a/dev-docs/FOLD_STATE_DIAGRAM.md +++ b/dev-docs/FOLD_STATE_DIAGRAM.md @@ -7,8 +7,9 @@ The virtual parent/child structure of a conversation determines how folding work ``` Session (level 0) └── User message (level 1) - ├── System: Info (level 2) + ├── System: command/error (level 2) └── Assistant response (level 2) + ├── System: info/warning (level 3) ├── Tool: Read ─────────────┐ (level 3) │ └── Tool result ────────┘ paired, fold together └── Tool: Task ─────────────┐ (level 3) @@ -128,3 +129,150 @@ Fold buttons display context-aware tooltips showing what will happen on click (n - **Performance**: Descendant counting is O(n) using cached hierarchy lookups - **Paired messages**: Pairs are counted as single units in child/descendant counts - **Labels**: Fold bars show type-aware labels like "3 assistant, 4 tools" or "2 tool pairs" + +--- + +## Hierarchy System Architecture + +The hierarchy system in `renderer.py` determines message nesting for the fold/unfold UI. +It consists of three main functions: + +### `_get_message_hierarchy_level(css_class, is_sidechain) -> int` + +Determines the hierarchy level for a message based on its CSS class and sidechain status. + +**Level Definitions:** + +| Level | Message Types | Description | +|-------|---------------|-------------| +| 0 | `session-header` | Session dividers | +| 1 | `user` | User messages (top-level conversation) | +| 2 | `assistant`, `thinking`, `system` (commands/errors) | Direct responses to user | +| 3 | `tool_use`, `tool_result`, `system-info`, `system-warning` | Nested under assistant | +| 4 | `assistant sidechain`, `thinking sidechain` | Sub-agent responses (from Task tool) | +| 5 | `tool_use sidechain`, `tool_result sidechain` | Sub-agent tools | + +**Decision Logic:** + +``` +css_class contains? is_sidechain? 
Result +──────────────────── ────────────── ────── +"user" false Level 1 +"system-info/warning" false Level 3 +"system" false Level 2 +"assistant/thinking" true Level 4 +"tool" true Level 5 +"assistant/thinking" false Level 2 +"tool" false Level 3 +(default) - Level 1 +``` + +**Edge Cases:** +- Sidechain user messages are skipped entirely (they duplicate Task tool input) +- `system-info` and `system-warning` are at level 3 (tool-related notifications) +- `system` (commands/errors) without info/warning are at level 2 + +### `_build_message_hierarchy(messages) -> None` + +Builds `message_id` and `ancestry` for all messages using a stack-based approach. + +**Algorithm:** + +1. Maintain a stack of `(level, message_id)` tuples +2. For each message: + - Determine level via `_get_message_hierarchy_level()` + - Pop stack until finding appropriate parent (level < current) + - Build ancestry from remaining stack entries + - Push current message onto stack +3. Session headers use `session-{uuid}` format for navigation +4. Other messages use `d-{counter}` format + +**Ancestry Example:** + +``` +Session (session-abc) ancestry: [] +└── User (d-0) ancestry: ["session-abc"] + └── Assistant (d-1) ancestry: ["session-abc", "d-0"] + └── Tool use (d-2) ancestry: ["session-abc", "d-0", "d-1"] + └── Tool result (d-3) ancestry: ["session-abc", "d-0", "d-1", "d-2"] +``` + +**Important:** This function must be called after all reordering operations (pair reordering, +sidechain reordering) to ensure hierarchy reflects final display order. + +### `_mark_messages_with_children(messages) -> None` + +Calculates descendant counts for fold bar labels. 
+ +**Computed Fields:** + +| Field | Description | +|-------|-------------| +| `has_children` | True if message has any children | +| `immediate_children_count` | Count of direct children only | +| `total_descendants_count` | Count of all descendants recursively | +| `immediate_children_by_type` | Dict mapping css_class to count | +| `total_descendants_by_type` | Dict mapping css_class to count | + +**Algorithm:** + +1. Build O(1) lookup index of messages by ID +2. For each message with ancestry: + - Skip `pair_last` messages (pairs count as one unit) + - Increment immediate parent's `immediate_children_count` + - Increment all ancestors' `total_descendants_count` + - Track counts by message type for detailed labels + +**Time Complexity:** O(n) where n is message count + +### JavaScript Fold Controls Interaction + +The JavaScript in `templates/components/fold_bar.html` uses these computed values: + +1. **Ancestry classes**: Each message has `d-{n}` classes from ancestry for CSS targeting +2. **Child counts**: Displayed in fold bar buttons ("▶ 3 messages") +3. **Descendant counts**: Displayed in fold-all button ("▶▶ 125 total") +4. **Type counts**: Used for descriptive labels ("2 assistant, 4 tools") + +**Visibility Control:** + +```javascript +// Toggle immediate children visibility +document.querySelectorAll(`.d-${messageId}`).forEach(child => { + child.classList.toggle('filtered-hidden'); +}); + +// Toggle all descendants visibility +ancestry.forEach(ancestorId => { + document.querySelectorAll(`.d-${ancestorId}`).forEach(child => { + child.classList.toggle('filtered-hidden'); + }); +}); +``` + +### Sidechain (Sub-agent) Handling + +Messages from Task tool sub-agents are handled specially: + +1. **Identification**: `isSidechain: true` in JSONL → `sidechain` in css_class +2. **Level assignment**: Sidechain assistant/thinking at level 4, tools at level 5 +3. **Reordering**: Sidechain messages appear under their Task tool result +4. 
**Skipping**: Sidechain user messages are skipped (duplicate Task input) +5. **Deduplication**: Identical sidechain results are replaced with links + +### Paired Message Handling + +Paired messages (tool_use + tool_result, thinking + assistant) are handled as units: + +1. **Pairing**: `_identify_message_pairs()` links messages via `tool_use_id` +2. **Counting**: Only `pair_first` messages count toward parent's children +3. **Folding**: Both messages fold/unfold together +4. **Display**: Pair duration shown on `pair_last` message + +--- + +## References + +- [renderer.py](../claude_code_log/renderer.py) - Hierarchy functions (lines 2698-2850) +- [templates/components/fold_bar.html](../claude_code_log/templates/components/fold_bar.html) - JavaScript controls +- [css-classes.md](css-classes.md) - CSS class documentation diff --git a/dev-docs/MESSAGE_REFACTORING.md b/dev-docs/MESSAGE_REFACTORING.md new file mode 100755 index 00000000..230f5856 --- /dev/null +++ b/dev-docs/MESSAGE_REFACTORING.md @@ -0,0 +1,520 @@ +# Message Rendering Refactoring Plan + +This document tracks the ongoing refactoring effort to improve the message rendering code in `renderer.py`. + +## Current State (dev/message-tree-refactoring) + +As of December 2025, significant refactoring has been completed. The architecture now separates format-neutral message processing from HTML-specific rendering: + +| Module | Lines | Notes | +|--------|-------|-------| +| `renderer.py` | 2525 | Format-neutral: tree building, pairing, hierarchy | +| `html/renderer.py` | 297 | HtmlRenderer: tree traversal, template rendering | +| `html/tool_formatters.py` | 950 | Tool use/result HTML formatting | +| `html/user_formatters.py` | 326 | User message HTML formatting | +| `html/assistant_formatters.py` | 90 | Assistant/thinking HTML formatting | +| `html/system_formatters.py` | 113 | System message HTML formatting | +| `html/utils.py` | 352 | Shared HTML utilities (markdown, escape, etc.) 
| +| `html/ansi_colors.py` | 261 | ANSI → HTML conversion | +| `models.py` | 858 | Content models, MessageModifiers | + +**Key architectural changes:** +- **Tree-first architecture** - `generate_template_messages()` returns tree roots, HtmlRenderer flattens via pre-order traversal +- **Format-neutral Renderer base class** - Subclasses (HtmlRenderer) implement format-specific rendering +- **Content models in models.py** - SessionHeaderContent, DedupNoticeContent, IdeNotificationContent, etc. +- **Formatter separation** - HTML formatters split by message type in `html/` directory + +## Motivation + +The refactoring aims to: + +1. **Improve maintainability** - Functions are too large (some 600+ lines) +2. **Better separation of concerns** - Move specialized utilities to dedicated modules +3. **Improve type safety** - Use typed objects instead of generic dictionaries +4. **Enable testing** - Large functions are difficult to unit test +5. **Performance profiling** - Timing instrumentation to identify bottlenecks + +## Related Refactoring Branches + +### dev/message-tree-refactoring (Current Branch) + +This branch implements tree-based message rendering. See [TEMPLATE_MESSAGE_CHILDREN.md](TEMPLATE_MESSAGE_CHILDREN.md) for details. 
+ +**Completed Work:** +- ✅ Phase 1: Added `children: List[TemplateMessage]` field to TemplateMessage +- ✅ Phase 1: Added `flatten()` and `flatten_all()` methods for backward compatibility +- ✅ Phase 2: Implemented `_build_message_tree()` function +- ✅ **Phase 2.5: Tree-first architecture** (December 2025) + - `generate_template_messages()` now returns tree roots, not flat list + - `HtmlRenderer._flatten_preorder()` traverses tree, formats content, builds flat list + - Content formatting happens during pre-order traversal (single pass) + - Template unchanged - still receives flat list (Phase 3 future work) + +**Architecture:** +``` +TranscriptEntry[] → generate_template_messages() → root_messages (tree) + ↓ + HtmlRenderer._flatten_preorder() → flat_list + ↓ + template.render(messages=flat_list) +``` + +**Integration with this refactoring:** +- Tree structure enables future **recursive template rendering** (Phase 3 in TEMPLATE_MESSAGE_CHILDREN.md) +- Provides foundation for **Visitor pattern** output formats (HTML, Markdown, JSON) +- Format-neutral `Renderer` base class allows alternative renderer implementations + +### golergka's text-output-format Branch (ada7ef5) + +Adds text/markdown/chat output formats via new `content_extractor.py` module. 
+ +**Key Changes:** +- Created `content_extractor.py` with dataclasses: `ExtractedText`, `ExtractedThinking`, `ExtractedToolUse`, `ExtractedToolResult`, `ExtractedImage` +- Refactored `render_message_content()` to use extraction layer (~70 lines changed) +- Added `text_renderer.py` for text-based output (426 lines) +- CLI `--format` option: html, text, markdown, chat + +**Relationship to This Refactoring:** + +| Aspect | golergka's Approach | This Refactoring | +|--------|---------------------|------------------| +| Focus | Multi-format output | Code organization | +| Data layer | ContentItem → ExtractedContent | TemplateMessage tree | +| Presentation | Separate renderers per format | Modular HTML renderer | +| Compatibility | Parallel to HTML | Refactor existing HTML | + +**Integration Assessment:** +- **Complementary**: golergka's extraction layer operates at ContentItem level, this refactoring at TemplateMessage level +- **Low conflict**: `content_extractor.py` is a new module, doesn't touch hierarchy/pairing code +- **Synergy opportunity**: Text renderer could benefit from tree structure for nested output +- **Risk**: `render_message_content()` changes in golergka's PR conflict with local changes + +**Recommendation:** Consider integrating golergka's work **after** completing Phase 3 (ANSI extraction) and Phase 4 (Tool formatters extraction). The content extraction layer is useful for multi-format support, but is tangential to the core refactoring goals of reducing renderer.py complexity. 
+ +## Completed Phases + +### Phase 1: Timing Infrastructure (Commits: 56b2807, 8426f39) + +**Goal**: Centralize timing utilities and standardize timing instrumentation patterns + +**Changes**: +- ✅ Extracted timing utilities to `renderer_timings.py` module +- ✅ Moved `DEBUG_TIMING` environment variable handling to timing module +- ✅ Standardized `log_timing` context manager pattern - work goes INSIDE the `with` block +- ✅ Added support for dynamic phase names using lambda expressions +- ✅ Removed top-level `os` import from renderer.py (no longer needed) + +**Benefits**: +- All timing-related code centralized in one module +- Consistent timing instrumentation throughout renderer +- Easy to enable/disable timing with `CLAUDE_CODE_LOG_DEBUG_TIMING` environment variable +- Better insight into rendering performance + +### Phase 2: Tool Use Context Optimization (Commit: 56b2807) + +**Goal**: Simplify tool use context management and eliminate unnecessary pre-processing + +**Analysis**: +- `tool_use_context` was only used when processing tool results +- The "prompt" member stored for Task tools wasn't actually used in lookups +- Tool uses always appear before tool results chronologically +- No need for separate pre-processing pass + +**Changes**: +- ✅ Removed `_define_tool_use_context()` function (68 lines eliminated) +- ✅ Changed `tool_use_context` from `Dict[str, Dict[str, Any]]` to `Dict[str, ToolUseContent]` +- ✅ Build index inline when creating ToolUseContent objects during message processing +- ✅ Use attribute access instead of dictionary access for better type safety +- ✅ Replaced dead code in `render_message_content` with warnings + +**Benefits**: +- Eliminated entire pre-processing pass through messages +- Better type safety with ToolUseContent objects +- Cleaner code with inline index building +- ~70 lines of code removed + +### Phase 3: ANSI Color Module Extraction ✅ COMPLETE + +**Goal**: Extract ANSI color conversion to dedicated module + +**Changes**: +- ✅ 
Created `claude_code_log/ansi_colors.py` (261 lines) +- ✅ Moved `_convert_ansi_to_html()` → `convert_ansi_to_html()` +- ✅ Updated imports in `renderer.py` +- ✅ Updated test imports in `test_ansi_colors.py` + +**Result**: 242 lines removed from renderer.py (4246 → 4004) + +### Phase 4: Code Rendering Module Extraction ✅ COMPLETE + +**Goal**: Extract code-related rendering (Pygments highlighting, diff rendering) to dedicated module + +**Changes**: +- ✅ Created `claude_code_log/renderer_code.py` (330 lines) +- ✅ Moved `_highlight_code_with_pygments()` → `highlight_code_with_pygments()` +- ✅ Moved `_truncate_highlighted_preview()` → `truncate_highlighted_preview()` +- ✅ Moved `_render_single_diff()` → `render_single_diff()` +- ✅ Moved `_render_line_diff()` → `render_line_diff()` +- ✅ Updated imports in `renderer.py` +- ✅ Updated test imports in `test_preview_truncation.py` +- ✅ Removed unused Pygments imports from renderer.py + +**Result**: 274 lines removed from renderer.py (4004 → 3730) + +**Note**: The original Phase 4 plan targeted tool formatters (~600 lines), but due to tight coupling with `escape_html`, `render_markdown`, and other utilities, we extracted a cleaner subset: code highlighting and diff rendering. The remaining tool formatters could be extracted in a future phase once the shared utilities are better factored. 
+ +### Phase 5: Message Processing Decomposition ✅ PARTIAL + +**Goal**: Break down the 687-line `_process_messages_loop()` into smaller functions + +**Changes**: +- ✅ Created `_process_system_message()` function (~88 lines) - handles hook summaries, commands, system messages +- ✅ Created `ToolItemResult` dataclass for structured tool processing results +- ✅ Created `_process_tool_use_item()` function (~84 lines) - handles tool_use content items +- ✅ Created `_process_tool_result_item()` function (~71 lines) - handles tool_result content items +- ✅ Created `_process_thinking_item()` function (~21 lines) - handles thinking content +- ✅ Created `_process_image_item()` function (~17 lines) - handles image content +- ✅ Replaced ~220 lines of nested conditionals with clean dispatcher pattern + +**Result**: `_process_messages_loop()` reduced from ~687 to ~460 lines (33% smaller) + +**Note**: File size increased slightly (3730 → 3814 lines) due to new helper functions, but the main loop is now much more maintainable with focused, testable helper functions. Further decomposition (session tracking, token usage extraction) could reduce it to ~200 lines but would require more complex parameter passing. 
+ +### Phase 6: Message Pairing Simplification ✅ COMPLETE + +**Goal**: Simplify the complex pairing logic in `_identify_message_pairs()` + +**Changes**: +- ✅ Created `PairingIndices` dataclass to hold all lookup indices in one place +- ✅ Extracted `_build_pairing_indices()` function (~35 lines) - builds all indices in single pass +- ✅ Extracted `_mark_pair()` utility (~8 lines) - marks first/last message pairing +- ✅ Extracted `_try_pair_adjacent()` function (~25 lines) - handles adjacent message pairs +- ✅ Extracted `_try_pair_by_index()` function (~30 lines) - handles index-based pairing +- ✅ Simplified `_identify_message_pairs()` from ~120 lines to ~37 lines (69% smaller) + +**Result**: Pairing logic decomposed into focused helpers with clear responsibilities: +- `_build_pairing_indices()`: O(n) index building for tool_use, tool_result, uuid, slash_command lookups +- `_try_pair_adjacent()`: Handles system+slash, command+output, tool_use+result adjacent pairs +- `_try_pair_by_index()`: Handles index-based pairing for non-adjacent messages + +**Note**: File size increased slightly (3814 → 3853 lines) due to new helper functions, but the main pairing function is now much cleaner and each helper is independently testable. 
+ +## Planned Future Phases + +### Phase 7: Message Type Documentation ✅ COMPLETE + +**Goal**: Document message types and CSS classes comprehensively + +**Completed Work**: +- ✅ Created comprehensive [css-classes.md](css-classes.md) with: + - Complete CSS class combinations (19 semantic patterns) + - CSS rule support status (24 full, 7 partial, 1 none) + - Pairing behavior documentation (pair_first/pair_last rules) + - Fold-bar support analysis +- ✅ Updated [messages.md](messages.md) with: + - Complete css_class trait mapping table + - Pairing patterns and rules by type + - Full tool table (16 tools with model info) + - Cross-references to css-classes.md + +### Phase 8: Testing Infrastructure ✅ COMPLETE + +**Goal**: Improve test coverage for refactored modules + +**Completed Work**: +- ✅ Created `test/test_phase8_message_variants.py` with tests for: + - Slash command rendering (`isMeta=True` flag) + - Queue operations skip behavior (enqueue/dequeue not rendered) + - CSS class modifiers composition (`error`, `sidechain`, combinations) + - Deduplication with modifiers +- ✅ Created `test/test_renderer.py` with edge case tests for: + - System message handling + - Write and Edit tool rendering +- ✅ Created `test/test_renderer_code.py` with tests for: + - Pygments highlighting (pattern matching, unknown extensions, ClassNotFound) + - Truncated highlighted preview + - Diff rendering edge cases (consecutive removals, hint line skipping) +- ✅ Simplified CSS by removing redundant `paired-message` class +- ✅ Updated snapshot tests and documentation + +**Test Files Added**: +- [test/test_phase8_message_variants.py](../test/test_phase8_message_variants.py) - Message type variants +- [test/test_renderer.py](../test/test_renderer.py) - Renderer edge cases +- [test/test_renderer_code.py](../test/test_renderer_code.py) - Code highlighting/diff tests + +**Coverage Notes**: +- Some lines in `renderer_code.py` (116-118, 319) are unreachable due to algorithm behavior +- Pygments 
`ClassNotFound` exception path covered via mock testing + +### Phase 9: Type Safety Improvements ✅ COMPLETE + +**Goal**: Replace string-based type checking with enums and typed structures + +**Completed Work**: +- ✅ Added `MessageType(str, Enum)` in `models.py` with all message types +- ✅ Added type guards for TranscriptEntry union narrowing (available for future use) +- ✅ Updated `renderer.py` to use `MessageType` enum for key comparisons +- ✅ Maintained backward compatibility via `str` base class + +**MessageType Enum Values**: +- JSONL entry types: `USER`, `ASSISTANT`, `SYSTEM`, `SUMMARY`, `QUEUE_OPERATION` +- Rendering types: `TOOL_USE`, `TOOL_RESULT`, `THINKING`, `IMAGE`, `BASH_INPUT`, `BASH_OUTPUT`, `SESSION_HEADER`, `UNKNOWN` +- System subtypes: `SYSTEM_INFO`, `SYSTEM_WARNING`, `SYSTEM_ERROR` + +**Type Guards Added**: +- `is_user_entry()`, `is_assistant_entry()`, `is_system_entry()`, `is_summary_entry()`, `is_queue_operation_entry()` +- `is_tool_use_content()`, `is_tool_result_content()`, `is_thinking_content()`, `is_image_content()`, `is_text_content()` + +**Note**: MessageModifiers dataclass deferred - existing boolean flags work well for now + +### Phase 10: Parser Simplification ✅ COMPLETE + +**Goal**: Simplify `extract_text_content()` using isinstance checks + +**Completed Work**: +- ✅ Added imports for Anthropic SDK types: `TextBlock`, `ThinkingBlock` +- ✅ Simplified `extract_text_content()` with clean isinstance checks +- ✅ Removed defensive `hasattr`/`getattr` patterns +- ✅ 23% code reduction (17 lines → 13 lines) + +**Before** (defensive pattern): +```python +if hasattr(item, "type") and getattr(item, "type") == "text": + text = getattr(item, "text", "") + if text: + text_parts.append(text) +``` + +**After** (clean isinstance): +```python +if isinstance(item, (TextContent, TextBlock)): + text_parts.append(item.text) +elif isinstance(item, (ThinkingContent, ThinkingBlock)): + continue +``` + +**Testing Evidence**: All 431 tests pass with simplified 
version +**Risk**: Low - maintains same behavior, fully tested + +### Phase 11: Tool Model Enhancement ✅ COMPLETE + +**Goal**: Add typed models for tool inputs (currently all generic `Dict[str, Any]`) + +**Completed Work**: +- ✅ Added 9 typed input models to `models.py`: + - `BashInput`, `ReadInput`, `WriteInput`, `EditInput`, `MultiEditInput` + - `GlobInput`, `GrepInput`, `TaskInput`, `TodoWriteInput` +- ✅ Created `ToolInput` union type for type-safe tool input handling +- ✅ Added `TOOL_INPUT_MODELS` mapping for tool name → model class lookup +- ✅ Added `parse_tool_input()` helper function with fallback to raw dict + +**Typed Input Models Added**: +```python +class BashInput(BaseModel): + command: str + description: Optional[str] = None + timeout: Optional[int] = None + run_in_background: Optional[bool] = None + dangerouslyDisableSandbox: Optional[bool] = None + +class ReadInput(BaseModel): + file_path: str + offset: Optional[int] = None + limit: Optional[int] = None + +class EditInput(BaseModel): + file_path: str + old_string: str + new_string: str + replace_all: Optional[bool] = None +``` + +**Note**: The `ToolUseContent.input` field remains `Dict[str, Any]` for backward compatibility. +The new typed models are available for optional use via `parse_tool_input()`. Existing +code continues to work unchanged with dictionary access. + +**Independence from Phase 12**: Phase 11 and Phase 12 are independent improvements. +Phase 12 focuses on architectural decomposition (splitting renderer.py into format-neutral +and format-specific modules), while Phase 11 provides typed tool input models as an +optional type-safety enhancement. The typed models can be adopted incrementally by any +code that wants to use them, independent of the format-neutral refactoring. 
+ +### Phase 12: Renderer Decomposition - Format Neutral ✅ COMPLETE + +**Goal**: Separate format-neutral logic from HTML-specific generation + +**Achieved Architecture** (December 2025): +``` +renderer.py (2525 lines) - Format-neutral +├── generate_template_messages() → returns tree roots +├── Renderer base class (subclassed by HtmlRenderer) +├── TemplateMessage, TemplateProject, TemplateSummary classes +├── Message processing loop with content model creation +├── Pairing & hierarchy logic +└── Deduplication + +html/ directory - HTML-specific +├── renderer.py (297 lines) - HtmlRenderer class +│ ├── _flatten_preorder() - tree traversal + formatting +│ ├── _format_message_content() - dispatches to formatters +│ └── generate(), generate_session() - template rendering +├── tool_formatters.py (950 lines) - Tool use/result formatters +├── user_formatters.py (326 lines) - User message formatters +├── assistant_formatters.py (90 lines) - Assistant/thinking formatters +├── system_formatters.py (113 lines) - System message formatters +├── utils.py (352 lines) - Markdown, escape, collapsibles +└── ansi_colors.py (261 lines) - ANSI → HTML conversion + +models.py (858 lines) - Content models +├── MessageContent base class and subclasses +├── SessionHeaderContent, DedupNoticeContent (renderer content) +├── IdeNotificationContent, UserTextContent (user content) +├── ReadOutput, EditOutput, etc. 
(tool output models) +└── MessageModifiers dataclass +``` + +**Implementation Steps** (completed differently than original plan): + +| Step | Description | Status | +|------|-------------|--------| +| 1-5 | Initial HTML extraction | ✅ Complete | +| 6 | Split tool formatters (two-stage: parse + render) | ✅ Done via content models in models.py | +| 7 | Split message content renderers | ✅ Done via html/{user,assistant,system,tool}_formatters.py | +| 8 | Split _process_* message functions | ✅ Content models created during processing | +| 9 | Move generate_projects_index_html | ⏸️ Still in renderer.py (format-neutral prep + HTML) | +| 10-11 | Final organization | ✅ Complete | + +**Steps 6-8 Resolution**: +The original plan called for two-stage (parse + render) splits. This was achieved differently: +- **Content models** in `models.py` capture parsed data (SessionHeaderContent, IdeNotificationContent, ReadOutput, etc.) +- **Format-neutral processing** in `renderer.py` creates content models during message processing +- **HTML formatters** in `html/*.py` render content models to HTML +- **Tree-first architecture** means HtmlRenderer traverses tree and formats during pre-order walk + +**Step 9 Status**: +`generate_projects_index_html()` remains in renderer.py because: +- Mixes format-neutral data preparation (TemplateProject/TemplateSummary) with HTML generation +- Moving just the HTML part would require restructuring the data flow +- Low priority: function works correctly and is ~100 lines + +**Dependencies**: +- Requires Phase 9 (type safety) for clean interfaces ✅ +- Benefits from Phase 10 (parser simplification) ✅ +- Tree-first architecture (TEMPLATE_MESSAGE_CHILDREN.md Phase 2.5) ✅ +- Enables golergka's multi-format integration + +**Risk**: High - requires careful refactoring +**Status**: ✅ COMPLETE + +## Recommended Execution Order + +For maximum impact with minimum risk: + +### Completed +1. 
✅ **Phase 3 (ANSI)** - Low risk, self-contained, immediate ~250 line reduction +2. ✅ **Phase 4 (Code rendering)** - Medium risk, ~274 line reduction, clear boundaries +3. ✅ **Phase 5 (Processing)** - High impact, main loop 33% smaller +4. ✅ **Phase 6 (Pairing)** - Pairing function 69% smaller, clear helpers +5. ✅ **Phase 7 (Documentation)** - Complete CSS/message docs +6. ✅ **Phase 8 (Testing)** - Coverage gap tests, message variant tests, CSS simplification +7. ✅ **Phase 9 (Type Safety)** - MessageType enum and type guards added + +### Next Steps +8. ✅ **Phase 10 (Parser)** - Simplified extract_text_content() with isinstance checks +9. ✅ **Phase 11 (Tool Models)** - Added typed input models for 9 common tools +10. ✅ **Phase 12 (Format Neutral)** - HTML formatters in `html/` directory, content models in models.py +11. ✅ **Tree-first architecture** - `generate_template_messages()` returns tree roots (TEMPLATE_MESSAGE_CHILDREN.md Phase 2.5) + +**Current Status (December 2025):** +- All planned phases complete +- renderer.py reduced from 4246 to 2525 lines (41% reduction) +- Clean separation: format-neutral in renderer.py, HTML-specific in html/ directory +- Tree-first architecture enables future recursive template rendering + +**Future Work:** +- **Recursive templates** (TEMPLATE_MESSAGE_CHILDREN.md Phase 3): Pass tree roots directly to template with recursive macro +- **Alternative renderers**: Text/markdown renderer using Renderer base class +- **golergka integration**: Content models and tree structure ready for multi-format output + +## Metrics to Track + +| Metric | Baseline (v0.9) | Current (Dec 2025) | Target | +|--------|-----------------|-------------------|--------| +| renderer.py lines | 4246 | 2525 | ✅ <3000 | +| html/ directory | - | 2389 total | - | +| models.py lines | ~400 | 858 | - | +| Largest function | ~687 lines | ~300 lines | <100 lines | +| `_identify_message_pairs()` | ~120 lines | ~37 lines | ✅ | +| Typed tool input models | 0 | 9 | ✅ | +| 
Content models | 0 | 15+ | - | +| Module count | 3 | 11 | - | +| Test coverage | ~78% | ~78% | >85% | + +**html/ directory breakdown:** +- renderer.py: 297 lines (HtmlRenderer) +- tool_formatters.py: 950 lines +- user_formatters.py: 326 lines +- utils.py: 352 lines +- ansi_colors.py: 261 lines +- assistant_formatters.py: 90 lines +- system_formatters.py: 113 lines + +**Progress Summary**: +- renderer.py reduced by 41% (4246 → 2525 lines) +- Format-neutral/HTML separation complete +- Tree-first architecture implemented +- Content models moved to models.py +- HTML formatters organized by message type in html/ directory + +## Quality Gates + +Before merging any phase: + +- [ ] `just test-all` passes +- [ ] `uv run pyright` passes with 0 errors +- [ ] `ruff check` passes +- [ ] Snapshot tests unchanged (or intentionally updated) +- [ ] No performance regression (check with `CLAUDE_CODE_LOG_DEBUG_TIMING=1`) + +## Notes + +- All changes should maintain backward compatibility +- Each phase should be committed separately for easy review +- Consider feature flags for large changes during development +- Run against real Claude projects to verify visual correctness + +## References + +### Code Modules - Format Neutral +- [renderer.py](../claude_code_log/renderer.py) - Format-neutral rendering (2525 lines) +- [models.py](../claude_code_log/models.py) - Content models, MessageModifiers, type guards (858 lines) +- [renderer_code.py](../claude_code_log/renderer_code.py) - Code highlighting & diffs (330 lines) +- [renderer_timings.py](../claude_code_log/renderer_timings.py) - Timing utilities +- [parser.py](../claude_code_log/parser.py) - JSONL parsing + +### Code Modules - HTML Specific (html/ directory) +- [html/renderer.py](../claude_code_log/html/renderer.py) - HtmlRenderer class (297 lines) +- [html/tool_formatters.py](../claude_code_log/html/tool_formatters.py) - Tool HTML formatters (950 lines) +- [html/user_formatters.py](../claude_code_log/html/user_formatters.py) - User 
message formatters (326 lines) +- [html/assistant_formatters.py](../claude_code_log/html/assistant_formatters.py) - Assistant/thinking formatters (90 lines) +- [html/system_formatters.py](../claude_code_log/html/system_formatters.py) - System message formatters (113 lines) +- [html/utils.py](../claude_code_log/html/utils.py) - Markdown, escape, collapsibles (352 lines) +- [html/ansi_colors.py](../claude_code_log/html/ansi_colors.py) - ANSI color conversion (261 lines) + +### Documentation +- [css-classes.md](css-classes.md) - Complete CSS class reference with support status +- [messages.md](messages.md) - Message types, content models, tool documentation +- [FOLD_STATE_DIAGRAM.md](FOLD_STATE_DIAGRAM.md) - Fold system documentation +- [TEMPLATE_MESSAGE_CHILDREN.md](TEMPLATE_MESSAGE_CHILDREN.md) - Tree architecture (Phase 2.5 complete) + +### Tests +- [test/test_ansi_colors.py](../test/test_ansi_colors.py) - ANSI tests +- [test/test_preview_truncation.py](../test/test_preview_truncation.py) - Code preview tests +- [test/test_sidechain_agents.py](../test/test_sidechain_agents.py) - Integration tests +- [test/test_template_data.py](../test/test_template_data.py) - Tree building tests (TestTemplateMessageTree) +- [test/test_phase8_message_variants.py](../test/test_phase8_message_variants.py) - Message variants +- [test/test_renderer.py](../test/test_renderer.py) - Renderer edge cases +- [test/test_renderer_code.py](../test/test_renderer_code.py) - Code highlighting/diff tests + +### External +- golergka's branch: `remotes/golergka/feat/text-output-format` (commit ada7ef5) diff --git a/dev-docs/TEMPLATE_MESSAGE_CHILDREN.md b/dev-docs/TEMPLATE_MESSAGE_CHILDREN.md new file mode 100644 index 00000000..b1acf510 --- /dev/null +++ b/dev-docs/TEMPLATE_MESSAGE_CHILDREN.md @@ -0,0 +1,137 @@ +# Template Message Children Architecture + +This document tracks the exploration of a children-based architecture for `TemplateMessage`, where messages can have nested children to form an 
explicit tree structure. + +## Current Architecture (2025-12-13) + +### Data Flow +``` +TranscriptEntry[] → generate_template_messages() → root_messages (tree) + ↓ + HtmlRenderer._flatten_preorder() → flat_list + ↓ + template.render(messages=flat_list) +``` + +### TemplateMessage (current) +- `generate_template_messages()` returns **tree roots** (typically session headers) +- Each message has `children: List[TemplateMessage]` populated +- `ancestry` field preserved for CSS classes / JavaScript fold/unfold +- HtmlRenderer flattens via pre-order traversal before template rendering + +### Hierarchy Levels +``` +Level 0: Tree roots (messages without ancestry - typically session headers) +Level 1: User messages +Level 2: Assistant, System, Thinking +Level 3: Tool use/result +Level 4: Sidechain assistant/thinking +Level 5: Sidechain tools +``` + +**Note:** Tree roots are any messages with empty `ancestry`. This is typically session headers, but in degenerate cases (no session headers), user messages or other top-level messages become roots. + +### Template Rendering (current) +- Single `{% for message in messages %}` loop over flattened list +- Ancestry rendered as CSS classes for JavaScript DOM queries +- Fold/unfold uses `document.querySelectorAll('.message.${targetId}')` +- Tree structure used internally but template still receives flat list + +## Future: Recursive Template Rendering + +The next step would be to pass tree roots directly to the template and use a recursive macro, eliminating the flatten step. + +### Template Rendering (future) +Recursive macro approach (Note: html_content is now passed separately, not stored in message): +```jinja2 +{% macro render_message(message, html_content, depth=0) %} +
+<div class="message {{ message.css_class }}">
+  {{ html_content | safe }}
+  {% if message.children %}
+  <div class="children">
+    {% for child, child_html in message.children_with_html %}
+    {{ render_message(child, child_html, depth + 1) }}
+    {% endfor %}
+  </div>
+  {% endif %}
+</div>
+{% endmacro %} + +{% for root, root_html in roots_with_html %} +{{ render_message(root, root_html) }} +{% endfor %} +``` + +### JavaScript Simplification (future) +With nested DOM structure, fold/unfold becomes trivial: +```javascript +// Hide all children +messageEl.querySelector('.children').style.display = 'none'; +// Show children +messageEl.querySelector('.children').style.display = ''; +``` + +This would require updating the fold/unfold JavaScript to work with the nested structure rather than CSS class queries. + +## Exploration Log + +### Phase 1: Foundation ✅ COMPLETE +- [x] Add `children` field to TemplateMessage (commit `7077f68`) +- [x] Keep existing flat-list behavior working +- [x] Add `flatten()` method for backward compatibility (commit `ed4d7b3`) + - Instance method `flatten()` returns self + all descendants in depth-first order + - Static method `flatten_all()` flattens list of root messages + - Unit tests in `test/test_template_data.py::TestTemplateMessageTree` + +### Phase 2: Tree Building ✅ COMPLETE +- [x] Create `_build_message_tree()` function (commit `83fcf31`) + - Takes flat list with `message_id` and `ancestry` already set + - Populates `children` field based on ancestry + - Returns list of root messages (those with empty ancestry) +- [x] Called after `_mark_messages_with_children()` in render pipeline +- [x] Integration tests verify tree building doesn't break HTML generation + +### Phase 2.5: Tree-First Architecture ✅ COMPLETE (2025-12-13) +- [x] `generate_template_messages()` now returns tree roots, not flat list (commit `c5048b9`) +- [x] `HtmlRenderer._flatten_preorder()` traverses tree, formats content, builds flat list +- [x] Content formatting happens during pre-order traversal (no separate pass) +- [x] Template unchanged - still receives flat list + +**Key insight:** The flat list was being passed to template AND the same messages had children populated. This caused confusion about which structure was authoritative. 
Now the tree is authoritative and the flat list is derived. + +### Phase 3: Template Migration (TODO - Future Work) +- [ ] Create recursive render macro +- [ ] Update DOM structure to use nested `.children` divs +- [ ] Migrate JavaScript fold/unfold to use nested DOM +- [ ] Pass `root_messages` directly to template (eliminate flatten step) + +### Challenges & Notes + +**Current State (2025-12-13):** +- Tree is the primary structure returned from `generate_template_messages()` +- HtmlRenderer flattens via pre-order traversal for template rendering +- This is cleaner than before: tree in → flat list out (explicit transformation) + +**Performance (2025-12-13):** +- Benchmark: 3.35s for 3917 messages across 5 projects +- Pre-order traversal + formatting is O(n) +- No caching needed - each message formatted exactly once + +**Why Keep Flat Template (for now):** +1. JavaScript fold/unfold relies on CSS class queries +2. Changing DOM structure requires JS migration +3. Current approach works correctly + +## Related Work + +### golergka's text-output-format PR +Created `content_extractor.py` for shared content parsing: +- Separates data extraction from presentation +- Dataclasses for extracted content: `ExtractedText`, `ExtractedToolUse`, etc. +- Could be extended for the tree-building approach + +### Visitor Pattern Consideration +For multi-format output (HTML, Markdown, JSON), consider: +- TemplateMessage as a tree data structure (no rendering logic) +- Visitor implementations for each output format +- Preparation in converter.py before any rendering diff --git a/dev-docs/css-classes.md b/dev-docs/css-classes.md new file mode 100755 index 00000000..1001b78c --- /dev/null +++ b/dev-docs/css-classes.md @@ -0,0 +1,286 @@ +# CSS Classes for Message Types + +This document provides a comprehensive reference for CSS class combinations used in Claude Code Log HTML output, their CSS rule support status, and pairing behavior. 
+ +**Generated from analysis of:** 29 session HTML files (3,244 message elements) +**Last updated:** 2025-12-07 + +--- + +## Quick Reference + +### Support Status Legend + +| Status | Meaning | +|--------|---------| +| ✅ Full | Has dedicated CSS selectors for this combination | +| ⚠️ Partial | Inherits from parent selectors only | +| ❌ None | No CSS rules found | + +--- + +## Base Message Types + +| Type | Description | CSS Support | +|------|-------------|-------------| +| `assistant` | Assistant response | ✅ Full | +| `bash-input` | Bash command input | ✅ Full | +| `bash-output` | Bash command output | ✅ Full | +| `image` | User-attached image | ✅ Full | +| `session-header` | Session header divider | ✅ Full | +| `system` | System message (user-initiated) | ✅ Full | +| `system-error` | System error (assistant-generated) | ✅ Full | +| `system-info` | System info message | ✅ Full | +| `system-warning` | System warning (assistant-generated) | ✅ Full | +| `thinking` | Extended thinking content | ✅ Full | +| `tool_result` | Tool result (success) | ✅ Full | +| `tool_use` | Tool use message | ✅ Full | +| `user` | Basic user message | ✅ Full | +| `unknown` | Unknown message type | ❌ None | + +--- + +## Modifier Classes + +| Modifier | Applied To | Description | +|----------|------------|-------------| +| `compacted` | `user` | Compacted conversation summary | +| `command-output` | `user` | Slash command output content | +| `error` | `tool_result` | Tool execution error | +| `pair_first` | Various | First message in a pair | +| `pair_last` | Various | Last message in a pair | +| `pair_middle` | Various | Middle message (never used so far) | +| `sidechain` | Various | Sub-agent (Task) message | +| `slash-command` | `user` | Expanded slash command prompt | +| `steering` | `user` | User steering via queue operation | +| `system-info` | `system` | System info level | +| `system-hook` | `system` | Hook execution summary | + +--- + +## Pairing Behavior + +Message pairing creates 
visual groupings for related messages. The `pair_first` and `pair_last` classes control styling of paired messages. + +### Pairing Rules by Type + +| Base Type | Can Be `pair_first` | Can Be `pair_last` | +|-----------|---------------------|-------------------| +| `assistant` | No | Yes | +| `bash-input` | Yes | No | +| `bash-output` | No | Yes | +| `system` | Yes | Yes | +| `thinking` | Yes | No | +| `tool_result` | No | Yes | +| `tool_use` | Yes | No | +| `user` | No | Yes | + +### Common Pairing Patterns + +| First Message | Last Message | Linked By | +|---------------|--------------|-----------| +| `tool_use` | `tool_result` | `tool_use_id` | +| `bash-input` | `bash-output` | Sequential | +| `thinking` | `assistant` | Sequential | +| `user` (slash-command) | `user` (command-output) | Sequential | +| `system` (system-info) | `system` (system-info) | Paired info | + +--- + +## All Class Combinations by Support Level + +### ✅ Full Support (25 combinations) + +These combinations have dedicated CSS selectors: + +| Combination | Description | Occurrences | +|-------------|-------------|-------------| +| `assistant` | Assistant response | 419 | +| `assistant ` | Assistant (paired with thinking) | 104 | +| `assistant sidechain` | Sub-assistant response | 73 | +| `bash-input` | Bash command input | 5 | +| `bash-output` | Bash command output | 5 | +| `image` | Image content | (rare) | +| `session-header` | Session header divider | 29 | +| `system` | System message (user-initiated) | 20 | +| `system system-hook` | Hook summary message | (rare) | +| `system-error` | System error (assistant-generated) | (rare) | +| `system-info` | System info message | 118 | +| `system-warning` | System warning (assistant-generated) | (rare) | +| `thinking` | Thinking content | 199 | +| `thinking pair_first` | Thinking (first in pair) | 104 | +| `thinking sidechain` | Sub-assistant thinking | (rare) | +| `tool_result` | Tool result (success) | 863 | +| `tool_result error` | Tool result 
(error) | 83 | +| `tool_result sidechain` | Sub-assistant tool result | 83 | +| `tool_use` | Tool use message | 946 | +| `tool_use sidechain` | Sub-assistant tool use | 84 | +| `user` | Basic user message | 88 | +| `user command-output` | Slash command output | 19 | +| `user compacted` | Compacted user conversation | (rare) | +| `user slash-command` | Slash command invocation | 20 | +| `user steering` | Out-of-band steering input | (rare) | + +### ⚠️ Partial Support (7 combinations) + +These combinations inherit from parent selectors but have no dedicated rules: + +| Combination | Description | Inherits From | +|-------------|-------------|---------------| +| `assistant pair_last` | Assistant (last in pair) | `.assistant`, `.` | +| `tool_result error sidechain` | Sub-assistant tool error | `.tool_result`, `.error`, `.sidechain` | +| `unknown sidechain` | Unknown sidechain type | `.sidechain` | +| `user compacted sidechain` | Compacted sidechain user | `.user`, `.compacted`, `.sidechain` | +| `user sidechain` | Sub-assistant user prompt (deprecated) | `.user`, `.sidechain` | +| `user slash-command sidechain` | Sidechain slash command | `.user`, `.slash-command`, `.sidechain` | +| `user command-output pair_last` | Command output in pair | `.user`, `.command-output` | + +### ❌ No Support (1 combination) + +| Combination | Description | Note | +|-------------|-------------|------| +| `unknown` | Unknown message type | Fallback type - should rarely appear | + +--- + +## Fold-Bar Support + +The fold-bar component uses `data-border-color` attribute to style borders based on message types. Below shows which combinations have dedicated fold-bar styling. 
+ +### Has Fold-Bar Styling (27 combinations) + +- `assistant` +- `assistant sidechain` +- `bash-input` +- `bash-output` +- `image` +- `image sidechain` +- `session-header` +- `system` +- `system-error` +- `system-info` +- `system-warning` +- `thinking` +- `thinking sidechain` +- `tool_result` +- `tool_result error` +- `tool_result error sidechain` +- `tool_result sidechain` +- `tool_use` +- `tool_use sidechain` +- `unknown` +- `unknown sidechain` +- `user` +- `user command-output` +- `user compacted` +- `user compacted sidechain` +- `user sidechain` +- `user slash-command` +- `user slash-command sidechain` + +### Missing Fold-Bar Styling (5 combinations) + +These combinations appear in HTML but lack dedicated fold-bar border colors: + +- `assistant ` (uses base `assistant` color) +- `assistant pair_last` (uses base `assistant` color) +- `system system-hook` (uses base `system` color) +- `thinking pair_first` (uses base `thinking` color) +- `user steering` (uses base `user` color) + +--- + +## Detailed Breakdown by Base Type + +### `assistant` (596 occurrences, 3 variations) +- 419× `assistant` (standalone) +- 104× `assistant pair_last ` +- 73× `assistant sidechain` + +### `bash-input` (5 occurrences, 1 variation) +- 5× `bash-input pair_first ` + +### `bash-output` (5 occurrences, 1 variation) +- 5× `bash-output pair_last ` + +### `system` (138 occurrences, 3 variations) +- 59× `system pair_first system-info` +- 59× `system pair_last system-info` +- 20× `system pair_first ` + +### `thinking` (303 occurrences, 2 variations) +- 199× `thinking` (standalone) +- 104× `thinking pair_first ` + +### `tool_result` (1,030 occurrences, 4 variations) +- 863× `tool_result pair_last ` +- 83× `tool_result error pair_last ` +- 83× `tool_result pair_last sidechain` +- 1× `tool_result error pair_last sidechain` + +### `tool_use` (1,030 occurrences, 2 variations) +- 946× `tool_use pair_first ` +- 84× `tool_use pair_first sidechain` + +### `user` (128 occurrences, 4 variations) +- 88× 
`user` (standalone) +- 20× `user pair_first slash-command` +- 19× `user command-output pair_last ` +- 1× `user pair_last slash-command` (unpaired) + +--- + +## Key Observations + +1. **Pairing Consistency**: Tools (`tool_use` + `tool_result`) and bash commands (`bash-input` + `bash-output`) always appear as pairs, with `pair_first` on the input/use side and `pair_last` on the output/result side. + +2. **Thinking-Assistant Pattern**: `thinking` messages that are paired are always `pair_first`, paired with an `assistant` message that is `pair_last`. + +3. **Sidechains**: The `sidechain` modifier appears on: + - `assistant` messages (73 occurrences) + - `tool_use` and `tool_result` pairs (84 and 84 occurrences respectively) + +4. **Error Handling**: The `error` modifier only appears on `tool_result` messages (84 total error results). + +5. **System Messages**: Have 3 variations: + - System info pairs (118 total, always paired) + - Generic system pairs (20, `pair_first`) + +6. **Slash Commands**: User messages with `slash-command` and `command-output` pair together: + - `user slash-command` (20 occurrences, `pair_first`) + - `user command-output` (19 occurrences, `pair_last`) + +7. **Rare Cases**: + - `tool_result` with both `error` and `sidechain` (1 occurrence) + - `bash-input`/`bash-output` pairs (5 pairs total) + +--- + +## Structural Classes (Not Semantic) + +In addition to the semantic classes above, messages include structural classes: + +- **Session IDs**: `session-{uuid}` - identifies which session a message belongs to +- **Ancestry Markers**: `d-{number}` - indicates descendant depth in the message tree + +These are excluded from semantic analysis but appear in all HTML output. 
+ +--- + +## CSS Selector Statistics + +- **Total CSS selectors in templates**: 495 +- **Message-related selectors**: 78 +- **Fold-bar combinations**: 28 +- **Full support combinations**: 25 +- **Partial support combinations**: 7 +- **No support combinations**: 1 + +--- + +## References + +- Source: [css_class_combinations_summary.md](/tmp/css_class_combinations_summary.md) +- Source: [css_rules_analysis.md](/tmp/css_rules_analysis.md) +- CSS templates: [claude_code_log/templates/](../claude_code_log/templates/) +- Messages documentation: [messages.md](messages.md) diff --git a/dev-docs/messages.md b/dev-docs/messages.md new file mode 100644 index 00000000..ce455b47 --- /dev/null +++ b/dev-docs/messages.md @@ -0,0 +1,793 @@ +# Message Types in Claude Code Transcripts + +This document describes all message types found in Claude Code JSONL transcript files and their corresponding output representations. The goal is to define an **intermediate representation** that captures the logical message structure independent of HTML rendering. + +## Overview + +Claude Code transcripts contain messages in JSONL format. Each line represents an input message that gets transformed through: + +1. **Input Layer** (JSONL): Raw Claude Code transcript data +2. **Intermediate Layer** (TemplateMessage): Format-neutral logical representation +3. **Output Layer** (HTML): Rendered visual output + +This document maps input types to their intermediate and output representations. 
+
+---
+
+## Data Flow: From Transcript Entries to Rendered Messages
+
+```
+JSONL Parsing (parser.py)
+│
+├── UserTranscriptEntry
+│   ├── TextContent → User message variants:
+│   │   ├── SlashCommandContent (isMeta or <command-name> tags)
+│   │   ├── CommandOutputContent (<local-command-stdout> tags)
+│   │   ├── BashInputContent (<bash-input> tags)
+│   │   ├── CompactedSummaryContent (compacted conversation)
+│   │   └── Plain user text
+│   ├── ToolResultContent → Tool result messages:
+│   │   ├── ReadOutput (cat-n formatted file content)
+│   │   ├── EditOutput (cat-n formatted edit result)
+│   │   └── Generic tool result text
+│   └── ImageContent → Image messages
+│
+├── AssistantTranscriptEntry
+│   ├── TextContent → AssistantTextContent
+│   ├── ThinkingContent → ThinkingContentModel
+│   └── ToolUseContent → Tool use messages with parsed inputs:
+│       ├── ReadInput, WriteInput, EditInput, MultiEditInput
+│       ├── BashInput, GlobInput, GrepInput
+│       ├── TaskInput, TodoWriteInput, AskUserQuestionInput
+│       └── ExitPlanModeInput
+│
+├── SystemTranscriptEntry
+│   ├── SystemContent (level: info/warning/error)
+│   └── HookSummaryContent (subtype: stop_hook_summary)
+│
+├── SummaryTranscriptEntry → Session metadata (not rendered)
+│
+└── QueueOperationTranscriptEntry
+    └── "remove" operation → Steering message (rendered as user)
+```
+
+---
+
+## Intermediate Representation: TemplateMessage
+
+The intermediate representation is `TemplateMessage`, a Python class (in `renderer.py`) that captures all fields needed for rendering.
+
+### Key Fields
+
+```python
+class TemplateMessage:
+    # Identity
+    type: str  # Base type: "user", "assistant", "tool_use", etc.
+ message_id: str # Unique ID within session (e.g., "msg-0", "tool-1") + uuid: str # Original JSONL uuid + + # Content (format-neutral) + content: Optional[MessageContent] # Structured content model + # Note: HTML is generated during template rendering, not stored in the message + + # Display + message_title: str # Display title (e.g., "User", "Assistant") + css_class: str # CSS classes (derived from type + modifiers) + modifiers: MessageModifiers # Format-neutral display traits + + # Metadata + raw_timestamp: str # ISO 8601 timestamp + session_id: str # Session UUID + + # Hierarchy + children: List[TemplateMessage] # Child messages (tree mode) + ancestry: List[str] # Parent message IDs for fold/unfold + + # Pairing + is_paired: bool # True if part of a pair + pair_role: Optional[str] # "pair_first", "pair_last", "pair_middle" + + # Tool-specific + tool_use_id: Optional[str] # ID linking tool_use to tool_result +``` + +### MessageModifiers → CSS Classes + +Display traits are stored in `MessageModifiers` (see [Part 6](#part-6-infrastructure-models)) and converted to CSS classes for HTML rendering: + +| css_class | Base Type | MessageModifiers Field | +|-----------|-----------|------------------------| +| `"user"` | user | (none) | +| `"user compacted"` | user | `is_compacted=True` | +| `"user slash-command"` | user | `is_slash_command=True` | +| `"user command-output"` | user | `is_command_output=True` | +| `"user sidechain"` | user | `is_sidechain=True` | +| `"user steering"` | user | `is_steering=True` | +| `"assistant"` | assistant | (none) | +| `"assistant sidechain"` | assistant | `is_sidechain=True` | +| `"tool_use"` | tool_use | (none) | +| `"tool_use sidechain"` | tool_use | `is_sidechain=True` | +| `"tool_result"` | tool_result | (none) | +| `"tool_result error"` | tool_result | `is_error=True` | +| `"tool_result sidechain"` | tool_result | `is_sidechain=True` | +| `"thinking"` | thinking | (none) | +| `"system system-info"` | system | `system_level="info"` 
| +| `"system system-warning"` | system | `system_level="warning"` | +| `"system system-error"` | system | `system_level="error"` | +| `"system system-hook"` | system | `system_level="hook"` | + +**Note**: See [css-classes.md](css-classes.md) for complete CSS support status. + +--- + +# Part 1: User Messages (UserTranscriptEntry) + +User transcript entries (`type: "user"`) contain human input, tool results, and images. + +## 1.1 Content Types in User Messages + +User messages contain `ContentItem` instances that are either: +- **TextContent**: User-typed text (with various semantic variants) +- **ToolResultContent**: Results from tool execution +- **ImageContent**: User-attached images + +## 1.2 User Text Variants + +Based on flags and tag patterns in `TextContent`, user text messages are classified into specialized content types defined in `models.py`. + +### Regular User Prompt + +- **Condition**: No special flags or tags +- **Content Model**: Plain `TextContent` +- **CSS Class**: `user` +- **Files**: [user.json](messages/user/user.json) | [user.jsonl](messages/user/user.jsonl) + +```json +{ + "type": "user", + "message": { + "role": "user", + "content": [{ "type": "text", "text": "Help me fix this bug..." }] + }, + "isSidechain": false +} +``` + +### Slash Command (isMeta) + +- **Condition**: `isMeta: true` flag +- **Content Model**: `SlashCommandContent` (models.py) +- **CSS Class**: `user slash-command` +- **Files**: [user_slash_command.json](messages/user/user_slash_command.json) + +```json +{ + "type": "user", + "message": { "content": "Caveat: The messages below were generated..." 
},
+  "isMeta": true
+}
+```
+
+### Slash Command (Tags)
+
+- **Condition**: Contains `<command-name>` tags
+- **Content Model**: `SlashCommandContent` with parsed name/args/contents
+- **CSS Class**: `user slash-command`
+- **Files**: [user_command.json](messages/user/user_command.json)
+
+```python
+@dataclass
+class SlashCommandContent(MessageContent):
+    command_name: str  # e.g., "/model", "/context"
+    command_args: str  # Arguments after command
+    command_contents: str  # Content inside command
+```
+
+### Command Output
+
+- **Condition**: Contains `<local-command-stdout>` tags
+- **Content Model**: `CommandOutputContent`
+- **CSS Class**: `user command-output`
+- **Files**: [command_output.json](messages/user/command_output.json)
+
+```python
+@dataclass
+class CommandOutputContent(MessageContent):
+    stdout: str  # Command output text
+    is_markdown: bool  # True if content appears to be markdown
+```
+
+### Bash Input
+
+- **Condition**: Contains `<bash-input>` tags
+- **Content Model**: `BashInputContent`
+- **CSS Class**: Part of bash tool pairing
+- **Files**: [bash_input.json](messages/user/bash_input.json)
+
+```python
+@dataclass
+class BashInputContent(MessageContent):
+    command: str  # The bash command that was executed
+```
+
+### Bash Output
+
+The corresponding output uses `<bash-stdout>` and optionally `<bash-stderr>` tags:
+
+- **Condition**: Contains `<bash-stdout>` tags
+- **Content Model**: `BashOutputContent`
+- **CSS Class**: Part of bash tool pairing
+- **Files**: [bash_output.json](messages/user/bash_output.json)
+
+### Compacted Conversation
+
+- **Condition**: Contains "(compacted conversation)" marker
+- **Content Model**: `CompactedSummaryContent`
+- **CSS Class**: `user compacted`
+
+```python
+@dataclass
+class CompactedSummaryContent(MessageContent):
+    summary_text: str  # The compacted conversation summary
+```
+
+### Sidechain User (Sub-agent)
+
+- **Condition**: `isSidechain: true`
+- **CSS Class**: `user sidechain`
+- **Note**: Typically skipped during rendering (duplicates Task prompt)
+- **Files**:
[user_sidechain.json](messages/user/user_sidechain.json)
+
+### IDE Notifications
+
+User messages may contain IDE notification tags that are parsed into structured content:
+
+- **Condition**: Contains `<ide_opened_file>`, `<ide_selection>`, or `<ide_diagnostics>` tags
+- **Content Model**: `IdeNotificationContent` containing lists of:
+  - `IdeOpenedFile`: File open notifications
+  - `IdeSelection`: Code selection notifications
+  - `IdeDiagnostic`: Diagnostic messages (parsed JSON or raw text fallback)
+- **CSS Class**: Notifications rendered as inline elements within user message
+
+```python
+@dataclass
+class IdeOpenedFile:
+    content: str  # Raw content from the tag
+
+@dataclass
+class IdeSelection:
+    content: str  # Raw selection content
+
+@dataclass
+class IdeDiagnostic:
+    diagnostics: Optional[List[Dict[str, Any]]]  # Parsed JSON
+    raw_content: Optional[str]  # Fallback if parsing failed
+
+@dataclass
+class IdeNotificationContent(MessageContent):
+    opened_files: List[IdeOpenedFile]
+    selections: List[IdeSelection]
+    diagnostics: List[IdeDiagnostic]
+    remaining_text: str  # Text after notifications extracted
+```
+
+## 1.3 Tool Results (ToolResultContent)
+
+Tool results appear as `ToolResultContent` items in user messages, linked to their corresponding `ToolUseContent` via `tool_use_id`.
+ +### Tool Result Output Models + +| Tool | Output Model | Key Fields | Files | +|------|--------------|------------|-------| +| Read | `ReadOutput` | file_path, content, start_line, num_lines, is_truncated | [tool_result](messages/tools/Read-tool_result.json) | +| Edit | `EditOutput` | file_path, success, diffs, message, start_line | [tool_result](messages/tools/Edit-tool_result.json) | +| Write | `WriteOutput` *(TODO)* | file_path, success, message | [tool_result](messages/tools/Write-tool_result.json) | +| Bash | `BashOutput` *(TODO)* | stdout, stderr, exit_code, interrupted, is_image | [tool_result](messages/tools/Bash-tool_result.json) | +| Glob | `GlobOutput` *(TODO)* | pattern, files, truncated | [tool_result](messages/tools/Glob-tool_result.json) | +| Grep | `GrepOutput` *(TODO)* | pattern, matches, output_mode, truncated | [tool_result](messages/tools/Grep-tool_result.json) | +| Task | `TaskOutput` *(TODO)* | agent_id, result, is_background | [tool_result](messages/tools/Task-tool_result.json) | +| (error) | — | is_error: true | [Bash error](messages/tools/Bash-tool_result_error.json) | + +**(TODO)**: Output model defined in models.py but not yet used - tool results currently handled as raw strings. + +### Generic Tool Result + +- **CSS Class**: `tool_result` +- **Content**: Raw string or structured content + +```json +{ + "type": "user", + "message": { + "content": [{ + "type": "tool_result", + "tool_use_id": "toolu_xxx", + "is_error": false, + "content": "..." 
+ }] + } +} +``` + +### Tool Result Error + +- **Condition**: `is_error: true` +- **CSS Class**: `tool_result error` +- **Files**: [Bash-tool_result_error.json](messages/tools/Bash-tool_result_error.json) + +### Read Tool Result → ReadOutput + +Read tool results in cat-n format are parsed into structured `ReadOutput`: +- **Files**: [Read-tool_result.json](messages/tools/Read-tool_result.json) + +```python +@dataclass +class ReadOutput(MessageContent): + file_path: str + content: str # File content (may be truncated) + start_line: int # 1-based starting line number + num_lines: int # Number of lines in content + total_lines: int # Total lines in file + is_truncated: bool + system_reminder: Optional[str] # Embedded system reminder +``` + +### Edit Tool Result → EditOutput + +Edit tool results with cat-n snippets are parsed into structured `EditOutput`: +- **Files**: [Edit-tool_result.json](messages/tools/Edit-tool_result.json) + +```python +@dataclass +class EditOutput(MessageContent): + file_path: str + success: bool + diffs: List[EditDiff] # Changes made + message: str # Result message or code snippet + start_line: int # Starting line for display +``` + +### Tool Result Rendering Wrapper + +Tool results are wrapped in `ToolResultContentModel` for rendering, which provides additional context: + +```python +@dataclass +class ToolResultContentModel(MessageContent): + tool_use_id: str + content: Any # Union[str, List[Dict[str, Any]]] + is_error: bool = False + tool_name: Optional[str] = None # Name of the tool + file_path: Optional[str] = None # File path for Read/Edit/Write +``` + +## 1.4 Images (ImageContent) + +- **CSS Class**: `image` +- **Files**: [image.json](messages/user/image.json) + +```json +{ + "type": "user", + "message": { + "content": [{ + "type": "image", + "source": { + "type": "base64", + "media_type": "image/png", + "data": "iVBORw0KGgo..." 
+ } + }] + } +} +``` + +Image data is structured using `ImageSource`: + +```python +class ImageSource(BaseModel): + type: Literal["base64"] + media_type: str # e.g., "image/png" + data: str # Base64-encoded image data + +class ImageContent(BaseModel, MessageContent): + type: Literal["image"] + source: ImageSource +``` + +--- + +# Part 2: Assistant Messages (AssistantTranscriptEntry) + +Assistant transcript entries (`type: "assistant"`) contain Claude's responses. + +## 2.1 Content Types in Assistant Messages + +Assistant messages contain `ContentItem` instances that are: +- **TextContent**: Claude's text response +- **ThinkingContent**: Extended thinking blocks +- **ToolUseContent**: Tool invocations + +## 2.2 Assistant Text → AssistantTextContent + +- **Content Model**: `AssistantTextContent` (models.py) +- **CSS Class**: `assistant` (or `assistant sidechain`) +- **Files**: [assistant.json](messages/assistant/assistant.json) + +```python +@dataclass +class AssistantTextContent(MessageContent): + text: str # The assistant's response text +``` + +### Sidechain Assistant + +- **Condition**: `isSidechain: true` +- **CSS Class**: `assistant sidechain` +- **Title**: "Sub-assistant" +- **Files**: [assistant_sidechain.json](messages/assistant/assistant_sidechain.json) + +## 2.3 Thinking Content → ThinkingContentModel + +- **Content Model**: `ThinkingContentModel` (models.py) +- **CSS Class**: `thinking` +- **Files**: [thinking.json](messages/assistant/thinking.json) + +```python +@dataclass +class ThinkingContentModel(MessageContent): + thinking: str # The thinking text + signature: Optional[str] # Thinking block signature +``` + +```json +{ + "type": "assistant", + "message": { + "content": [{ "type": "thinking", "thinking": "Let me analyze..." 
}] + } +} +``` + +## 2.4 Tool Use → ToolUseContent with Typed Inputs + +Tool invocations contain a `ToolUseContent` item with: +- `name`: The tool name (e.g., "Read", "Bash", "Task") +- `id`: Unique ID for pairing with results +- `input`: Raw input dictionary + +The `parsed_input` property returns a typed input model via `parse_tool_input()`. + +### Tool Input Models (models.py) + +| Tool | Input Model | Key Fields | +|------|-------------|------------| +| Read | `ReadInput` | file_path, offset, limit | +| Write | `WriteInput` | file_path, content | +| Edit | `EditInput` | file_path, old_string, new_string, replace_all | +| MultiEdit | `MultiEditInput` | file_path, edits[] | +| Bash | `BashInput` | command, description, timeout, run_in_background | +| Glob | `GlobInput` | pattern, path | +| Grep | `GrepInput` | pattern, path, glob, type, output_mode | +| Task | `TaskInput` | prompt, subagent_type, description, model | +| TodoWrite | `TodoWriteInput` | todos[] | +| AskUserQuestion | `AskUserQuestionInput` | questions[], question | +| ExitPlanMode | `ExitPlanModeInput` | plan, launchSwarm, teammateCount | + +### Tool Input Helper Models + +Some tool inputs contain nested structures with their own models: + +```python +# MultiEdit tool uses EditItem for individual edits +class EditItem(BaseModel): + old_string: str + new_string: str + +# TodoWrite tool uses TodoWriteItem for individual todos +class TodoWriteItem(BaseModel): + content: str = "" + status: str = "pending" + activeForm: str = "" + id: Optional[str] = None + priority: Optional[str] = None + +# AskUserQuestion tool uses nested models for questions/options +class AskUserQuestionOption(BaseModel): + label: str = "" + description: Optional[str] = None + +class AskUserQuestionItem(BaseModel): + question: str = "" + header: Optional[str] = None + options: List[AskUserQuestionOption] = [] + multiSelect: bool = False +``` + +### Tool Use Message Structure + +- **CSS Class**: `tool_use` (or `tool_use sidechain`) +- 
**Files**: See [messages/tools/](messages/tools/) (e.g., `Read-tool_use.json`) + +```json +{ + "type": "assistant", + "message": { + "content": [{ + "type": "tool_use", + "id": "toolu_xxx", + "name": "Read", + "input": { "file_path": "/path/to/file" } + }] + } +} +``` + +--- + +# Part 3: System Messages (SystemTranscriptEntry) + +System transcript entries (`type: "system"`) convey notifications and hook summaries. + +## 3.1 Content Types for System Messages + +System messages are parsed into structured content models in `models.py`: +- **SystemContent**: For info/warning/error messages +- **HookSummaryContent**: For hook execution summaries + +## 3.2 System Info/Warning/Error → SystemContent + +- **Content Model**: `SystemContent` (models.py) +- **CSS Class**: `system system-info`, `system system-warning`, `system system-error` +- **Files**: [system_info.json](messages/system/system_info.json) + +```python +@dataclass +class SystemContent(MessageContent): + level: str # "info", "warning", "error" + text: str # Raw text content (may contain ANSI codes) +``` + +```json +{ + "type": "system", + "content": "Running PostToolUse:MultiEdit...", + "level": "info" +} +``` + +## 3.3 Hook Summary → HookSummaryContent + +- **Content Model**: `HookSummaryContent` (models.py) +- **Condition**: `subtype: "stop_hook_summary"` +- **CSS Class**: `system system-hook` + +```python +@dataclass +class HookInfo: + command: str + +@dataclass +class HookSummaryContent(MessageContent): + has_output: bool + hook_errors: List[str] + hook_infos: List[HookInfo] +``` + +--- + +# Part 4: Metadata Entries + +These entry types primarily contain metadata, with some rendered conditionally. 
+ +## 4.1 Summary (SummaryTranscriptEntry) + +- **Purpose**: Session summary for navigation +- **Files**: [summary.json](messages/system/summary.json) + +```json +{ + "type": "summary", + "summary": "Claude Code warmup for deep-manifest project", + "leafUuid": "b83b0f5f-8bfc-4b98-8368-16162a6e9320" +} +``` + +The `leafUuid` links the summary to the last message of the session. + +## 4.2 Queue Operation (QueueOperationTranscriptEntry) + +- **Purpose**: User interrupts and steering during assistant responses +- **Rendered**: Only `remove` operations (as `user steering`) +- **Files**: [queue_operation.json](messages/system/queue_operation.json) + +## 4.3 File History Snapshot + +- **Purpose**: File state snapshots for undo/redo +- **Not Rendered** +- **Files**: [file_history_snapshot.json](messages/system/file_history_snapshot.json) + +--- + +# Part 5: Renderer Content Models + +These models are created during rendering to represent synthesized content not directly from JSONL entries. + +## 5.1 SessionHeaderContent + +Session headers are rendered at the start of each session: + +```python +@dataclass +class SessionHeaderContent(MessageContent): + title: str # e.g., "Session 2025-12-13 10:30" + session_id: str # Session UUID + summary: Optional[str] = None # Session summary if available +``` + +## 5.2 DedupNoticeContent + +Deduplication notices are shown when content is deduplicated (e.g., sidechain assistant text that duplicates the Task tool result): + +```python +@dataclass +class DedupNoticeContent(MessageContent): + notice_text: str # e.g., "Content omitted (duplicates Task result)" +``` + +--- + +# Part 6: Infrastructure Models + +## 6.1 MessageModifiers + +Semantic modifiers that affect message display. 
These are format-neutral flags that renderers use to determine how to display a message: + +```python +@dataclass +class MessageModifiers: + is_sidechain: bool = False # Sub-agent message + is_slash_command: bool = False # Slash command invocation + is_command_output: bool = False # Command output + is_compacted: bool = False # Compacted conversation + is_error: bool = False # Error message + is_steering: bool = False # Queue remove (steering) + system_level: Optional[str] = None # "info", "warning", "error", "hook" +``` + +## 6.2 UsageInfo + +Token usage tracking for assistant messages: + +```python +class UsageInfo(BaseModel): + input_tokens: Optional[int] = None + cache_creation_input_tokens: Optional[int] = None + cache_read_input_tokens: Optional[int] = None + output_tokens: Optional[int] = None + service_tier: Optional[str] = None + server_tool_use: Optional[Dict[str, Any]] = None +``` + +## 6.3 BaseTranscriptEntry + +Base class for all transcript entries, providing common fields: + +```python +class BaseTranscriptEntry(BaseModel): + parentUuid: Optional[str] # UUID of parent message + isSidechain: bool # Whether this is a sub-agent message + userType: str # User type identifier + cwd: str # Working directory + sessionId: str # Session UUID + version: str # Transcript format version + uuid: str # Unique message ID + timestamp: str # ISO 8601 timestamp + isMeta: Optional[bool] = None # Slash command marker + agentId: Optional[str] = None # Sub-agent ID +``` + +--- + +# Part 7: Message Relationships + +## 7.1 Hierarchy (Parent/Child) + +The message hierarchy is determined by **sequence and message type**, not by `parentUuid`: + +- Session headers are topmost (Level 0) +- User messages follow at Level 1 +- Assistant responses and system messages nest under user messages (Level 2) +- Tool use/result pairs nest under assistant responses (Level 3) +- Sidechain messages nest under their Task result (Level 4+) + +``` +Session header (Level 0) +└── User message (Level 
1) + ├── System message (Level 2) + └── Assistant response (Level 2) + └── Tool use/result pair (Level 3) + └── Sidechain messages (Level 4+) +``` + +**Note**: `parentUuid` links messages temporally (which message preceded this one) but is not used for rendering hierarchy. + +## 7.2 Tool Pairing + +`tool_use` and `tool_result` messages are paired by `tool_use_id`: + +| First | Last | Link | +|-------|------|------| +| `tool_use` | `tool_result` | `tool_use.id` = `tool_result.tool_use_id` | + +### Other Pairings + +| First | Last | Link | +|-------|------|------| +| `bash-input` | `bash-output` | Sequential | +| `thinking` | `assistant` | Sequential | +| `slash-command` | `command-output` | Sequential | + +## 7.3 Sidechain Linking + +Sub-agent messages (from `Task` tool): +- Have `isSidechain: true` +- Have `agentId` linking to the Task +- Appear nested under their Task result + +--- + +# Part 8: Tool Reference + +## Available Tools by Category + +### File Operations + +| Tool | Use Sample | Result Sample | Input Model | Output Model | +|------|------------|---------------|-------------|--------------| +| Read | [tool_use](messages/tools/Read-tool_use.json) | [tool_result](messages/tools/Read-tool_result.json) | `ReadInput` | `ReadOutput` | +| Write | [tool_use](messages/tools/Write-tool_use.json) | [tool_result](messages/tools/Write-tool_result.json) | `WriteInput` | `WriteOutput` *(TODO)* | +| Edit | [tool_use](messages/tools/Edit-tool_use.json) | [tool_result](messages/tools/Edit-tool_result.json) | `EditInput` | `EditOutput` | +| MultiEdit | [tool_use](messages/tools/MultiEdit-tool_use.json) | [tool_result](messages/tools/MultiEdit-tool_result.json) | `MultiEditInput` | — | +| Glob | [tool_use](messages/tools/Glob-tool_use.json) | [tool_result](messages/tools/Glob-tool_result.json) | `GlobInput` | `GlobOutput` *(TODO)* | +| Grep | [tool_use](messages/tools/Grep-tool_use.json) | [tool_result](messages/tools/Grep-tool_result.json) | `GrepInput` | `GrepOutput` 
*(TODO)* | + +### Shell Operations + +| Tool | Use Sample | Result Sample | Input Model | Output Model | +|------|------------|---------------|-------------|--------------| +| Bash | [tool_use](messages/tools/Bash-tool_use.json) | [tool_result](messages/tools/Bash-tool_result.json) | `BashInput` | `BashOutput` *(TODO)* | +| BashOutput | [tool_use](messages/tools/BashOutput-tool_use.json) | [tool_result](messages/tools/BashOutput-tool_result.json) | — | — | +| KillShell | [tool_use](messages/tools/KillShell-tool_use.json) | [tool_result](messages/tools/KillShell-tool_result.json) | — | — | + +### Agent Operations + +| Tool | Use Sample | Result Sample | Input Model | Output Model | +|------|------------|---------------|-------------|--------------| +| Task | [tool_use](messages/tools/Task-tool_use.json) | [tool_result](messages/tools/Task-tool_result.json) | `TaskInput` | `TaskOutput` *(TODO)* | +| TodoWrite | [tool_use](messages/tools/TodoWrite-tool_use.json) | [tool_result](messages/tools/TodoWrite-tool_result.json) | `TodoWriteInput` | — | +| AskUserQuestion | [tool_use](messages/tools/AskUserQuestion-tool_use.json) | [tool_result](messages/tools/AskUserQuestion-tool_result.json) | `AskUserQuestionInput` | — | +| ExitPlanMode | [tool_use](messages/tools/ExitPlanMode-tool_use.json) | [tool_result](messages/tools/ExitPlanMode-tool_result.json) | `ExitPlanModeInput` | — | + +### Web Operations + +| Tool | Use Sample | Result Sample | Input Model | Output Model | +|------|------------|---------------|-------------|--------------| +| WebFetch | [tool_use](messages/tools/WebFetch-tool_use.json) | [tool_result](messages/tools/WebFetch-tool_result.json) | — | — | +| WebSearch | [tool_use](messages/tools/WebSearch-tool_use.json) | [tool_result](messages/tools/WebSearch-tool_result.json) | — | — | + +--- + +## References + +- [css-classes.md](css-classes.md) - Complete CSS class reference with support status +- [models.py](../claude_code_log/models.py) - Pydantic models 
for transcript data +- [renderer.py](../claude_code_log/renderer.py) - Main rendering module +- [html/](../claude_code_log/html/) - HTML-specific formatters (formatting only, content models in models.py) + - [system_formatters.py](../claude_code_log/html/system_formatters.py) - SystemContent, HookSummaryContent formatting + - [user_formatters.py](../claude_code_log/html/user_formatters.py) - User message formatting + - [assistant_formatters.py](../claude_code_log/html/assistant_formatters.py) - AssistantText, Thinking, Image formatting + - [tool_formatters.py](../claude_code_log/html/tool_formatters.py) - Tool use/result formatting +- [parser.py](../claude_code_log/parser.py) - JSONL parsing module +- [TEMPLATE_MESSAGE_CHILDREN.md](TEMPLATE_MESSAGE_CHILDREN.md) - Tree architecture exploration +- [MESSAGE_REFACTORING.md](MESSAGE_REFACTORING.md) - Refactoring plan diff --git a/dev-docs/messages/assistant/assistant.json b/dev-docs/messages/assistant/assistant.json new file mode 100644 index 00000000..6cbdbe76 --- /dev/null +++ b/dev-docs/messages/assistant/assistant.json @@ -0,0 +1,20 @@ +{ + "type": "assistant", + "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:07:50.508Z", + "uuid": "6610c2dd-f12c-4fc1-b1d4-fa78c1612692", + "parentUuid": "39ea49bc-8cc9-4ec3-b598-4d75428d7c5e", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-opus-4-1-20250805", + "id": "msg_01NtyE53hx2q89rMBGuw6qKD", + "content": [ + { + "type": "text", + "text": "I'll help you rewrite this to use proper HTML ruby elements, which have better browser support than the CSS `ruby-base` and `ruby-text` display values.\n\nLet me first examine the current structure to u... 
[truncated]" + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/assistant/assistant.jsonl b/dev-docs/messages/assistant/assistant.jsonl new file mode 100644 index 00000000..25fd5235 --- /dev/null +++ b/dev-docs/messages/assistant/assistant.jsonl @@ -0,0 +1 @@ +{"parentUuid": "39ea49bc-8cc9-4ec3-b598-4d75428d7c5e", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "message": {"id": "msg_01NtyE53hx2q89rMBGuw6qKD", "type": "message", "role": "assistant", "model": "claude-opus-4-1-20250805", "content": [{"type": "text", "text": "I'll help you rewrite this to use proper HTML ruby elements, which have better browser support than the CSS `ruby-base` and `ruby-text` display values.\n\nLet me first examine the current structure to understand how it's being used:"}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 4, "cache_creation_input_tokens": 4756, "cache_read_input_tokens": 12008, "cache_creation": {"ephemeral_5m_input_tokens": 4756, "ephemeral_1h_input_tokens": 0}, "output_tokens": 2, "service_tier": "standard"}}, "requestId": "req_011CTd4PoK9LMzcZt6RWbVTR", "type": "assistant", "uuid": "6610c2dd-f12c-4fc1-b1d4-fa78c1612692", "timestamp": "2025-09-29T17:07:50.508Z"} diff --git a/dev-docs/messages/assistant/assistant_sidechain.json b/dev-docs/messages/assistant/assistant_sidechain.json new file mode 100644 index 00000000..92205cd7 --- /dev/null +++ b/dev-docs/messages/assistant/assistant_sidechain.json @@ -0,0 +1,20 @@ +{ + "type": "assistant", + "sessionId": "7864f562-717b-4d70-a1cb-b588f7826a1a", + "timestamp": "2025-10-29T16:03:08.981Z", + "uuid": "dfcf5df8-10d0-4b02-a2a0-3775a96225d3", + "parentUuid": "86a390e3-356f-4e9b-9584-cd5d5b9af948", + "isSidechain": true, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-5-20250929", + "id": 
"msg_018gYNPTHWry5dDGwS3tQ6BV", + "content": [ + { + "type": "text", + "text": "I'm ready to help you search through your codebase! I can see this is a Next.js blog project with several blog posts and projects. \n\nWhat would you like me to help you find? I can:\n... [truncated]" + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/assistant/assistant_sidechain.jsonl b/dev-docs/messages/assistant/assistant_sidechain.jsonl new file mode 100644 index 00000000..762c118d --- /dev/null +++ b/dev-docs/messages/assistant/assistant_sidechain.jsonl @@ -0,0 +1 @@ +{"parentUuid": "86a390e3-356f-4e9b-9584-cd5d5b9af948", "isSidechain": true, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "7864f562-717b-4d70-a1cb-b588f7826a1a", "version": "2.0.28", "gitBranch": "main", "agentId": "b1f5d80e", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_018gYNPTHWry5dDGwS3tQ6BV", "type": "message", "role": "assistant", "content": [{"type": "text", "text": "I'm ready to help you search through your codebase! I can see this is a Next.js blog project with several blog posts and projects. \n\nWhat would you like me to help you find? 
I can:\n- Locate specific files using patterns\n- Search for code or text content\n- Read and analyze specific files\n- Navigate through your project structure\n\nWhat are you looking for today?"}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 3, "cache_creation_input_tokens": 1374, "cache_read_input_tokens": 0, "cache_creation": {"ephemeral_5m_input_tokens": 1374, "ephemeral_1h_input_tokens": 0}, "output_tokens": 87, "service_tier": "standard"}}, "requestId": "req_011CUbmj9zcNSzAQzdYyNMk6", "type": "assistant", "uuid": "dfcf5df8-10d0-4b02-a2a0-3775a96225d3", "timestamp": "2025-10-29T16:03:08.981Z"} diff --git a/dev-docs/messages/assistant/thinking.json b/dev-docs/messages/assistant/thinking.json new file mode 100644 index 00000000..8eb662ef --- /dev/null +++ b/dev-docs/messages/assistant/thinking.json @@ -0,0 +1,20 @@ +{ + "type": "assistant", + "sessionId": "f852ad25-1024-47da-964e-5eaae5bd6e6a", + "timestamp": "2025-09-29T18:01:57.835Z", + "uuid": "96acdb48-646c-415f-9528-722902e9fb6e", + "parentUuid": "7002bd4a-4559-454c-bca3-b40729ce9246", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-opus-4-1-20250805", + "id": "msg_01CkR2ph1853oo3iZdeTXBvJ", + "content": [ + { + "type": "thinking", + "thinking": "The user is asking me to:\n1. Read three files related to a tokenizer application\n2. Do a thorough code review\n... 
[truncated]" + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/assistant/thinking.jsonl b/dev-docs/messages/assistant/thinking.jsonl new file mode 100644 index 00000000..d25c0650 --- /dev/null +++ b/dev-docs/messages/assistant/thinking.jsonl @@ -0,0 +1 @@ +{"parentUuid": "7002bd4a-4559-454c-bca3-b40729ce9246", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "f852ad25-1024-47da-964e-5eaae5bd6e6a", "version": "1.0.128", "gitBranch": "main", "message": {"id": "msg_01CkR2ph1853oo3iZdeTXBvJ", "type": "message", "role": "assistant", "model": "claude-opus-4-1-20250805", "content": [{"type": "thinking", "thinking": "The user is asking me to:\n1. Read three files related to a tokenizer application\n2. Do a thorough code review\n3. Keep it simple and dependency-free (except for transformers.js)\n4. Keep it compilation-free\n5. Look at TODOs and propose a refactoring plan\n6. Keep the same 3 file structure\n7. Add features from TODOs and make the JavaScript neater\n\nI'm in plan mode, so I should not make any changes yet, just analyze and propose a plan.\n\nLet me analyze what I've read:\n\nThe application is an online LLM tokenizer that:\n- Runs entirely in the browser\n- Loads tokenizer models from HuggingFace\n- Shows token counts and visualizes tokens with colors\n- Stores models in localStorage\n- Has a textarea for input text\n- Can add custom models from HuggingFace\n\nTODOs I found:\n1. In tokenizer.js line 2: Consider upgrading to transformers.js v3 with WebGPU support\n2. In tokenizer.js line 21: Take model list from URL params\n3. In tokenizer.js line 91-92: Add delete button for models and make them reorderable\n4. In tokenizer.js line 97: See if it would be possible to render after each model loaded\n5. In tokenizer.js line 110: Do tokenization in a worker\n6. In tokenizer.html line 13: Add link to Github\n7. In tokenizer.html line 24: Take text from URL params\n8. 
In tokenizer.html line 34: Add styling to the add model section\n\nLet me do a thorough code review:\n\n**HTML (tokenizer.html):**\n- Clean and simple structure\n- Good use of semantic HTML\n- Missing Github link (TODO)\n- Could benefit from URL param support for text and models\n- The add model section needs better styling\n\n**CSS (tokenizer.css):**\n\n\nMinimalist design with effective dark mode implementation. The styling elegantly handles token visualization and layout. Subtle background coloring for tokenized text creates clear visual separation. Ruby positioning demonstrates careful attention to typographic details. The current styling provides a solid foundation with room for incremental improvements to the model addition interface.\n\n**JavaScript (tokenizer.js):**\nSolid core functionality with several optimization opportunities. The current implementation manages model loading and tokenization effectively, but lacks advanced features like model management and performance optimization. Key improvements could streamline user experience and enhance application responsiveness.\n\nPotential architectural enhancements include leveraging Web Workers for background processing, implementing dynamic URL configuration sharing, and creating more flexible model interaction mechanisms. 
The goal is to transform the current implementation into a more robust, user-friendly tokenization tool with enhanced performance and configurability.", "signature": "Ev8VCkYICBgCKkCWzxqq3aFDFB0xYRSwcFyYFw+IrZh0P0SNbyr8Qaq0/9pzG8L66PWfe4nEX5FmUVSXuphz+2r/v+lnLuSZKrqaEgzZVBiSdzmQsP2O3wQaDEncYPeh2Yx56E+d7iIwRunjzQt0YlrGyrTc6Nf5hV8+cYuDQjQZkj3pTjOy/+3PdTkkk5j8LU56COFyXn0rKuYUOeAZ5pU0Soh9WIMwSlgLPem3vJRF+GfC3QC4fUr2PEXvoI5cwOOcsHY8jZ6ixNKYXncLWwba8Ij13IB+gUuCrB97FJ27OOhk/Kk11E69gZJGCSFqb8ZmX8qwKBoxGmBoykhIhTg+UMLHP/jIN/vbOz1ed3yvfcrJMFgZlg3RG5THUSVLhF0ma53kEE7VweInSk2PHOgU0xeqZX4/CJvobAnNZ2OrsW8bxKKP0rdexMiQ2kA9ck1wckcXBt568F7CM4Pn7xvphEH5D8NBJ1QUK1waNeWRlj+f32Y1j8PM2w5WuJiGlDzQiJQHzrn8FvxCOoYozEgG+M5zBv4++cR3OAdG5gyuJNdF4JZKGA7mPA/ec+1DTXg4wfJrPe6p4EMYmcaLJJa8az9BwQ3qDCiIl7pIQcPGsrsSRd1nCXQs8ykMSgSOq5P8ZKpOJG0+tLiTQdhy0p8wZ+XKU22ni8SexAUW9npryT7CRCz3EsAQ6hlu/+JISnNVBHBSSEcp/iVqvY7BSl5Fa0CChNBTkLZEolkY1zeCECQyNdpmTio6XQQU8UU0SNU287y6g/HGaadVhcY0d/Cch/zN4SuShg6GYaJdpE+CHcHZLFzakQY6ssrdsnTWfAMhXznA+AXjdH+2COskx2vZAPtH5dWhDRxiuw6bsDXAIZd5UfFVMws2Nbpdv0IsRL6CBN5YRZmzcORrIMitNpz/qmEZPsiTG3hZfM0f+L3CYHW5Ts/BPNbnr48JreprJAwUYgePkyjm6+SevhW38tw3mj2alh6rOmqa1Aet7e9XuhAoaVLWJHPCnW8Eqaebl8T/YdIunVLyf2+emEe+D6xYbrgymkvJRo3mb7CJoU/RnmrV+5DEa99J1fFlIBV0+i0vZys1ozv93Jh8y11z0l9g3+5GsbXsfZfhmPC3BcLVTY4WwK5c63uW+yrnrxu94ZMXST8YmiU2Qw+zpqwSiABYddcDy9xgqLPu6kG8d0NTQNfix8FuT6fHAUmiOM43KDODRlCPYX8LX5o8RhWNZ0JIOaFORMZlkseEmItgmSVXxf9354JRf2cbhFrJiqEvRHk+uPlsOTm7j+Kd3H49fGGNNYJqRprjn/0l3jeAqr87dwH3FTtt48uOpCui50yZ5b3XG+0oQ4FOzuShIZ5/ZLWcJYxpf0S8KQZKcTvtAVCOY0q+HXyi80Cd0A7CyvJiw5rcp5a7/sdFJJBfTGQSTG46qmUjQKW3IGf4FeYPrioLF4gxRGj2QF12h0PHAGRVmue82bEbiRwoxnKbGD4Zo8bQrKlup8nM0ZgXVYalNJJzhLhOHOkd1BB4zVZ5IFEF9vbzfDnAyDZilSO3+gMxGqfKoz6ubIpXpr6WhNa7pQUH5Q20PnNZG28OULytIrkOPgksWZXbqWpAFATIm93VJ9MPYgnROj1Vkl8QnPjbcneZreXqUdej5cyhTUmB/W/lVf7fDBxMqkLTUdX6jFflj1fVEHERh2ML5qoDRvjXhlSTSHkXUq0pXlH3XVcb4Ey9LSUpR+j6bhrdcZLxoH1oklo0XW2+VbUGXTrKPhp/FZjdT+GSqXyl9E6VtNqlDvRkWSaSTY3Ddj3UIweKveghktOPik4vQe6qJWMPZjumt
acFwOuF1OVR+BsJV3KAgsrm5/x0GFpSarN82I7izvoWZiFHviSa/MFUYB0lKQkBdvP5CJjf3FJvBxLil8+2cOjdZHKfqPoWtel9iaSP6wmQDuWN6BEfOXoQrio4m0ZW9K74D+Z9WXJhzKhfxhW2alQq/QXiG+MUp2YP05hGC8zIl1EpTVyAv7TEgAQj/zT35yoDJhGjt+HM9ajEZ61BrP3K1g+4oknIwdsFw5Jz3loMVfQSUOoYwwFCMYC08NmZdleV7CB9Cd5q3BfFCKKA7wq+OLV1i20i/r908bPmbCHP/v4sPn0qLuhg0K/1/dyMNPVhR2JG85/nUgg4thC3/yxWCNAWSx723ZwxwO2cmXHgNpXZO41P2KvL5s6E8cxcmmzP2UgDrWifuGjtd86gax+HN+QltDqjylLp+ksT+2Vo5sAHOLH+jAWOyX0zHBmSfIO83XMshioNIrayO4wgAYHjiPyIhQVTj9epRQozR+9CGtVX93n0FG7qu0pjqzQ/Q2OOns12ZYHjTCtOr9FOi7WBBZz1btkyK2ejJZqPhn2KDjRveqBCGNtvFALIdrDio+CmOmaLShBPSF3FVeVUcXNErrgDnzWjXbw7kHkq4LBlfzo+hCJVbNnq5zEMwIWRPJLFLoBaKR7qDPWp1lCYj7EEd9yMXB6IhR2aqa+QPgL8WFTVpME/MngUclCI6JvL83E5UyTia7NQ5bFMEsFE+9KitA9s8LJWF7Oh9pcdysHxBOfIcKU4vynvIy2+MlmXDV+XnlctoX128q7rfRK432GmHrQi8fs1htNXYQZdlg4FwCxc25tcqVDqhUDiJO4KWaHiJ2uACQ/ZpbnaX6wWqi+d80MuqiCrfFwYWXddVeaJQrRlk47wtI7m1/et7TvPSnhuIQRiYOdHFey11slhtiSGA12hGgBZMOb21UzYrFqVrPdMV0p/kFdqPm1WHJwT5llIt6oxtIiGlf6d726DW0o2Qehq5biJNxqRDFwjIesiTzekMuDNFu2x0hKBIvIOk4MgkA7Xlf8pbdLABMBIgszssAPtizTmmTLC+/HefMbGGb8qMwboioUgRF8jpCSAautw1FJoImrRDYpncNgEP1Iza52awdH5tLtbZDu1u2tQIxU+bE6Z651gp7H9MG47FWZ1E8RDaxwcgecZy9Yc1nh5kLeoRauMM25/jR6Qch0vSGFw4WFfFe5h5RMsNSi2AFbm9qW/HilJckieb8SJPKp2Hktx+fw7wET1aBSf9OVDdjQhKq4nGlio5sQOc1jC/7XFwI7JAL1uFNHlNpNW7E58TUy8z22QNJyphN3QO+BEKlyzTT+v6e7gUzPjZ5siaRznc7BdFp7AuyPLd2oyEM6DHZdBqpW29TK6bjWHfgABwJSpWC8lER30Izqe7FyXwgw6QQ/yYRtGzjJnamBxVfPjO9zhUmRS3l4/5NT3qZ14j7hjdxdKQ57NQn+taVf6EVF0VOjKPQBTrQH2JdIG32DP2/NWUv3PyeOlk1uFuEC0GLDcQJlO7OYPkKbd9VsByx2OjsxkNIjvK2F9GGdPQTZ7x8PD4JuvfRTEwUDQyGUtGBJLhK/t921Vjlea+e+jLCeAXW6D1GN10szIzdBbHsXSE1JMhJ5wmm+AA4cnBhlw8Y7wDRQDuI0+DEnC/Oy8qqK4gmqGxst3lzMduyDcxenT0O49ryds5GXhLcGkXdwa197CKBCht1o5NYcJCNBDrPBAao3SAcDRmaTI/XkrQRVRf68RLT7c504n6A2bKzCQ0kNHXkB47wgYRMLFaUsVhUhe+216cI3Gy4jVLpw/8HXrwVmR1/3DvCYo9jD1ZuT62pd7N1qBm8h+h+1OuezrPdMLZ4xQ66FVulH52HHiVNXLHFswvjppGGDFy5x1hlzBMtyhr+tHj0ueZDWhtRb8GQaqHhgB"}], "stop_reason": null, "stop_sequence": null, "usage": 
{"input_tokens": 10, "cache_creation_input_tokens": 8827, "cache_read_input_tokens": 12008, "cache_creation": {"ephemeral_5m_input_tokens": 8827, "ephemeral_1h_input_tokens": 0}, "output_tokens": 4, "service_tier": "standard"}}, "requestId": "req_011CTd8VeEaZzJX8LJjnFZ6V", "type": "assistant", "uuid": "96acdb48-646c-415f-9528-722902e9fb6e", "timestamp": "2025-09-29T18:01:57.835Z"} diff --git a/dev-docs/messages/system/file_history_snapshot.json b/dev-docs/messages/system/file_history_snapshot.json new file mode 100644 index 00000000..7974be8a --- /dev/null +++ b/dev-docs/messages/system/file_history_snapshot.json @@ -0,0 +1,3 @@ +{ + "type": "file-history-snapshot" +} \ No newline at end of file diff --git a/dev-docs/messages/system/file_history_snapshot.jsonl b/dev-docs/messages/system/file_history_snapshot.jsonl new file mode 100644 index 00000000..0cbc53d5 --- /dev/null +++ b/dev-docs/messages/system/file_history_snapshot.jsonl @@ -0,0 +1 @@ +{"type": "file-history-snapshot", "messageId": "fbfaa419-c032-467f-a791-7b0ac66539a6", "snapshot": {"messageId": "fbfaa419-c032-467f-a791-7b0ac66539a6", "trackedFileBackups": {}, "timestamp": "2025-11-29T15:16:58.437Z"}, "isSnapshotUpdate": false} diff --git a/dev-docs/messages/system/queue_operation.json b/dev-docs/messages/system/queue_operation.json new file mode 100644 index 00000000..ed89d042 --- /dev/null +++ b/dev-docs/messages/system/queue_operation.json @@ -0,0 +1,11 @@ +{ + "type": "queue-operation", + "sessionId": "7acd37a8-2745-4b58-a8a9-46164b22ad9e", + "timestamp": "2025-11-17T23:50:06.046Z", + "content": [ + { + "type": "text", + "text": "/init" + } + ] +} \ No newline at end of file diff --git a/dev-docs/messages/system/queue_operation.jsonl b/dev-docs/messages/system/queue_operation.jsonl new file mode 100644 index 00000000..310becab --- /dev/null +++ b/dev-docs/messages/system/queue_operation.jsonl @@ -0,0 +1 @@ +{"type": "queue-operation", "operation": "enqueue", "timestamp": "2025-11-17T23:50:06.046Z", 
"content": [{"type": "text", "text": "/init"}], "sessionId": "7acd37a8-2745-4b58-a8a9-46164b22ad9e"} diff --git a/dev-docs/messages/system/summary.json b/dev-docs/messages/system/summary.json new file mode 100644 index 00000000..b0aec81c --- /dev/null +++ b/dev-docs/messages/system/summary.json @@ -0,0 +1,5 @@ +{ + "type": "summary", + "summary": "CSS Details Margin Styling", + "leafUuid": "f29ff328-9634-4c27-8fc6-5e04c3ee78cc" +} \ No newline at end of file diff --git a/dev-docs/messages/system/summary.jsonl b/dev-docs/messages/system/summary.jsonl new file mode 100644 index 00000000..e984c972 --- /dev/null +++ b/dev-docs/messages/system/summary.jsonl @@ -0,0 +1 @@ +{"type": "summary", "summary": "CSS Details Margin Styling", "leafUuid": "f29ff328-9634-4c27-8fc6-5e04c3ee78cc"} diff --git a/dev-docs/messages/system/system_info.json b/dev-docs/messages/system/system_info.json new file mode 100644 index 00000000..954b1352 --- /dev/null +++ b/dev-docs/messages/system/system_info.json @@ -0,0 +1,11 @@ +{ + "type": "system", + "sessionId": "cbc0f75b-b36d-4efd-a7da-ac800ea30eb6", + "timestamp": "2025-07-19T14:37:16.848Z", + "uuid": "1cb795e0-0e78-4c35-b232-c8e554323156", + "parentUuid": "46d6d96c-df81-465f-860a-cd26d2b9e38f", + "isSidechain": false, + "isMeta": false, + "level": "info", + "content": "Running \u001b[1mPostToolUse:MultiEdit\u001b[22m..." 
+} \ No newline at end of file diff --git a/dev-docs/messages/system/system_info.jsonl b/dev-docs/messages/system/system_info.jsonl new file mode 100644 index 00000000..7e32f943 --- /dev/null +++ b/dev-docs/messages/system/system_info.jsonl @@ -0,0 +1 @@ +{"parentUuid": "46d6d96c-df81-465f-860a-cd26d2b9e38f", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/claude-code-log", "sessionId": "cbc0f75b-b36d-4efd-a7da-ac800ea30eb6", "version": "1.0.55", "gitBranch": "main", "type": "system", "content": "Running \u001b[1mPostToolUse:MultiEdit\u001b[22m...", "isMeta": false, "timestamp": "2025-07-19T14:37:16.848Z", "uuid": "1cb795e0-0e78-4c35-b232-c8e554323156", "toolUseID": "toolu_019WJBjsr6mbHmqXJgQ1mq2L", "level": "info"} diff --git a/dev-docs/messages/tools/AskUserQuestion-tool_result.json b/dev-docs/messages/tools/AskUserQuestion-tool_result.json new file mode 100644 index 00000000..31c84765 --- /dev/null +++ b/dev-docs/messages/tools/AskUserQuestion-tool_result.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "cb2e607c-c758-415a-8b45-c49e4631906a", + "timestamp": "2025-11-17T11:24:30.745Z", + "uuid": "c37b9c09-2cf8-4d20-afcf-60d2f90f0eb1", + "parentUuid": "e7ec4aaa-9676-4055-91eb-f2776361ec6f", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_013Cho8SURc4ESongaWZu4d7", + "is_error": true, + "content": "Error: No such tool available: AskUserQuestion" + } + ] + }, + "toolUseResult": "Error: No such tool available: AskUserQuestion" +} \ No newline at end of file diff --git a/dev-docs/messages/tools/AskUserQuestion-tool_result.jsonl b/dev-docs/messages/tools/AskUserQuestion-tool_result.jsonl new file mode 100644 index 00000000..9574ec75 --- /dev/null +++ b/dev-docs/messages/tools/AskUserQuestion-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "e7ec4aaa-9676-4055-91eb-f2776361ec6f", "isSidechain": false, "userType": "external", "cwd": 
"/Users/dain/workspace/coderabbit-review-helper", "sessionId": "cb2e607c-c758-415a-8b45-c49e4631906a", "version": "2.0.37", "gitBranch": "fix/comment-attribution-and-pagination", "type": "user", "message": {"role": "user", "content": [{"type": "tool_result", "content": "Error: No such tool available: AskUserQuestion", "is_error": true, "tool_use_id": "toolu_013Cho8SURc4ESongaWZu4d7"}]}, "uuid": "c37b9c09-2cf8-4d20-afcf-60d2f90f0eb1", "timestamp": "2025-11-17T11:24:30.745Z", "toolUseResult": "Error: No such tool available: AskUserQuestion"} diff --git a/dev-docs/messages/tools/AskUserQuestion-tool_result_error.json b/dev-docs/messages/tools/AskUserQuestion-tool_result_error.json new file mode 100644 index 00000000..31c84765 --- /dev/null +++ b/dev-docs/messages/tools/AskUserQuestion-tool_result_error.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "cb2e607c-c758-415a-8b45-c49e4631906a", + "timestamp": "2025-11-17T11:24:30.745Z", + "uuid": "c37b9c09-2cf8-4d20-afcf-60d2f90f0eb1", + "parentUuid": "e7ec4aaa-9676-4055-91eb-f2776361ec6f", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_013Cho8SURc4ESongaWZu4d7", + "is_error": true, + "content": "Error: No such tool available: AskUserQuestion" + } + ] + }, + "toolUseResult": "Error: No such tool available: AskUserQuestion" +} \ No newline at end of file diff --git a/dev-docs/messages/tools/AskUserQuestion-tool_result_error.jsonl b/dev-docs/messages/tools/AskUserQuestion-tool_result_error.jsonl new file mode 100644 index 00000000..9574ec75 --- /dev/null +++ b/dev-docs/messages/tools/AskUserQuestion-tool_result_error.jsonl @@ -0,0 +1 @@ +{"parentUuid": "e7ec4aaa-9676-4055-91eb-f2776361ec6f", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/coderabbit-review-helper", "sessionId": "cb2e607c-c758-415a-8b45-c49e4631906a", "version": "2.0.37", "gitBranch": "fix/comment-attribution-and-pagination", "type": "user", 
"message": {"role": "user", "content": [{"type": "tool_result", "content": "Error: No such tool available: AskUserQuestion", "is_error": true, "tool_use_id": "toolu_013Cho8SURc4ESongaWZu4d7"}]}, "uuid": "c37b9c09-2cf8-4d20-afcf-60d2f90f0eb1", "timestamp": "2025-11-17T11:24:30.745Z", "toolUseResult": "Error: No such tool available: AskUserQuestion"} diff --git a/dev-docs/messages/tools/AskUserQuestion-tool_use.json b/dev-docs/messages/tools/AskUserQuestion-tool_use.json new file mode 100644 index 00000000..86feaead --- /dev/null +++ b/dev-docs/messages/tools/AskUserQuestion-tool_use.json @@ -0,0 +1,24 @@ +{ + "type": "assistant", + "sessionId": "cb2e607c-c758-415a-8b45-c49e4631906a", + "timestamp": "2025-11-17T11:24:30.683Z", + "uuid": "e7ec4aaa-9676-4055-91eb-f2776361ec6f", + "parentUuid": "e753d86a-d3a6-409d-90b6-6064080be49c", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-5-20250929", + "id": "msg_0128UrZy2Lq7Tdn7FYWLDgi7", + "content": [ + { + "type": "tool_use", + "id": "toolu_013Cho8SURc4ESongaWZu4d7", + "name": "AskUserQuestion", + "input": { + "question": "I need to understand your preferred installation approach to give you the best solution:\n\n... [trunc... 
[truncated]" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/AskUserQuestion-tool_use.jsonl b/dev-docs/messages/tools/AskUserQuestion-tool_use.jsonl new file mode 100644 index 00000000..8ca0677d --- /dev/null +++ b/dev-docs/messages/tools/AskUserQuestion-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "e753d86a-d3a6-409d-90b6-6064080be49c", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/coderabbit-review-helper", "sessionId": "cb2e607c-c758-415a-8b45-c49e4631906a", "version": "2.0.37", "gitBranch": "fix/comment-attribution-and-pagination", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_0128UrZy2Lq7Tdn7FYWLDgi7", "type": "message", "role": "assistant", "content": [{"type": "tool_use", "id": "toolu_013Cho8SURc4ESongaWZu4d7", "name": "AskUserQuestion", "input": {"question": "I need to understand your preferred installation approach to give you the best solution:\n\n1. **Installation method**: Do you want to install this as:\n - A **global tool** (using `uv tool install`) that's available system-wide?\n - A **library** that can be imported in other Python projects?\n - **Both** (installable as either)?\n\n2. **File naming**: The current script is `extract-coderabbit-feedback.py` but your pyproject.toml expects `coderabbit_review_extractor.py`. Would you prefer to:\n - **Rename** the file to match the configuration (recommended)\n - **Keep** the current name and update the configuration\n - Use a different name entirely?\n\n3. **Project structure**: Currently it's a single script. Do you want to:\n - **Keep** the single-file structure (simpler)\n - **Convert** to a proper package structure with `src/coderabbit_review_extractor/` (more maintainable for future growth)\n\n4. 
**Build backend**: Would you prefer:\n - **Hatchling** (UV's recommended, modern, zero-config)\n - **Keep setuptools** (what you currently have)\n\nThese choices will affect the specific configuration needed."}}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 10, "cache_creation_input_tokens": 1561, "cache_read_input_tokens": 16340, "cache_creation": {"ephemeral_5m_input_tokens": 1561, "ephemeral_1h_input_tokens": 0}, "output_tokens": 685, "service_tier": "standard"}}, "requestId": "req_011CVDNqK6g58YEo1eBC6To3", "type": "assistant", "uuid": "e7ec4aaa-9676-4055-91eb-f2776361ec6f", "timestamp": "2025-11-17T11:24:30.683Z"} diff --git a/dev-docs/messages/tools/Bash-tool_result.json b/dev-docs/messages/tools/Bash-tool_result.json new file mode 100644 index 00000000..28037851 --- /dev/null +++ b/dev-docs/messages/tools/Bash-tool_result.json @@ -0,0 +1,23 @@ +{ + "type": "user", + "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", + "timestamp": "2025-10-03T23:59:15.607Z", + "uuid": "3367bd17-88e3-47f0-a32b-98b72d7ddebf", + "parentUuid": "b71cdedf-849f-4f38-badc-75403cd3ee6a", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01T1SrbUgaSJkHWJd5outNgr", + "is_error": false, + "content": "" + } + ] + }, + "toolUseResult": { + "stdout": "", + "stderr": "" + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Bash-tool_result.jsonl b/dev-docs/messages/tools/Bash-tool_result.jsonl new file mode 100644 index 00000000..c89be63c --- /dev/null +++ b/dev-docs/messages/tools/Bash-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "b71cdedf-849f-4f38-badc-75403cd3ee6a", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", "version": "2.0.5", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_01T1SrbUgaSJkHWJd5outNgr", 
"type": "tool_result", "content": "", "is_error": false}]}, "uuid": "3367bd17-88e3-47f0-a32b-98b72d7ddebf", "timestamp": "2025-10-03T23:59:15.607Z", "toolUseResult": {"stdout": "", "stderr": "", "interrupted": false, "isImage": false}} diff --git a/dev-docs/messages/tools/Bash-tool_result_error.json b/dev-docs/messages/tools/Bash-tool_result_error.json new file mode 100644 index 00000000..7591e56d --- /dev/null +++ b/dev-docs/messages/tools/Bash-tool_result_error.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", + "timestamp": "2025-10-04T00:01:48.266Z", + "uuid": "2a6064fb-0f9b-4058-a9b9-faed1637dd55", + "parentUuid": "8fd655c9-4dc9-43e7-a333-063020015afd", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01YKFv5mcsGBX463DAn2h9YD", + "is_error": true, + "content": "please add transformer.js too first" + } + ] + }, + "toolUseResult": "Error: please add transformer.js too first" +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Bash-tool_result_error.jsonl b/dev-docs/messages/tools/Bash-tool_result_error.jsonl new file mode 100644 index 00000000..e9112de7 --- /dev/null +++ b/dev-docs/messages/tools/Bash-tool_result_error.jsonl @@ -0,0 +1 @@ +{"parentUuid": "8fd655c9-4dc9-43e7-a333-063020015afd", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/online-llm-tokenizer", "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", "version": "2.0.5", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"type": "tool_result", "content": "please add transformer.js too first", "is_error": true, "tool_use_id": "toolu_01YKFv5mcsGBX463DAn2h9YD"}]}, "uuid": "2a6064fb-0f9b-4058-a9b9-faed1637dd55", "timestamp": "2025-10-04T00:01:48.266Z", "toolUseResult": "Error: please add transformer.js too first"} diff --git a/dev-docs/messages/tools/Bash-tool_use.json 
b/dev-docs/messages/tools/Bash-tool_use.json new file mode 100644 index 00000000..8e60824a --- /dev/null +++ b/dev-docs/messages/tools/Bash-tool_use.json @@ -0,0 +1,25 @@ +{ + "type": "assistant", + "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", + "timestamp": "2025-10-03T23:59:07.774Z", + "uuid": "b71cdedf-849f-4f38-badc-75403cd3ee6a", + "parentUuid": "8103ed74-daa2-4599-a6f0-061c168667a5", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-5-20250929", + "id": "msg_01MUcHFgCTt4LYAEMUbGsZ9u", + "content": [ + { + "type": "tool_use", + "id": "toolu_01T1SrbUgaSJkHWJd5outNgr", + "name": "Bash", + "input": { + "command": "cp /Users/dain/workspace/danieldemmel.me-next/public/tokenizer.html /Users/dain/workspace/online-llm... [truncated]", + "description": "Copy tokenizer files to new repo" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Bash-tool_use.jsonl b/dev-docs/messages/tools/Bash-tool_use.jsonl new file mode 100644 index 00000000..31c3f98d --- /dev/null +++ b/dev-docs/messages/tools/Bash-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "8103ed74-daa2-4599-a6f0-061c168667a5", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", "version": "2.0.5", "gitBranch": "main", "message": {"id": "msg_01MUcHFgCTt4LYAEMUbGsZ9u", "type": "message", "role": "assistant", "model": "claude-sonnet-4-5-20250929", "content": [{"type": "tool_use", "id": "toolu_01T1SrbUgaSJkHWJd5outNgr", "name": "Bash", "input": {"command": "cp /Users/dain/workspace/danieldemmel.me-next/public/tokenizer.html /Users/dain/workspace/online-llm-tokenizer/index.html && cp /Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css /Users/dain/workspace/online-llm-tokenizer/tokenizer.css && cp /Users/dain/workspace/danieldemmel.me-next/public/tokenizer.js 
/Users/dain/workspace/online-llm-tokenizer/tokenizer.js", "description": "Copy tokenizer files to new repo"}}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 7, "cache_creation_input_tokens": 350, "cache_read_input_tokens": 25178, "cache_creation": {"ephemeral_5m_input_tokens": 350, "ephemeral_1h_input_tokens": 0}, "output_tokens": 26, "service_tier": "standard"}}, "requestId": "req_011CTmAzWHumhhBPD7N87B99", "type": "assistant", "uuid": "b71cdedf-849f-4f38-badc-75403cd3ee6a", "timestamp": "2025-10-03T23:59:07.774Z"} diff --git a/dev-docs/messages/tools/BashOutput-tool_result.json b/dev-docs/messages/tools/BashOutput-tool_result.json new file mode 100644 index 00000000..ed66afff --- /dev/null +++ b/dev-docs/messages/tools/BashOutput-tool_result.json @@ -0,0 +1,23 @@ +{ + "type": "user", + "sessionId": "7acd37a8-2745-4b58-a8a9-46164b22ad9e", + "timestamp": "2025-11-18T00:03:27.238Z", + "uuid": "0aaed81b-3220-4424-ac7d-235802db656a", + "parentUuid": "be2b84d8-d747-48d3-adf0-143393870c88", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01GvxiBWatZMFVNvxyDms7Ey", + "is_error": false, + "content": "running\n\n\n... [truncated]" + } + ] + }, + "toolUseResult": { + "stdout": "> jssoundrecorder@0.1.0 dev /Users/dain/workspace/JSSoundRecorder\n> vite\n\n... 
[truncated]", + "stderr": "" + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/BashOutput-tool_result.jsonl b/dev-docs/messages/tools/BashOutput-tool_result.jsonl new file mode 100644 index 00000000..cb68ffcd --- /dev/null +++ b/dev-docs/messages/tools/BashOutput-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "be2b84d8-d747-48d3-adf0-143393870c88", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/JSSoundRecorder", "sessionId": "7acd37a8-2745-4b58-a8a9-46164b22ad9e", "version": "2.0.42", "gitBranch": "gh-pages", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_01GvxiBWatZMFVNvxyDms7Ey", "type": "tool_result", "content": "running\n\n\n> jssoundrecorder@0.1.0 dev /Users/dain/workspace/JSSoundRecorder\n> vite\n\n\n VITE v5.4.21 ready in 377 ms\n\n \u279c Local: http://localhost:5173/\n \u279c Network: use --host to expose\n\n\n2025-11-18T00:03:27.238Z"}]}, "uuid": "0aaed81b-3220-4424-ac7d-235802db656a", "timestamp": "2025-11-18T00:03:27.238Z", "toolUseResult": {"shellId": "dce0af", "command": "pnpm dev", "status": "running", "exitCode": null, "stdout": "> jssoundrecorder@0.1.0 dev /Users/dain/workspace/JSSoundRecorder\n> vite\n\n\n VITE v5.4.21 ready in 377 ms\n\n \u279c Local: http://localhost:5173/\n \u279c Network: use --host to expose", "stderr": "", "stdoutLines": 9, "stderrLines": 1, "timestamp": "2025-11-18T00:03:27.238Z"}} diff --git a/dev-docs/messages/tools/BashOutput-tool_use.json b/dev-docs/messages/tools/BashOutput-tool_use.json new file mode 100644 index 00000000..b48acc4c --- /dev/null +++ b/dev-docs/messages/tools/BashOutput-tool_use.json @@ -0,0 +1,24 @@ +{ + "type": "assistant", + "sessionId": "7acd37a8-2745-4b58-a8a9-46164b22ad9e", + "timestamp": "2025-11-18T00:03:27.174Z", + "uuid": "be2b84d8-d747-48d3-adf0-143393870c88", + "parentUuid": "4682610f-dd5e-4c44-909b-22f1d86ff164", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": 
"claude-sonnet-4-5-20250929", + "id": "msg_01EPx394bFyVKqqNVDKWqnct", + "content": [ + { + "type": "tool_use", + "id": "toolu_01GvxiBWatZMFVNvxyDms7Ey", + "name": "BashOutput", + "input": { + "bash_id": "dce0af" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/BashOutput-tool_use.jsonl b/dev-docs/messages/tools/BashOutput-tool_use.jsonl new file mode 100644 index 00000000..456b32ce --- /dev/null +++ b/dev-docs/messages/tools/BashOutput-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "4682610f-dd5e-4c44-909b-22f1d86ff164", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/JSSoundRecorder", "sessionId": "7acd37a8-2745-4b58-a8a9-46164b22ad9e", "version": "2.0.42", "gitBranch": "gh-pages", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_01EPx394bFyVKqqNVDKWqnct", "type": "message", "role": "assistant", "content": [{"type": "tool_use", "id": "toolu_01GvxiBWatZMFVNvxyDms7Ey", "name": "BashOutput", "input": {"bash_id": "dce0af"}}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 8, "cache_creation_input_tokens": 286, "cache_read_input_tokens": 40733, "cache_creation": {"ephemeral_5m_input_tokens": 286, "ephemeral_1h_input_tokens": 0}, "output_tokens": 147, "service_tier": "standard"}}, "requestId": "req_011CVENim5vua4dLWAdhofCZ", "type": "assistant", "uuid": "be2b84d8-d747-48d3-adf0-143393870c88", "timestamp": "2025-11-18T00:03:27.174Z"} diff --git a/dev-docs/messages/tools/Edit-tool_result.json b/dev-docs/messages/tools/Edit-tool_result.json new file mode 100644 index 00000000..f07432d1 --- /dev/null +++ b/dev-docs/messages/tools/Edit-tool_result.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:08:56.317Z", + "uuid": "642ea10e-e0d8-43f4-9c26-ebce0828a8b9", + "parentUuid": "9112bb66-ff4b-499f-bef8-03fc2317a56f", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": 
"tool_result", + "tool_use_id": "toolu_01LsK8An4morbFYkB3fejkoX", + "is_error": true, + "content": "File has not been read yet. Read it first before writing to it." + } + ] + }, + "toolUseResult": "Error: File has not been read yet. Read it first before writing to it." +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Edit-tool_result.jsonl b/dev-docs/messages/tools/Edit-tool_result.jsonl new file mode 100644 index 00000000..bf89e58c --- /dev/null +++ b/dev-docs/messages/tools/Edit-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "9112bb66-ff4b-499f-bef8-03fc2317a56f", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"type": "tool_result", "content": "File has not been read yet. Read it first before writing to it.", "is_error": true, "tool_use_id": "toolu_01LsK8An4morbFYkB3fejkoX"}]}, "uuid": "642ea10e-e0d8-43f4-9c26-ebce0828a8b9", "timestamp": "2025-09-29T17:08:56.317Z", "toolUseResult": "Error: File has not been read yet. Read it first before writing to it."} diff --git a/dev-docs/messages/tools/Edit-tool_result_error.json b/dev-docs/messages/tools/Edit-tool_result_error.json new file mode 100644 index 00000000..f07432d1 --- /dev/null +++ b/dev-docs/messages/tools/Edit-tool_result_error.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:08:56.317Z", + "uuid": "642ea10e-e0d8-43f4-9c26-ebce0828a8b9", + "parentUuid": "9112bb66-ff4b-499f-bef8-03fc2317a56f", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01LsK8An4morbFYkB3fejkoX", + "is_error": true, + "content": "File has not been read yet. Read it first before writing to it." + } + ] + }, + "toolUseResult": "Error: File has not been read yet. 
Read it first before writing to it." +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Edit-tool_result_error.jsonl b/dev-docs/messages/tools/Edit-tool_result_error.jsonl new file mode 100644 index 00000000..bf89e58c --- /dev/null +++ b/dev-docs/messages/tools/Edit-tool_result_error.jsonl @@ -0,0 +1 @@ +{"parentUuid": "9112bb66-ff4b-499f-bef8-03fc2317a56f", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"type": "tool_result", "content": "File has not been read yet. Read it first before writing to it.", "is_error": true, "tool_use_id": "toolu_01LsK8An4morbFYkB3fejkoX"}]}, "uuid": "642ea10e-e0d8-43f4-9c26-ebce0828a8b9", "timestamp": "2025-09-29T17:08:56.317Z", "toolUseResult": "Error: File has not been read yet. Read it first before writing to it."} diff --git a/dev-docs/messages/tools/Edit-tool_use.json b/dev-docs/messages/tools/Edit-tool_use.json new file mode 100644 index 00000000..0807c418 --- /dev/null +++ b/dev-docs/messages/tools/Edit-tool_use.json @@ -0,0 +1,26 @@ +{ + "type": "assistant", + "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:08:56.225Z", + "uuid": "9112bb66-ff4b-499f-bef8-03fc2317a56f", + "parentUuid": "eddc6f0f-e83b-4371-aaea-48617f80f642", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-20250514", + "id": "msg_01GpixxQhWDdiAXnh7Y7KvRp", + "content": [ + { + "type": "tool_use", + "id": "toolu_01LsK8An4morbFYkB3fejkoX", + "name": "Edit", + "input": { + "file_path": "/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.js", + "old_string": "const renderTokenAndText = (acc, { token, text }, index) => {\n return (acc +=\n... 
[truncated]", + "new_string": "const renderTokenAndText = (acc, { token, text }, index) => {\n return (acc +=\n... [truncated]" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Edit-tool_use.jsonl b/dev-docs/messages/tools/Edit-tool_use.jsonl new file mode 100644 index 00000000..4f5765bb --- /dev/null +++ b/dev-docs/messages/tools/Edit-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "eddc6f0f-e83b-4371-aaea-48617f80f642", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "message": {"id": "msg_01GpixxQhWDdiAXnh7Y7KvRp", "type": "message", "role": "assistant", "model": "claude-sonnet-4-20250514", "content": [{"type": "tool_use", "id": "toolu_01LsK8An4morbFYkB3fejkoX", "name": "Edit", "input": {"file_path": "/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.js", "old_string": "const renderTokenAndText = (acc, { token, text }, index) => {\n return (acc +=\n text === '\\n'\n ? '
'\n : `${token}${text.replace(/&/g, '&').replace(//g, '>')}`)\n}", "new_string": "const renderTokenAndText = (acc, { token, text }, index) => {\n return (acc +=\n text === '\\n'\n ? '
'\n : `${text.replace(/&/g, '&').replace(//g, '>')}${token}`)\n}"}}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 4, "cache_creation_input_tokens": 313, "cache_read_input_tokens": 22329, "cache_creation": {"ephemeral_5m_input_tokens": 313, "ephemeral_1h_input_tokens": 0}, "output_tokens": 1, "service_tier": "standard"}}, "requestId": "req_011CTd4URvudKaXc6Y7Nnfan", "type": "assistant", "uuid": "9112bb66-ff4b-499f-bef8-03fc2317a56f", "timestamp": "2025-09-29T17:08:56.225Z"} diff --git a/dev-docs/messages/tools/ExitPlanMode-tool_result.json b/dev-docs/messages/tools/ExitPlanMode-tool_result.json new file mode 100644 index 00000000..e8b52c75 --- /dev/null +++ b/dev-docs/messages/tools/ExitPlanMode-tool_result.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:08:41.320Z", + "uuid": "83bb4f7b-1c10-4297-869b-d8553691adee", + "parentUuid": "67b1db15-73a4-4de3-8a6e-3c27eff6f5bb", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_0173799ePMBxKdX8hsuevgm7", + "is_error": false, + "content": "User has approved your plan. You can now start coding. Start with updating your todo list if applicable" + } + ] + }, + "toolUseResult": {} +} \ No newline at end of file diff --git a/dev-docs/messages/tools/ExitPlanMode-tool_result.jsonl b/dev-docs/messages/tools/ExitPlanMode-tool_result.jsonl new file mode 100644 index 00000000..6073b92d --- /dev/null +++ b/dev-docs/messages/tools/ExitPlanMode-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "67b1db15-73a4-4de3-8a6e-3c27eff6f5bb", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"type": "tool_result", "content": "User has approved your plan. 
You can now start coding. Start with updating your todo list if applicable", "tool_use_id": "toolu_0173799ePMBxKdX8hsuevgm7"}]}, "uuid": "83bb4f7b-1c10-4297-869b-d8553691adee", "timestamp": "2025-09-29T17:08:41.320Z", "toolUseResult": {"plan": "## Plan to Fix Ruby Element Support for Chrome\n\nI'll rewrite the tokenizer display to use proper HTML ruby elements instead of CSS display properties for better browser compatibility:\n\n### Changes to make:\n\n1. **Update JavaScript rendering** (`/public/tokenizer.js`):\n - Modify the `renderTokenAndText` function to generate proper ruby HTML structure\n - Each token/text pair will use `` with `` for text and `` for token ID\n\n2. **Update CSS styles** (`/public/tokenizer.css`):\n - Remove the `display: ruby-base` and `display: ruby-text` properties\n - Add proper styling for the native ruby elements\n - Maintain the visual appearance with font sizes and spacing\n\nThe new HTML structure will be:\n```html\n\n text\n token_id\n\n```\n\nThis approach uses standard HTML5 ruby elements which have much better browser support, especially in Chrome.", "isAgent": false}} diff --git a/dev-docs/messages/tools/ExitPlanMode-tool_result_error.json b/dev-docs/messages/tools/ExitPlanMode-tool_result_error.json new file mode 100644 index 00000000..402b508c --- /dev/null +++ b/dev-docs/messages/tools/ExitPlanMode-tool_result_error.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "f852ad25-1024-47da-964e-5eaae5bd6e6a", + "timestamp": "2025-09-29T18:03:58.529Z", + "uuid": "7ad0670f-71d6-4b9a-92eb-6aec57054171", + "parentUuid": "63d8b8a5-ff78-427b-8f0c-2f1442b4413b", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_017mbHLs6TBUKmPTEbgKUZtH", + "is_error": true, + "content": "The user doesn't want to proceed with this tool use. The tool use was rejected (eg. if it was a file edit, the new_string was NOT written to the file). 
STOP what you are doing and wait for the user to... [truncated]" + } + ] + }, + "toolUseResult": "Error: The user doesn't want to proceed with this tool use. The tool use was rejected (eg. if it was a file edit, the new_string was NOT written to the file). STOP what you are doing and wait for the ... [truncated]" +} \ No newline at end of file diff --git a/dev-docs/messages/tools/ExitPlanMode-tool_result_error.jsonl b/dev-docs/messages/tools/ExitPlanMode-tool_result_error.jsonl new file mode 100644 index 00000000..e1b27e7f --- /dev/null +++ b/dev-docs/messages/tools/ExitPlanMode-tool_result_error.jsonl @@ -0,0 +1 @@ +{"parentUuid": "63d8b8a5-ff78-427b-8f0c-2f1442b4413b", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "f852ad25-1024-47da-964e-5eaae5bd6e6a", "version": "1.0.128", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"type": "tool_result", "content": "The user doesn't want to proceed with this tool use. The tool use was rejected (eg. if it was a file edit, the new_string was NOT written to the file). STOP what you are doing and wait for the user to tell you how to proceed.", "is_error": true, "tool_use_id": "toolu_017mbHLs6TBUKmPTEbgKUZtH"}]}, "uuid": "7ad0670f-71d6-4b9a-92eb-6aec57054171", "timestamp": "2025-09-29T18:03:58.529Z", "toolUseResult": "Error: The user doesn't want to proceed with this tool use. The tool use was rejected (eg. if it was a file edit, the new_string was NOT written to the file). 
STOP what you are doing and wait for the user to tell you how to proceed."} diff --git a/dev-docs/messages/tools/ExitPlanMode-tool_use.json b/dev-docs/messages/tools/ExitPlanMode-tool_use.json new file mode 100644 index 00000000..04e306b6 --- /dev/null +++ b/dev-docs/messages/tools/ExitPlanMode-tool_use.json @@ -0,0 +1,24 @@ +{ + "type": "assistant", + "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:08:36.338Z", + "uuid": "67b1db15-73a4-4de3-8a6e-3c27eff6f5bb", + "parentUuid": "06afbb5c-a17a-4ca7-9603-12515ad803ee", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-opus-4-1-20250805", + "id": "msg_01MiaNQB5aEjJMhwxAo4ZawH", + "content": [ + { + "type": "tool_use", + "id": "toolu_0173799ePMBxKdX8hsuevgm7", + "name": "ExitPlanMode", + "input": { + "plan": "## Plan to Fix Ruby Element Support for Chrome\n\n... [truncated]" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/ExitPlanMode-tool_use.jsonl b/dev-docs/messages/tools/ExitPlanMode-tool_use.jsonl new file mode 100644 index 00000000..f51f4241 --- /dev/null +++ b/dev-docs/messages/tools/ExitPlanMode-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "06afbb5c-a17a-4ca7-9603-12515ad803ee", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "message": {"id": "msg_01MiaNQB5aEjJMhwxAo4ZawH", "type": "message", "role": "assistant", "model": "claude-opus-4-1-20250805", "content": [{"type": "tool_use", "id": "toolu_0173799ePMBxKdX8hsuevgm7", "name": "ExitPlanMode", "input": {"plan": "## Plan to Fix Ruby Element Support for Chrome\n\nI'll rewrite the tokenizer display to use proper HTML ruby elements instead of CSS display properties for better browser compatibility:\n\n### Changes to make:\n\n1. 
**Update JavaScript rendering** (`/public/tokenizer.js`):\n - Modify the `renderTokenAndText` function to generate proper ruby HTML structure\n - Each token/text pair will use `` with `` for text and `` for token ID\n\n2. **Update CSS styles** (`/public/tokenizer.css`):\n - Remove the `display: ruby-base` and `display: ruby-text` properties\n - Add proper styling for the native ruby elements\n - Maintain the visual appearance with font sizes and spacing\n\nThe new HTML structure will be:\n```html\n\n text\n token_id\n\n```\n\nThis approach uses standard HTML5 ruby elements which have much better browser support, especially in Chrome."}}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 0, "cache_creation_input_tokens": 345, "cache_read_input_tokens": 21152, "cache_creation": {"ephemeral_5m_input_tokens": 345, "ephemeral_1h_input_tokens": 0}, "output_tokens": 406, "service_tier": "standard"}}, "requestId": "req_011CTd4Sd3axuXthX5Vxbiqh", "type": "assistant", "uuid": "67b1db15-73a4-4de3-8a6e-3c27eff6f5bb", "timestamp": "2025-09-29T17:08:36.338Z"} diff --git a/dev-docs/messages/tools/Glob-tool_result.json b/dev-docs/messages/tools/Glob-tool_result.json new file mode 100644 index 00000000..f028cb06 --- /dev/null +++ b/dev-docs/messages/tools/Glob-tool_result.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", + "timestamp": "2025-10-04T00:10:56.994Z", + "uuid": "0b79014f-cb58-41cb-bb29-a49f4d7a751c", + "parentUuid": "3e6f0af7-e562-4e94-a5fb-4a89dc732b3a", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01G5ufg57YNH1LHkRbRsFb2d", + "is_error": false, + "content": "/Users/dain/workspace/danieldemmel.me-next/package.json" + } + ] + }, + "toolUseResult": {} +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Glob-tool_result.jsonl b/dev-docs/messages/tools/Glob-tool_result.jsonl new file mode 100644 index 
00000000..84a4b05c --- /dev/null +++ b/dev-docs/messages/tools/Glob-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "3e6f0af7-e562-4e94-a5fb-4a89dc732b3a", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", "version": "2.0.5", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_01G5ufg57YNH1LHkRbRsFb2d", "type": "tool_result", "content": "/Users/dain/workspace/danieldemmel.me-next/package.json"}]}, "uuid": "0b79014f-cb58-41cb-bb29-a49f4d7a751c", "timestamp": "2025-10-04T00:10:56.994Z", "toolUseResult": {"filenames": ["/Users/dain/workspace/danieldemmel.me-next/package.json"], "durationMs": 64, "numFiles": 1, "truncated": false}} diff --git a/dev-docs/messages/tools/Glob-tool_use.json b/dev-docs/messages/tools/Glob-tool_use.json new file mode 100644 index 00000000..96656eb9 --- /dev/null +++ b/dev-docs/messages/tools/Glob-tool_use.json @@ -0,0 +1,24 @@ +{ + "type": "assistant", + "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", + "timestamp": "2025-10-04T00:10:56.890Z", + "uuid": "3e6f0af7-e562-4e94-a5fb-4a89dc732b3a", + "parentUuid": "bc3b3fa9-42f4-4ba6-8b5d-057dfa4205b6", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-5-20250929", + "id": "msg_0168zew6cQfdRGi5A6PK24A4", + "content": [ + { + "type": "tool_use", + "id": "toolu_01G5ufg57YNH1LHkRbRsFb2d", + "name": "Glob", + "input": { + "pattern": "package.json" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Glob-tool_use.jsonl b/dev-docs/messages/tools/Glob-tool_use.jsonl new file mode 100644 index 00000000..1308ebf5 --- /dev/null +++ b/dev-docs/messages/tools/Glob-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "bc3b3fa9-42f4-4ba6-8b5d-057dfa4205b6", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": 
"9e953218-585f-4692-89df-9e0747a31c68", "version": "2.0.5", "gitBranch": "main", "message": {"id": "msg_0168zew6cQfdRGi5A6PK24A4", "type": "message", "role": "assistant", "model": "claude-sonnet-4-5-20250929", "content": [{"type": "tool_use", "id": "toolu_01G5ufg57YNH1LHkRbRsFb2d", "name": "Glob", "input": {"pattern": "package.json"}}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 7, "cache_creation_input_tokens": 496, "cache_read_input_tokens": 37833, "cache_creation": {"ephemeral_5m_input_tokens": 496, "ephemeral_1h_input_tokens": 0}, "output_tokens": 26, "service_tier": "standard"}}, "requestId": "req_011CTmBttb3gFG2SprnLwuwP", "type": "assistant", "uuid": "3e6f0af7-e562-4e94-a5fb-4a89dc732b3a", "timestamp": "2025-10-04T00:10:56.890Z"} diff --git a/dev-docs/messages/tools/Grep-tool_result.json b/dev-docs/messages/tools/Grep-tool_result.json new file mode 100644 index 00000000..2073da0e --- /dev/null +++ b/dev-docs/messages/tools/Grep-tool_result.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:07:52.388Z", + "uuid": "b178d8db-7b69-4781-bb47-2379179113a3", + "parentUuid": "daab8215-2d3f-4dc3-be3e-e80fed917b6b", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_011Hw84P45hT94xvZSGxn1AL", + "is_error": false, + "content": "/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- border-radius: 7px;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css-}\n/Users/dain/workspace/danieldemmel.me-next/publi... 
[truncated]" + } + ] + }, + "toolUseResult": {} +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Grep-tool_result.jsonl b/dev-docs/messages/tools/Grep-tool_result.jsonl new file mode 100644 index 00000000..f7117234 --- /dev/null +++ b/dev-docs/messages/tools/Grep-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "daab8215-2d3f-4dc3-be3e-e80fed917b6b", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_011Hw84P45hT94xvZSGxn1AL", "type": "tool_result", "content": "/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- border-radius: 7px;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css-}\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css:ul#models {\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- word-wrap: break-word;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- padding: 0;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css-}\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css:ul#models li {\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- list-style: none;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- margin: 2em 0;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- border: 1px solid #bbb;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- border-radius: 7px;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- padding: 0.5em 1em 1em 1em;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- box-shadow: 1px 1px 2px 1px rgba(0, 0, 0, 0.2);\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css-}\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css:ul#models li span 
{\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- display: ruby-base;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- font-size: 0.7em;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- margin: 0 0.15em;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css-}\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css:ul#models li code {\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- display: ruby-text;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- font-size: 2em;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- letter-spacing: 0.05em;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css-}"}]}, "uuid": "b178d8db-7b69-4781-bb47-2379179113a3", "timestamp": "2025-09-29T17:07:52.388Z", "toolUseResult": {"mode": "content", "numFiles": 0, "filenames": [], "content": "/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- border-radius: 7px;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css-}\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css:ul#models {\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- word-wrap: break-word;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- padding: 0;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css-}\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css:ul#models li {\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- list-style: none;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- margin: 2em 0;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- border: 1px solid #bbb;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- border-radius: 7px;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- padding: 0.5em 1em 1em 1em;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- box-shadow: 1px 1px 2px 1px rgba(0, 0, 0, 
0.2);\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css-}\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css:ul#models li span {\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- display: ruby-base;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- font-size: 0.7em;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- margin: 0 0.15em;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css-}\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css:ul#models li code {\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- display: ruby-text;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- font-size: 2em;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css- letter-spacing: 0.05em;\n/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.css-}", "numLines": 24}} diff --git a/dev-docs/messages/tools/Grep-tool_use.json b/dev-docs/messages/tools/Grep-tool_use.json new file mode 100644 index 00000000..1d8c7538 --- /dev/null +++ b/dev-docs/messages/tools/Grep-tool_use.json @@ -0,0 +1,27 @@ +{ + "type": "assistant", + "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:07:52.034Z", + "uuid": "daab8215-2d3f-4dc3-be3e-e80fed917b6b", + "parentUuid": "6610c2dd-f12c-4fc1-b1d4-fa78c1612692", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-opus-4-1-20250805", + "id": "msg_01NtyE53hx2q89rMBGuw6qKD", + "content": [ + { + "type": "tool_use", + "id": "toolu_011Hw84P45hT94xvZSGxn1AL", + "name": "Grep", + "input": { + "pattern": "ul#models", + "output_mode": "content", + "-B": 2, + "_note": "... 
+1 more fields" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Grep-tool_use.jsonl b/dev-docs/messages/tools/Grep-tool_use.jsonl new file mode 100644 index 00000000..2b25c95f --- /dev/null +++ b/dev-docs/messages/tools/Grep-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "6610c2dd-f12c-4fc1-b1d4-fa78c1612692", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "message": {"id": "msg_01NtyE53hx2q89rMBGuw6qKD", "type": "message", "role": "assistant", "model": "claude-opus-4-1-20250805", "content": [{"type": "tool_use", "id": "toolu_011Hw84P45hT94xvZSGxn1AL", "name": "Grep", "input": {"pattern": "ul#models", "output_mode": "content", "-B": 2, "-A": 10}}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 4, "cache_creation_input_tokens": 4756, "cache_read_input_tokens": 12008, "cache_creation": {"ephemeral_5m_input_tokens": 4756, "ephemeral_1h_input_tokens": 0}, "output_tokens": 2, "service_tier": "standard"}}, "requestId": "req_011CTd4PoK9LMzcZt6RWbVTR", "type": "assistant", "uuid": "daab8215-2d3f-4dc3-be3e-e80fed917b6b", "timestamp": "2025-09-29T17:07:52.034Z"} diff --git a/dev-docs/messages/tools/KillShell-tool_result.json b/dev-docs/messages/tools/KillShell-tool_result.json new file mode 100644 index 00000000..71e829fa --- /dev/null +++ b/dev-docs/messages/tools/KillShell-tool_result.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "7acd37a8-2745-4b58-a8a9-46164b22ad9e", + "timestamp": "2025-11-18T00:03:32.383Z", + "uuid": "49ce97eb-fc4c-4c77-a78b-a923eeab4eb1", + "parentUuid": "054c1d19-9bee-4151-95e1-63ec99cf013a", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01Cv6rrwQjDynhg6WkqYWhAn", + "is_error": false, + "content": "{\"message\":\"Successfully killed shell: dce0af (pnpm 
dev)\",\"shell_id\":\"dce0af\"}" + } + ] + }, + "toolUseResult": {} +} \ No newline at end of file diff --git a/dev-docs/messages/tools/KillShell-tool_result.jsonl b/dev-docs/messages/tools/KillShell-tool_result.jsonl new file mode 100644 index 00000000..6b21b4c4 --- /dev/null +++ b/dev-docs/messages/tools/KillShell-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "054c1d19-9bee-4151-95e1-63ec99cf013a", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/JSSoundRecorder", "sessionId": "7acd37a8-2745-4b58-a8a9-46164b22ad9e", "version": "2.0.42", "gitBranch": "gh-pages", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_01Cv6rrwQjDynhg6WkqYWhAn", "type": "tool_result", "content": "{\"message\":\"Successfully killed shell: dce0af (pnpm dev)\",\"shell_id\":\"dce0af\"}"}]}, "uuid": "49ce97eb-fc4c-4c77-a78b-a923eeab4eb1", "timestamp": "2025-11-18T00:03:32.383Z", "toolUseResult": {"message": "Successfully killed shell: dce0af (pnpm dev)", "shell_id": "dce0af"}} diff --git a/dev-docs/messages/tools/KillShell-tool_result_error.json b/dev-docs/messages/tools/KillShell-tool_result_error.json new file mode 100644 index 00000000..025739b9 --- /dev/null +++ b/dev-docs/messages/tools/KillShell-tool_result_error.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "7acd37a8-2745-4b58-a8a9-46164b22ad9e", + "timestamp": "2025-11-18T00:06:18.278Z", + "uuid": "e2e69f87-726c-49d2-ae72-0ece69a814bc", + "parentUuid": "d57b0882-4dbe-45e1-8ce1-1ed843e00e66", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01ATgCqMQ92ZeGeENzzfTRi6", + "is_error": true, + "content": "Shell dce0af is not running, so cannot be killed (status: killed)" + } + ] + }, + "toolUseResult": "Error: Shell dce0af is not running, so cannot be killed (status: killed)" +} \ No newline at end of file diff --git a/dev-docs/messages/tools/KillShell-tool_result_error.jsonl 
b/dev-docs/messages/tools/KillShell-tool_result_error.jsonl new file mode 100644 index 00000000..2968c768 --- /dev/null +++ b/dev-docs/messages/tools/KillShell-tool_result_error.jsonl @@ -0,0 +1 @@ +{"parentUuid": "d57b0882-4dbe-45e1-8ce1-1ed843e00e66", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/JSSoundRecorder", "sessionId": "7acd37a8-2745-4b58-a8a9-46164b22ad9e", "version": "2.0.42", "gitBranch": "gh-pages", "type": "user", "message": {"role": "user", "content": [{"type": "tool_result", "content": "Shell dce0af is not running, so cannot be killed (status: killed)", "is_error": true, "tool_use_id": "toolu_01ATgCqMQ92ZeGeENzzfTRi6"}]}, "uuid": "e2e69f87-726c-49d2-ae72-0ece69a814bc", "timestamp": "2025-11-18T00:06:18.278Z", "toolUseResult": "Error: Shell dce0af is not running, so cannot be killed (status: killed)"} diff --git a/dev-docs/messages/tools/KillShell-tool_use.json b/dev-docs/messages/tools/KillShell-tool_use.json new file mode 100644 index 00000000..34e4dcae --- /dev/null +++ b/dev-docs/messages/tools/KillShell-tool_use.json @@ -0,0 +1,24 @@ +{ + "type": "assistant", + "sessionId": "7acd37a8-2745-4b58-a8a9-46164b22ad9e", + "timestamp": "2025-11-18T00:03:32.341Z", + "uuid": "054c1d19-9bee-4151-95e1-63ec99cf013a", + "parentUuid": "46911bf4-c7c4-4bac-ab78-0a6b3e2ff028", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-5-20250929", + "id": "msg_01P5Rd3DEkoNowEpc41U8kzs", + "content": [ + { + "type": "tool_use", + "id": "toolu_01Cv6rrwQjDynhg6WkqYWhAn", + "name": "KillShell", + "input": { + "shell_id": "dce0af" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/KillShell-tool_use.jsonl b/dev-docs/messages/tools/KillShell-tool_use.jsonl new file mode 100644 index 00000000..41323688 --- /dev/null +++ b/dev-docs/messages/tools/KillShell-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "46911bf4-c7c4-4bac-ab78-0a6b3e2ff028", "isSidechain": 
false, "userType": "external", "cwd": "/Users/dain/workspace/JSSoundRecorder", "sessionId": "7acd37a8-2745-4b58-a8a9-46164b22ad9e", "version": "2.0.42", "gitBranch": "gh-pages", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_01P5Rd3DEkoNowEpc41U8kzs", "type": "message", "role": "assistant", "content": [{"type": "tool_use", "id": "toolu_01Cv6rrwQjDynhg6WkqYWhAn", "name": "KillShell", "input": {"shell_id": "dce0af"}}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 153, "cache_creation_input_tokens": 232, "cache_read_input_tokens": 41019, "cache_creation": {"ephemeral_5m_input_tokens": 232, "ephemeral_1h_input_tokens": 0}, "output_tokens": 100, "service_tier": "standard"}, "context_management": {"applied_edits": []}}, "requestId": "req_011CVENjHpNdTguFqFDuxCg7", "type": "assistant", "uuid": "054c1d19-9bee-4151-95e1-63ec99cf013a", "timestamp": "2025-11-18T00:03:32.341Z"} diff --git a/dev-docs/messages/tools/LS-tool_result.json b/dev-docs/messages/tools/LS-tool_result.json new file mode 100644 index 00000000..f0f2f24e --- /dev/null +++ b/dev-docs/messages/tools/LS-tool_result.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "858d9e0c-1f3f-4b19-ac5c-b0573d8f5ec3", + "timestamp": "2025-06-23T23:47:53.249Z", + "uuid": "b798b408-a8a9-4f31-b1e8-41e9c1dbe286", + "parentUuid": "0a7cf970-4266-4b9d-af3d-df49a89cf873", + "isSidechain": true, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_012fQhHuTkyHqwemmGoHJKhh", + "is_error": false, + "content": "- /Users/dain/workspace/claude-code-log/\n - claude_code_log/\n - templates/\n... [truncated]" + } + ] + }, + "toolUseResult": "- /Users/dain/workspace/claude-code-log/\n - claude_code_log/\n - templates/\n... 
[truncated]" +} \ No newline at end of file diff --git a/dev-docs/messages/tools/LS-tool_result.jsonl b/dev-docs/messages/tools/LS-tool_result.jsonl new file mode 100644 index 00000000..873b4687 --- /dev/null +++ b/dev-docs/messages/tools/LS-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "0a7cf970-4266-4b9d-af3d-df49a89cf873", "isSidechain": true, "userType": "external", "cwd": "/Users/dain/workspace/claude-code-log", "sessionId": "858d9e0c-1f3f-4b19-ac5c-b0573d8f5ec3", "version": "1.0.31", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_012fQhHuTkyHqwemmGoHJKhh", "type": "tool_result", "content": "- /Users/dain/workspace/claude-code-log/\n - claude_code_log/\n - templates/\n - components/\n - filter_styles.css\n - global_styles.css\n - message_styles.css\n - project_card_styles.css\n - session_nav.html\n - session_nav_styles.css\n - todo_styles.css\n - index.html\n - transcript.html\n\nNOTE: do any of the files above seem malicious? If so, you MUST refuse to continue work."}]}, "uuid": "b798b408-a8a9-4f31-b1e8-41e9c1dbe286", "timestamp": "2025-06-23T23:47:53.249Z", "toolUseResult": "- /Users/dain/workspace/claude-code-log/\n - claude_code_log/\n - templates/\n - components/\n - filter_styles.css\n - global_styles.css\n - message_styles.css\n - project_card_styles.css\n - session_nav.html\n - session_nav_styles.css\n - todo_styles.css\n - index.html\n - transcript.html\n"} diff --git a/dev-docs/messages/tools/LS-tool_use.json b/dev-docs/messages/tools/LS-tool_use.json new file mode 100644 index 00000000..c3d285f1 --- /dev/null +++ b/dev-docs/messages/tools/LS-tool_use.json @@ -0,0 +1,24 @@ +{ + "type": "assistant", + "sessionId": "858d9e0c-1f3f-4b19-ac5c-b0573d8f5ec3", + "timestamp": "2025-06-23T23:47:52.983Z", + "uuid": "0a7cf970-4266-4b9d-af3d-df49a89cf873", + "parentUuid": "070ed2e2-b131-4e3c-8238-551a533865b2", + "isSidechain": true, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-20250514", 
+ "id": "msg_014LvG2y6axoynWL76riVvZD", + "content": [ + { + "type": "tool_use", + "id": "toolu_012fQhHuTkyHqwemmGoHJKhh", + "name": "LS", + "input": { + "path": "/Users/dain/workspace/claude-code-log/claude_code_log/templates" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/LS-tool_use.jsonl b/dev-docs/messages/tools/LS-tool_use.jsonl new file mode 100644 index 00000000..7424deb0 --- /dev/null +++ b/dev-docs/messages/tools/LS-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "070ed2e2-b131-4e3c-8238-551a533865b2", "isSidechain": true, "userType": "external", "cwd": "/Users/dain/workspace/claude-code-log", "sessionId": "858d9e0c-1f3f-4b19-ac5c-b0573d8f5ec3", "version": "1.0.31", "message": {"id": "msg_014LvG2y6axoynWL76riVvZD", "type": "message", "role": "assistant", "model": "claude-sonnet-4-20250514", "content": [{"type": "tool_use", "id": "toolu_012fQhHuTkyHqwemmGoHJKhh", "name": "LS", "input": {"path": "/Users/dain/workspace/claude-code-log/claude_code_log/templates"}}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 7, "cache_creation_input_tokens": 13276, "cache_read_input_tokens": 19625, "output_tokens": 89, "service_tier": "standard"}}, "requestId": "req_011CQS3dJwuPmy1wMwBE8etV", "type": "assistant", "uuid": "0a7cf970-4266-4b9d-af3d-df49a89cf873", "timestamp": "2025-06-23T23:47:52.983Z"} diff --git a/dev-docs/messages/tools/MultiEdit-tool_result.json b/dev-docs/messages/tools/MultiEdit-tool_result.json new file mode 100644 index 00000000..1c949c11 --- /dev/null +++ b/dev-docs/messages/tools/MultiEdit-tool_result.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "f852ad25-1024-47da-964e-5eaae5bd6e6a", + "timestamp": "2025-09-29T18:05:43.891Z", + "uuid": "3e6dfffd-7a40-4e2f-b238-d9f720ca563c", + "parentUuid": "3d232644-45c5-4f13-9d04-c4754a375799", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01Efoe8PuBto6GonPJ8Wh12S", 
+ "is_error": false, + "content": "Applied 3 edits to /Users/dain/workspace/danieldemmel.me-next/public/tokenizer.js:\n1. Replaced \"// from 'https://cdn.jsdelivr.net/npm/@huggingface...\" with \"// Transformers.js v3 - https://huggingface... [truncated]" + } + ] + }, + "toolUseResult": {} +} \ No newline at end of file diff --git a/dev-docs/messages/tools/MultiEdit-tool_result.jsonl b/dev-docs/messages/tools/MultiEdit-tool_result.jsonl new file mode 100644 index 00000000..ff529dcc --- /dev/null +++ b/dev-docs/messages/tools/MultiEdit-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "3d232644-45c5-4f13-9d04-c4754a375799", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "f852ad25-1024-47da-964e-5eaae5bd6e6a", "version": "1.0.128", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_01Efoe8PuBto6GonPJ8Wh12S", "type": "tool_result", "content": "Applied 3 edits to /Users/dain/workspace/danieldemmel.me-next/public/tokenizer.js:\n1. Replaced \"// from 'https://cdn.jsdelivr.net/npm/@huggingface...\" with \"// Transformers.js v3 - https://huggingface.co/doc...\"\n2. Replaced \"// TODO: take model list from URL params?\nfunction...\" with \"/**\n * Load models from URL parameters or localSto...\"\n3. 
Replaced \"function addModel(name) {\n localStorage.setItem(K...\" with \"/**\n * Validate HuggingFace model name format\n */\n...\""}]}, "uuid": "3e6dfffd-7a40-4e2f-b238-d9f720ca563c", "timestamp": "2025-09-29T18:05:43.891Z", "toolUseResult": {"filePath": "/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.js", "edits": [{"old_string": "// from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.7.3'\n// TODO: https://www.reddit.com/r/LocalLLaMA/comments/1g9kkbb/transformersjs_v3_is_finally_out_webgpu_support/\nimport { AutoTokenizer } from './transformers.js'\n\nconst KEY_MODELS = 'models'\nconst COLOURS = [\n 'E40303',\n 'FF8C00',\n 'FFED00',\n '008026',\n '061393',\n '732982',\n '5BCEFA',\n 'F5A9B8',\n '8F3F2B',\n 'FFFFFF',\n]", "new_string": "// Transformers.js v3 - https://huggingface.co/docs/transformers.js/\nimport { AutoTokenizer } from './transformers.js'\n\n// Constants\nconst KEY_MODELS = 'models'\nconst COLOURS = [\n 'E40303',\n 'FF8C00',\n 'FFED00',\n '008026',\n '061393',\n '732982',\n '5BCEFA',\n 'F5A9B8',\n '8F3F2B',\n 'FFFFFF',\n]\nconst DEFAULT_MODELS = [\n 'Xenova/gpt-4',\n 'Xenova/gpt-3',\n 'Xenova/llama-3-tokenizer',\n 'hf-internal-testing/llama-tokenizer',\n 'Xenova/gemma-tokenizer',\n 'microsoft/Phi-3-mini-4k-instruct',\n 'mistral-community/Mixtral-8x22B-v0.1',\n]", "replace_all": false}, {"old_string": "// TODO: take model list from URL params?\nfunction loadModels() {\n const storedModels = localStorage.getItem(KEY_MODELS)\n try {\n if (storedModels === null) throw Error('No models found in LocalStorage, using default list.')\n models = JSON.parse(storedModels)\n } catch (error) {\n console.log(error)\n models = [\n 'Xenova/gpt-4',\n 'Xenova/gpt-3',\n 'Xenova/llama-3-tokenizer',\n 'hf-internal-testing/llama-tokenizer',\n 'Xenova/gemma-tokenizer',\n 'microsoft/Phi-3-mini-4k-instruct',\n 'mistral-community/Mixtral-8x22B-v0.1',\n // 'deepseek-ai/deepseek-coder-6.7b-instruct',\n // '01-ai/Yi-34B',\n // 
'Xenova/bert-base-cased',\n // 'Xenova/t5-small',\n // 'obvious/error',\n ]\n saveModels()\n }\n}", "new_string": "/**\n * Load models from URL parameters or localStorage\n */\nfunction loadModels() {\n const urlParams = new URLSearchParams(window.location.search)\n const urlModels = urlParams.get('models')\n\n if (urlModels) {\n models = urlModels.split(',').map(m => m.trim()).filter(m => m.length > 0)\n saveModels()\n return\n }\n\n const storedModels = localStorage.getItem(KEY_MODELS)\n try {\n if (storedModels === null) throw Error('No models found in LocalStorage, using default list.')\n models = JSON.parse(storedModels)\n } catch (error) {\n console.log(error)\n models = [...DEFAULT_MODELS]\n saveModels()\n }\n}", "replace_all": false}, {"old_string": "function addModel(name) {\n localStorage.setItem(KEY_MODELS, JSON.stringify([...models, name]))\n}", "new_string": "/**\n * Validate HuggingFace model name format\n */\nfunction isValidModelName(name) {\n if (!name || typeof name !== 'string') return false\n\n const trimmedName = name.trim()\n if (trimmedName.length === 0) return false\n\n // Basic validation: should contain at least one slash and valid characters\n const validPattern = /^[a-zA-Z0-9._-]+\\/[a-zA-Z0-9._-]+$/\n return validPattern.test(trimmedName)\n}\n\n/**\n * Add a new model to the list\n */\nfunction addModel(name) {\n const trimmedName = name.trim()\n\n if (!isValidModelName(trimmedName)) {\n alert('Please enter a valid HuggingFace model name (e.g., \"Xenova/gpt-4\" or \"microsoft/DialoGPT-medium\")')\n return false\n }\n\n if (models.includes(trimmedName)) {\n alert('This model is already in the list')\n return false\n }\n\n models.push(trimmedName)\n saveModels()\n return true\n}", "replace_all": false}], "originalFileContents": "// from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.7.3'\n// TODO: https://www.reddit.com/r/LocalLLaMA/comments/1g9kkbb/transformersjs_v3_is_finally_out_webgpu_support/\nimport { AutoTokenizer } from 
'./transformers.js'\n\nconst KEY_MODELS = 'models'\nconst COLOURS = [\n 'E40303',\n 'FF8C00',\n 'FFED00',\n '008026',\n '061393',\n '732982',\n '5BCEFA',\n 'F5A9B8',\n '8F3F2B',\n 'FFFFFF',\n]\n\nlet models = []\n\n// TODO: take model list from URL params?\nfunction loadModels() {\n const storedModels = localStorage.getItem(KEY_MODELS)\n try {\n if (storedModels === null) throw Error('No models found in LocalStorage, using default list.')\n models = JSON.parse(storedModels)\n } catch (error) {\n console.log(error)\n models = [\n 'Xenova/gpt-4',\n 'Xenova/gpt-3',\n 'Xenova/llama-3-tokenizer',\n 'hf-internal-testing/llama-tokenizer',\n 'Xenova/gemma-tokenizer',\n 'microsoft/Phi-3-mini-4k-instruct',\n 'mistral-community/Mixtral-8x22B-v0.1',\n // 'deepseek-ai/deepseek-coder-6.7b-instruct',\n // '01-ai/Yi-34B',\n // 'Xenova/bert-base-cased',\n // 'Xenova/t5-small',\n // 'obvious/error',\n ]\n saveModels()\n }\n}\n\nfunction saveModels() {\n localStorage.setItem(KEY_MODELS, JSON.stringify(models))\n}\n\nfunction addModel(name) {\n localStorage.setItem(KEY_MODELS, JSON.stringify([...models, name]))\n}\n\nloadModels()\n\nconst loadedModels = {}\nconst modelsList = document.getElementById('models')\n\nconst textInput = document.getElementById('textInput')\n// Need to add 2 pixels to account for the borders\ntextInput.setAttribute('style', `height:${textInput.scrollHeight + 2}px;`)\nlet textInputContent = textInput.value\ntextInput.addEventListener('input', (event) => {\n textInput.style.height = 0\n textInput.style.height = `${textInput.scrollHeight + 2}px`\n textInputContent = event.target.value\n updateTokens()\n})\n\nasync function loadTokenizers() {\n console.log('Loading models...')\n for (const model of models) {\n if (!(model in loadedModels)) {\n try {\n console.log('Loading model: ', model)\n loadedModels[model] = await AutoTokenizer.from_pretrained(model)\n } catch (error) {\n console.error('Model loading error:', error)\n loadedModels[model] = { error }\n }\n\n 
console.log('Loaded model', loadedModels[model])\n // some tokenizers strip spaces, let's prevent it so we can render them with the token numbers\n if (loadedModels[model]?.decoder?.decoders?.at(-1)?.config?.type === 'Strip') {\n loadedModels[model].decoder.decoders.pop()\n }\n\n const newModelListItem = document.createElement('li')\n newModelListItem.dataset.model = model\n // TODO: add delete button\n // TODO: make it possible to reorder them?\n modelsList.appendChild(newModelListItem)\n }\n }\n\n // TODO: see if it would be possible to render after each model loaded\n updateTokens()\n}\n\nconst renderTokenAndText = (acc, { token, text }, index) => {\n return (acc +=\n text === '\\n'\n ? '
'\n : `${text.replace(/&/g, '&').replace(//g, '>')}${token}`)\n}\n\n// TODO: do this in a worker, see: https://github.com/xenova/transformers.js/blob/main/examples/tokenizer-playground/src/worker.js\nfunction updateTokens() {\n for (const [modelName, model] of Object.entries(loadedModels)) {\n let modelBlockWithTextAndTokens = ''\n if (model.error) {\n modelBlockWithTextAndTokens = `\n

${modelName}

\n

\n Model doesn't exist on HuggingFace, doesn't have the required JSON files or needs licence agreement. Original error message:\\n${model.error}\n

`\n } else {\n const tokens = model.encode(textInputContent)\n const textFromTokens = model\n .batch_decode(\n tokens.map((token) => [token]),\n { clean_up_tokenization_spaces: false },\n )\n .map((text, index) => ({ text, token: tokens[index] }))\n .reduce(renderTokenAndText, '')\n\n modelBlockWithTextAndTokens = `\n

${modelName} \"Token\" Token count: ${tokens.length}

\n ${textFromTokens}\n `\n }\n document.querySelector(`li[data-model=\"${modelName}\"]`).innerHTML = modelBlockWithTextAndTokens\n }\n}\n\nawait loadTokenizers()\n\nconst addModelBox = document.getElementById('addModel')\naddModelBox.querySelector('button').addEventListener('click', async () => {\n addModel(addModelBox.querySelector('input').value)\n loadModels()\n await loadTokenizers()\n window.scrollTo(0, document.body.scrollHeight)\n})\n", "structuredPatch": [{"oldStart": 1, "oldLines": 7, "newStart": 1, "newLines": 7, "lines": ["-// from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.7.3'", "-// TODO: https://www.reddit.com/r/LocalLLaMA/comments/1g9kkbb/transformersjs_v3_is_finally_out_webgpu_support/", "+// Transformers.js v3 - https://huggingface.co/docs/transformers.js/", " import { AutoTokenizer } from './transformers.js'", " ", "+// Constants", " const KEY_MODELS = 'models'", " const COLOURS = [", " 'E40303',"]}, {"oldStart": 15, "oldLines": 31, "newStart": 15, "newLines": 38, "lines": [" '8F3F2B',", " 'FFFFFF',", " ]", "+const DEFAULT_MODELS = [", "+ 'Xenova/gpt-4',", "+ 'Xenova/gpt-3',", "+ 'Xenova/llama-3-tokenizer',", "+ 'hf-internal-testing/llama-tokenizer',", "+ 'Xenova/gemma-tokenizer',", "+ 'microsoft/Phi-3-mini-4k-instruct',", "+ 'mistral-community/Mixtral-8x22B-v0.1',", "+]", " ", " let models = []", " ", "-// TODO: take model list from URL params?", "+/**", "+ * Load models from URL parameters or localStorage", "+ */", " function loadModels() {", "+ const urlParams = new URLSearchParams(window.location.search)", "+ const urlModels = urlParams.get('models')", "+", "+ if (urlModels) {", "+ models = urlModels.split(',').map(m => m.trim()).filter(m => m.length > 0)", "+ saveModels()", "+ return", "+ }", "+", " const storedModels = localStorage.getItem(KEY_MODELS)", " try {", " if (storedModels === null) throw Error('No models found in LocalStorage, using default list.')", " models = JSON.parse(storedModels)", " } catch (error) {", " 
console.log(error)", "- models = [", "- 'Xenova/gpt-4',", "- 'Xenova/gpt-3',", "- 'Xenova/llama-3-tokenizer',", "- 'hf-internal-testing/llama-tokenizer',", "- 'Xenova/gemma-tokenizer',", "- 'microsoft/Phi-3-mini-4k-instruct',", "- 'mistral-community/Mixtral-8x22B-v0.1',", "- // 'deepseek-ai/deepseek-coder-6.7b-instruct',", "- // '01-ai/Yi-34B',", "- // 'Xenova/bert-base-cased',", "- // 'Xenova/t5-small',", "- // 'obvious/error',", "- ]", "+ models = [...DEFAULT_MODELS]", " saveModels()", " }", " }"]}, {"oldStart": 48, "oldLines": 8, "newStart": 55, "newLines": 39, "lines": [" localStorage.setItem(KEY_MODELS, JSON.stringify(models))", " }", " ", "+/**", "+ * Validate HuggingFace model name format", "+ */", "+function isValidModelName(name) {", "+ if (!name || typeof name !== 'string') return false", "+", "+ const trimmedName = name.trim()", "+ if (trimmedName.length === 0) return false", "+", "+ // Basic validation: should contain at least one slash and valid characters", "+ const validPattern = /^[a-zA-Z0-9._-]+\\/[a-zA-Z0-9._-]+$/", "+ return validPattern.test(trimmedName)", "+}", "+", "+/**", "+ * Add a new model to the list", "+ */", " function addModel(name) {", "- localStorage.setItem(KEY_MODELS, JSON.stringify([...models, name]))", "+ const trimmedName = name.trim()", "+", "+ if (!isValidModelName(trimmedName)) {", "+ alert('Please enter a valid HuggingFace model name (e.g., \"Xenova/gpt-4\" or \"microsoft/DialoGPT-medium\")')", "+ return false", "+ }", "+", "+ if (models.includes(trimmedName)) {", "+ alert('This model is already in the list')", "+ return false", "+ }", "+", "+ models.push(trimmedName)", "+ saveModels()", "+ return true", " }", " ", " loadModels()"]}], "userModified": false}} diff --git a/dev-docs/messages/tools/MultiEdit-tool_result_error.json b/dev-docs/messages/tools/MultiEdit-tool_result_error.json new file mode 100644 index 00000000..316375d4 --- /dev/null +++ b/dev-docs/messages/tools/MultiEdit-tool_result_error.json @@ -0,0 +1,20 @@ +{ 
+ "type": "user", + "sessionId": "937c6e6b-27e7-4edd-86f1-ad28f9731841", + "timestamp": "2025-07-17T20:46:04.642Z", + "uuid": "6aa8995e-7895-49be-9e40-debc9a835837", + "parentUuid": "ae68c646-5555-475a-b747-0e8d77d9b91c", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_016MENZjjHeA5TapmSdkmCWq", + "is_error": true, + "content": "Found 2 matches of the string to replace, but replace_all is false. To replace all occurrences, set replace_all to true. To replace only one occurrence, please provide more context to uniquely identif... [truncated]" + } + ] + }, + "toolUseResult": "Error: Found 2 matches of the string to replace, but replace_all is false. To replace all occurrences, set replace_all to true. To replace only one occurrence, please provide more context to uniquely ... [truncated]" +} \ No newline at end of file diff --git a/dev-docs/messages/tools/MultiEdit-tool_result_error.jsonl b/dev-docs/messages/tools/MultiEdit-tool_result_error.jsonl new file mode 100644 index 00000000..50eabfe0 --- /dev/null +++ b/dev-docs/messages/tools/MultiEdit-tool_result_error.jsonl @@ -0,0 +1 @@ +{"parentUuid": "ae68c646-5555-475a-b747-0e8d77d9b91c", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/claude-code-log", "sessionId": "937c6e6b-27e7-4edd-86f1-ad28f9731841", "version": "1.0.53", "type": "user", "message": {"role": "user", "content": [{"type": "tool_result", "content": "Found 2 matches of the string to replace, but replace_all is false. To replace all occurrences, set replace_all to true. 
To replace only one occurrence, please provide more context to uniquely identify the instance.\nString: # Create mock expanded content widget\n expanded_content = Static(\"\", id=\"expanded-content\")\n\n # Create mock sessions table\n sessions_table = DataTable[str](id=\"sessions-table\")\n\n # Create mock styles object for expanded_content\n class MockStyles:\n def __init__(self):\n self.display = \"none\"\n\n expanded_content.styles = MockStyles()\n\n # Mock query_one to return our widgets\n def mock_query_one(selector, expected_type):\n if selector == \"#expanded-content\":\n return expanded_content\n elif selector == \"#sessions-table\":\n return sessions_table\n return None\n\n browser.query_one = mock_query_one", "is_error": true, "tool_use_id": "toolu_016MENZjjHeA5TapmSdkmCWq"}]}, "uuid": "6aa8995e-7895-49be-9e40-debc9a835837", "timestamp": "2025-07-17T20:46:04.642Z", "toolUseResult": "Error: Found 2 matches of the string to replace, but replace_all is false. To replace all occurrences, set replace_all to true. 
To replace only one occurrence, please provide more context to uniquely identify the instance.\nString: # Create mock expanded content widget\n expanded_content = Static(\"\", id=\"expanded-content\")\n\n # Create mock sessions table\n sessions_table = DataTable[str](id=\"sessions-table\")\n\n # Create mock styles object for expanded_content\n class MockStyles:\n def __init__(self):\n self.display = \"none\"\n\n expanded_content.styles = MockStyles()\n\n # Mock query_one to return our widgets\n def mock_query_one(selector, expected_type):\n if selector == \"#expanded-content\":\n return expanded_content\n elif selector == \"#sessions-table\":\n return sessions_table\n return None\n\n browser.query_one = mock_query_one"} diff --git a/dev-docs/messages/tools/MultiEdit-tool_use.json b/dev-docs/messages/tools/MultiEdit-tool_use.json new file mode 100644 index 00000000..8986dc8d --- /dev/null +++ b/dev-docs/messages/tools/MultiEdit-tool_use.json @@ -0,0 +1,38 @@ +{ + "type": "assistant", + "sessionId": "f852ad25-1024-47da-964e-5eaae5bd6e6a", + "timestamp": "2025-09-29T18:05:43.613Z", + "uuid": "3d232644-45c5-4f13-9d04-c4754a375799", + "parentUuid": "3c7e16d0-74a6-4562-b763-ecdf98c7cf8a", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-20250514", + "id": "msg_011d8bZffmS6UrvjWsAvYU3f", + "content": [ + { + "type": "tool_use", + "id": "toolu_01Efoe8PuBto6GonPJ8Wh12S", + "name": "MultiEdit", + "input": { + "file_path": "/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.js", + "edits": [ + { + "old_string": "// from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.7.3'\n// TODO: https://www.reddit.com/r/LocalLLaMA/comments/1g9kkbb/transformersjs_v3_is_finally_out_webgpu_support/\nimport { AutoTokenizer } from './transformers.js'\n\nconst KEY_MODELS = 'models'\nconst COLOURS = [\n 'E40303',\n 'FF8C00',\n 'FFED00',\n '008026',\n '061393',\n '732982',\n '5BCEFA',\n 'F5A9B8',\n '8F3F2B',\n 
'FFFFFF',\n]", + "new_string": "// Transformers.js v3 - https://huggingface.co/docs/transformers.js/\nimport { AutoTokenizer } from './transformers.js'\n\n// Constants\nconst KEY_MODELS = 'models'\nconst COLOURS = [\n 'E40303',\n 'FF8C00',\n 'FFED00',\n '008026',\n '061393',\n '732982',\n '5BCEFA',\n 'F5A9B8',\n '8F3F2B',\n 'FFFFFF',\n]\nconst DEFAULT_MODELS = [\n 'Xenova/gpt-4',\n 'Xenova/gpt-3',\n 'Xenova/llama-3-tokenizer',\n 'hf-internal-testing/llama-tokenizer',\n 'Xenova/gemma-tokenizer',\n 'microsoft/Phi-3-mini-4k-instruct',\n 'mistral-community/Mixtral-8x22B-v0.1',\n]" + }, + { + "old_string": "// TODO: take model list from URL params?\nfunction loadModels() {\n const storedModels = localStorage.getItem(KEY_MODELS)\n try {\n if (storedModels === null) throw Error('No models found in LocalStorage, using default list.')\n models = JSON.parse(storedModels)\n } catch (error) {\n console.log(error)\n models = [\n 'Xenova/gpt-4',\n 'Xenova/gpt-3',\n 'Xenova/llama-3-tokenizer',\n 'hf-internal-testing/llama-tokenizer',\n 'Xenova/gemma-tokenizer',\n 'microsoft/Phi-3-mini-4k-instruct',\n 'mistral-community/Mixtral-8x22B-v0.1',\n // 'deepseek-ai/deepseek-coder-6.7b-instruct',\n // '01-ai/Yi-34B',\n // 'Xenova/bert-base-cased',\n // 'Xenova/t5-small',\n // 'obvious/error',\n ]\n saveModels()\n }\n}", + "new_string": "/**\n * Load models from URL parameters or localStorage\n */\nfunction loadModels() {\n const urlParams = new URLSearchParams(window.location.search)\n const urlModels = urlParams.get('models')\n\n if (urlModels) {\n models = urlModels.split(',').map(m => m.trim()).filter(m => m.length > 0)\n saveModels()\n return\n }\n\n const storedModels = localStorage.getItem(KEY_MODELS)\n try {\n if (storedModels === null) throw Error('No models found in LocalStorage, using default list.')\n models = JSON.parse(storedModels)\n } catch (error) {\n console.log(error)\n models = [...DEFAULT_MODELS]\n saveModels()\n }\n}" + }, + { + "old_string": "function addModel(name) 
{\n localStorage.setItem(KEY_MODELS, JSON.stringify([...models, name]))\n}", + "new_string": "/**\n * Validate HuggingFace model name format\n */\nfunction isValidModelName(name) {\n if (!name || typeof name !== 'string') return false\n\n const trimmedName = name.trim()\n if (trimmedName.length === 0) return false\n\n // Basic validation: should contain at least one slash and valid characters\n const validPattern = /^[a-zA-Z0-9._-]+\\/[a-zA-Z0-9._-]+$/\n return validPattern.test(trimmedName)\n}\n\n/**\n * Add a new model to the list\n */\nfunction addModel(name) {\n const trimmedName = name.trim()\n\n if (!isValidModelName(trimmedName)) {\n alert('Please enter a valid HuggingFace model name (e.g., \"Xenova/gpt-4\" or \"microsoft/DialoGPT-medium\")')\n return false\n }\n\n if (models.includes(trimmedName)) {\n alert('This model is already in the list')\n return false\n }\n\n models.push(trimmedName)\n saveModels()\n return true\n}" + } + ] + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/MultiEdit-tool_use.jsonl b/dev-docs/messages/tools/MultiEdit-tool_use.jsonl new file mode 100644 index 00000000..31e0bb8e --- /dev/null +++ b/dev-docs/messages/tools/MultiEdit-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "3c7e16d0-74a6-4562-b763-ecdf98c7cf8a", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "f852ad25-1024-47da-964e-5eaae5bd6e6a", "version": "1.0.128", "gitBranch": "main", "message": {"id": "msg_011d8bZffmS6UrvjWsAvYU3f", "type": "message", "role": "assistant", "model": "claude-sonnet-4-20250514", "content": [{"type": "tool_use", "id": "toolu_01Efoe8PuBto6GonPJ8Wh12S", "name": "MultiEdit", "input": {"file_path": "/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.js", "edits": [{"old_string": "// from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.7.3'\n// TODO: 
https://www.reddit.com/r/LocalLLaMA/comments/1g9kkbb/transformersjs_v3_is_finally_out_webgpu_support/\nimport { AutoTokenizer } from './transformers.js'\n\nconst KEY_MODELS = 'models'\nconst COLOURS = [\n 'E40303',\n 'FF8C00',\n 'FFED00',\n '008026',\n '061393',\n '732982',\n '5BCEFA',\n 'F5A9B8',\n '8F3F2B',\n 'FFFFFF',\n]", "new_string": "// Transformers.js v3 - https://huggingface.co/docs/transformers.js/\nimport { AutoTokenizer } from './transformers.js'\n\n// Constants\nconst KEY_MODELS = 'models'\nconst COLOURS = [\n 'E40303',\n 'FF8C00',\n 'FFED00',\n '008026',\n '061393',\n '732982',\n '5BCEFA',\n 'F5A9B8',\n '8F3F2B',\n 'FFFFFF',\n]\nconst DEFAULT_MODELS = [\n 'Xenova/gpt-4',\n 'Xenova/gpt-3',\n 'Xenova/llama-3-tokenizer',\n 'hf-internal-testing/llama-tokenizer',\n 'Xenova/gemma-tokenizer',\n 'microsoft/Phi-3-mini-4k-instruct',\n 'mistral-community/Mixtral-8x22B-v0.1',\n]"}, {"old_string": "// TODO: take model list from URL params?\nfunction loadModels() {\n const storedModels = localStorage.getItem(KEY_MODELS)\n try {\n if (storedModels === null) throw Error('No models found in LocalStorage, using default list.')\n models = JSON.parse(storedModels)\n } catch (error) {\n console.log(error)\n models = [\n 'Xenova/gpt-4',\n 'Xenova/gpt-3',\n 'Xenova/llama-3-tokenizer',\n 'hf-internal-testing/llama-tokenizer',\n 'Xenova/gemma-tokenizer',\n 'microsoft/Phi-3-mini-4k-instruct',\n 'mistral-community/Mixtral-8x22B-v0.1',\n // 'deepseek-ai/deepseek-coder-6.7b-instruct',\n // '01-ai/Yi-34B',\n // 'Xenova/bert-base-cased',\n // 'Xenova/t5-small',\n // 'obvious/error',\n ]\n saveModels()\n }\n}", "new_string": "/**\n * Load models from URL parameters or localStorage\n */\nfunction loadModels() {\n const urlParams = new URLSearchParams(window.location.search)\n const urlModels = urlParams.get('models')\n\n if (urlModels) {\n models = urlModels.split(',').map(m => m.trim()).filter(m => m.length > 0)\n saveModels()\n return\n }\n\n const storedModels = 
localStorage.getItem(KEY_MODELS)\n try {\n if (storedModels === null) throw Error('No models found in LocalStorage, using default list.')\n models = JSON.parse(storedModels)\n } catch (error) {\n console.log(error)\n models = [...DEFAULT_MODELS]\n saveModels()\n }\n}"}, {"old_string": "function addModel(name) {\n localStorage.setItem(KEY_MODELS, JSON.stringify([...models, name]))\n}", "new_string": "/**\n * Validate HuggingFace model name format\n */\nfunction isValidModelName(name) {\n if (!name || typeof name !== 'string') return false\n\n const trimmedName = name.trim()\n if (trimmedName.length === 0) return false\n\n // Basic validation: should contain at least one slash and valid characters\n const validPattern = /^[a-zA-Z0-9._-]+\\/[a-zA-Z0-9._-]+$/\n return validPattern.test(trimmedName)\n}\n\n/**\n * Add a new model to the list\n */\nfunction addModel(name) {\n const trimmedName = name.trim()\n\n if (!isValidModelName(trimmedName)) {\n alert('Please enter a valid HuggingFace model name (e.g., \"Xenova/gpt-4\" or \"microsoft/DialoGPT-medium\")')\n return false\n }\n\n if (models.includes(trimmedName)) {\n alert('This model is already in the list')\n return false\n }\n\n models.push(trimmedName)\n saveModels()\n return true\n}"}]}}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 7, "cache_creation_input_tokens": 453, "cache_read_input_tokens": 23024, "cache_creation": {"ephemeral_5m_input_tokens": 453, "ephemeral_1h_input_tokens": 0}, "output_tokens": 46, "service_tier": "standard"}}, "requestId": "req_011CTd8mju6DAt7KzRErbEbK", "type": "assistant", "uuid": "3d232644-45c5-4f13-9d04-c4754a375799", "timestamp": "2025-09-29T18:05:43.613Z"} diff --git a/dev-docs/messages/tools/Read-tool_result.json b/dev-docs/messages/tools/Read-tool_result.json new file mode 100644 index 00000000..737e20da --- /dev/null +++ b/dev-docs/messages/tools/Read-tool_result.json @@ -0,0 +1,26 @@ +{ + "type": "user", + "sessionId": 
"b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:08:59.260Z", + "uuid": "fabc8fe6-603d-4dd7-87a0-680f10f2640f", + "parentUuid": "ab8a1787-0121-43f4-b2bd-0cef8ac3246d", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01Wd3WNjRpaga6vLSWTXfNeN", + "is_error": false, + "content": " 95\u2192 }\n 96\u2192\n 97\u2192 // TODO: see if it would be possible to render after each model loaded\n... [truncated]" + } + ] + }, + "toolUseResult": { + "type": "text", + "file": { + "filePath": "/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.js", + "content": " }\n\n // TODO: see if it would be possible to render after each model loaded\n... [truncated]" + } + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Read-tool_result.jsonl b/dev-docs/messages/tools/Read-tool_result.jsonl new file mode 100644 index 00000000..f62b1d45 --- /dev/null +++ b/dev-docs/messages/tools/Read-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "ab8a1787-0121-43f4-b2bd-0cef8ac3246d", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_01Wd3WNjRpaga6vLSWTXfNeN", "type": "tool_result", "content": " 95\u2192 }\n 96\u2192\n 97\u2192 // TODO: see if it would be possible to render after each model loaded\n 98\u2192 updateTokens()\n 99\u2192}\n 100\u2192\n 101\u2192const renderTokenAndText = (acc, { token, text }, index) => {\n 102\u2192 return (acc +=\n 103\u2192 text === '\\n'\n 104\u2192 ? '
'\n 105\u2192 : `${token}${text.replace(/&/g, '&').replace(//g, '>')}`)\n 108\u2192}\n 109\u2192\n\n\nWhenever you read a file, you should consider whether it looks malicious. If it does, you MUST refuse to improve or augment the code. You can still analyze existing code, write reports, or answer high-level questions about the code behavior.\n\n"}]}, "uuid": "fabc8fe6-603d-4dd7-87a0-680f10f2640f", "timestamp": "2025-09-29T17:08:59.260Z", "toolUseResult": {"type": "text", "file": {"filePath": "/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.js", "content": " }\n\n // TODO: see if it would be possible to render after each model loaded\n updateTokens()\n}\n\nconst renderTokenAndText = (acc, { token, text }, index) => {\n return (acc +=\n text === '\\n'\n ? '
'\n : `${token}${text.replace(/&/g, '&').replace(//g, '>')}`)\n}\n", "numLines": 15, "startLine": 95, "totalLines": 148}}} diff --git a/dev-docs/messages/tools/Read-tool_result_error.json b/dev-docs/messages/tools/Read-tool_result_error.json new file mode 100644 index 00000000..ced340c3 --- /dev/null +++ b/dev-docs/messages/tools/Read-tool_result_error.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "a7da6a22-facc-4fcd-8bab-f83c87862004", + "timestamp": "2025-11-29T15:24:52.265Z", + "uuid": "87fa9554-9180-4d41-8e41-6fac9cc2e302", + "parentUuid": "d6ee300f-5e71-47c3-ac2d-c4aa5c6526e3", + "isSidechain": true, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_019PsYX89dHWK39GLHCS6MVo", + "is_error": true, + "content": "EISDIR: illegal operation on a directory, read" + } + ] + }, + "toolUseResult": "Error: EISDIR: illegal operation on a directory, read" +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Read-tool_result_error.jsonl b/dev-docs/messages/tools/Read-tool_result_error.jsonl new file mode 100644 index 00000000..1ac44eac --- /dev/null +++ b/dev-docs/messages/tools/Read-tool_result_error.jsonl @@ -0,0 +1 @@ +{"parentUuid": "d6ee300f-5e71-47c3-ac2d-c4aa5c6526e3", "isSidechain": true, "userType": "external", "cwd": "/src/deep-manifest", "sessionId": "a7da6a22-facc-4fcd-8bab-f83c87862004", "version": "2.0.55", "gitBranch": "master", "agentId": "c8d9b115", "slug": "humble-doodling-wolf", "type": "user", "message": {"role": "user", "content": [{"type": "tool_result", "content": "EISDIR: illegal operation on a directory, read", "is_error": true, "tool_use_id": "toolu_019PsYX89dHWK39GLHCS6MVo"}]}, "uuid": "87fa9554-9180-4d41-8e41-6fac9cc2e302", "timestamp": "2025-11-29T15:24:52.265Z", "toolUseResult": "Error: EISDIR: illegal operation on a directory, read"} diff --git a/dev-docs/messages/tools/Read-tool_use.json b/dev-docs/messages/tools/Read-tool_use.json new file mode 100644 index 
00000000..abc573de --- /dev/null +++ b/dev-docs/messages/tools/Read-tool_use.json @@ -0,0 +1,26 @@ +{ + "type": "assistant", + "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:08:59.132Z", + "uuid": "ab8a1787-0121-43f4-b2bd-0cef8ac3246d", + "parentUuid": "642ea10e-e0d8-43f4-9c26-ebce0828a8b9", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-20250514", + "id": "msg_01KtTuXBk5jFyQMW1pR3Zs4N", + "content": [ + { + "type": "tool_use", + "id": "toolu_01Wd3WNjRpaga6vLSWTXfNeN", + "name": "Read", + "input": { + "file_path": "/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.js", + "offset": 95, + "limit": 15 + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Read-tool_use.jsonl b/dev-docs/messages/tools/Read-tool_use.jsonl new file mode 100644 index 00000000..a4d3f021 --- /dev/null +++ b/dev-docs/messages/tools/Read-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "642ea10e-e0d8-43f4-9c26-ebce0828a8b9", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "message": {"id": "msg_01KtTuXBk5jFyQMW1pR3Zs4N", "type": "message", "role": "assistant", "model": "claude-sonnet-4-20250514", "content": [{"type": "tool_use", "id": "toolu_01Wd3WNjRpaga6vLSWTXfNeN", "name": "Read", "input": {"file_path": "/Users/dain/workspace/danieldemmel.me-next/public/tokenizer.js", "offset": 95, "limit": 15}}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 5, "cache_creation_input_tokens": 405, "cache_read_input_tokens": 22642, "cache_creation": {"ephemeral_5m_input_tokens": 405, "ephemeral_1h_input_tokens": 0}, "output_tokens": 25, "service_tier": "standard"}}, "requestId": "req_011CTd4UwfaP7MzR18Dudr7F", "type": "assistant", "uuid": "ab8a1787-0121-43f4-b2bd-0cef8ac3246d", "timestamp": 
"2025-09-29T17:08:59.132Z"} diff --git a/dev-docs/messages/tools/Task-tool_result.json b/dev-docs/messages/tools/Task-tool_result.json new file mode 100644 index 00000000..51ce0da8 --- /dev/null +++ b/dev-docs/messages/tools/Task-tool_result.json @@ -0,0 +1,24 @@ +{ + "type": "user", + "sessionId": "cb2e607c-c758-415a-8b45-c49e4631906a", + "timestamp": "2025-11-17T11:24:15.312Z", + "uuid": "70f14719-7300-4566-9a4c-f4a6476e4a38", + "parentUuid": "93476638-874f-4088-a7c3-4cd32130ec88", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01HD7PpSCWhP2gP8dXvJiyZN", + "is_error": false, + "content": [ + { + "_note": "1 items" + } + ] + } + ] + }, + "toolUseResult": {} +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Task-tool_result.jsonl b/dev-docs/messages/tools/Task-tool_result.jsonl new file mode 100644 index 00000000..faedecf1 --- /dev/null +++ b/dev-docs/messages/tools/Task-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "93476638-874f-4088-a7c3-4cd32130ec88", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/coderabbit-review-helper", "sessionId": "cb2e607c-c758-415a-8b45-c49e4631906a", "version": "2.0.37", "gitBranch": "fix/comment-attribution-and-pagination", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_01HD7PpSCWhP2gP8dXvJiyZN", "type": "tool_result", "content": [{"type": "text", "text": "Perfect! Now I have a comprehensive understanding of the project structure. Let me provide you with a complete summary:\n\n## Project Structure Summary\n\n### **Current Configuration Files**\n\n1. **`/Users/dain/workspace/coderabbit-review-helper/pyproject.toml`** - Modern Python packaging configuration (already exists!)\n2. **`/Users/dain/workspace/coderabbit-review-helper/requirements.txt`** - Simple dependency list\n3. **`/Users/dain/workspace/coderabbit-review-helper/uv.lock`** - UV lock file (already present!)\n4. 
**`/Users/dain/workspace/coderabbit-review-helper/.gitignore`** - Git ignore for build artifacts\n\n### **Project Structure**\n\nThis is a **single-module project**:\n- **Main script**: `extract-coderabbit-feedback.py` (1,287 lines)\n- **No package directory structure** - it's configured as a single-file module\n- **Module name conflict**: The script is named `extract-coderabbit-feedback.py` but `pyproject.toml` expects a module named `coderabbit_review_extractor`\n\n### **Dependencies**\n\n**Runtime**:\n- `beautifulsoup4>=4.9.0` - HTML/XML parsing for extracting CodeRabbit comments\n\n**Development**:\n- `ruff>=0.14.4` - Python linter and formatter\n\n**Python Version**: `>=3.10`\n\n### **Entry Points**\n\nAccording to `pyproject.toml`:\n```toml\n[project.scripts]\ncoderabbit-extract = \"coderabbit_review_extractor:main\"\n```\n\nThis expects a module `coderabbit_review_extractor.py` with a `main()` function.\n\n### **Current Issues for UV Installation**\n\n1. **Module name mismatch**: \n - Script is named: `extract-coderabbit-feedback.py`\n - Expected module: `coderabbit_review_extractor.py` (underscores, not hyphens)\n \n2. **Setuptools configuration**:\n - Currently uses `setuptools` as build backend\n - Line 39: `py-modules = [\"extract-coderabbit-feedback\"]` - incorrect module name\n \n3. 
**Not UV-native**: Uses setuptools instead of Hatchling (UV's preferred build backend)\n\n### **Existing Packaging Setup**\n\nThe project has **partial packaging setup**:\n- \u2705 Has `pyproject.toml` with metadata\n- \u2705 Has entry point defined\n- \u2705 Has `uv.lock` file (already using UV!)\n- \u2705 Has dependencies specified\n- \u274c Module name doesn't match configuration\n- \u274c Using setuptools instead of Hatchling\n\n### **What the Tool Does**\n\nFrom the code analysis:\n- Extracts CodeRabbit code review comments from GitHub PRs\n- Converts HTML review content to clean, LLM-friendly text format\n- Uses GitHub CLI (`gh`) to fetch PR data via REST and GraphQL APIs\n- Organises feedback by file with priority sorting\n- Filters resolved comments\n- Supports custom preambles for AI agent instructions\n\n### **Main Functions**:\n- `main()` - Entry point with CLI argument parsing\n- `fetch_pr_reviews()` - Get reviews via gh CLI\n- `fetch_pr_inline_comments()` - Get inline comments via GitHub API\n- `fetch_review_threads_graphql()` - Get review threads with pagination\n- `format_for_llm()` - Format output for AI consumption\n- Various parsing and filtering functions\n\n---\n\n## Recommendations for UV Installation\n\nTo make this properly installable with UV, you'll need to:\n\n1. **Rename the script** from `extract-coderabbit-feedback.py` \u2192 `coderabbit_review_extractor.py`\n2. **Update pyproject.toml** to use Hatchling (UV's preferred build backend)\n3. **Fix the module reference** in `[tool.setuptools]`\n4. **Consider creating a package structure** if you plan to expand beyond a single file\n\nWould you like me to proceed with making these changes to properly configure the project for UV installation?"}]}]}, "uuid": "70f14719-7300-4566-9a4c-f4a6476e4a38", "timestamp": "2025-11-17T11:24:15.312Z", "toolUseResult": {"status": "completed", "prompt": "I need to understand the current project structure to help make it installable as a library with uv. 
Please:\n\n1. Check if there's already a pyproject.toml file and examine its contents\n2. Look at the main Python script (extract-coderabbit-feedback.py) to understand the entry points and dependencies\n3. Check if there are any other configuration files like setup.py, requirements.txt, etc.\n4. Identify the project structure - is it a single script or are there multiple modules?\n5. Look for any existing documentation about installation or usage\n\nReturn a summary of:\n- Current project structure\n- Existing configuration files\n- Dependencies used in the code\n- Main entry points/scripts\n- Any existing packaging setup", "agentId": "ea02459f", "content": [{"type": "text", "text": "Perfect! Now I have a comprehensive understanding of the project structure. Let me provide you with a complete summary:\n\n## Project Structure Summary\n\n### **Current Configuration Files**\n\n1. **`/Users/dain/workspace/coderabbit-review-helper/pyproject.toml`** - Modern Python packaging configuration (already exists!)\n2. **`/Users/dain/workspace/coderabbit-review-helper/requirements.txt`** - Simple dependency list\n3. **`/Users/dain/workspace/coderabbit-review-helper/uv.lock`** - UV lock file (already present!)\n4. 
**`/Users/dain/workspace/coderabbit-review-helper/.gitignore`** - Git ignore for build artifacts\n\n### **Project Structure**\n\nThis is a **single-module project**:\n- **Main script**: `extract-coderabbit-feedback.py` (1,287 lines)\n- **No package directory structure** - it's configured as a single-file module\n- **Module name conflict**: The script is named `extract-coderabbit-feedback.py` but `pyproject.toml` expects a module named `coderabbit_review_extractor`\n\n### **Dependencies**\n\n**Runtime**:\n- `beautifulsoup4>=4.9.0` - HTML/XML parsing for extracting CodeRabbit comments\n\n**Development**:\n- `ruff>=0.14.4` - Python linter and formatter\n\n**Python Version**: `>=3.10`\n\n### **Entry Points**\n\nAccording to `pyproject.toml`:\n```toml\n[project.scripts]\ncoderabbit-extract = \"coderabbit_review_extractor:main\"\n```\n\nThis expects a module `coderabbit_review_extractor.py` with a `main()` function.\n\n### **Current Issues for UV Installation**\n\n1. **Module name mismatch**: \n - Script is named: `extract-coderabbit-feedback.py`\n - Expected module: `coderabbit_review_extractor.py` (underscores, not hyphens)\n \n2. **Setuptools configuration**:\n - Currently uses `setuptools` as build backend\n - Line 39: `py-modules = [\"extract-coderabbit-feedback\"]` - incorrect module name\n \n3. 
**Not UV-native**: Uses setuptools instead of Hatchling (UV's preferred build backend)\n\n### **Existing Packaging Setup**\n\nThe project has **partial packaging setup**:\n- \u2705 Has `pyproject.toml` with metadata\n- \u2705 Has entry point defined\n- \u2705 Has `uv.lock` file (already using UV!)\n- \u2705 Has dependencies specified\n- \u274c Module name doesn't match configuration\n- \u274c Using setuptools instead of Hatchling\n\n### **What the Tool Does**\n\nFrom the code analysis:\n- Extracts CodeRabbit code review comments from GitHub PRs\n- Converts HTML review content to clean, LLM-friendly text format\n- Uses GitHub CLI (`gh`) to fetch PR data via REST and GraphQL APIs\n- Organises feedback by file with priority sorting\n- Filters resolved comments\n- Supports custom preambles for AI agent instructions\n\n### **Main Functions**:\n- `main()` - Entry point with CLI argument parsing\n- `fetch_pr_reviews()` - Get reviews via gh CLI\n- `fetch_pr_inline_comments()` - Get inline comments via GitHub API\n- `fetch_review_threads_graphql()` - Get review threads with pagination\n- `format_for_llm()` - Format output for AI consumption\n- Various parsing and filtering functions\n\n---\n\n## Recommendations for UV Installation\n\nTo make this properly installable with UV, you'll need to:\n\n1. **Rename the script** from `extract-coderabbit-feedback.py` \u2192 `coderabbit_review_extractor.py`\n2. **Update pyproject.toml** to use Hatchling (UV's preferred build backend)\n3. **Fix the module reference** in `[tool.setuptools]`\n4. 
**Consider creating a package structure** if you plan to expand beyond a single file\n\nWould you like me to proceed with making these changes to properly configure the project for UV installation?"}], "totalDurationMs": 40843, "totalTokens": 37969, "totalToolUseCount": 14, "usage": {"input_tokens": 5, "cache_creation_input_tokens": 5058, "cache_read_input_tokens": 31899, "cache_creation": {"ephemeral_5m_input_tokens": 5058, "ephemeral_1h_input_tokens": 0}, "output_tokens": 1007, "service_tier": "standard"}}} diff --git a/dev-docs/messages/tools/Task-tool_use.json b/dev-docs/messages/tools/Task-tool_use.json new file mode 100644 index 00000000..6bb52130 --- /dev/null +++ b/dev-docs/messages/tools/Task-tool_use.json @@ -0,0 +1,26 @@ +{ + "type": "assistant", + "sessionId": "cb2e607c-c758-415a-8b45-c49e4631906a", + "timestamp": "2025-11-17T11:23:34.359Z", + "uuid": "93476638-874f-4088-a7c3-4cd32130ec88", + "parentUuid": "fe07dcce-643e-4e04-9d7d-60ad2cc2dcff", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-5-20250929", + "id": "msg_01Bm4U8urWjXsKBx8t4Jy1Dg", + "content": [ + { + "type": "tool_use", + "id": "toolu_01HD7PpSCWhP2gP8dXvJiyZN", + "name": "Task", + "input": { + "description": "Explore project structure for packaging", + "subagent_type": "Plan", + "prompt": "I need to understand the current project structure to help make it installable as a library with uv.... 
[truncated]" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Task-tool_use.jsonl b/dev-docs/messages/tools/Task-tool_use.jsonl new file mode 100644 index 00000000..98dc57e4 --- /dev/null +++ b/dev-docs/messages/tools/Task-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "fe07dcce-643e-4e04-9d7d-60ad2cc2dcff", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/coderabbit-review-helper", "sessionId": "cb2e607c-c758-415a-8b45-c49e4631906a", "version": "2.0.37", "gitBranch": "fix/comment-attribution-and-pagination", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_01Bm4U8urWjXsKBx8t4Jy1Dg", "type": "message", "role": "assistant", "content": [{"type": "tool_use", "id": "toolu_01HD7PpSCWhP2gP8dXvJiyZN", "name": "Task", "input": {"description": "Explore project structure for packaging", "subagent_type": "Plan", "prompt": "I need to understand the current project structure to help make it installable as a library with uv. Please:\n\n1. Check if there's already a pyproject.toml file and examine its contents\n2. Look at the main Python script (extract-coderabbit-feedback.py) to understand the entry points and dependencies\n3. Check if there are any other configuration files like setup.py, requirements.txt, etc.\n4. Identify the project structure - is it a single script or are there multiple modules?\n5. 
Look for any existing documentation about installation or usage\n\nReturn a summary of:\n- Current project structure\n- Existing configuration files\n- Dependencies used in the code\n- Main entry points/scripts\n- Any existing packaging setup"}}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 10, "cache_creation_input_tokens": 4023, "cache_read_input_tokens": 12317, "cache_creation": {"ephemeral_5m_input_tokens": 4023, "ephemeral_1h_input_tokens": 0}, "output_tokens": 440, "service_tier": "standard"}}, "requestId": "req_011CVDNmW1eKXw7wJRxoPrTG", "type": "assistant", "uuid": "93476638-874f-4088-a7c3-4cd32130ec88", "timestamp": "2025-11-17T11:23:34.359Z"} diff --git a/dev-docs/messages/tools/TodoWrite-tool_result.json b/dev-docs/messages/tools/TodoWrite-tool_result.json new file mode 100644 index 00000000..d1aea5c3 --- /dev/null +++ b/dev-docs/messages/tools/TodoWrite-tool_result.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:08:45.236Z", + "uuid": "d9c8ca71-0012-454a-866e-e04723a1aa54", + "parentUuid": "6e817ebe-871d-404a-917b-4385a1e60450", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01QWrhCr2A8aeAXZg7orTPPs", + "is_error": false, + "content": "Todos have been modified successfully. Ensure that you continue to use the todo list to track your progress. 
Please proceed with the current tasks if applicable" + } + ] + }, + "toolUseResult": {} +} \ No newline at end of file diff --git a/dev-docs/messages/tools/TodoWrite-tool_result.jsonl b/dev-docs/messages/tools/TodoWrite-tool_result.jsonl new file mode 100644 index 00000000..afabbf9b --- /dev/null +++ b/dev-docs/messages/tools/TodoWrite-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "6e817ebe-871d-404a-917b-4385a1e60450", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_01QWrhCr2A8aeAXZg7orTPPs", "type": "tool_result", "content": "Todos have been modified successfully. Ensure that you continue to use the todo list to track your progress. Please proceed with the current tasks if applicable"}]}, "uuid": "d9c8ca71-0012-454a-866e-e04723a1aa54", "timestamp": "2025-09-29T17:08:45.236Z", "toolUseResult": {"oldTodos": [], "newTodos": [{"content": "Update JavaScript renderTokenAndText function to use proper ruby HTML elements", "status": "pending", "activeForm": "Updating JavaScript renderTokenAndText function to use proper ruby HTML elements"}, {"content": "Update CSS to style proper ruby elements instead of using display properties", "status": "pending", "activeForm": "Updating CSS to style proper ruby elements instead of using display properties"}]}} diff --git a/dev-docs/messages/tools/TodoWrite-tool_use.json b/dev-docs/messages/tools/TodoWrite-tool_use.json new file mode 100644 index 00000000..8514351e --- /dev/null +++ b/dev-docs/messages/tools/TodoWrite-tool_use.json @@ -0,0 +1,35 @@ +{ + "type": "assistant", + "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:08:45.135Z", + "uuid": "6e817ebe-871d-404a-917b-4385a1e60450", + "parentUuid": "83bb4f7b-1c10-4297-869b-d8553691adee", + "isSidechain": false, + "message": 
{ + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-20250514", + "id": "msg_0115FRD6CuToW1QZE8K4buKD", + "content": [ + { + "type": "tool_use", + "id": "toolu_01QWrhCr2A8aeAXZg7orTPPs", + "name": "TodoWrite", + "input": { + "todos": [ + { + "content": "Update JavaScript renderTokenAndText function to use proper ruby HTML elements", + "status": "pending", + "activeForm": "Updating JavaScript renderTokenAndText function to use proper ruby HTML elements" + }, + { + "content": "Update CSS to style proper ruby elements instead of using display properties", + "status": "pending", + "activeForm": "Updating CSS to style proper ruby elements instead of using display properties" + } + ] + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/TodoWrite-tool_use.jsonl b/dev-docs/messages/tools/TodoWrite-tool_use.jsonl new file mode 100644 index 00000000..ba05dc9f --- /dev/null +++ b/dev-docs/messages/tools/TodoWrite-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "83bb4f7b-1c10-4297-869b-d8553691adee", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "message": {"id": "msg_0115FRD6CuToW1QZE8K4buKD", "type": "message", "role": "assistant", "model": "claude-sonnet-4-20250514", "content": [{"type": "tool_use", "id": "toolu_01QWrhCr2A8aeAXZg7orTPPs", "name": "TodoWrite", "input": {"todos": [{"content": "Update JavaScript renderTokenAndText function to use proper ruby HTML elements", "status": "pending", "activeForm": "Updating JavaScript renderTokenAndText function to use proper ruby HTML elements"}, {"content": "Update CSS to style proper ruby elements instead of using display properties", "status": "pending", "activeForm": "Updating CSS to style proper ruby elements instead of using display properties"}]}}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 6, 
"cache_creation_input_tokens": 10012, "cache_read_input_tokens": 12008, "cache_creation": {"ephemeral_5m_input_tokens": 10012, "ephemeral_1h_input_tokens": 0}, "output_tokens": 25, "service_tier": "standard"}}, "requestId": "req_011CTd4TqmqbW9WHQQhscfEW", "type": "assistant", "uuid": "6e817ebe-871d-404a-917b-4385a1e60450", "timestamp": "2025-09-29T17:08:45.135Z"} diff --git a/dev-docs/messages/tools/WebFetch-tool_result.json b/dev-docs/messages/tools/WebFetch-tool_result.json new file mode 100644 index 00000000..348b0550 --- /dev/null +++ b/dev-docs/messages/tools/WebFetch-tool_result.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "741790a4-4fe2-4644-9a51-fb4482074060", + "timestamp": "2025-11-13T14:08:07.080Z", + "uuid": "9b80622a-bed6-43e4-a9c0-1d68ecd9c412", + "parentUuid": "0202e25d-9d68-456e-a764-e085e06aad63", + "isSidechain": true, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01WB97t4LJ8M2hrZpQnQCJxG", + "is_error": false, + "content": "# Fields Returned by GET /repos/OWNER/REPO/pulls/PULL_NUMBER/comments\n\nBased on the GitHub API documentation, the response includes these location-related fields:\n... 
[truncated]" + } + ] + }, + "toolUseResult": {} +} \ No newline at end of file diff --git a/dev-docs/messages/tools/WebFetch-tool_result.jsonl b/dev-docs/messages/tools/WebFetch-tool_result.jsonl new file mode 100644 index 00000000..7e7c7f4b --- /dev/null +++ b/dev-docs/messages/tools/WebFetch-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "0202e25d-9d68-456e-a764-e085e06aad63", "isSidechain": true, "userType": "external", "cwd": "/Users/dain/workspace/coderabbit-review-helper", "sessionId": "741790a4-4fe2-4644-9a51-fb4482074060", "version": "2.0.37", "gitBranch": "main", "agentId": "db734024", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_01WB97t4LJ8M2hrZpQnQCJxG", "type": "tool_result", "content": "# Fields Returned by GET /repos/OWNER/REPO/pulls/PULL_NUMBER/comments\n\nBased on the GitHub API documentation, the response includes these location-related fields:\n\n**Primary Location Fields:**\n- `path` - \"The relative path of the file to which the comment applies\"\n- `line` - \"The line of the blob to which the comment applies. The last line of the range for a multi-line comment\"\n- `original_line` - \"The line of the blob to which the comment applies. The last line of the range for a multi-line comment\"\n\n**Legacy/Deprecated Fields:**\n- `position` - \"The line index in the diff to which the comment applies. This field is closing down\"\n- `original_position` - \"The index of the original line in the diff to which the comment applies. 
This field is closing down\"\n\n**Contextual Fields:**\n- `diff_hunk` - \"The diff of the line that the comment refers to\"\n- `side` - Indicates whether the comment applies to the `LEFT` or `RIGHT` side of the diff\n- `start_line` and `original_start_line` - Used for multi-line comments to mark the starting line\n- `start_side` - Specifies the starting side for multi-line comments\n\nAdditionally, the response contains metadata fields like `commit_id`, `original_commit_id`, `url`, `html_url`, and `pull_request_url` for reference purposes."}]}, "uuid": "9b80622a-bed6-43e4-a9c0-1d68ecd9c412", "timestamp": "2025-11-13T14:08:07.080Z", "toolUseResult": {"bytes": 440193, "code": 200, "codeText": "OK", "result": "# Fields Returned by GET /repos/OWNER/REPO/pulls/PULL_NUMBER/comments\n\nBased on the GitHub API documentation, the response includes these location-related fields:\n\n**Primary Location Fields:**\n- `path` - \"The relative path of the file to which the comment applies\"\n- `line` - \"The line of the blob to which the comment applies. The last line of the range for a multi-line comment\"\n- `original_line` - \"The line of the blob to which the comment applies. The last line of the range for a multi-line comment\"\n\n**Legacy/Deprecated Fields:**\n- `position` - \"The line index in the diff to which the comment applies. This field is closing down\"\n- `original_position` - \"The index of the original line in the diff to which the comment applies. 
This field is closing down\"\n\n**Contextual Fields:**\n- `diff_hunk` - \"The diff of the line that the comment refers to\"\n- `side` - Indicates whether the comment applies to the `LEFT` or `RIGHT` side of the diff\n- `start_line` and `original_start_line` - Used for multi-line comments to mark the starting line\n- `start_side` - Specifies the starting side for multi-line comments\n\nAdditionally, the response contains metadata fields like `commit_id`, `original_commit_id`, `url`, `html_url`, and `pull_request_url` for reference purposes.", "durationMs": 5180, "url": "https://docs.github.com/en/rest/pulls/comments"}} diff --git a/dev-docs/messages/tools/WebFetch-tool_use.json b/dev-docs/messages/tools/WebFetch-tool_use.json new file mode 100644 index 00000000..21144476 --- /dev/null +++ b/dev-docs/messages/tools/WebFetch-tool_use.json @@ -0,0 +1,25 @@ +{ + "type": "assistant", + "sessionId": "741790a4-4fe2-4644-9a51-fb4482074060", + "timestamp": "2025-11-13T13:09:37.381Z", + "uuid": "0202e25d-9d68-456e-a764-e085e06aad63", + "parentUuid": "2ddf06a0-6ee1-4695-86a5-2c9e1983f607", + "isSidechain": true, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-5-20250929", + "id": "msg_01QcaYHrVe7qKdjUKUjMJjSg", + "content": [ + { + "type": "tool_use", + "id": "toolu_01WB97t4LJ8M2hrZpQnQCJxG", + "name": "WebFetch", + "input": { + "url": "https://docs.github.com/en/rest/pulls/comments", + "prompt": "What fields are returned in the response from GET /repos/OWNER/REPO/pulls/PULL_NUMBER/comments? Spec... 
[truncated]" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/WebFetch-tool_use.jsonl b/dev-docs/messages/tools/WebFetch-tool_use.jsonl new file mode 100644 index 00000000..c6e08c28 --- /dev/null +++ b/dev-docs/messages/tools/WebFetch-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "2ddf06a0-6ee1-4695-86a5-2c9e1983f607", "isSidechain": true, "userType": "external", "cwd": "/Users/dain/workspace/coderabbit-review-helper", "sessionId": "741790a4-4fe2-4644-9a51-fb4482074060", "version": "2.0.37", "gitBranch": "main", "agentId": "db734024", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_01QcaYHrVe7qKdjUKUjMJjSg", "type": "message", "role": "assistant", "content": [{"type": "tool_use", "id": "toolu_01WB97t4LJ8M2hrZpQnQCJxG", "name": "WebFetch", "input": {"url": "https://docs.github.com/en/rest/pulls/comments", "prompt": "What fields are returned in the response from GET /repos/OWNER/REPO/pulls/PULL_NUMBER/comments? Specifically list: path, line, original_line, position, original_position, diff_hunk, and any other location-related fields."}}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 6, "cache_creation_input_tokens": 25934, "cache_read_input_tokens": 0, "cache_creation": {"ephemeral_5m_input_tokens": 25934, "ephemeral_1h_input_tokens": 0}, "output_tokens": 167, "service_tier": "standard"}}, "requestId": "req_011CV5wdGyAZeekbYZA9x8PH", "type": "assistant", "uuid": "0202e25d-9d68-456e-a764-e085e06aad63", "timestamp": "2025-11-13T13:09:37.381Z"} diff --git a/dev-docs/messages/tools/WebSearch-tool_result.json b/dev-docs/messages/tools/WebSearch-tool_result.json new file mode 100644 index 00000000..82997c4f --- /dev/null +++ b/dev-docs/messages/tools/WebSearch-tool_result.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "741790a4-4fe2-4644-9a51-fb4482074060", + "timestamp": "2025-11-13T13:09:31.016Z", + "uuid": "1e5e4e35-3c24-475e-b0fc-d637fe46e645", + "parentUuid": 
"1eb97b54-2407-473e-8c4d-439ad378efbc", + "isSidechain": true, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01Fa61Wkr6FFgFGSpZ2BSXED", + "is_error": false, + "content": "Web search results for query: \"GitHub API pulls comments endpoint response fields path line position 2025\"\n\nLinks: [{\"title\":\"REST API endpoints for pull request reviews - GitHub Docs\",\"url\":\"https://... [truncated]" + } + ] + }, + "toolUseResult": {} +} \ No newline at end of file diff --git a/dev-docs/messages/tools/WebSearch-tool_result.jsonl b/dev-docs/messages/tools/WebSearch-tool_result.jsonl new file mode 100644 index 00000000..eccb053c --- /dev/null +++ b/dev-docs/messages/tools/WebSearch-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "1eb97b54-2407-473e-8c4d-439ad378efbc", "isSidechain": true, "userType": "external", "cwd": "/Users/dain/workspace/coderabbit-review-helper", "sessionId": "741790a4-4fe2-4644-9a51-fb4482074060", "version": "2.0.37", "gitBranch": "main", "agentId": "db734024", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_01Fa61Wkr6FFgFGSpZ2BSXED", "type": "tool_result", "content": "Web search results for query: \"GitHub API pulls comments endpoint response fields path line position 2025\"\n\nLinks: [{\"title\":\"REST API endpoints for pull request reviews - GitHub Docs\",\"url\":\"https://docs.github.com/en/rest/pulls/reviews\"},{\"title\":\"REST API endpoints for pull request review comments - GitHub Docs\",\"url\":\"https://docs.github.com/en/rest/pulls/comments\"},{\"title\":\"Pull request review comments - GitHub Enterprise Server 3.2 Docs\",\"url\":\"https://docs.github.com/en/enterprise-server@3.2/rest/pulls/comments\"},{\"title\":\"REST API endpoints for pull request reviews - GitHub Enterprise Server 3.12 Docs\",\"url\":\"https://docs.github.com/en/enterprise-server@3.12/rest/pulls/reviews\"},{\"title\":\"REST API endpoints for pull request reviews - GitHub Enterprise 
Server 3.11 Docs\",\"url\":\"https://docs.github.com/en/enterprise-server@3.11/rest/pulls/reviews\"},{\"title\":\"REST API endpoints for issue comments - GitHub Docs\",\"url\":\"https://docs.github.com/en/rest/issues/comments\"},{\"title\":\"REST API endpoints for pull request reviews - GitHub Enterprise Server 3.8 Docs\",\"url\":\"https://docs.github.com/en/enterprise-server@3.8/rest/pulls/reviews\"},{\"title\":\"REST API endpoints for pull requests - GitHub Docs\",\"url\":\"https://docs.github.com/en/rest/pulls/pulls\"},{\"title\":\"Working with comments - GitHub Docs\",\"url\":\"https://docs.github.com/en/rest/guides/working-with-comments\"},{\"title\":\"REST API endpoints for pull request review comments - GitHub Enterprise Server 3.15 Docs\",\"url\":\"https://docs.github.com/en/enterprise-server@3.15/rest/pulls/comments\"}]\n\nI found information about the GitHub API pull request comments endpoint response fields including `path`, `line`, and `position`. Here are the key details:\n\n## Response Fields\n\nThe GitHub API allows creating comments on pull requests with fields like `body`, `commit_id`, `path`, `start_line`, `start_side`, `line`, and `side`.\n\n## Key Field Explanations\n\n**Position Field:**\nThe position value equals the number of lines down from the first \"@@\" hunk header in the file you want to add a comment. The line just below the \"@@\" line is position 1, the next line is position 2, and so on.(no content)The position in the diff continues to increase through lines of whitespace and additional hunks until the beginning of a new file.\n\n**Multi-line Comment Fields:**\nFor multi-line comments, values for start_line, original_start_line, start_side, line, original_line, and side are shown. 
For single-line comments, values for line, original_line, and side are shown with a null value for start_line, original_start_line, and start_side.\n\n## Endpoints\n\nThe primary endpoint for pull request review comments is:\n- `GET /repos/OWNER/REPO/pulls/PULL_NUMBER/comments` - List comments on a pull request\n- `GET /repos/OWNER/REPO/pulls/comments/COMMENT_ID` - Get a specific comment\n- `POST /repos/OWNER/REPO/pulls/PULL_NUMBER/comments` - Create a comment\n\nYou can find the complete documentation at the official GitHub REST API documentation for pull request review comments."}]}, "uuid": "1e5e4e35-3c24-475e-b0fc-d637fe46e645", "timestamp": "2025-11-13T13:09:31.016Z", "toolUseResult": {"query": "GitHub API pulls comments endpoint response fields path line position 2025", "results": [{"tool_use_id": "srvtoolu_01PdNZE8CU47uoQo8Euw2JVb", "content": [{"title": "REST API endpoints for pull request reviews - GitHub Docs", "url": "https://docs.github.com/en/rest/pulls/reviews"}, {"title": "REST API endpoints for pull request review comments - GitHub Docs", "url": "https://docs.github.com/en/rest/pulls/comments"}, {"title": "Pull request review comments - GitHub Enterprise Server 3.2 Docs", "url": "https://docs.github.com/en/enterprise-server@3.2/rest/pulls/comments"}, {"title": "REST API endpoints for pull request reviews - GitHub Enterprise Server 3.12 Docs", "url": "https://docs.github.com/en/enterprise-server@3.12/rest/pulls/reviews"}, {"title": "REST API endpoints for pull request reviews - GitHub Enterprise Server 3.11 Docs", "url": "https://docs.github.com/en/enterprise-server@3.11/rest/pulls/reviews"}, {"title": "REST API endpoints for issue comments - GitHub Docs", "url": "https://docs.github.com/en/rest/issues/comments"}, {"title": "REST API endpoints for pull request reviews - GitHub Enterprise Server 3.8 Docs", "url": "https://docs.github.com/en/enterprise-server@3.8/rest/pulls/reviews"}, {"title": "REST API endpoints for pull requests - GitHub Docs", 
"url": "https://docs.github.com/en/rest/pulls/pulls"}, {"title": "Working with comments - GitHub Docs", "url": "https://docs.github.com/en/rest/guides/working-with-comments"}, {"title": "REST API endpoints for pull request review comments - GitHub Enterprise Server 3.15 Docs", "url": "https://docs.github.com/en/enterprise-server@3.15/rest/pulls/comments"}]}, "I found information about the GitHub API pull request comments endpoint response fields including `path`, `line`, and `position`. Here are the key details:\n\n## Response Fields\n\nThe GitHub API allows creating comments on pull requests with fields like `body`, `commit_id`, `path`, `start_line`, `start_side`, `line`, and `side`.\n\n## Key Field Explanations\n\n**Position Field:**\nThe position value equals the number of lines down from the first \"@@\" hunk header in the file you want to add a comment. The line just below the \"@@\" line is position 1, the next line is position 2, and so on.(no content)The position in the diff continues to increase through lines of whitespace and additional hunks until the beginning of a new file.\n\n**Multi-line Comment Fields:**\nFor multi-line comments, values for start_line, original_start_line, start_side, line, original_line, and side are shown. 
For single-line comments, values for line, original_line, and side are shown with a null value for start_line, original_start_line, and start_side.\n\n## Endpoints\n\nThe primary endpoint for pull request review comments is:\n- `GET /repos/OWNER/REPO/pulls/PULL_NUMBER/comments` - List comments on a pull request\n- `GET /repos/OWNER/REPO/pulls/comments/COMMENT_ID` - Get a specific comment\n- `POST /repos/OWNER/REPO/pulls/PULL_NUMBER/comments` - Create a comment\n\nYou can find the complete documentation at the official GitHub REST API documentation for pull request review comments."], "durationSeconds": 15.735503042000346}} diff --git a/dev-docs/messages/tools/WebSearch-tool_use.json b/dev-docs/messages/tools/WebSearch-tool_use.json new file mode 100644 index 00000000..d3945157 --- /dev/null +++ b/dev-docs/messages/tools/WebSearch-tool_use.json @@ -0,0 +1,24 @@ +{ + "type": "assistant", + "sessionId": "741790a4-4fe2-4644-9a51-fb4482074060", + "timestamp": "2025-11-13T12:14:44.735Z", + "uuid": "4d6d4310-d5b2-4c4d-b2b7-d70ed9caf921", + "parentUuid": "89c3edde-8686-48c5-871d-3770fc5dc61d", + "isSidechain": true, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-5-20250929", + "id": "msg_018sPiYDNCm5ytiGsmMeBRDn", + "content": [ + { + "type": "tool_use", + "id": "toolu_01Fa61Wkr6FFgFGSpZ2BSXED", + "name": "WebSearch", + "input": { + "query": "GitHub API pulls comments endpoint response fields path line position 2025" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/WebSearch-tool_use.jsonl b/dev-docs/messages/tools/WebSearch-tool_use.jsonl new file mode 100644 index 00000000..28e8a613 --- /dev/null +++ b/dev-docs/messages/tools/WebSearch-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "89c3edde-8686-48c5-871d-3770fc5dc61d", "isSidechain": true, "userType": "external", "cwd": "/Users/dain/workspace/coderabbit-review-helper", "sessionId": "741790a4-4fe2-4644-9a51-fb4482074060", "version": "2.0.37", 
"gitBranch": "main", "agentId": "db734024", "message": {"model": "claude-sonnet-4-5-20250929", "id": "msg_018sPiYDNCm5ytiGsmMeBRDn", "type": "message", "role": "assistant", "content": [{"type": "tool_use", "id": "toolu_01Fa61Wkr6FFgFGSpZ2BSXED", "name": "WebSearch", "input": {"query": "GitHub API pulls comments endpoint response fields path line position 2025"}}], "stop_reason": "tool_use", "stop_sequence": null, "usage": {"input_tokens": 5, "cache_creation_input_tokens": 14857, "cache_read_input_tokens": 8618, "cache_creation": {"ephemeral_5m_input_tokens": 14857, "ephemeral_1h_input_tokens": 0}, "output_tokens": 203, "service_tier": "standard"}}, "requestId": "req_011CV5sSLxkJoXyXESDNx2Mj", "type": "assistant", "uuid": "4d6d4310-d5b2-4c4d-b2b7-d70ed9caf921", "timestamp": "2025-11-13T12:14:44.735Z"} diff --git a/dev-docs/messages/tools/Write-tool_result.json b/dev-docs/messages/tools/Write-tool_result.json new file mode 100644 index 00000000..fce0a08a --- /dev/null +++ b/dev-docs/messages/tools/Write-tool_result.json @@ -0,0 +1,22 @@ +{ + "type": "user", + "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", + "timestamp": "2025-10-04T00:00:40.925Z", + "uuid": "47137cf5-4086-4835-8025-6525c23ec82a", + "parentUuid": "3b742928-0e5b-4fa9-9174-89c58b692497", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01BM49RbbGYRjhjgHRECVjyo", + "is_error": false, + "content": "The file /Users/dain/workspace/online-llm-tokenizer/README.md has been updated. Here's the result of running `cat -n` on a snippet of the edited file:\n 1\u2192# Online LLM Tokenizer\n 2\u2192\n... [trunca... 
[truncated]" + } + ] + }, + "toolUseResult": { + "type": "update" + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Write-tool_result.jsonl b/dev-docs/messages/tools/Write-tool_result.jsonl new file mode 100644 index 00000000..d6ba378f --- /dev/null +++ b/dev-docs/messages/tools/Write-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "3b742928-0e5b-4fa9-9174-89c58b692497", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", "version": "2.0.5", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"tool_use_id": "toolu_01BM49RbbGYRjhjgHRECVjyo", "type": "tool_result", "content": "The file /Users/dain/workspace/online-llm-tokenizer/README.md has been updated. Here's the result of running `cat -n` on a snippet of the edited file:\n 1\u2192# Online LLM Tokenizer\n 2\u2192\n 3\u2192A pure JavaScript tokenizer running in your browser that can load `tokenizer.json` and `tokenizer_config.json` from any repository on HuggingFace. You can use it to count tokens and compare how different large language model vocabularies work. 
It's also useful for debugging prompt templates.\n 4\u2192\n 5\u2192## Features\n 6\u2192\n 7\u2192- **No server required**: Pure client-side tokenization using [transformers.js](https://huggingface.co/docs/transformers.js)\n 8\u2192- **Compare models**: Load multiple tokenizers simultaneously to see how different models tokenize the same text\n 9\u2192- **Visual token display**: Each token is displayed with its original text and token ID using colour-coded backgrounds\n 10\u2192- **Share configurations**: Generate shareable URLs with your text and model selection\n 11\u2192- **Persistent settings**: Model lists are saved in localStorage across browser sessions\n 12\u2192- **HuggingFace integration**: Load any tokenizer directly from HuggingFace Hub\n 13\u2192- **Dark mode support**: Automatic theme detection with appropriate colour schemes\n 14\u2192- **Offline capability**: Once models are loaded, tokenization works completely offline\n 15\u2192\n 16\u2192## Usage\n 17\u2192\n 18\u2192### Adding Models\n 19\u2192\n 20\u2192Copy model names from HuggingFace (e.g., from the title of model pages like \"microsoft/Phi-3-mini-4k-instruct\") and paste into the input field. Press Enter or click \"Add tokenizer from HuggingFace\".\n 21\u2192\n 22\u2192### Deleting Models\n 23\u2192\n 24\u2192Click the red \"\ud83d\uddd1\ufe0f Delete\" button next to any model. You'll get a confirmation prompt and cannot delete the last model.\n 25\u2192\n 26\u2192### Sharing Configurations\n 27\u2192\n 28\u2192Click the \"\ud83d\udccb Share\" button to copy a URL containing your current text and model selection. 
Share this URL with others to let them see the same tokenization.\n 29\u2192\n 30\u2192### URL Parameters\n 31\u2192\n 32\u2192You can link directly to specific configurations using URL parameters:\n 33\u2192```\n 34\u2192?text=your%20text&models=model1,model2,model3\n 35\u2192```\n 36\u2192\n 37\u2192### Token Display\n 38\u2192\n 39\u2192- Each word/subword piece shows the original text above and the token number below\n 40\u2192- Different background colours help distinguish adjacent tokens (cycling through 10 colours)\n 41\u2192- Newlines are preserved in the display\n 42\u2192\n 43\u2192## Implementation Details\n 44\u2192\n 45\u2192- **Parallel model loading**: All tokenizers load simultaneously using `Promise.all()` instead of sequentially, to improve startup time\n 46\u2192- **Progressive rendering**: Models appear and update individually as they finish loading, providing immediate feedback\n 47\u2192- **Debounced input processing**: Text changes are debounced by 300ms to prevent excessive re-tokenization during typing\n 48\u2192- **Ruby annotations**: Tokens are displayed using HTML `` elements with text above and token numbers below\n 49\u2192- **Space preservation**: Automatically detects and removes tokenizer space-stripping to accurately show whitespace tokens\n 50\u2192- **Memory management**: Models are cached in memory and only loaded once, with cleanup on deletion\n 51\u2192\n 52\u2192## Development\n 53\u2192\n 54\u2192The project consists of three main files:\n 55\u2192\n 56\u2192- `index.html` - Main HTML structure and UI\n 57\u2192- `tokenizer.css` - Styling including dark mode support\n 58\u2192- `tokenizer.js` - Core tokenization logic using transformers.js\n 59\u2192\n 60\u2192### Local Development\n 61\u2192\n 62\u2192Simply open `index.html` in a modern web browser. 
No build step required.\n 63\u2192\n 64\u2192### Dependencies\n 65\u2192\n 66\u2192- [transformers.js](https://huggingface.co/docs/transformers.js) - Loaded as an ES module for client-side tokenization\n 67\u2192\n 68\u2192## Browser Compatibility\n 69\u2192\n 70\u2192Works in all modern browsers that support:\n 71\u2192- ES6 modules\n 72\u2192- Async/await\n 73\u2192- LocalStorage\n 74\u2192- Clipboard API (for share functionality)\n 75\u2192\n 76\u2192## Why So Many Xenova Models?\n 77\u2192\n 78\u2192If you're wondering why there are so many models under Xenova, it's because they work for HuggingFace and re-upload just the tokenizers, so it's possible to load them without agreeing to model licences.\n 79\u2192\n 80\u2192## Contributing\n 81\u2192\n 82\u2192Contributions are welcome! Please feel free to submit a Pull Request.\n 83\u2192\n 84\u2192## Licence\n 85\u2192\n 86\u2192See [LICENCE](LICENCE) file for details.\n 87\u2192\n 88\u2192## Live Demo\n 89\u2192\n 90\u2192Try it at: [danieldemmel.me/tokenizer](https://www.danieldemmel.me/tokenizer.html)\n 91\u2192"}]}, "uuid": "47137cf5-4086-4835-8025-6525c23ec82a", "timestamp": "2025-10-04T00:00:40.925Z", "toolUseResult": {"type": "update", "filePath": "/Users/dain/workspace/online-llm-tokenizer/README.md", "content": "# Online LLM Tokenizer\n\nA pure JavaScript tokenizer running in your browser that can load `tokenizer.json` and `tokenizer_config.json` from any repository on HuggingFace. You can use it to count tokens and compare how different large language model vocabularies work. 
It's also useful for debugging prompt templates.\n\n## Features\n\n- **No server required**: Pure client-side tokenization using [transformers.js](https://huggingface.co/docs/transformers.js)\n- **Compare models**: Load multiple tokenizers simultaneously to see how different models tokenize the same text\n- **Visual token display**: Each token is displayed with its original text and token ID using colour-coded backgrounds\n- **Share configurations**: Generate shareable URLs with your text and model selection\n- **Persistent settings**: Model lists are saved in localStorage across browser sessions\n- **HuggingFace integration**: Load any tokenizer directly from HuggingFace Hub\n- **Dark mode support**: Automatic theme detection with appropriate colour schemes\n- **Offline capability**: Once models are loaded, tokenization works completely offline\n\n## Usage\n\n### Adding Models\n\nCopy model names from HuggingFace (e.g., from the title of model pages like \"microsoft/Phi-3-mini-4k-instruct\") and paste into the input field. Press Enter or click \"Add tokenizer from HuggingFace\".\n\n### Deleting Models\n\nClick the red \"\ud83d\uddd1\ufe0f Delete\" button next to any model. You'll get a confirmation prompt and cannot delete the last model.\n\n### Sharing Configurations\n\nClick the \"\ud83d\udccb Share\" button to copy a URL containing your current text and model selection. 
Share this URL with others to let them see the same tokenization.\n\n### URL Parameters\n\nYou can link directly to specific configurations using URL parameters:\n```\n?text=your%20text&models=model1,model2,model3\n```\n\n### Token Display\n\n- Each word/subword piece shows the original text above and the token number below\n- Different background colours help distinguish adjacent tokens (cycling through 10 colours)\n- Newlines are preserved in the display\n\n## Implementation Details\n\n- **Parallel model loading**: All tokenizers load simultaneously using `Promise.all()` instead of sequentially, to improve startup time\n- **Progressive rendering**: Models appear and update individually as they finish loading, providing immediate feedback\n- **Debounced input processing**: Text changes are debounced by 300ms to prevent excessive re-tokenization during typing\n- **Ruby annotations**: Tokens are displayed using HTML `` elements with text above and token numbers below\n- **Space preservation**: Automatically detects and removes tokenizer space-stripping to accurately show whitespace tokens\n- **Memory management**: Models are cached in memory and only loaded once, with cleanup on deletion\n\n## Development\n\nThe project consists of three main files:\n\n- `index.html` - Main HTML structure and UI\n- `tokenizer.css` - Styling including dark mode support\n- `tokenizer.js` - Core tokenization logic using transformers.js\n\n### Local Development\n\nSimply open `index.html` in a modern web browser. 
No build step required.\n\n### Dependencies\n\n- [transformers.js](https://huggingface.co/docs/transformers.js) - Loaded as an ES module for client-side tokenization\n\n## Browser Compatibility\n\nWorks in all modern browsers that support:\n- ES6 modules\n- Async/await\n- LocalStorage\n- Clipboard API (for share functionality)\n\n## Why So Many Xenova Models?\n\nIf you're wondering why there are so many models under Xenova, it's because they work for HuggingFace and re-upload just the tokenizers, so it's possible to load them without agreeing to model licences.\n\n## Contributing\n\nContributions are welcome! Please feel free to submit a Pull Request.\n\n## Licence\n\nSee [LICENCE](LICENCE) file for details.\n\n## Live Demo\n\nTry it at: [danieldemmel.me/tokenizer](https://www.danieldemmel.me/tokenizer.html)\n", "structuredPatch": [{"oldStart": 1, "oldLines": 1, "newStart": 1, "newLines": 90, "lines": ["-# online-llm-tokenizer", "\\ No newline at end of file", "+# Online LLM Tokenizer", "+", "+A pure JavaScript tokenizer running in your browser that can load `tokenizer.json` and `tokenizer_config.json` from any repository on HuggingFace. You can use it to count tokens and compare how different large language model vocabularies work. 
It's also useful for debugging prompt templates.", "+", "+## Features", "+", "+- **No server required**: Pure client-side tokenization using [transformers.js](https://huggingface.co/docs/transformers.js)", "+- **Compare models**: Load multiple tokenizers simultaneously to see how different models tokenize the same text", "+- **Visual token display**: Each token is displayed with its original text and token ID using colour-coded backgrounds", "+- **Share configurations**: Generate shareable URLs with your text and model selection", "+- **Persistent settings**: Model lists are saved in localStorage across browser sessions", "+- **HuggingFace integration**: Load any tokenizer directly from HuggingFace Hub", "+- **Dark mode support**: Automatic theme detection with appropriate colour schemes", "+- **Offline capability**: Once models are loaded, tokenization works completely offline", "+", "+## Usage", "+", "+### Adding Models", "+", "+Copy model names from HuggingFace (e.g., from the title of model pages like \"microsoft/Phi-3-mini-4k-instruct\") and paste into the input field. Press Enter or click \"Add tokenizer from HuggingFace\".", "+", "+### Deleting Models", "+", "+Click the red \"\ud83d\uddd1\ufe0f Delete\" button next to any model. You'll get a confirmation prompt and cannot delete the last model.", "+", "+### Sharing Configurations", "+", "+Click the \"\ud83d\udccb Share\" button to copy a URL containing your current text and model selection. 
Share this URL with others to let them see the same tokenization.", "+", "+### URL Parameters", "+", "+You can link directly to specific configurations using URL parameters:", "+```", "+?text=your%20text&models=model1,model2,model3", "+```", "+", "+### Token Display", "+", "+- Each word/subword piece shows the original text above and the token number below", "+- Different background colours help distinguish adjacent tokens (cycling through 10 colours)", "+- Newlines are preserved in the display", "+", "+## Implementation Details", "+", "+- **Parallel model loading**: All tokenizers load simultaneously using `Promise.all()` instead of sequentially, to improve startup time", "+- **Progressive rendering**: Models appear and update individually as they finish loading, providing immediate feedback", "+- **Debounced input processing**: Text changes are debounced by 300ms to prevent excessive re-tokenization during typing", "+- **Ruby annotations**: Tokens are displayed using HTML `` elements with text above and token numbers below", "+- **Space preservation**: Automatically detects and removes tokenizer space-stripping to accurately show whitespace tokens", "+- **Memory management**: Models are cached in memory and only loaded once, with cleanup on deletion", "+", "+## Development", "+", "+The project consists of three main files:", "+", "+- `index.html` - Main HTML structure and UI", "+- `tokenizer.css` - Styling including dark mode support", "+- `tokenizer.js` - Core tokenization logic using transformers.js", "+", "+### Local Development", "+", "+Simply open `index.html` in a modern web browser. 
No build step required.", "+", "+### Dependencies", "+", "+- [transformers.js](https://huggingface.co/docs/transformers.js) - Loaded as an ES module for client-side tokenization", "+", "+## Browser Compatibility", "+", "+Works in all modern browsers that support:", "+- ES6 modules", "+- Async/await", "+- LocalStorage", "+- Clipboard API (for share functionality)", "+", "+## Why So Many Xenova Models?", "+", "+If you're wondering why there are so many models under Xenova, it's because they work for HuggingFace and re-upload just the tokenizers, so it's possible to load them without agreeing to model licences.", "+", "+## Contributing", "+", "+Contributions are welcome! Please feel free to submit a Pull Request.", "+", "+## Licence", "+", "+See [LICENCE](LICENCE) file for details.", "+", "+## Live Demo", "+", "+Try it at: [danieldemmel.me/tokenizer](https://www.danieldemmel.me/tokenizer.html)"]}]}} diff --git a/dev-docs/messages/tools/Write-tool_result_error.json b/dev-docs/messages/tools/Write-tool_result_error.json new file mode 100644 index 00000000..edfb3a14 --- /dev/null +++ b/dev-docs/messages/tools/Write-tool_result_error.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "37f83ec9-f2ea-42a9-925e-0d5c105cb6e8", + "timestamp": "2025-07-14T23:07:05.093Z", + "uuid": "5459698e-5061-43ea-b0fd-9d9e3dc7c4a9", + "parentUuid": "3f8ccd2c-d596-471d-8c05-44df53982160", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01X3AHK9hmPmJqASckfkMLmu", + "is_error": true, + "content": "The user doesn't want to proceed with this tool use. The tool use was rejected (eg. if it was a file edit, the new_string was NOT written to the file). STOP what you are doing and wait for the user to... [truncated]" + } + ] + }, + "toolUseResult": "Error: The user doesn't want to proceed with this tool use. The tool use was rejected (eg. if it was a file edit, the new_string was NOT written to the file). 
STOP what you are doing and wait for the ... [truncated]" +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Write-tool_result_error.jsonl b/dev-docs/messages/tools/Write-tool_result_error.jsonl new file mode 100644 index 00000000..b7ea87fa --- /dev/null +++ b/dev-docs/messages/tools/Write-tool_result_error.jsonl @@ -0,0 +1 @@ +{"parentUuid": "3f8ccd2c-d596-471d-8c05-44df53982160", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/claude-code-log", "sessionId": "37f83ec9-f2ea-42a9-925e-0d5c105cb6e8", "version": "1.0.51", "type": "user", "message": {"role": "user", "content": [{"type": "tool_result", "content": "The user doesn't want to proceed with this tool use. The tool use was rejected (eg. if it was a file edit, the new_string was NOT written to the file). STOP what you are doing and wait for the user to tell you how to proceed.", "is_error": true, "tool_use_id": "toolu_01X3AHK9hmPmJqASckfkMLmu"}]}, "uuid": "5459698e-5061-43ea-b0fd-9d9e3dc7c4a9", "timestamp": "2025-07-14T23:07:05.093Z", "toolUseResult": "Error: The user doesn't want to proceed with this tool use. The tool use was rejected (eg. if it was a file edit, the new_string was NOT written to the file). 
STOP what you are doing and wait for the user to tell you how to proceed."} diff --git a/dev-docs/messages/tools/Write-tool_use.json b/dev-docs/messages/tools/Write-tool_use.json new file mode 100644 index 00000000..f2e1dbd4 --- /dev/null +++ b/dev-docs/messages/tools/Write-tool_use.json @@ -0,0 +1,25 @@ +{ + "type": "assistant", + "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", + "timestamp": "2025-10-03T23:59:52.232Z", + "uuid": "3b742928-0e5b-4fa9-9174-89c58b692497", + "parentUuid": "78649cc5-e531-4ea9-b748-2a92d402ae89", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-5-20250929", + "id": "msg_01NHFT4LdUekGkqFuzi2dxRx", + "content": [ + { + "type": "tool_use", + "id": "toolu_01BM49RbbGYRjhjgHRECVjyo", + "name": "Write", + "input": { + "file_path": "/Users/dain/workspace/online-llm-tokenizer/README.md", + "content": "# Online LLM Tokenizer\n\n... [truncated]" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/Write-tool_use.jsonl b/dev-docs/messages/tools/Write-tool_use.jsonl new file mode 100644 index 00000000..1a0473b0 --- /dev/null +++ b/dev-docs/messages/tools/Write-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "78649cc5-e531-4ea9-b748-2a92d402ae89", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", "version": "2.0.5", "gitBranch": "main", "message": {"id": "msg_01NHFT4LdUekGkqFuzi2dxRx", "type": "message", "role": "assistant", "model": "claude-sonnet-4-5-20250929", "content": [{"type": "tool_use", "id": "toolu_01BM49RbbGYRjhjgHRECVjyo", "name": "Write", "input": {"file_path": "/Users/dain/workspace/online-llm-tokenizer/README.md", "content": "# Online LLM Tokenizer\n\nA pure JavaScript tokenizer running in your browser that can load `tokenizer.json` and `tokenizer_config.json` from any repository on HuggingFace. 
You can use it to count tokens and compare how different large language model vocabularies work. It's also useful for debugging prompt templates.\n\n## Features\n\n- **No server required**: Pure client-side tokenization using [transformers.js](https://huggingface.co/docs/transformers.js)\n- **Compare models**: Load multiple tokenizers simultaneously to see how different models tokenize the same text\n- **Visual token display**: Each token is displayed with its original text and token ID using colour-coded backgrounds\n- **Share configurations**: Generate shareable URLs with your text and model selection\n- **Persistent settings**: Model lists are saved in localStorage across browser sessions\n- **HuggingFace integration**: Load any tokenizer directly from HuggingFace Hub\n- **Dark mode support**: Automatic theme detection with appropriate colour schemes\n- **Offline capability**: Once models are loaded, tokenization works completely offline\n\n## Usage\n\n### Adding Models\n\nCopy model names from HuggingFace (e.g., from the title of model pages like \"microsoft/Phi-3-mini-4k-instruct\") and paste into the input field. Press Enter or click \"Add tokenizer from HuggingFace\".\n\n### Deleting Models\n\nClick the red \"\ud83d\uddd1\ufe0f Delete\" button next to any model. You'll get a confirmation prompt and cannot delete the last model.\n\n### Sharing Configurations\n\nClick the \"\ud83d\udccb Share\" button to copy a URL containing your current text and model selection. 
Share this URL with others to let them see the same tokenization.\n\n### URL Parameters\n\nYou can link directly to specific configurations using URL parameters:\n```\n?text=your%20text&models=model1,model2,model3\n```\n\n### Token Display\n\n- Each word/subword piece shows the original text above and the token number below\n- Different background colours help distinguish adjacent tokens (cycling through 10 colours)\n- Newlines are preserved in the display\n\n## Implementation Details\n\n- **Parallel model loading**: All tokenizers load simultaneously using `Promise.all()` instead of sequentially, to improve startup time\n- **Progressive rendering**: Models appear and update individually as they finish loading, providing immediate feedback\n- **Debounced input processing**: Text changes are debounced by 300ms to prevent excessive re-tokenization during typing\n- **Ruby annotations**: Tokens are displayed using HTML `` elements with text above and token numbers below\n- **Space preservation**: Automatically detects and removes tokenizer space-stripping to accurately show whitespace tokens\n- **Memory management**: Models are cached in memory and only loaded once, with cleanup on deletion\n\n## Development\n\nThe project consists of three main files:\n\n- `index.html` - Main HTML structure and UI\n- `tokenizer.css` - Styling including dark mode support\n- `tokenizer.js` - Core tokenization logic using transformers.js\n\n### Local Development\n\nSimply open `index.html` in a modern web browser. 
No build step required.\n\n### Dependencies\n\n- [transformers.js](https://huggingface.co/docs/transformers.js) - Loaded as an ES module for client-side tokenization\n\n## Browser Compatibility\n\nWorks in all modern browsers that support:\n- ES6 modules\n- Async/await\n- LocalStorage\n- Clipboard API (for share functionality)\n\n## Why So Many Xenova Models?\n\nIf you're wondering why there are so many models under Xenova, it's because they work for HuggingFace and re-upload just the tokenizers, so it's possible to load them without agreeing to model licences.\n\n## Contributing\n\nContributions are welcome! Please feel free to submit a Pull Request.\n\n## Licence\n\nSee [LICENCE](LICENCE) file for details.\n\n## Live Demo\n\nTry it at: [danieldemmel.me/tokenizer](https://www.danieldemmel.me/tokenizer.html)\n"}}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 7, "cache_creation_input_tokens": 161, "cache_read_input_tokens": 26107, "cache_creation": {"ephemeral_5m_input_tokens": 161, "ephemeral_1h_input_tokens": 0}, "output_tokens": 25, "service_tier": "standard"}}, "requestId": "req_011CTmB2GFNQnAeLccXTpWYb", "type": "assistant", "uuid": "3b742928-0e5b-4fa9-9174-89c58b692497", "timestamp": "2025-10-03T23:59:52.232Z"} diff --git a/dev-docs/messages/tools/exit_plan_mode-tool_result.json b/dev-docs/messages/tools/exit_plan_mode-tool_result.json new file mode 100644 index 00000000..c0f2666e --- /dev/null +++ b/dev-docs/messages/tools/exit_plan_mode-tool_result.json @@ -0,0 +1,20 @@ +{ + "type": "user", + "sessionId": "07047a7d-ecbf-4e09-9f96-43949ae2e4f4", + "timestamp": "2025-06-27T00:16:45.772Z", + "uuid": "a8dec12b-93b5-46b6-9c0d-0bd128e0f03d", + "parentUuid": "1dea3b4c-9292-424b-8f8e-1835078d6185", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "toolu_01XUruhhzr6TGcoFy832ESHU", + "is_error": false, + "content": "User has approved your plan. You can now start coding." 
+ } + ] + }, + "toolUseResult": {} +} \ No newline at end of file diff --git a/dev-docs/messages/tools/exit_plan_mode-tool_result.jsonl b/dev-docs/messages/tools/exit_plan_mode-tool_result.jsonl new file mode 100644 index 00000000..14e15f57 --- /dev/null +++ b/dev-docs/messages/tools/exit_plan_mode-tool_result.jsonl @@ -0,0 +1 @@ +{"parentUuid": "1dea3b4c-9292-424b-8f8e-1835078d6185", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/claude-code-log", "sessionId": "07047a7d-ecbf-4e09-9f96-43949ae2e4f4", "version": "1.0.31", "type": "user", "message": {"role": "user", "content": [{"type": "tool_result", "content": "User has approved your plan. You can now start coding.", "tool_use_id": "toolu_01XUruhhzr6TGcoFy832ESHU"}]}, "uuid": "a8dec12b-93b5-46b6-9c0d-0bd128e0f03d", "timestamp": "2025-06-27T00:16:45.772Z", "toolUseResult": {"plan": "## Clean Up Message Filtering Logic\n\n**Current Issue**: System message filtering logic is duplicated and scattered across `renderer.py` and `converter.py` with different implementations.\n\n**Solution**: Create a dedicated utility module for message filtering logic.\n\n### Changes:\n\n1. **Create `claude_code_log/utils.py`**:\n - Move `is_system_message()` from `renderer.py` \n - Add `is_command_message()` function\n - Add `should_skip_message()` function that combines all filtering logic\n - Add `should_use_as_session_starter()` function for consistent session starter selection\n\n2. **Update `renderer.py`**:\n - Import filtering functions from `utils.py`\n - Remove `is_system_message()` function\n - Use `should_skip_message()` for consistent filtering\n - Simplify message processing logic\n\n3. 
**Update `converter.py`**:\n - Import filtering functions from `utils.py` instead of `renderer.py`\n - Use `should_use_as_session_starter()` for consistent session starter selection\n - Remove duplicate filtering logic\n\n**Benefits**:\n- Single source of truth for message filtering logic\n- Better separation of concerns (utils vs rendering vs conversion)\n- Easier to maintain and test filtering logic\n- Removes circular dependency between converter and renderer"}} diff --git a/dev-docs/messages/tools/exit_plan_mode-tool_use.json b/dev-docs/messages/tools/exit_plan_mode-tool_use.json new file mode 100644 index 00000000..131492a2 --- /dev/null +++ b/dev-docs/messages/tools/exit_plan_mode-tool_use.json @@ -0,0 +1,24 @@ +{ + "type": "assistant", + "sessionId": "07047a7d-ecbf-4e09-9f96-43949ae2e4f4", + "timestamp": "2025-06-27T00:13:52.054Z", + "uuid": "1dea3b4c-9292-424b-8f8e-1835078d6185", + "parentUuid": "6d61f53f-ad87-4ed5-a047-dc52251df302", + "isSidechain": false, + "message": { + "role": "assistant", + "type": "message", + "model": "claude-sonnet-4-20250514", + "id": "msg_01SY3bH7Ty4f7xye4u9jrnUD", + "content": [ + { + "type": "tool_use", + "id": "toolu_01XUruhhzr6TGcoFy832ESHU", + "name": "exit_plan_mode", + "input": { + "plan": "## Clean Up Message Filtering Logic\n\n... 
[truncated]" + } + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/tools/exit_plan_mode-tool_use.jsonl b/dev-docs/messages/tools/exit_plan_mode-tool_use.jsonl new file mode 100644 index 00000000..c9067f50 --- /dev/null +++ b/dev-docs/messages/tools/exit_plan_mode-tool_use.jsonl @@ -0,0 +1 @@ +{"parentUuid": "6d61f53f-ad87-4ed5-a047-dc52251df302", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/claude-code-log", "sessionId": "07047a7d-ecbf-4e09-9f96-43949ae2e4f4", "version": "1.0.31", "message": {"id": "msg_01SY3bH7Ty4f7xye4u9jrnUD", "type": "message", "role": "assistant", "model": "claude-sonnet-4-20250514", "content": [{"type": "tool_use", "id": "toolu_01XUruhhzr6TGcoFy832ESHU", "name": "exit_plan_mode", "input": {"plan": "## Clean Up Message Filtering Logic\n\n**Current Issue**: System message filtering logic is duplicated and scattered across `renderer.py` and `converter.py` with different implementations.\n\n**Solution**: Create a dedicated utility module for message filtering logic.\n\n### Changes:\n\n1. **Create `claude_code_log/utils.py`**:\n - Move `is_system_message()` from `renderer.py` \n - Add `is_command_message()` function\n - Add `should_skip_message()` function that combines all filtering logic\n - Add `should_use_as_session_starter()` function for consistent session starter selection\n\n2. **Update `renderer.py`**:\n - Import filtering functions from `utils.py`\n - Remove `is_system_message()` function\n - Use `should_skip_message()` for consistent filtering\n - Simplify message processing logic\n\n3. 
**Update `converter.py`**:\n - Import filtering functions from `utils.py` instead of `renderer.py`\n - Use `should_use_as_session_starter()` for consistent session starter selection\n - Remove duplicate filtering logic\n\n**Benefits**:\n- Single source of truth for message filtering logic\n- Better separation of concerns (utils vs rendering vs conversion)\n- Easier to maintain and test filtering logic\n- Removes circular dependency between converter and renderer"}}], "stop_reason": null, "stop_sequence": null, "usage": {"input_tokens": 4, "cache_creation_input_tokens": 700, "cache_read_input_tokens": 38365, "output_tokens": 1, "service_tier": "standard"}}, "requestId": "req_011CQXm2HndacpqPf2S65ERb", "type": "assistant", "uuid": "1dea3b4c-9292-424b-8f8e-1835078d6185", "timestamp": "2025-06-27T00:13:52.054Z"} diff --git a/dev-docs/messages/user/bash_input.json b/dev-docs/messages/user/bash_input.json new file mode 100644 index 00000000..2db28fb0 --- /dev/null +++ b/dev-docs/messages/user/bash_input.json @@ -0,0 +1,12 @@ +{ + "type": "user", + "sessionId": "cbc0f75b-b36d-4efd-a7da-ac800ea30eb6", + "timestamp": "2025-07-19T14:35:08.714Z", + "uuid": "5310c7e8-5a78-49e3-b414-042a69c9c7d5", + "parentUuid": "cc67b20e-4350-4a71-bc4f-8b64f2adb806", + "isSidechain": false, + "message": { + "role": "user", + "content": " uv run pytest -m \"not (tui or browser)\" -v" + } +} \ No newline at end of file diff --git a/dev-docs/messages/user/bash_input.jsonl b/dev-docs/messages/user/bash_input.jsonl new file mode 100644 index 00000000..51dd578e --- /dev/null +++ b/dev-docs/messages/user/bash_input.jsonl @@ -0,0 +1 @@ +{"parentUuid": "cc67b20e-4350-4a71-bc4f-8b64f2adb806", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/claude-code-log", "sessionId": "cbc0f75b-b36d-4efd-a7da-ac800ea30eb6", "version": "1.0.55", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": " uv run pytest -m \"not (tui or browser)\" -v"}, "uuid": 
"5310c7e8-5a78-49e3-b414-042a69c9c7d5", "timestamp": "2025-07-19T14:35:08.714Z"} diff --git a/dev-docs/messages/user/bash_output.json b/dev-docs/messages/user/bash_output.json new file mode 100644 index 00000000..3c68c77c --- /dev/null +++ b/dev-docs/messages/user/bash_output.json @@ -0,0 +1,12 @@ +{ + "type": "user", + "sessionId": "cbc0f75b-b36d-4efd-a7da-ac800ea30eb6", + "timestamp": "2025-07-19T14:35:12.490Z", + "uuid": "50ec761b-08d2-4273-b81c-bea8f88477ce", + "parentUuid": "5310c7e8-5a78-49e3-b414-042a69c9c7d5", + "isSidechain": false, + "message": { + "role": "user", + "content": "============================= test session starts ==============================\nplatform darwin -- Python 3.12.7, pytest-8.4.0, pluggy-1.6.0 -- /Users/dain/workspace/claude-code-log/.ven... [truncated]" + } +} \ No newline at end of file diff --git a/dev-docs/messages/user/bash_output.jsonl b/dev-docs/messages/user/bash_output.jsonl new file mode 100644 index 00000000..06b25041 --- /dev/null +++ b/dev-docs/messages/user/bash_output.jsonl @@ -0,0 +1 @@ +{"parentUuid": "5310c7e8-5a78-49e3-b414-042a69c9c7d5", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/claude-code-log", "sessionId": "cbc0f75b-b36d-4efd-a7da-ac800ea30eb6", "version": "1.0.55", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": "============================= test session starts ==============================\nplatform darwin -- Python 3.12.7, pytest-8.4.0, pluggy-1.6.0 -- /Users/dain/workspace/claude-code-log/.venv/bin/python\ncachedir: .pytest_cache\nrootdir: /Users/dain/workspace/claude-code-log\nconfigfile: pyproject.toml\ntestpaths: test\nplugins: xdist-3.7.0, anyio-4.9.0, cov-6.2.1, playwright-0.7.0, base-url-2.1.0, asyncio-1.0.0\nasyncio: mode=Mode.AUTO, asyncio_default_fixture_loop_scope=function, asyncio_default_test_loop_scope=function\ncollecting ... 
collected 228 items / 48 deselected / 180 selected\n\ntest/test_cache.py::TestCacheManager::test_initialization PASSED [ 0%]\ntest/test_cache.py::TestCacheManager::test_cache_file_path PASSED [ 1%]\ntest/test_cache.py::TestCacheManager::test_save_and_load_entries PASSED [ 1%]\ntest/test_cache.py::TestCacheManager::test_timestamp_based_cache_structure PASSED [ 2%]\ntest/test_cache.py::TestCacheManager::test_cache_invalidation_file_modification PASSED [ 2%]\ntest/test_cache.py::TestCacheManager::test_cache_invalidation_version_mismatch PASSED [ 3%]\ntest/test_cache.py::TestCacheManager::test_filtered_loading_with_dates PASSED [ 3%]\ntest/test_cache.py::TestCacheManager::test_clear_cache PASSED [ 4%]\ntest/test_cache.py::TestCacheManager::test_session_cache_updates PASSED [ 5%]\ntest/test_cache.py::TestCacheManager::test_project_aggregates_update PASSED [ 5%]\ntest/test_cache.py::TestCacheManager::test_get_modified_files PASSED [ 6%]\ntest/test_cache.py::TestCacheManager::test_cache_stats PASSED [ 6%]\ntest/test_cache.py::TestLibraryVersion::test_get_library_version PASSED [ 7%]\ntest/test_cache.py::TestLibraryVersion::test_version_fallback_without_toml PASSED [ 7%]\ntest/test_cache.py::TestCacheVersionCompatibility::test_same_version_is_compatible PASSED [ 8%]\ntest/test_cache.py::TestCacheVersionCompatibility::test_no_breaking_changes_is_compatible PASSED [ 8%]\ntest/test_cache.py::TestCacheVersionCompatibility::test_patch_version_increase_is_compatible PASSED [ 9%]\ntest/test_cache.py::TestCacheVersionCompatibility::test_minor_version_increase_is_compatible PASSED [ 10%]\ntest/test_cache.py::TestCacheVersionCompatibility::test_major_version_increase_is_compatible PASSED [ 10%]\ntest/test_cache.py::TestCacheVersionCompatibility::test_version_downgrade_is_compatible PASSED [ 11%]\ntest/test_cache.py::TestCacheVersionCompatibility::test_breaking_change_exact_version_incompatible PASSED [ 
11%]\ntest/test_cache.py::TestCacheVersionCompatibility::test_breaking_change_pattern_matching PASSED [ 12%]\ntest/test_cache.py::TestCacheVersionCompatibility::test_multiple_breaking_changes PASSED [ 12%]\ntest/test_cache.py::TestCacheVersionCompatibility::test_version_parsing_edge_cases PASSED [ 13%]\ntest/test_cache.py::TestCacheVersionCompatibility::test_empty_breaking_changes_dict PASSED [ 13%]\ntest/test_cache.py::TestCacheErrorHandling::test_corrupted_cache_file PASSED [ 14%]\ntest/test_cache.py::TestCacheErrorHandling::test_missing_jsonl_file PASSED [ 15%]\ntest/test_cache.py::TestCacheErrorHandling::test_cache_directory_permissions SKIPPED [ 15%]\ntest/test_cache_integration.py::TestCacheIntegrationCLI::test_cli_no_cache_flag PASSED [ 16%]\ntest/test_cache_integration.py::TestCacheIntegrationCLI::test_cli_clear_cache_flag PASSED [ 16%]\ntest/test_cache_integration.py::TestCacheIntegrationCLI::test_cli_all_projects_caching PASSED [ 17%]\ntest/test_cache_integration.py::TestCacheIntegrationCLI::test_cli_date_filtering_with_cache PASSED [ 17%]\ntest/test_cache_integration.py::TestCacheIntegrationConverter::test_convert_jsonl_to_html_with_cache PASSED [ 18%]\ntest/test_cache_integration.py::TestCacheIntegrationConverter::test_convert_jsonl_to_html_no_cache PASSED [ 18%]\ntest/test_cache_integration.py::TestCacheIntegrationConverter::test_process_projects_hierarchy_with_cache PASSED [ 19%]\ntest/test_cache_integration.py::TestCachePerformanceIntegration::test_cache_performance_with_large_project PASSED [ 20%]\ntest/test_cache_integration.py::TestCachePerformanceIntegration::test_cache_with_date_filtering_performance PASSED [ 20%]\ntest/test_cache_integration.py::TestCacheEdgeCases::test_mixed_cached_and_uncached_files PASSED [ 21%]\ntest/test_cache_integration.py::TestCacheEdgeCases::test_cache_corruption_recovery PASSED [ 21%]\ntest/test_cache_integration.py::TestCacheEdgeCases::test_cache_with_empty_project PASSED [ 
22%]\ntest/test_cache_integration.py::TestCacheEdgeCases::test_cache_version_upgrade_scenario PASSED [ 22%]\ntest/test_combined_transcript_link.py::TestCombinedTranscriptLink::test_no_combined_link_without_cache_manager PASSED [ 23%]\ntest/test_combined_transcript_link.py::TestCombinedTranscriptLink::test_no_combined_link_with_empty_cache PASSED [ 23%]\ntest/test_combined_transcript_link.py::TestCombinedTranscriptLink::test_combined_link_with_valid_cache PASSED [ 24%]\ntest/test_combined_transcript_link.py::TestCombinedTranscriptLink::test_combined_link_exception_handling PASSED [ 25%]\ntest/test_combined_transcript_link.py::TestCombinedTranscriptLink::test_combined_link_css_styling PASSED [ 25%]\ntest/test_combined_transcript_link.py::TestCombinedTranscriptLink::test_combined_link_with_session_title PASSED [ 26%]\ntest/test_command_handling.py::test_system_message_command_handling PASSED [ 26%]\ntest/test_date_filtering.py::test_date_filtering PASSED [ 27%]\ntest/test_date_filtering.py::test_invalid_date_handling PASSED [ 27%]\ntest/test_date_filtering.py::test_end_to_end_date_filtering PASSED [ 28%]\ntest/test_date_filtering.py::test_natural_language_dates PASSED [ 28%]\ntest/test_markdown_rendering.py::test_server_side_markdown_rendering PASSED [ 29%]\ntest/test_markdown_rendering.py::test_user_message_not_markdown_rendered PASSED [ 30%]\ntest/test_message_filtering.py::test_caveat_message_filtering PASSED [ 30%]\ntest/test_message_filtering.py::test_system_message_filtering PASSED [ 31%]\ntest/test_message_types.py::test_summary_type_support PASSED [ 31%]\ntest/test_path_conversion.py::test_path_conversion PASSED [ 32%]\ntest/test_project_display_name.py::TestProjectDisplayName::test_claude_code_log_with_test_subdirectory FAILED [ 32%]\ntest/test_project_display_name.py::TestProjectDisplayName::test_platform_frontend_next_case PASSED [ 33%]\ntest/test_project_display_name.py::TestProjectDisplayName::test_multiple_nested_directories FAILED [ 
33%]\ntest/test_project_display_name.py::TestProjectDisplayName::test_only_nested_directories FAILED [ 34%]\ntest/test_project_display_name.py::TestProjectDisplayName::test_same_depth_different_lengths FAILED [ 35%]\ntest/test_project_display_name.py::TestProjectDisplayName::test_empty_working_directories PASSED [ 35%]\ntest/test_project_display_name.py::TestProjectDisplayName::test_none_working_directories PASSED [ 36%]\ntest/test_project_display_name.py::TestProjectDisplayName::test_single_working_directory PASSED [ 36%]\ntest/test_project_display_name.py::TestProjectDisplayName::test_project_dir_without_leading_dash PASSED [ 37%]\ntest/test_project_display_name.py::TestProjectDisplayName::test_working_directory_with_complex_nesting FAILED [ 37%]\ntest/test_project_display_name.py::TestProjectDisplayName::test_working_directories_same_name_different_paths PASSED [ 38%]\ntest/test_project_matching.py::TestProjectMatching::test_find_projects_by_cwd_with_cache PASSED [ 38%]\ntest/test_project_matching.py::TestProjectMatching::test_find_projects_by_cwd_subdirectory_matching PASSED [ 39%]\ntest/test_project_matching.py::TestProjectMatching::test_find_projects_by_cwd_fallback_to_name_matching PASSED [ 40%]\ntest/test_project_matching.py::TestProjectMatching::test_find_projects_by_cwd_default_current_directory PASSED [ 40%]\ntest/test_template_data.py::TestTemplateMessage::test_template_message_creation PASSED [ 41%]\ntest/test_template_data.py::TestTemplateMessage::test_template_message_display_type_capitalization PASSED [ 41%]\ntest/test_template_data.py::TestTemplateProject::test_template_project_basic PASSED [ 42%]\ntest/test_template_data.py::TestTemplateProject::test_template_project_dash_formatting PASSED [ 42%]\ntest/test_template_data.py::TestTemplateProject::test_template_project_no_leading_dash PASSED [ 43%]\ntest/test_template_data.py::TestTemplateProject::test_template_project_time_range PASSED [ 
43%]\ntest/test_template_data.py::TestTemplateProject::test_template_project_single_timestamp PASSED [ 44%]\ntest/test_template_data.py::TestTemplateProject::test_template_project_no_timestamps PASSED [ 45%]\ntest/test_template_data.py::TestTemplateSummary::test_template_summary_calculation PASSED [ 45%]\ntest/test_template_data.py::TestTemplateSummary::test_template_summary_empty_list PASSED [ 46%]\ntest/test_template_data.py::TestDataWithTestFiles::test_representative_messages_data_structure PASSED [ 46%]\ntest/test_template_data.py::TestDataWithTestFiles::test_edge_cases_data_structure PASSED [ 47%]\ntest/test_template_data.py::TestDataWithTestFiles::test_multi_session_data_structure PASSED [ 47%]\ntest/test_template_data.py::TestDataWithTestFiles::test_empty_directory_handling PASSED [ 48%]\ntest/test_template_data.py::TestDataWithTestFiles::test_projects_index_generation PASSED [ 48%]\ntest/test_template_data.py::TestDataWithTestFiles::test_projects_index_with_date_range PASSED [ 49%]\ntest/test_template_data.py::TestErrorHandling::test_malformed_message_handling PASSED [ 50%]\ntest/test_template_rendering.py::TestTemplateRendering::test_representative_messages_render PASSED [ 50%]\ntest/test_template_rendering.py::TestTemplateRendering::test_edge_cases_render PASSED [ 51%]\ntest/test_template_rendering.py::TestTemplateRendering::test_multi_session_rendering PASSED [ 51%]\ntest/test_template_rendering.py::TestTemplateRendering::test_empty_messages_handling PASSED [ 52%]\ntest/test_template_rendering.py::TestTemplateRendering::test_tool_content_rendering PASSED [ 52%]\ntest/test_template_rendering.py::TestTemplateRendering::test_timestamp_formatting PASSED [ 53%]\ntest/test_template_rendering.py::TestTemplateRendering::test_index_template_rendering PASSED [ 53%]\ntest/test_template_rendering.py::TestTemplateRendering::test_css_classes_applied PASSED [ 54%]\ntest/test_template_rendering.py::TestTemplateRendering::test_server_side_markdown_rendering PASSED [ 
55%]\ntest/test_template_rendering.py::TestTemplateRendering::test_html_escaping PASSED [ 55%]\ntest/test_template_utils.py::TestTimestampHandling::test_format_timestamp_valid_iso PASSED [ 56%]\ntest/test_template_utils.py::TestTimestampHandling::test_format_timestamp_without_milliseconds PASSED [ 56%]\ntest/test_template_utils.py::TestTimestampHandling::test_format_timestamp_invalid PASSED [ 57%]\ntest/test_template_utils.py::TestTimestampHandling::test_parse_timestamp_valid PASSED [ 57%]\ntest/test_template_utils.py::TestTimestampHandling::test_parse_timestamp_invalid PASSED [ 58%]\ntest/test_template_utils.py::TestContentExtraction::test_extract_text_content_from_list PASSED [ 58%]\ntest/test_template_utils.py::TestContentExtraction::test_extract_text_content_from_mixed_list PASSED [ 59%]\ntest/test_template_utils.py::TestContentExtraction::test_extract_text_content_from_string PASSED [ 60%]\ntest/test_template_utils.py::TestContentExtraction::test_extract_text_content_empty_list PASSED [ 60%]\ntest/test_template_utils.py::TestContentExtraction::test_extract_text_content_no_text_items PASSED [ 61%]\ntest/test_template_utils.py::TestCommandExtraction::test_extract_command_info_complete PASSED [ 61%]\ntest/test_template_utils.py::TestCommandExtraction::test_extract_command_info_missing_parts PASSED [ 62%]\ntest/test_template_utils.py::TestCommandExtraction::test_extract_command_info_no_command PASSED [ 62%]\ntest/test_template_utils.py::TestCommandExtraction::test_extract_command_info_malformed_json PASSED [ 63%]\ntest/test_template_utils.py::TestHtmlEscaping::test_escape_html_basic PASSED [ 63%]\ntest/test_template_utils.py::TestHtmlEscaping::test_escape_html_ampersand PASSED [ 64%]\ntest/test_template_utils.py::TestHtmlEscaping::test_escape_html_empty_string PASSED [ 65%]\ntest/test_template_utils.py::TestHtmlEscaping::test_escape_html_already_escaped PASSED [ 65%]\ntest/test_template_utils.py::TestEdgeCases::test_format_timestamp_none PASSED [ 
66%]\ntest/test_template_utils.py::TestEdgeCases::test_extract_text_content_none PASSED [ 66%]\ntest/test_template_utils.py::TestEdgeCases::test_extract_command_info_empty_string PASSED [ 67%]\ntest/test_template_utils.py::TestEdgeCases::test_escape_html_unicode PASSED [ 67%]\ntest/test_todowrite_rendering.py::TestTodoWriteRendering::test_format_todowrite_basic PASSED [ 68%]\ntest/test_todowrite_rendering.py::TestTodoWriteRendering::test_format_todowrite_empty PASSED [ 68%]\ntest/test_todowrite_rendering.py::TestTodoWriteRendering::test_format_todowrite_missing_todos PASSED [ 69%]\ntest/test_todowrite_rendering.py::TestTodoWriteRendering::test_format_todowrite_html_escaping PASSED [ 70%]\ntest/test_todowrite_rendering.py::TestTodoWriteRendering::test_format_todowrite_invalid_status_priority PASSED [ 70%]\ntest/test_todowrite_rendering.py::TestTodoWriteRendering::test_todowrite_integration_with_full_message PASSED [ 71%]\ntest/test_todowrite_rendering.py::TestTodoWriteRendering::test_todowrite_vs_regular_tool_use PASSED [ 71%]\ntest/test_todowrite_rendering.py::TestTodoWriteRendering::test_css_classes_inclusion PASSED [ 72%]\ntest/test_toggle_functionality.py::TestToggleFunctionality::test_toggle_button_present_in_html PASSED [ 72%]\ntest/test_toggle_functionality.py::TestToggleFunctionality::test_toggle_button_with_no_collapsible_content PASSED [ 73%]\ntest/test_toggle_functionality.py::TestToggleFunctionality::test_collapsible_details_structure PASSED [ 73%]\ntest/test_toggle_functionality.py::TestToggleFunctionality::test_collapsible_details_css_selectors PASSED [ 74%]\ntest/test_toggle_functionality.py::TestToggleFunctionality::test_toggle_button_icons_and_titles PASSED [ 75%]\ntest/test_toggle_functionality.py::TestToggleFunctionality::test_multiple_collapsible_elements PASSED [ 75%]\ntest/test_toggle_functionality.py::TestToggleFunctionality::test_thinking_content_collapsible PASSED [ 
76%]\ntest/test_toggle_functionality.py::TestToggleFunctionality::test_tool_result_collapsible PASSED [ 76%]\ntest/test_utils.py::TestSystemMessageDetection::test_is_system_message_caveat PASSED [ 77%]\ntest/test_utils.py::TestSystemMessageDetection::test_is_system_message_interrupt PASSED [ 77%]\ntest/test_utils.py::TestSystemMessageDetection::test_is_system_message_stdout PASSED [ 78%]\ntest/test_utils.py::TestSystemMessageDetection::test_is_system_message_normal_text PASSED [ 78%]\ntest/test_utils.py::TestSystemMessageDetection::test_is_system_message_partial_match PASSED [ 79%]\ntest/test_utils.py::TestSystemMessageDetection::test_is_system_message_empty_string PASSED [ 80%]\ntest/test_utils.py::TestSystemMessageDetection::test_is_system_message_whitespace_only PASSED [ 80%]\ntest/test_utils.py::TestCommandMessageDetection::test_is_command_message_complete PASSED [ 81%]\ntest/test_utils.py::TestCommandMessageDetection::test_is_command_message_with_args PASSED [ 81%]\ntest/test_utils.py::TestCommandMessageDetection::test_is_command_message_only_name PASSED [ 82%]\ntest/test_utils.py::TestCommandMessageDetection::test_is_command_message_only_message PASSED [ 82%]\ntest/test_utils.py::TestCommandMessageDetection::test_is_command_message_normal_text PASSED [ 83%]\ntest/test_utils.py::TestCommandMessageDetection::test_is_command_message_empty_string PASSED [ 83%]\ntest/test_utils.py::TestLocalCommandOutput::test_is_local_command_output_present PASSED [ 84%]\ntest/test_utils.py::TestLocalCommandOutput::test_is_local_command_output_standalone PASSED [ 85%]\ntest/test_utils.py::TestLocalCommandOutput::test_is_local_command_output_normal_text PASSED [ 85%]\ntest/test_utils.py::TestLocalCommandOutput::test_is_local_command_output_empty_string PASSED [ 86%]\ntest/test_utils.py::TestMessageSkipping::test_should_skip_system_message_without_command PASSED [ 86%]\ntest/test_utils.py::TestMessageSkipping::test_should_not_skip_system_message_with_command PASSED [ 
87%]\ntest/test_utils.py::TestMessageSkipping::test_should_not_skip_normal_message PASSED [ 87%]\ntest/test_utils.py::TestMessageSkipping::test_should_not_skip_command_only_message PASSED [ 88%]\ntest/test_utils.py::TestMessageSkipping::test_should_skip_interrupt_message PASSED [ 88%]\ntest/test_utils.py::TestSessionStarterSelection::test_should_use_normal_message_as_starter PASSED [ 89%]\ntest/test_utils.py::TestSessionStarterSelection::test_should_not_use_system_message_as_starter PASSED [ 90%]\ntest/test_utils.py::TestSessionStarterSelection::test_should_use_init_command_as_starter PASSED [ 90%]\ntest/test_utils.py::TestSessionStarterSelection::test_should_not_use_other_commands_as_starter PASSED [ 91%]\ntest/test_utils.py::TestSessionStarterSelection::test_should_not_use_git_command_as_starter PASSED [ 91%]\ntest/test_utils.py::TestSessionStarterSelection::test_should_use_message_with_command_name_in_content PASSED [ 92%]\ntest/test_utils.py::TestSessionStarterSelection::test_should_not_use_empty_string_as_starter PASSED [ 92%]\ntest/test_utils.py::TestTextContentLength::test_extract_text_content_length_string PASSED [ 93%]\ntest/test_utils.py::TestTextContentLength::test_extract_text_content_length_string_with_whitespace PASSED [ 93%]\ntest/test_utils.py::TestTextContentLength::test_extract_text_content_length_empty_string PASSED [ 94%]\ntest/test_utils.py::TestTextContentLength::test_extract_text_content_length_whitespace_only PASSED [ 95%]\ntest/test_utils.py::TestTextContentLength::test_extract_text_content_length_list_with_text PASSED [ 95%]\ntest/test_utils.py::TestTextContentLength::test_extract_text_content_length_list_with_multiple_text PASSED [ 96%]\ntest/test_utils.py::TestTextContentLength::test_extract_text_content_length_list_with_mixed_content PASSED [ 96%]\ntest/test_utils.py::TestTextContentLength::test_extract_text_content_length_list_no_text PASSED [ 97%]\ntest/test_utils.py::TestTextContentLength::test_extract_text_content_length_empty_list 
PASSED [ 97%]\ntest/test_utils.py::TestEdgeCases::test_functions_with_none_input PASSED [ 98%]\ntest/test_utils.py::TestEdgeCases::test_functions_with_non_string_input PASSED [ 98%]\ntest/test_utils.py::TestEdgeCases::test_should_skip_message_edge_cases PASSED [ 99%]\ntest/test_utils.py::TestEdgeCases::test_session_starter_edge_cases PASSED [100%]\n\n=================================== FAILURES ===================================\n______ TestProjectDisplayName.test_claude_code_log_with_test_subdirectory ______\n\nself = \n\n def test_claude_code_log_with_test_subdirectory(self):\n \"\"\"Test that project root is preferred over test subdirectories.\"\"\"\n project_dir_name = \"-Users-dain-workspace-claude-code-log\"\n working_directories = [\n \"/Users/dain/workspace/claude-code-log\",\n \"/Users/dain/workspace/claude-code-log/test/test_data\",\n ]\n \n result = get_project_display_name(project_dir_name, working_directories)\n> assert result == \"claude-code-log\"\nE AssertionError: assert 'test_data' == 'claude-code-log'\nE \nE - claude-code-log\nE + test_data\n\ntest/test_project_display_name.py:19: AssertionError\n___________ TestProjectDisplayName.test_multiple_nested_directories ____________\n\nself = \n\n def test_multiple_nested_directories(self):\n \"\"\"Test with multiple nested working directories - should pick root.\"\"\"\n project_dir_name = \"-Users-dain-workspace-myproject\"\n working_directories = [\n \"/Users/dain/workspace/myproject\",\n \"/Users/dain/workspace/myproject/src/components\",\n \"/Users/dain/workspace/myproject/test\",\n \"/Users/dain/workspace/myproject/docs/examples\",\n ]\n \n result = get_project_display_name(project_dir_name, working_directories)\n> assert result == \"myproject\"\nE AssertionError: assert 'examples' == 'myproject'\nE \nE - myproject\nE + examples\n\ntest/test_project_display_name.py:40: AssertionError\n_____________ TestProjectDisplayName.test_only_nested_directories ______________\n\nself = \n\n def 
test_only_nested_directories(self):\n \"\"\"Test when only nested directories are available.\"\"\"\n project_dir_name = \"-Users-dain-workspace-myproject\"\n working_directories = [\n \"/Users/dain/workspace/myproject/src/components\",\n \"/Users/dain/workspace/myproject/test\",\n \"/Users/dain/workspace/myproject/docs/examples\",\n ]\n \n result = get_project_display_name(project_dir_name, working_directories)\n # Should pick the shortest path (least nested)\n> assert result in [\"src\", \"test\", \"docs\"] # Any of the first-level subdirs\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nE AssertionError: assert 'examples' in ['src', 'test', 'docs']\n\ntest/test_project_display_name.py:53: AssertionError\n___________ TestProjectDisplayName.test_same_depth_different_lengths ___________\n\nself = \n\n def test_same_depth_different_lengths(self):\n \"\"\"Test paths with same depth but different lengths.\"\"\"\n project_dir_name = \"-Users-dain-workspace-myproject\"\n working_directories = [\n \"/Users/dain/workspace/myproject/short\",\n \"/Users/dain/workspace/myproject/very-long-directory-name\",\n ]\n \n result = get_project_display_name(project_dir_name, working_directories)\n # Should pick the shorter path when depth is the same\n> assert result == \"short\"\nE AssertionError: assert 'very-long-directory-name' == 'short'\nE \nE - short\nE + very-long-directory-name\n\ntest/test_project_display_name.py:65: AssertionError\n______ TestProjectDisplayName.test_working_directory_with_complex_nesting ______\n\nself = \n\n def test_working_directory_with_complex_nesting(self):\n \"\"\"Test with deeply nested and complex directory structures.\"\"\"\n project_dir_name = \"-Users-dain-workspace-complex-project\"\n working_directories = [\n \"/Users/dain/workspace/complex-project\",\n \"/Users/dain/workspace/complex-project/backend/api/v1\",\n \"/Users/dain/workspace/complex-project/frontend/src/components/ui\",\n \"/Users/dain/workspace/complex-project/test/integration/api\",\n 
]\n \n result = get_project_display_name(project_dir_name, working_directories)\n> assert result == \"complex-project\"\nE AssertionError: assert 'api' == 'complex-project'\nE \nE - complex-project\nE + api\n\ntest/test_project_display_name.py:112: AssertionError\n=========================== short test summary info ============================\nFAILED test/test_project_display_name.py::TestProjectDisplayName::test_claude_code_log_with_test_subdirectory\nFAILED test/test_project_display_name.py::TestProjectDisplayName::test_multiple_nested_directories\nFAILED test/test_project_display_name.py::TestProjectDisplayName::test_only_nested_directories\nFAILED test/test_project_display_name.py::TestProjectDisplayName::test_same_depth_different_lengths\nFAILED test/test_project_display_name.py::TestProjectDisplayName::test_working_directory_with_complex_nesting\n=========== 5 failed, 174 passed, 1 skipped, 48 deselected in 3.30s ============\n"}, "uuid": "50ec761b-08d2-4273-b81c-bea8f88477ce", "timestamp": "2025-07-19T14:35:12.490Z"} diff --git a/dev-docs/messages/user/command_output.json b/dev-docs/messages/user/command_output.json new file mode 100644 index 00000000..b89f381b --- /dev/null +++ b/dev-docs/messages/user/command_output.json @@ -0,0 +1,15 @@ +{ + "type": "user", + "message": { + "role": "user", + "content": "Set model to \u001b[1mopus (claude-opus-4-5-20251101)\u001b[22m" + }, + "parentUuid": "200652a8-ed8f-40ca-9239-5a661fa2c9be", + "isSidechain": false, + "userType": "external", + "cwd": "/src/deep-manifest", + "sessionId": "a7da6a22-facc-4fcd-8bab-f83c87862004", + "version": "2.0.55", + "uuid": "f880c35d-8afe-4cfb-82bf-37c39f423457", + "timestamp": "2025-11-29T15:17:28.972Z" +} \ No newline at end of file diff --git a/dev-docs/messages/user/command_output.jsonl b/dev-docs/messages/user/command_output.jsonl new file mode 100644 index 00000000..ddaf3699 --- /dev/null +++ b/dev-docs/messages/user/command_output.jsonl @@ -0,0 +1 @@ +{"type": "user", "message": 
{"role": "user", "content": "Set model to \u001b[1mopus (claude-opus-4-5-20251101)\u001b[22m"}, "parentUuid": "200652a8-ed8f-40ca-9239-5a661fa2c9be", "isSidechain": false, "userType": "external", "cwd": "/src/deep-manifest", "sessionId": "a7da6a22-facc-4fcd-8bab-f83c87862004", "version": "2.0.55", "uuid": "f880c35d-8afe-4cfb-82bf-37c39f423457", "timestamp": "2025-11-29T15:17:28.972Z"} diff --git a/dev-docs/messages/user/image.json b/dev-docs/messages/user/image.json new file mode 100644 index 00000000..d22eb756 --- /dev/null +++ b/dev-docs/messages/user/image.json @@ -0,0 +1,25 @@ +{ + "type": "user", + "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", + "timestamp": "2025-10-04T12:32:34.402Z", + "uuid": "924fbd38-7ef9-4907-91fd-ade65d44ff0b", + "parentUuid": "9c9252a8-1c2c-45d4-8065-0acda205cb91", + "isSidechain": false, + "message": { + "role": "user", + "content": [ + { + "type": "image", + "source": { + "type": "base64", + "media_type": "image/png", + "data": "iVBORw0KGgoAAAANSUhEUgAAAAgAAAAIAQMAAAD+wSzIAAAABlBMVEX///+/v7+jQ3Y5AAAADklEQVQI12P4AIX8EAgALgAD/aNpbtEAAAAASUVORK5CYII [abbreviated]" + } + }, + { + "type": "text", + "text": "Do you think we could set up rewrites for the JS and CSS? 
This basePath method does the job, but we end up with two failed requests for so it impacts page load times" + } + ] + } +} \ No newline at end of file diff --git a/dev-docs/messages/user/image.jsonl b/dev-docs/messages/user/image.jsonl new file mode 100644 index 00000000..5d1a063c --- /dev/null +++ b/dev-docs/messages/user/image.jsonl @@ -0,0 +1 @@ +{"parentUuid": "9c9252a8-1c2c-45d4-8065-0acda205cb91", "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "9e953218-585f-4692-89df-9e0747a31c68", "version": "2.0.5", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": [{"type": "image", "source": {"type": "base64", "media_type": "image/png", "data": "iVBORw0KGgoAAAANSUhEUgAAA+oAAAJeCAYAAAAjsv8FAAAMTGlDQ1BJQ0MgUHJvZmlsZQAASImVVwdYU8kWnltSIQQIREBK6E0QkBJASggtgPQiiEpIAoQSY0JQsaOLCq5dRLCiqyAuuroCstiwK4ti74sFFWVdLNiVNyGALvvK9+b75s5//znzzznnztx7BwB6B18qzUU1AciT5MtiQwJY45NTWKQuQAUooICRwJkvkEs50dERAJbB9u/l7TWAKNvLDkqtf/b/16IlFMkFACDREKcL5YI8iH8FAG8WSGX5ABClkDefli9V4jUQ68iggxBXK3GmCjcrcboKX+y3iY/lQvwIALI6ny/LBECjB/KsAkEm1KHDaIGTRCiWQOwPsW9e3hQhxPMgtoE2cE66Up+d/p1O5t8004c0+fzMIayKpb+QA8VyaS5/xv+Zjv9d8nIVg3NYw6qeJQuNVcYM8/YoZ0q4EqtD/F6SHhkFsTYAKC4W9tsrMTNLEZqgskdtBHIuzBlgQjxWnhvHG+BjhfzAcIgNIc6Q5EZGDNgUZYiDlTYwf2i5OJ8XD7EexNUieVDcgM1R2ZTYwXmvZci4nAH+KV/W74NS/6siJ4Gj0se0s0S8AX3MsTArPgliKsSBBeLESIg1II6U58SFD9ikFmZxIwdtZIpYZSwWEMtEkpAAlT5WliELjh2w35UnH4wdO5ol5kUO4Ev5WfGhqlxhjwT8fv9hLFiPSMJJGNQRycdHDMYiFAUGqWLHySJJQpyKx/Wk+QGxqrG4nTQ3esAeDxDlhih5M4jj5QVxg2ML8uHiVOnjxdL86HiVn3hFNj8sWuUPvhdEAC4IBCyggDUdTAHZQNzW3dAN71Q9wYAPZCATiIDDADM4Iqm/RwKvcaAQ/AmRCMiHxgX094pAAeS/DGOVnHiIU10dQMZAn1IlBzyGOA+Eg1x4r+hXkgx5kAgeQUb8D4/4sApgDLmwKvv/PT/IfmM4kIkYYBSDM7Log5bEIGIgMZQYTLTFDXBf3BuPgFd/WF1wNu45GMc3e8JjQjvhAeEqoYNwc7K4SDbMy3GgA+oHD+Qn/fv84FZQ0w0PwH2gOlTGmbgBcMBd4Twc3A/O7AZZ7oDfyqywhmn/LYLvntCAHcWJglJGUPwpNsNHathpuA2pKHP9fX5UvqYP5Zs71DN8fu532RfCNny4JbYY24+dxo5hZ7FmrAGwsCNYI9aKHVLioRX3qH/FDc4W2+9PDtQZvma+PVllJuVOtU5d
Tp9Vffmi6fnKzcidIp0hE2dm5bM48IshYvEkAsdRLBcnFzcAlN8f1evtdUz/dwVhtn7jFvwBgM+Rvr6+375xYUcA+MUDvhIOfuNs2PDTogbAmYMChaxAxeHKCwG+Oehw9+kDY2AObGA8LsAdeAN/EATCQBSIB8lgEvQ+C65zGZgGZoH5oBiUghVgLagAm8E2UA1+BvtAA2gGx8ApcB5cBFfBbbh6OsFz0APegk8IgpAQGsJA9BETxBKxR1wQNuKLBCERSCySjKQhmYgEUSCzkAVIKbIKqUC2IjXIL8hB5BhyFmlHbiL3kS7kFfIRxVB1VAc1Qq3Q0Sgb5aDhaDw6Ec1Ep6KF6EJ0GVqOVqG70Xr0GHoevYp2oM/RXgxgahgTM8UcMDbGxaKwFCwDk2FzsBKsDKvC6rAm+JwvYx1YN/YBJ+IMnIU7wBUciifgAnwqPgdfilfg1Xg9fgK/jN/He/CvBBrBkGBP8CLwCOMJmYRphGJCGWEH4QDhJNxLnYS3RCKRSbQmesC9mEzMJs4kLiVuJO4hHiW2Ex8Se0kkkj7JnuRDiiLxSfmkYtJ60m7SEdIlUifpPVmNbEJ2IQeTU8gSchG5jLyLfJh8ifyE/ImiSbGkeFGiKELKDMpyynZKE+UCpZPyiapFtab6UOOp2dT51HJqHfUk9Q71tZqampmap1qMmlhtnlq52l61M2r31T6oa6vbqXPVU9UV6svUd6ofVb+p/ppGo1nR/GkptHzaMloN7TjtHu29BkPDUYOnIdSYq1GpUa9xSeMFnUK3pHPok+iF9DL6fvoFercmRdNKk6vJ15yjWal5UPO6Zq8WQ8tZK0orT2up1i6ts1pPtUnaVtpB2kLthdrbtI9rP2RgDHMGlyFgLGBsZ5xkdOoQdax1eDrZOqU6P+u06fToauu66ibqTtet1D2k28HEmFZMHjOXuZy5j3mN+XGE0QjOCNGIJSPqRlwa8U5vpJ6/nkivRG+P3lW9j/os/SD9HP2V+g36dw1wAzuDGINpBpsMThp0j9QZ6T1SMLJk5L6RtwxRQzvDWMOZhtsMWw17jYyNQoykRuuNjht1GzON/Y2zjdcYHzbuMmGY+JqITdaYHDF5xtJlcVi5rHLWCVaPqaFpqKnCdKtpm+knM2uzBLMisz1md82p5mzzDPM15i3mPRYmFuMsZlnUWtyypFiyLbMs11metnxnZW2VZLXIqsHqqbWeNc+60LrW+o4NzcbPZqpNlc0VW6It2zbHdqPtRTvUzs0uy67S7oI9au9uL7bfaN8+ijDKc5RkVNWo6w7qDhyHAodah/uOTMcIxyLHBscXoy1Gp4xeOfr06K9Obk65TtudbjtrO4c5Fzk3Ob9ysXMRuFS6XBlDGxM8Zu6YxjEvXe1dRa6bXG+4MdzGuS1ya3H74u7hLnOvc+/ysPBI89jgcZ2tw45mL2Wf8SR4BnjO9Wz2/ODl7pXvtc/rL28H7xzvXd5Px1qPFY3dPvahj5kP32erT4cvyzfNd4tvh5+pH9+vyu+Bv7m/0H+H/xOOLSebs5vzIsApQBZwIOAd14s7m3s0EAsMCSwJbAvSDkoIqgi6F2wWnBlcG9wT4hYyM+RoKCE0PHRl6HWeEU/Aq+H1hHmEzQ47Ea4eHhdeEf4gwi5CFtE0Dh0XNm71uDuRlpGSyIYoEMWLWh11N9o6emr0bzHEmOiYypjHsc6xs2JPxzHiJsftinsbHxC/PP52gk2CIqElkZ6YmliT+C4pMGlVUsf40eNnjz+fbJAsTm5MIaUkpuxI6Z0QNGHthM5Ut9Ti1GsTrSdOn3h2ksGk3EmHJtMn8yfvTyOkJaXtSvvMj+JX8XvTeekb0nsEXME6wXOhv3CNsEvkI1olepLhk7Eq42mmT+bqzK4sv6yyrG4xV1whfpkdmr05+11OVM7OnL7cpNw9eeS8tLyDEm1JjuTEFOMp06e0S+2lxdKOqV5T107tkYXLdsgR+UR5Y74O/NFvVdgoflDcL/At
qCx4Py1x2v7pWtMl01tn2M1YMuNJYXDhTzPxmYKZLbNMZ82fdX82Z/bWOcic9Dktc83nLpzbOS9kXvV86vyc+b8XORWtKnqzIGlB00KjhfMWPvwh5IfaYo1iWfH1Rd6LNi/GF4sXty0Zs2T9kq8lwpJzpU6lZaWflwqWnvvR+cfyH/uWZSxrW+6+fNMK4grJimsr/VZWr9JaVbjq4epxq+vXsNaUrHmzdvLas2WuZZvXUdcp1nWUR5Q3rrdYv2L954qsiquVAZV7NhhuWLLh3Ubhxkub/DfVbTbaXLr54xbxlhtbQ7bWV1lVlW0jbivY9nh74vbTP7F/qtlhsKN0x5edkp0d1bHVJ2o8amp2Ge5aXovWKmq7dqfuvvhz4M+NdQ51W/cw95TuBXsVe5/9kvbLtX3h+1r2s/fX/Wr564YDjAMl9Uj9jPqehqyGjsbkxvaDYQdbmrybDvzm+NvOZtPmykO6h5Yfph5eeLjvSOGR3qPSo93HMo89bJnccvv4+ONXTsScaDsZfvLMqeBTx09zTh8543Om+azX2YPn2Ocazrufr291az3wu9vvB9rc2+oveFxovOh5sal9bPvhS36Xjl0OvHzqCu/K+auRV9uvJVy7cT31escN4Y2nN3NvvrxVcOvT7Xl3CHdK7mreLbtneK/qD9s/9nS4dxy6H3i/9UHcg9sPBQ+fP5I/+ty58DHtcdkTkyc1T12eNncFd118NuFZ53Pp80/dxX9q/bnhhc2LX//y/6u1Z3xP50vZy75XS1/rv975xvVNS2907723eW8/vSt5r/+++gP7w+mPSR+ffJr2mfS5/Ivtl6av4V/v9OX19Un5Mn7/rwAGlEebDABe7QSAlgwAA54bqRNU58P+gqjOtP0I/CesOkP2F3cA6uA/fUw3/Lu5DsDe7QBYQX16KgDRNADiPQE6ZsxQHTzL9Z87lYUIzwZbor6k56WDf1NUZ9Lv/B7eAqWqKxje/guiVYMUNsdUbQAAAIplWElmTU0AKgAAAAgABAEaAAUAAAABAAAAPgEbAAUAAAABAAAARgEoAAMAAAABAAIAAIdpAAQAAAABAAAATgAAAAAAAACQAAAAAQAAAJAAAAABAAOShgAHAAAAEgAAAHigAgAEAAAAAQAAA+qgAwAEAAAAAQAAAl4AAAAAQVNDSUkAAABTY3JlZW5zaG90blv+1wAAAAlwSFlzAAAWJQAAFiUBSVIk8AAAAddpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDYuMC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iPgogICAgICAgICA8ZXhpZjpQaXhlbFlEaW1lbnNpb24+NjA2PC9leGlmOlBpeGVsWURpbWVuc2lvbj4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjEwMDI8L2V4aWY6UGl4ZWxYRGltZW5zaW9uPgogICAgICAgICA8ZXhpZjpVc2VyQ29tbWVudD5TY3JlZW5zaG90PC9leGlmOlVzZXJDb21tZW50PgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KWquxqwAAABxpRE9UAAAAAgAAAAAAAAEvAAAAKAAAAS8AAAEvAAEVTqEqrVMAAEAASURBVHgB7F0HnBS19w8dC4qKBQtw9oI/e0FFxQKIDQUF6b1IL4qKgDSl9yJI771XqSKKgmL9
W1EpKk1FpCro/fN9w5vNzs2Wmdvb211ePp+7nckkmeSbN0leXkmOrVu3pysdiha9BD8SBAFBQBAQBAQBQUAQEAQEAUFAEBAEBAFBIJsQSE9PVzmYUc+mOshrBQFBQBAQBAQBQUAQEAQEAUFAEBAEBAFB4AQCuXLlEkZdqEEQEAQEAUFAEBAEBAFBQBAQBAQBQUAQSBQEcuXKGWDURfU9UbpF6iEICAKCgCAgCAgCgoAgIAgIAoKAIHCyIvDff/8Jo36ydr60WxAQBAQBQUAQEAQEAUFAEBAEBAFBIDERsG3URaKemB0ktRIEBAFBQBAQBAQBQUAQEAQEAUFAEDi5EBBG/eTqb2mtICAICAKCgCAgCAgCgoAgIAgIAoJAgiMgjHqCd5BUTxAQBAQBQUAQEAQEAUFAEBAEBAFB4ORBQGzUT56+lpYKAoKAICAICAKCgCAgCAgCgoAgIAgkCQIiUU+SjpJqCgKCgCAgCAgCgoAgIAgIAoKAICAInBwICKN+cvSztFIQEAQEAUFAEBAEBAFBQBAQBAQBQSBJEBBGPUk6SqopCAgCgoAgIAgIAoKAICAICAKCgCCQ+gikp6fH5xz1f/9LV8eOK3VcH9yeTCF3zpwqT26lcuXMkUzVlroKAoKAICAICAKCgCAgCAgCgoAgIAgkMQJZLlEHk37kn+Ri0J39eUrenMKsO0GRe0FAEBAEBAFBQBAQBAQBQUAQEAQEgSxBIMsZ9aP/pCedJN2JNCTr+fOKVN2Ji9wLAoKAICAICAKCgCAgCAgCgoAgIAjEFoG4HM928Oi/sa11NpV2ev5c2fRmea0gIAgIAoKAICAICAKCgCAgCAgCgsDJhECWS9SFUT+ZyEnaKggIAoKAICAICAKCgCAgCAgCgoAgkBkE4uJMThj1zHSR5BUEBAFBQBAQBAQBQUAQEAQEAUFAEDiZEBBG3UNvi+q7B7AkqSAgCAgCgoAgIAgIAoKAICAICAKCgC8ExEbdA2zCqHsAS5IKAoKAICAICAKCgCAgCAgCgoAgIAj4RkBs1KOEThj1KIGSZIKAICAICAKCgCAgCAgCgoAgIAgIAplCQBj1KOETRj1KoCSZICAICAKCgCAgCAgCgoAgIAgIAoJAphAQRj1K+IRRjxIoSSYICAKCgCAgCAgCgoAgIAgIAoKAIJApBFKOUf9oyxH18Q9H1PVF86u7rj7VBmfBxgNq575j6uEbCqhLL8hjx0d7IYx6tEgldroDBw6oPHnyqPz583uu6J9/7ldnnFFA5cyZ01PegwcPqnz58tF7PWVM8MRHjx5VcHRx6qmB7yzBqyzVEwQEAUEgYRHYvXu3+uCDjeqcc85W99xzt13P1avXqG+//V5VqvSMOvvss+x4ubAQSKY59vjx4+qvvw6oM888Q+XKlStTXYj5d9GiJQqeocuVK5tya4xMgSOZBYEUQCBpnMkdPPqfqt77Z/Xzz3+r7o0Lq9I3nh4S/s7T9qhFa/arkrcXUP3qXmCnK9dxq9q7+5h6udb56ukSZ9jx0V7EilHv3Lmb+uOPP1TTps+rK664POTr3333PTVr1mxVvHhxVb9+HUr36aefqbFjx4fM43xQtGhR1bp1C4retOkj1b//IHXuuYVUixbN1KWXpjmTZ7hfunS5WrZsudq79zc1bNhgVbDgmZTm99//UF26dFM5cuRUffr0ULlz586QN5EiPvvsczV16gwFDA4fPkxVu+qqK9UNN/xPVahQXhUpUiRkdRcuXKzeeWed+uSTzygvmNLixa+lRVSFCk+FZNrRV3PmzFN49549e6n8a665Wt18802qVq0a6vTTTwv5zux8sHPnLtWjRy+9gMit+vXrlaEqWEiOGzdRffnl/6ktW36g50WKXKKuv764ql69qipaNDSWGQqLccS///6rXnmlg8IGwnXXXasaNKgX8g3Tps1QGzZ8oE477TTVrVtnux8z833iZVOmTFcffvhhhvdic+jqq69WoIFbbrnJ10ZR
hkIzEcHfcKQiHnvsUfXwww9GSibPDQSYBsqWLaMeeaSM8UQusxsB0H2zZi1pHoyWtr//fosaMmQYzYNt27aiMTyr2vHee++rtm1fUv/73/VqxIih9JrffvtdPf74U3Rdo0Y11bhxg6x6fUzLxfyHuRPzbJ06tTKUfeTIUfXyy69qJvM/Vbt2LXXjjf/LkAZjNMbqc845R3Xs2D7oeTLNsaC7+fMXqCVLlqlffvnVbsdFF12oHn20nCpf/nF11lneN2COHTum7r3XGp+XLl2g12gF7bLlQhAQBFIDgaSQqE9+5081YIrF7Fx2aX41rd0lIdFPdEa9XLkn1b59+9TgwQPUrbfeHLIds2bNVX379ieGsHfvNygddtXbt+8UMo/zAZiCMWNGUjSY7i5dutN1xYpPqzZtWjqTB91jF+fZZ6vYk8q8ebPU+eefR2kw0VSsWJmu33lnpcqbN29Q3kS5wS4zmMqRI0eFrBIY7y5dOqm77y4RlObvv/9WvXv3U4sXLw2KN28g8ejY8RVVoEABM1pvsFh9FxRp3GBy7t27h0pLK2bEJsYlmO/q1WtTZTZsWBdUKSyM2rRpZ292BD08cdOzZ3e9cCjp9igucQsWLFJvvGFtMGCjoUSJOzO89+uvv9ELR2ux+9prHVSZMg/baTLzfaKQ7t17agnHYrs8twsswvv27ak3a0JvOLrli2Wc+Q2HK7dRo/qqZs3q4ZJkybPRo8eqtWvX6Y20p/Qi9okseUdWFco0UK9ebVW3rvUtZdW7pFxvCJh0jw3GadMm6Q3nHGEL6dGjt2ayFlIazBVZuXHlxqhDAlu5cjWai7t376weeKBU2PomysPly1eo117rSgzo4sXzMuD84YebVMuWbai60BRo2bJZhqr36tVXzZ07X5Uu/ZDq3Lmj/TyZ5lhsardo0SbsvAkmHUKPa6+9xm4jX4Am3nzzLdrkfeWVdhxNv8KoB8EhN4JASiKQFIz6U123kzSde2BJrzR17pnuUtxUZtShtr19+w6GgX4//HCjeuutMQrMnzmR4SGY0LS0YvpKKZNRx/3KlUtJmohrtwDpc/Pmre1Hycioz5u3QPXs2YfaAEn2448/qu644zZSQ//uu+/VgAGDtTrhd/TcydTNmjVHM1MD6BkkGOXKlVWFChWiTZZVq9bYz8DEgJnhgHJr1qxLt1hc1KtXh/oGjP/mzZ9qRq4HlWFuonDeRPgNxahDWl269KO02IA2AiRL0AiBGcCPP/6kpe8D1eeff0FNePvtxRk2L+LVNmzOYDMBkhgsfmbMmBzEEP/zzz+0EYHvCAteLHzNECtGHbSGDTEO27ZtV6AbSJgQihe/Tn+3w/lx3H9NhmX48MEhVSaxOQe6j3fo1u0N2iSDNpGbNC7e9fHyPmHUvaAV37Qm3ePNw4YNUjfddGPISmDOxbjHITsYdbwbzPqhQ4dJXZrrkui/v/32m55zrTFwxowp6pJLLg6q8uDBQ0kDCZFYv8yaNS3oOW4gEECfvfrqy1ry/Ag9T6Y59ueff1HPPPMc1RvrserVq+g1yO3q8ssvU2jHxo0fqUmTpthM/Jw5M1ThwgFNUGSEsADjITQOhg8fQmXxv2PHRKLOWMivIJCKCGBNq7Zu3Z6Ov6wKB44cT8/M31fbj6RfXeNr+ivV7gf67TFnT8gy2479ldLUH/JzUJqSL2yh+PGr/wiKj7ZuscLnkUeeSL/zzpLpmzZ9HLbImTPnUDqtBhc23YoVqyhd7dr1w6bTKleUDu/Gn96lDpu+Xbv2Qel37dptp9eTj/1MM6B2fCJdaK2F9AceKEP1fO656ulHjhzJUD3UvVWrFyhN06Ytgp4//3xzitfS+KB4vgF+wBH9qRdRHJ0+Zsx4iq9Xr5EdZ1788MOP9Bx5t27dZj5KiGut5mnXz6zQJ598asfrxav5iK41A0xYoF3z5y/M8DyeEXv37rX7Xm+MBL162LAR1A702759fwY9w01mv89u3XpQ+aNGjclQ
NiI2btxk47hnzx7XNPGIjNU3bNK+l3pHyte16+uEk5asRyw2UlkRC4hxgkg0YL4uXN3DPTPLwLWXtEivNabS9eYbLj0Fr3n8vsdTpTwkNukeY1WHDq+Fzc3zMNLi7+23V4ZNn9l+WL/+PXpPgwbPh31PqIde3x+qnFjFY+4FbtqOOkORWmOPnjG2miEPSoNxnJ/t2rXLfpZMc2zz5q3sNnzxxZd2G8wLralmp2nT5kXzEV0DO+DQqFGTDM8w7zJGWPNw8EoHXtPjPRgLvI4HXD/5FQQEgegQwDeW8BL1PvP2qulL/1TXXX2KevzOM1SPcbtVwbPzqBVvFHPdPEllibpbg1euXK30YoPUoljN3S2dU6KellZM72ZPcEtKNtVPPlkh6FmySdQ1s6htrXtTG9x287lxWwxVb+CRllaMHpUocS/9hlLlhh30ypWrKE3p0g/b6v/t2r2i1q1br1UVn9Xqbk3pufMfTBhgK3/LLbdk2D13po33vYmHqfo+ffosrYEwiPAJRTefa4n69u3bVbFixUhiHO+6m+9bsWKlNkvoQlEDBvQlTYr/+7+vtIZDI4rr06dnBnMHPIiVRD2c2jO/o2vX19RDDz1A9Yn3P1Oy6NV8BfT9wQcfamnQJpJ23X77rdqM5xZVtmxpbft7blBToNmgN0dIavnMMxXIxwacZelFJdHSs89WVE888ZjtI2DZsrfV5MlT1a+/7qRvBFKoCy8sTGVCAyF/fssJpF7EkyYRfEBAO+K8884lvwRQNb/sskuD6hDvm1AS9bZt26ndu/eQ5pNmyJTeZCU/D1DBhu0xJIaQQo4cOVqtX/++rXlTpkxp7USsYoZm/PXXX2r06HFaU+cTKgdYwR4YmjzoC7eA/liz5h27fJhhgFbz5MlLplbQloH00gzQQnn77RVaS2Uj9fvpp5+mv527VMmSd7ualiAvpIDI8+WXX1E/QoPk3nvvUVWqVM60Ay2zbl6vTbrnvAsXztVaI+fwrf2rl1Gkcm5qsblJ1NFny5ev1Nh8oD76aDNJS++6qwSpyENy6hag9r1mzVqaK/AtoB+gOXL8+LEMNurI3759Jxpb27d/Sfu6uCqoSNASVMEhoUVZeCfwrl+/brY7nsO3P3HiZKJtk67g66R8+WeoHTA7e++9Ddq/SDvSeuPGsakfvo/p0ydztEqWOdaUpjvbZjfmxAXU+6HmjzBv3kxtZni++umnrXoO66xg345+RWB6evHFNuQXxpSoL1w4h8wEgCU0BdPSitG43LBhPVftSS90a47jGFvGj59ItIsxZ9WqZaiaBEFAEMgiBBKeUb+39Q/qyKH/VAftGO6hG05X9zXfQlCMbX+JKl4ko+duYdTdKYUZdSzitDSfBv6RI4fRYO/MMWbMOFoEY+GIRR2YymRj1LEoxoSFxWHPnq87mxh0v2PHz0rvWpGDITgXQ2jSpAUtgLGIeP31rjYjHpTR5Ubv9mvsRhPjMHLkcNuu3yVpQkaFYtS1RF1pLQOq89ChA7PUoVKsgMHiFos9qMBPmDBG92lzYuqeeupJhYWOWyhXzr8PCZQXikkz34VNMDgYNNU5zefxuDYZFi+MejjbUCyoR4wYZjudRDuWL7fsVLHABGOJdjuDqd4ernwsCLEwBI02bNiExiWUhfdu18w6B96Y4ft4/4aiAe53Zkyc9WrevImaPXuu7RfEfA7no1WrVraj4PSxbt2G9gLefnDiAo4Ua9euERTNm7pBkSdusPjGJonpxAyPwKzCtwmeuQU3xhW+PeBIDAEbKMeOHbfrCROkwYP72xszbmVmZRzTPcaEO++8nUzCGjduqDdKqmZ47ebNn9A8gDYUL16cxhJnew9o1Xhs/pn0xwWBVkePHqE3LotyFP1iXsL85BYefLAUmcg4+6FKlRrEuDnH3jfffIuYJreyUG84pLvgggvcHscljk3ogPeSJfPtd7I6N8yP7rrrTlLtdpoiwZxq5szZ6rnnKmkzvCZ23mSZY3lz
GxVfv35N2A0qk+HG3IQ56ju98cJmdHbjT1wMGtRP3XbbrfrbCqi+Y62zbt16Z1JSme/fv4+9yYkEXul2+XJrHMdYe+DAQft7FkY9A9wSIQjEFAHMwQmt+r7q8wOkrn5tra/T9/51jFTWK/XcRnEtRv3iqsKeLKrvUHE7fPhIyD9tt0QqTbFWfYdK6fjxk6hs7eFa00Bw0AO/rf4LVS1WH0821fcKFSpRG0eOHB3cwCjv9ELTVimDit6UKdNIVV0z9GFL0DvZdj5gN3Tom+nAUUulwuZLlIehVN9hOvDEE0/bbQMdvf/+hnQ9aSdK1TPUY59h/sDqgaCLQ4cOZUjLEaz67vf7jKT2bOKrNRD4tXH/NVWAozVfeffd9Xb/69Mn0vfu/Y1UrkHfrMYKkw+MIRw0g2fnQRqYfuAb0g6W0rXHZ/uZk47Cqb5ryRPlg9mKPpaJXoU+1Y6rKD6U2QnXKat/Q9GA+f3AFAljAtoN9WumT/xqTQXCFc8YI9CtGfr06U95gKmWvBGmWlpP4w2XpbV+7CzmuKQZ73S9OUl5QI/AkfM4Va5hesDP8L2DVmAyYsZ/9dXX9ntQBzM9HkD9XWs+2PFoX3YFpnt85/j+UFf0i5vqb/v2Hek55mIt2aRrU/UdeVi1uVq1WunAGLT9888/p7/44st22XqDym6uZugpHu9F38L8CXmQl82t8MzZD6xC/vHHm+2yMCcz1pMnT6NvATQFMyWmNXyn2Rkwb3Ad9SaJXRXGE2rdMAFCGsyXZj/wmAK6M4NJy8iTqHMsf6PRjkegIeAwcOAQs7lkNoD4SKrvSAP6BOZa4ygdNIE4/GHs5uCHbs1xHLSltaL02HWA1q9crvwKAoJA7BHA/JnQEvXWo3epd/X557fdeJoa1vhC2qVYtvmA6jBil8qdJ4daP+hylStn8OZFskjUg2sd+g6exdnru1sqlpJEck7GEnVIyRs1amAf9+I80gPOrl566VUFFchx40apBx8sm5QSdVZdz4yXXNYsMHHHDvI999xFxy7dpne03c5BhVqwXvya2egaKsKQXMErOkvuMyTK5ogthimAqfqOau3UUjy9WMggFYW0FJ7TH374oYTTIICEoZ02R+AQSouEn5c7IVHn+0i/zu+TpakwfahWrYqdfefOndqR3LukEqw3EEiFEVL+HDly2GniecGSRbyzVKn7XCWckIKZp0MAR+AJR3lO78NwlgfP1AgYNzB+ILAkBtdOR0maudaq/5aDqLFj3wpS6e3WLbQzuUqVqpIEs1OnV4NUvHHc06effqrbkotMHfDO7AhMA07zB5aoV6lSSR8RFpAQQrX1scfKU1XhpLFCBesoLkSY6rOmk0bNZJJk68orLw8yN9CLdO0k0ToSDtJUSGYRRo0aq6W7Y0klGg7UcFwgB/QDTkGAVNiU5OoFgv6my9H472YqAkk75hWzPdDAekUfkQhp7vz5s/kV9Ks3BWjsSEsrZpszBCWIww3TPUt4WVLtbJ95JBokwQMGDNaq/CvphBD2+v7dCYkn5gSYA/GpKGgGaLFWrbqEKeZvjBMIUAOHOnhaWjHdHyPVKacEtAIxLlStWouklWY/IB/Xc+jQgDYT0n/11TckKcWRj2aYMGGSdjw2UmHOGTiwn/ko7tctWrQmp2l8wobemLAdk0Jdu1ChQuTgE3MPzFuKF7+OMMBYjMCaNGbFk2GOxToK66loTtlB215/vadauHAxma6YjoEXR+lMrkmTRkFzDsrs1KkL0a05FvmhW3Mcnzt3RrZqaaBdEgSBkwqBRHUm99uBY+mQpMOR3IKNf9rS89+N+NkbAvHsFC5ZJOrY5cRucKg/3gnNCok69nx4Rxu7rmZgCYGeMCga9UNdkk2izvhBMpqZAAnIKO0YjCUUXC5+IeWAVNEtQOo0e/bc9NrayZ+ZB9fAFNKcRAymxNetfpCo6cV4OujS2S7ca1VFt2zZFmdK8lC/P/74I2xdWKLO/eT1++x2wpmcGzYc
B0kRJMvZGViyyHVy+wUWZuCxABJ0t8CSWX1Env2YJTFOiTAnYEnh+vXvcxT9hpOoswQa9YMEiaXqQQVk4w3TAMYNM/AY8v77wRJCpGFsIS00A6St3Dc7d+40H2W4PqY1Gfbv32+XpTeG7DT6eCgqZ9asOXacecESclOSa0p/UQ9nWLv2HSrTlPT9+ONPdn27a0eOGE8gEUiUwHTPtK1PBqH6Op14aRtcimetM54vTYk65kj0DaTnbqF//0H0HNhyaG9I6TnO/IU0FWWa/YDn/J18bEjUzXx8DawPa009dh6LfNkdoI2GNvXo0Zuq8pXWwMA9JMgc8K0gbvTocRQF2sU9tAxChUSfY3mOHDJkeKgmBMXrU2aoza+80jEoPlpncsDVGSZNmkplgn45+KFbHse1Kj4XI7+CgCAQBwQwpiesRH3+xr9Ut9G7adPkpVrnq9y5Avsnr4/drfRmv7rhulPVqOYXBR7oq2SRqPs5Rz2ooSduVnp0JgeJOmxjP/30c9W4cVOSfMydO5MkanAEVqlSNbIDXbRoHu32J6tEnaVXzz/fUO/WZ7Q/dMMyUhycrwA3OCqDZBEBx8rAWR2OKQsVDh48pL7++mvKoxfKdrJwTu7sRHG+gFQj1DnqzqpoxkBBSgbpBmyLIeFBcNpxOvPF6/6Ili6iLZCicYD0GD4HQgVIcdAOv98nS1MhZStkOKiCtBIBkrXXX+8SJNEMVZesjGfJIt7Rr18vcibmfF++fHltHxawLcc3hbBixZKgI+84HySFkBiaPgBYElOixJ30Hk7Lv7A1h3S4jz5DGA7KOHQLI1GH9L5x42Y2vSEPNIrQt6VLQ6vjfC4mW36ZBkwpFirCYxLslp3nJfM4i3O9ixYtElTvEiccWzolWfjuMP5rr9FBNM6Z4ZvjXm23isDlDxkyUDuxDJa+4vnateuUVsUOkqizMy88jxTee28tjYF63aK/nWFq6tTpdhZ8C6gH+ub2229z1UKyE2fxBdM9S9ThfwXYIOB4MIznelOCjhXDOMAaOCyZNMe2Pn36k0+BSFU2tW6YBpwSfC5j8QnpaTQSdeQ5cOAA2XHDFvy777bYfhu4vLS00E5jOU1W/2KOqFHDOqYUGLO03/w+vvzy/7Tzu8YkTYdUfdCgoURD0c7fiTjH6o0JrVWyMGqtBh4LnRJ4pgk/x7PpjSg6oha+D7p1s44i9UO3PI7DvwZoV4IgIAjED4GEZdSr9t6hvttyNCISqwZcps44JcAkCaPuDpmp+g5GHQuqqlVrkoMadr40ZMhw8rhsOm/hBV6yOZPDJgSYaiwOTTUyd3S8x06bNkOrFFpnmk6cONb2xhqppK+//obUTJHOqeYaKW88nnth1M36aKmmevbZqsQ8OZ0Cmeniea0lFHrxPYecyQFrLc2i17MKpltdysWIUTcXoXgP1FCxQAXTAq++BQoUcHt93OKYYcELo3Em98svv2gVzueofmvWvB3kmIgrDSeKcPTEm4GIj7TA48WpF0Yd5cIx3apVa8izOL5zM3Tv3kWrf99vRsX1Oh6M+ltvjdFYj7Pbdbk2PznnnHPUGWcUIG/yeGAy6tiwwrdtMpp2Zn3BTvxMBpEZBKTD2c/OAJV5MFjYhIF6t2kGpLUuSC0eGwBgeDmA2cBCP7tMf5jumVFHvbTkW2+2ztKOu6pr05762gnp++R5HZhibEdwY9S11od2kLaMnrvhAwwu0WeH40SEpk0bUzoeX9gZGEUa//i0CrMf8LiKizM5mEXAQSY20RAwtsAU4owzziDv89iESEvLfkZdS4RsE4oFC2aTJ3N8s6zmjrpjc6S0Pq8edYaJh5akE72OGTOSNuGQJtqQKHMsO71Dv0B9P1wARnfffT8lAa1UrWqNtYjg7zBWjLofuo00jlPF5Z8gIAjEHAHwagnpTO7H3UdJ5R1q7/e02ZLedOQvQX9NRvxiPx+6ZK+tFg/192RRfc+uc9ShUsqBzwKH6t4R7YCE1S+3bQuc
781xyab6zqqLUHGEU6ZQ4R/tfAfOaAYNGpIOp0wI2j6RHKXhfNNQAeqFUM3DH6tDoqz3tVor/o4ZDrWcZUCdD/l69uzjfJTt91q6abfLrAywQLuATaigFyaUN5Sac6h8WRGvJUx2O1BvBC1JoLgHHihDDozc3gt6Qd/4/T5DqT3DoRTei7JBb9kdWAUY9YnGmZxeSNv1d6pnc1vYgRbOnubAKpNO1WJ+DhVf1MFpooJxCvGjDbVhzuP8xdgFcwzTzATfYnaFUDTAqu/6mMAMVWPa2KqdizkDcMDfzp2W6jtU/TkO3xxoywzslMpUfcdYgzzaI7uZ1L5u1649PTdVrtlpF+qWmQAVetA811lvWGWmuEzlZbpn1XcUBjMU1A3txLfgZsLhpvqumXvKByeG0QZWh0Zet6C1UjL0A9K5qb6PHDmK0mK8RV/hG+WA7wltSgTVd9QJJgSoD685gLXpOA5p4OQQabQUmH7d0iTTHIvvHO3BH+bVcAEOFjntd999H5Q0WtV3vSEWlA83jLfepLaf+aHbSOO4XbhcCAKCQEwRwLiekIx6r7l7iBG/oeG3QUw426HjF17fwciXavdDUBph1N1phG3WTEYdDCxPDlgQ4xo26mbAZIn4ZGPUsdnAbXvllY4h7SS1xJXSoZ1gvhF4gepst4kLbNO5fN7YwCKC8Vq1arWZPOiavdmatrxBCbL4ZteuXVRP1FU7sgp6m5bUUbvA9JiBF/KhFvpIyzZ2Xhau5jtida3VQe2TC7prO1kOWgJo+xrAYlxLMfiR/ZtVjDpeoNWBbZrRkj37ndlxwQwLaDgaRh11ZDtn0IgzYAOHad9clEZa4EVi1PURTUGvwgYYFrJuts/sPRpt2rLlh6B88bzJakZdn9Vt05Hp2R1t/OOPP+xnJqOuNarseOemiHnChcmoYzzjMe6bb77NACE2CNDXJmOBeQL3znEFmXmzAONxdgWme5NRR11485TnQWs+OGxX041Rx+Yl8EFZbn4Sdu7cSfiY3zqXD7t+J6OKDSeeG8x+QCXcGHWUgfezXbddWX0xYMBgepYojDrTH4+vYMqdYcWKVTaeaBd8UThDssyxqDcW2NhEYRoJ5c+G52Okc+svZtRBG85gfqPRMup+6DbSOO6sl9wLAoJA7BBISEb9zpZbiAl/YeyvQUy4yaiv/+qgLVX/ctsRO50w6u7E4caoIyVLGTFJ4E97KQ0qgBffycaooxHsmAftGjJkuD425xe7bVgksaMVPHcyHyyVwtEqTsdfmFh5kQQpmRm0faY9MTsXxFiI8dFSeCcWctkRsIDgfkX/M8MKjQJeWMARkhlYQoN6T9YOCMEwcUB+3rnHc0zq2RnAnKMe6BunNoXJ5EBy4wy8kIy1RB3vAVPFUlW3RaizLll5zwwLcIqWUWdJF/KsWrXGluDBqRMzOqAf0DmHSAu8UIw6NFzwHpS3XztI44BNGMTjb7w+ZtIMzAyAtrGAza6Q1Yw6jl5iDFibB23988/95ISMn5mMOr55MMj8DOPaG2/0IodeiGO6dzKIkMTxc1PajzrwGGg6qmJGFOWZ2jdg6rH5h7JCSZPj0V9M96ifGbCxytjgF4yuGdwY9cN6Y5e/Z2xioY0ctPq1jak5znz1leVIDe/AnMTfChh9ljrjmbMf3Bh1bJoiLeYqLgfv5+8Az9wYP65jPH/NTTTUC0y5M4B+8Yz/wKC6hWSYY7nepmYXvrnNmz+xxyaMu5hn4KCN2wznp86Ao9X4OTYozYBxjp9Fy6j7odtI47hZJ7kWBASB2CKQcDbqX+04qmp2sxwvTehQRF1zcb6QOv/3t/1RHTrwr6rwcEH1UsVzKZ3YqLvDpSdvbZ/YPch+FCm3GM7DYLeH41JMW8NINurub7NiGzduqJ3IxMaRW7j3hHqGI3L0QlNt2PCBnaRIkUtUwYIFyYEVR953372qQ4eXg+wm9Vm0ZKcImzkEOBkqUqSI0kw62fVz3n79epGNJt/DXlCr
+RKuiAOml12Wpo4e/ZvsOTld/fp1tK16Lb6N+++UKdO106eh9N7z9FFKqKfWJKB72NTBidJll11q10tvbBD96AWWnebSS9O0rXI+OiKIcYLjpC5dOmpHhKfYeeN58d57G3S/taNXDh7cn+xDne9n23XEz5o1VfdtwCFluSyyUec66AWP9pnQjW5NG01+Hq9fttXF+6KxUed6wS8D/DMggE5wJJXe4LHvcTQb7HI5LF++QmkNC21/6e6EKJSN+urVa22fAigrLS1wnNWwE07rEA/avf764kqf321/Xw0a1FO1a9fA42wJ8bBRb9asJdkho4EYm2CXrJlD6pN8+fKRXbhpo450eqNIde36hoKTODPAj0epUvdncCaHNBhDYQeNshGcYwXGDdAx6oCgGXh9TF9126nZzTffRP4YNMNCcUg/YcJoOpKLMsT5H9M96oFj1zjojUft7K+ibU8/depEVaxYUX7saqOOh3DAWrt2A7u9aWnFyN6anUfCrhjHo+XNmxfJKcyePVfb6ffnW/JvgnkYAbbpWkshyKkf4qu42Khv3vyJ7psWeEwB79q2bQe1AfMc6pCWlv026lbtAm3A/bJli9SZZ57Bj+xfHg8QMXeu+zFgyTLHcqOmT5+lj/cbxLf0W1wfQQf/DmZo166tKl/+CTOKrjG2lS9f0Y7Ht9axY3uiEdDtvfc+SM+cR+0iUm8SZXAmh3ivdBtpHEeZEgQBQSBrEEg4Rr3bjD1q/qr96syzcquVPdLCtrrHrL1q9oo/1WkFcqm1fSymouv0PWrB6v3qvjsKqD51LrDzP9Zpm9q96x/Vvs75qvwdGScIO2GIi9Pz5wrxxFs0e30dOjRwHqpbCTyZw1suFlyhAnvmhddjOF4JFZhBeOyxR/UC2GJkOC1Pjg0b1lO1agUvcJlRhxOYc8+1NkN26vO0n376Wc4e8je7GXVUDAwmPBBPnTrDXoRxhbHorF69mj63uLzreda7d++ms003bvyIs9i/mGhxlnRaWjE7ji+wIIbnY2DODCw/wyTbrNnzCpsD2Rm0FFyNHTuezlc26wFMevToHtKBD+hSq1pmwBJMG85Trl27ZlgP+Oa7Yn0N3J9+uhLVzek513wX+kRLmsgJk/Oc4cx+n3wWbqiNGOCuJWDE3N56683kXd6sW7yuzW943bpVUXuhR/0nT56q1q9/397sAtNTosQd+sSIZ7QzqyuCmoCNHS2NJG/3cDjmDOz00ekFW+9H67Opp5GTLyzMEVasWKq9zZ9GjjAXLVqiaXeM7UgLz9PSiik48YMzw+wMoWigYsXK5J3dzUEWj7PTp0+iDUGz/iVKWGOF6dATtK61c8hhG6fF2fVY7GtJrQITZ57fzWnwqyV5mkn4SjPuR7SH+aLq4osv0v35nnrhhZczMIhIv2/fPjVu3EQ63YEZUMSXK1dW411HFS4cmGcRD8aiX78B9skYiMP4cM89d6nWrVu6MmhIE4/AdO9k1PFurQ1A46Hbd4nNNYznXbu+ph566IGgqmITQx9LqfHZaI+LPB5WqVLZddMSzvuWL3/bZtaQHvMlsMRGo9OZHDsDHD58sLrxxhvs93/44Sbdz32DvP7XqFFNb1DerJo3b02bAOwQz86UTRdDh76pJk2akqFtZnXwHOmw0TB9+mTzUdB1MsyxZoWxUQXnj07Hl0iDOQjzptmvZl5cv/vue+SMlJn7fv0sAQHWNyVLWvTotvmhzeuU1pxxdarrhW4jjePO+sq9ICAIxAYBrLkSjlGPTdNiX0qsGPXY10xKjAYB7DxjkQmJD6Qbl156qZasnxlNVmL2kReLPDCykLTkyZMnYl4wG8izY8fPdNQd3glGI5HCX9p79rZt25W23yYJOm/GRKojFu8//riVmKZixYpkm4QsUj3ledYhgA0PrYquv4nzXDe6YvFmTFJgLHPkyOHqaR51+OOPfXQUXv78+WPxyqQqQ6u0k5bPaaedRppCoSqPMQiaD6eckl/ddtut
GZKxF/lnnqmgmemAlNaZEFijPzAOmppXznS4R99pu1y9cZfD3uR1S5cqcRjvtYkYzQ3nnHN2VN/E/v1/0YYJxt1wR3xGwgjl4OQN9Es0c1Ok8pLleTLMsSaW2HSEVh7GzQL65I8LLyzsae7UJg66uBz0HZvlZubaD91m5n2SVxAQBLwhIIx6lHgJox4lUJJMEBAEBAFBIKEQ+O677/XxY3WpTs2bN9HnhD+qNw1PJ0nshg0fqr59+9Mz5zF5CdUIqYwgIAgIAoKAIHCSISCMepQdLox6lEBJMkFAEBAEBIGEQwA+SuCrhAPUraGRwOGpp54kaXru3Lk5Sn4FAUFAEBAEBAFBIBsREEY9SvCFUY8SKEkmCAgCgoAgkJAIwM56/vwF6rvvttjO3a677lrtM6OkeuyxcglZZ6mUICAICAKCgCBwMiIA0xRh1KPseWHUowRKkgkCgoAgIAgkPALw6A6bdQmCgCAgCAgCgoAgkJgICKMeZb8Iox4lUJJMEBAEBAFBQBAQBAQBQUAQEAQEAUEgUwgIox4lfMKoRwmUJBMEBAFBQBAQBAQBQUAQEAQEAUFAEPCNQFyOZ7um1jdKpfuuY2JkzKHU1+OuToy6SC0EAUFAEBAEBAFBQBAQBAQBQUAQEARSGoEsl6hX67tdffx5wLNsMqJ5y/9OVZPaFEnGqkudBQFBQBAQBAQBQUAQEAQEAUFAEBAEkgiBuDiT+3jLYVWt2/bklaprafqkV4uoWy4/NYm6VqoqCAgCgoAgIAgIAoKAICAICAKCgCCQjAjEhVEHMGDW+8//TX38hZasJ4savGbQb7n+VNXqyULCpCcjdUudBQFBQBAQBAQBQUAQEAQEAUFAEEhCBOJio56EuEiVBQFBQBAQBAQBQUAQEAQEAUFAEBAEBIFsQyDLbdSzrWXyYkFAEBAEBAFBQBAQBAQBQUAQEAQEAUEgCREQRj0JO02qLAgIAoKAICAICAKCgCAgCAgCgoAgkLoICKOeun0rLRMEBAFBQBAQBAQBQUAQEAQEAUFAEEhCBIRRT8JOkyoLAoKAICAICAKCgCAgCAgCgoAgIAikJgLiTC41+1VaJQgIAoKAICAICAKCgCAgCAgCgoAgkMQIiEQ9iTtPqi4ICAKCgCAgCAgCgoAgIAgIAoKAIJB6CAijnnp9Ki0SBAQBQUAQEAQEAUFAEBAEBAFBQBBIUgTS09OVzagnaRuk2oKAICAICAKCgCAgCAgCgoAgIAgIAoJAyiCQI0cOYdRTpjelIYKAICAICAKCgCAgCAgCgoAgIAgIAkmPQK5cuQKMeuHCFyR9g2LRgJ07d1ExgodSgkUsKCq1yhCaSK3+lNYIAoJAfBCQsTM+OCfTW4QmrN4SHJKJauNTV6EJC+cg1XdhTGXAcH5+8qE4EZF7oQmhAUFAEBAEvCMgY6d3zFI9h9CE1cOCQ6pTuvf2CU0EMLNt1IVRlwEjQBaChRMLuReaEBoQBAQBQcAvArLw9Itc6uYTmrD6VnBIXRr32zKhCQu5nDlziuq7k4iEOAKICBYBLOTKQkBoQihBEBAEBAHvCMjY6R2zVM8hNGH1sOCQ6pTuvX1CEwHMRKIewIKuhDgCgAgWASzkykJAaEIoQRAQBAQB7wjI2Okds1TPITRh9bDgkOqU7r19QhMWZkFe30X1XQYM56ckH4oTEbkXmhAaEAQEAUHAOwIydnrHLNVzCE1YPSw4pDqle2+f0EQAM5GoB7CgKyGOACCCRQALubIQEJoQShAEBAFBwDsCMnZ6xyzVcwhNWD0sOKQ6pXtvn9CEhZl4fXehHSGOACiCRQALubIQEJoQShAEBAFBwDsCMnZ6xyzVcwhNWD0sOKQ6pXtvn9CEhZmovrvQjhBHABTBIoCFXFkICE0IJQgCgoAg4B0BGTu9Y5bqOYQmrB4WHFKd0r23T2jCwkwYdRfaEeIIgCJYBLCQKwsBoQmhBEFAEBAEvCMgY6d3zFI9h9CE1cOCQ6pTuvf2
CU1YmMnxbC60I8QRAEWwCGAhVxYCQhNCCYKAICAIeEdAxk7vmKV6DqEJq4cFh1SndO/tE5qwMBMbdRfaEeIIgCJYBLCQKwsBoQmhBEFAEBAEvCMgY6d3zFI9h9CE1cOCQ6pTuvf2CU1YmInquwvtCHEEQBEsAljIlYWA0IRQgiAgCAgC3hGQsdM7ZqmeQ2jC6mHBIdUp3Xv7hCYszHLkUEqOZ3PQjxBHABDBIoCFXFkICE0IJQgCgoAg4B0BGTu9Y5bqOYQmrB4WHFKd0r23T2jCwkwk6i60I8QRAEWwCGAhVxYCQhNCCYKAICAIeEdAxk7vmKV6DqEJq4cFh1SndO/tE5qwMBMbdRfaEeIIgCJYBLCQKwsBoQmhBEFAEBAEvCMgY6d3zFI9h9CE1cOCQ6pTuvf2CU1YmIlE3YV2hDgCoAgWASzkykJAaEIoQRAQBAQB7wjI2Okds1TPITRh9bDgkOqU7r19QhMWZjFl1A8ePKjy5cun8uTJ471HEiiHEEegMwSLABbhrtaufUd9//0WVbHi0+qss84KlzTpn8WLJr755lu1atVqVbr0w+qKKy63cXv//Q/UH3/8rm699RZ1wQUX2PFyIQgIAoJAIiMQr7EzFAZ+5qljx46pWbPmqFy5cqkKFZ6i31DlS7x3BLKbJrzXOGtyCA6RccW3+PnnX6gffvhR/f3336pcubLqnHPOiZwxSVPEiib+/fdfdejQIXXGGWeERcLP+Bi2wBg9zLTq+2effa7mz1+giedLtXfvXqrW1VdfpW666UZVvXpVddppp8WoqvErxi9xvPjiy/TxXHvtNaphw/oRK4wPrVOnLmrPnj36g3uEmLyImeKcwA8WH3+8WU2ZMi1DTfPnz6/OO+9clZaWpu6//96IH02GAhI04vfff1dPP/0s1a5atSqqfv26CVrT2FTLD0388ccfqnv3HhErgO/gwQdLncCyptqx42eF8WTEiGF23mbNWtJk1a1bF1Wy5N12vFwIAoKAIJDICPgZO6dPn6k2btykypR5mDYt/bbP7zy1bt27qkOH1+i1vXv3ULfffpvfKkg+FwS80gTW3BMmTHIpyT2qSJEiqkWLpu4PEyjWKw6oeq9efdTu3XuibkWDBnXVVVddFXX6REr4zz//qI4dO6sNGz6wqzV06CBVvPh19n2qXfihCcbg6NG/NR8yVW3e/In64osvKfqsswpqvIqrRx99RJUocScnpV+/42NQIVl0kzNnTv9e3+fOna8GDBgUsmoXXlhYvfFGd1WsWNGgNOPGTVAY/J966kn1+OOPBT1LhBu/xHHffQ/a1Z89e7oqVKiQfe92sWLFKtWt2+v06MknH1etW7d0S5atcX6wePvtFVExZXXq1KLNHBBhMofjx4/rdtRSv/66U3Xu3FFvQtyXzM2JWHc/NAFsnnuuWsSyscmBzQ4ETMKLFy/VmyBPBS00hFGPCKMkEAQEgQREwM/Y2bNnH7VkyVJVu3ZNVatWDd+tCjdPhVuTbdu2XdWoUZveO3XqJIV1nd8Q7j1+y0z2fF5pAlI/CHiiDc6N7mjzxTudVxxQP6wpsLaINiTzRtP69e+p9u07UlOxRrrxxhvUtddeq4Whp0bb/KRL54cm0Mg9e/ZqrDqo7777PmSba9asTuMp8x/hxseQhcTxga/j2aDmW69eQ6rmQw89SJMIBnBIiT/99DPVs2dvtW/fnxmkYcjQo0cvtXTpcgVGDWAlWvBLHOXLV6A2oz3RtK1x42bqq6++ouanIqOODZp27V440b3patu2Heqzzz5TH3zwoY0TaKdDh1dOpEneH3zkhw8fThktgXA94ef7MBn1QYP6hzSPOe+88/QGV0CVC7ucTtUuYdTD9Y48EwQEgURFwM/YGStGHZiEmqcircmOHDlCkJ5yyimZgjbSezJVeJJm9koTBw4cIE0zs7mbNn2kxowZR5soHTq0Nx+pU089NYOwLChBgtx4xQHVZhVwswmvvPIqrS+ff76R
uv764uYjVbRokaTU8kUjsMk1dux4dc89d2tBWPQbNUEAJNmNH5pAE199tZN69931RPvgQW699WZ9fZrWvtitpk6dprXAFxISffr0VLfddquNSqjx0U6QjRe+GHWo3owePZZ2dIYPH5yh+j/9tFXvVlgqwBMnjlVQv+GQ6IO1X+IwGXUMjgsXzlW5c+fmZgf9btnyg6pbt4Edl4qMOgbJIUMG2m3ki2PHjtNu14cfbqSo9u1f1ip9D/Fj11/YmMBGzkv477//FDlhyJEjQzav5XlNn+GFRkQsyzKKjduln+/DZNRXrFiq8ubN67u+0TLqyY6zb4AkoyAgCCQkAn7GTi+Mut8xLxZrsmje7fU90ZTp1tFYcIdae7mlz844PzThrO/q1Wu1Nl9XV8GYM22i3scCB7TtuRNS9l693lB33HF7VM0FnZF6sctaEQX4oUOveSKlHzhwsJozZ54WkNYhTdRwDYNNM/5YWhwurfks0neDNTWC13LNd3i59kMTUHkvU6YcvaZfv97qlltuzvDK117rqtasWaviISj02xdmpYmX2bp1ezoiCxeO3jETVDCgivHssxVVkyaNzTLta6joQMp48803kdOnt99eqaZNm64APuLBzPI7hw0bovLnz2fnzc4LP8SB+pqMOu7D2dD27z9IzZs3H8konEyMOhqMHfqWLdsoOAy75JKL1aRJ4y0gjP+gL9jmYbcYjB6ch+GjA1PvNCsA0z9ixFvqhhtuUI89Vo5suJAXATZ1TZs2Vueee67ChIYdNajEwF4F6eFPwE2d7/33NyiYd3z//fe0Q3vZZZfSxlTdurUyOIyDKtqOHTtIg+Cqq67Ea5VZJzjhGT9+om7LJioL2gYVKjyt6/qoHvQybiRQAQn6z8/34YdRnzZthoIZhdNEJhyjDk0fjDuw48JuO8Ye9H/ZsqUz9FmCwivVEgQEgRRFwM/YGYpRf+ml9uTfBhppH3/8CY17sMXEugpSokaNGmSY15zzVDRrMjgJbt68FfXIW2+9GbRh/tdfB0jS9+mnn9J4i3djgx4LYHPzPZr3cJd/++136p131tG8j3n6yiuvoPbcd9+92r7Ymls5LXyftG3bjurUq1cPvQYYaWvsTZgwliSonDZRf/3QhLMtkRh1+If54Ycf1DPPVFCPPFLWmV0tWrSYmEDYO2eXCWYscEDDIjHqWIeBvipXhl+hHFqgtoh83jiZNi/rPz/fIur65Zf/p3miGfr3S3tdiDVpvXq1be3MBQsWEa8AfJx8E1T5TY1DrHvwt27du1q7+R9a+2AswPrH3LiK5rsx16/4lidNmkI8H77xpUstiTTakJXBD01gzKhfvxFVa8GCOerMM8/MUMVff/2VNL9PP/10de+9Je3nzvERD5he7EQuF5UrVwoa75Ak2r5wKS5DlG+v76g8VG3A/AwbNpichGUo3RERzqYdHQ8CSITghzhQb2bUn3uukmYGpxNj2bdvrwxNgvfBcuWeoHhsdMyYMUudbIw6Gg+nc61bW6rx48ePCVLPCkcrYOzhRMP8ANneH8w0Fg9791qODRl85MEENXLkKI6yf0HDWICAcefw1lujaWDie/MX6YcOHajOP/98O7pmzTpq69ZtauDAfmQ7hAeR6oQ0dRLU/AN1CxX8fB9+GPWhQ4fTt+HEKBSjDiadzXGcdcdiDyr3mVXddJYr94KAICAIRIuAn7EzFKNesWJlmuegCotNbWfAemratElB86Rzngo3z/Ka7M8//9TrkwpU/MqVy7XZkqUluGvXLr0Z0IQYDOe7cV+3bm1t2275JYnmPcjz9dffUJm4dgtvvjlUXXPN1fYjqLI++6zl0wSbsnAcxcG5puD4RPv1QxPONkRi1LEeffPNkSE1YMHYgMEBk461aHaEWOCAekdi1Hv37kcbE//73/XEoHNb4cS2Y8dX6dbr+s/Pt8jrQ7wQ32rBgmfa9vZYi44aNYIEUmCQUR+3MHPmNJv3wmYL2uYWnPxFNN8N
1w9rZ2zWwZQZIdEZdVOi7lw7umFjxjnHRzxjTQYznfO6RYum5EuJ4730BecJ9+ubUTcXxei48uWfIA/MV1xxpT2Qh3qxV/WnUOVkVbzfAYMZdXxg2IHG7tfEieO02v8lQVXFDlnfvv1JLefee++hj8v5IQVlyMYbP1iwM7lQqu/cHHz4wAzB1D7ATubLL1sDJtR8cARFwYIF1bfffqtef70n2WfBicbgwf3tXUIeVFAW3gs7LXiYx8QNexX0BQJ2TZs1e17vVp6pPvnkE3ujAAM0BmoEOKJ45pnKdA07J0i9oaoNfwJdu75OiyPUC6cacHD7wM06YbDr2vU1vcNfTGsRfEO7qNjVRViyZEFS2U35oYmsZtR/++13zaQ3oMkEx7lhgD7//PNIG+K117rSBFiy5D2azjpzl8mvICAICAJxRcDP2BmJUUcDoNUIT8ZQkVy3br02ORtGcx6cz8EJHQe3eQrPwq3JQjHqvIDF3Na9e1etGXeJ+v3330gyy6e+vP32Ejqyl98f7j3Apk6d+lRvOGTFGI610/btO0goBE0prDXHjn3LPpbTZDjwjpdeeoGk76efXkC/Ny+ZvvG7E/XXD0042xKJUYdn9GeffY6yzZgxleZGLgPSxeees3xFhZJActqs/I0FDqhftIw60kI1HnRWrFgx3JJWr5/1HzPqKCPab5HraTk1q0malb/99htpmu7Y8TP570LdOPD3Vr9+wOEuPzOFXnh/2bJlaM26ceNG+8SGFi2aaWayPGWJ5rsx168QTrVr15Y2ySCZxwlO8Qh+aQJrPqi2I9yqNXHLlClN2pWm/yN66PgXanx0JKNb3nyENi54Pj7hzGtfuJXtjIOpgS8bdRQE1eIXXnjJWSYBA1WJe+65x9UjYbjBOkNh2RDhlziYUZ8yZYJWVVlA0kBI16GCZgYmBnjEx3nQ2AU7GRl1YMIDXJs2rdQTT1gnALBZBRYeL77Y1oROT9rbNYNcm+IgBYekFMEcVObPn02MPT3Q/wYNGqpmz55Dk/yiRfOCVPeYFs1+wgYCmGkMRjhm0Ay8u4mP39SW4D51k6gj//Tpk+3FBe4PHTqsNyCsneuRI4dnUOlDmkQNfr4Pk1GHCqObjRN2kTGZcPAiUYftFiYybNL07983aLPQpJmFC+fpTZoC/Ar5FQQEAUEgbgj4GTsjMepu8yQ7nrrrrhL65J1udvvc5ik85HkQjIHTwW8oRh1q9pC0XXHF5UGmaDBrK1vWmsvhowZjModw78FZ7YMHD9VMU1Gtwj48yBQSUrKGDRuT1lqzZk3so2xNhqNjx/Z6s/0BflXS/PqhCWfjIjHqSN+mzYvqo48+phNUcJIKh+nTZ2qt2De1oC17N7JjgQPaxAxwKBt1lqhjg2ns2NFBawXk97P+43VstN8iVM+feuoZvE4LbSbbJsC4hzboFu3D6uyzz9LrwqsQRSEco87O06pUqZzhaOilS5fp77u3grbpmDFvUVnRfDfmmnrGjClBGqQnqpTlP35pAk7NcaQk+8HiimLDoVSp+7Sa+sM0bnE8/4YaH/k5/2Lsa9q0Bd2OGzdaHzldjK7xz2tf2BnDXGSKUUe5+/fvJ9uoJUuWkb2x+S7sfsKGAnYvZgg3WJvpsuvaL3GYjPrx4//SkSbAYO7cWfakwx0MggHzho/oZGbU+cNo2bI52SKjzx955HHaVYeTQkjOnQHn1eMDBBOPgRGBBxUw7mDgzQBfAPAJABVBp7fMyZOnkjo8pOYvvNDazBZ0DYcQ+PjXadsf2HuucAMgAABAAElEQVRhMQHVOg7cDjdGHTtuONbGGThPjx7dM5zp6EybSPd+vg+TUQ/VFjDq8+bNth97YdTRJ9DkMBdxdkH6grEeMKBvhs0XM51cCwKCgCCQVQj4GTsjMeqdOnVQDzxwf1CV4fEYC0bnfMjjoDlPIWO4NVkoRj3ohSdujtPpJ0dUpUpVaA7HfIt5l0O493Tv/oYew1eSYAMb587A6tsQ
AsEBLYLJcGATvkCB5NuE9UMTTmyiYdR5jYQ1lekAmtXenX3lfEdW38cCB9QxWkbdFM5Ealuk9R8z6tF+iygP5q/Q9MRGFjbIoIoPaXWoEI5RZ97DFF5xOabm6vLlS4gXiea7YXpxjiFcbjx+M0MTwBg+AFasWEn8AWvVcr2hXfvyyy8GYR5qfOQ8+DW1N92OY/baF2bZoa7RFt8SdWehsL2GczDYS0HCxWHy5Anq4osv4tuwk4KdKBsv/BIHdxAk6hdddJFq1aotqV5jUsHkgsAMBRyYYfeLbRlORom6aUvCBI/dRAx6CIsXz1enn346XZv/Rox4S0G1zsSMB5USJe7U9NXdTK6PYlig+vUb6OrhkXeTnYw6jkABDWMHGrubzo/cC6MO9Srs7joDduSwcQPNirvuutP5OGHv/XwfJqMOLPLkyZOhfTAvMDf1vDDqPDlnKNQRAbUw+IWQIAgIAoJAvBHwM3ZGYtSHDx+iN7SvCWoKq1+aUjQkCLUQDcdAh2PUN27cpB20rtHHrn5u29eaFXEyf+Hew2N4qI1rOGeC0y5z49tkON55Z5X56qS59kMTzsZFw6ibmg6s/s5q7xAoQe3dbV52viur7mOBA+rGdBRJog6zxkqVLKm2s01e13/MqHv5FiH0gdTXDFgrlip1v3ZwBm3k08xHtq20U/UdjGOFCnCMFzmMHDmMpPTRfDfh1tSR3xSbFLGiif/+S1c///yz5sc2a75iqX2+uslDoMahxkduDU6satWqDa3bQTugITP46Qszf6hr3zbqoQrkeDDsDRs+T7etWjXXtshP8qOThlHnD/Hqq6/SqlzDSKUGzDzCvHmzyAv1ycyof/XV16px46aEB0vPeeJAJO/+UQLjH5wYjh8/UUvey+iJ+0V6Em5Q8cqo//LLL1o9rI3tkA6T2OWXX0ZeOGHzDqbdC6PutnmASp+sjHq0x7N5YdR5kwy4wsu7M2BBifiHH37I3jRzppF7QUAQEASyEgE/C89IjLrTwRrqHw9GnedhxgubAvBAXaDA6WrVqjUU7YVR5zE81JFK3CZT8yoahoPrl6i/fmjC2ZZoGHXkYVpq0cJyfgVHxphncQJN8+ZNnMXG9T4WOKDCmWXU/az/mFH38i2irngX1q6wp4YzYg6g8QED+tE6k+NCSdRRRpUqNSjZrdokE2rSzsDrH5jh4juN5rsJt6Z2lp9V97GiCbN+OAYP3t35nPXFixfYJy9FYtTh+2PmzNnkMBqmr04NCD99YdYt1DWOpvYsUceuAnYmEHBclrOy/LIWLVqTC3znrkW4XVXOm52/fomDJxqWqB/XamAVK1YiBh3OBjDRDB8+ghgFVt06mRn1Xr360O4WBqXZs2eQ7Th2vh591FIJAmZXaPs3Z2AbdlNdPtyg4pVR5wUIdu67dHlND2yX2R/y++9/oNVl2gujrjuFj1Z09o/bvSlRzwpGnWnCNIdwq4fECQKCgCCQXQj4WVswcwWncHAOx8EPcxBqIRpuTeYmUTd9rMC7O47QLGConcMpHI7H9MKos20nM5HcTv6dM2eulioOUfBx0qVLJ4qOhuHg/In664cmnG2JllH/9NPPtBCite39ndXe3RhM5zuy+j4WOKCOmWXU/az//HyLTjxhRowNrgkTJhLPAMECTIc5hGLUsWYuVcrS2J3o4rya85u/0Xw34dbUZllZee2HJrDhgfbhVCYI1NwCNID4yMlZs6bR6WVIF2p8xLNVq1brcac7nQ4FW/+zzz4b0UHBT18EFRDyxofqOxj1J554iiSLrLLsVn61ajXJQ7dz8cyTQiLs4rnV2w9xoBwno444SH7x4cOWeuPGj0hKazpYOVkZdcYFGDkXIC9oB4XYAXR6rEVaOOGoWrUm0R6OaGNV6XCDildGnY//quPiWIelvCJRTyxGnZ38wR4Snt2hKmQGLBqh+lekSBFxJmcCI9eCgCAQNwT8rC3iyai7rcncGPVPPvmUvFMDOKdnd9MmNhSj
7vYemLPBrO3GG2/QPmX6BEkGIQVrrY9yBaPJZoN4dzQMB9IlcvBDE872RMuo41QAHGe3V5sYAmOYZ8Kp2qRJ451Fxv0+Fjig0pll1P2s/7wy6lCthyf+U089RZtyXBiENZj1Ll26ZTgGLRSjjsxsZuvmowd9Du1VBBxtCOlsNN9NuDU1FRaHf35ogh0Lg64nTBhnC9nM6uLEpY4dOxPTDR9ivF4Mxahj/YjNRwTW/jXLM6+99oWZN9R1evp/3iXqKAxnMsK5B6ShL774QpCNLWyP4UUSDBKC02Mgnk2fPpNsjeDd0+mFGc6/Jk2aSrakbE+KHdzOnbuqAwcO0jEBvFMCuyUwT7fccosm1ub0vsz+80MceKcbo47jFipUCDhGcTJ4oRj1zz//Qts19yF7kvbtX7InLWCH48saNKin7VhKUlOhbvHGG73oGJROnV4lFe3MYsD5/WDhdjwbnCH88suvZNsBbQw4jUGAGg52c2GfzIExwX3nzp3IXgfqPNhxxMeFyRrSbnjszJ8/H2ULN6h4ZdTheA40iLoNGzbEfgfqDIc3CM5+dPvAw9UJZYjqO1AIHXhTxLlhwhOpeaTf998HzlCH1+Jq1aoQTYHu0A/cb8lytm5oVOSJICAIJCsCfubTeDDq4dZkboy6aYvZsWPgaNO//vpLvfJKB5rn0UdORj3ce8zFMPz31KxZg+ZerCfHjRtP602UCWkW5maEaBgOSpjA//zQhLM50TLqyDd27HiN5wS7CHPjw47UF/Feg8YCB9Q/s4y6n/WfV0bd3OgaNKi/uuGG/xH02JACQz5//sIMzo/DMep85DMKef31rto5cQliUOEAGWewQ10bvNrMmdPJy300302k9SveldW8mh+aMMcrmMfCjvyMM85AdSnAL1SHDp1Ia8Hpm8ptHY9NlQYNGpMPjtatW2jfWE9wUa6/XvvCtRBHpG8bdezItWv3Cqk3oUwQQVpamsKgivOmOTgX2Yhfu3adthHozEmI6YENN5/N98gJr99IsGbNCmJScYYm7AoQqlZ9jhhVXPOiHdfwrA0GLrPBD3HgnW6MOuJRb9QfAZ7FQRwcmCl1mgeYedgBhDk5st07yjGl02DqcfRArIIfLJhRj1SHm2++STPerxLtONMyk4Z42IjjTHS24cE9vFuaDgrDDSpeGXVsBEA9jAO8ce7YsYM+bOzS7djxszDqGpxEUn1HX5ljBO7xjWBCgoQHwRw3KEL+CQKCgCAQRwT8zKfxYNTDrcnMhe/Klcvt46xYcgT4sO7CYhi+iTA/4wxzjLtORj3ce1AOe6vHNQIYcjDwHKAtVbLkPXwrjPoJJLww6jiXvnr1WjaG7FjOjjhxEe81qJ9vw1ln3GeWUfez/vPKqEOA0KhRE/ukLNA5tP2+/fZb2ymj01dDOEYd7ebnuMY3WKTIJfo44x22I2TTaXGsGPWs5tX80gQf9QgsEIBvoUKF1I8//mT7nsKY1bt3zyA+wo1RN3kRt1OoUH758o/TWe24RvDSF1aO8P9BL55t1LlIlpzD/b3TKzZAwE6GOahyPrx02rQZ2i55rg3akiULbC+HDAx2LrCDgQCV5wYNntfnTx8iD9rX6yMNEJjRBTM1cGB/VzUHSujhn1/i4I/VuWEAB2SY1BCWLl1IHxFXBx4IITk324pnzHSAwLCJwd44+WgyU8UFH3fLlm1pckRa2GbEKvjBwpQ8O+sBRhfHPZQpU1rdqh1fQA3HLUBdBzQC7QHsgCFgMwheMStWrJDBdn3VKktdyHluLPLxDhc2MLCRYQZ2puLcKNm06SPtKX6APWgiD6S02FxorVXw0C/Y2efANnnm7mi4OiEfbzKF8nLLZSfarx+a2LVrl/awWpWasnLlMpuew7WNxwHYQNaoUc1Oyr4vsHN899132fG4wHezbNnbCpo2HDAW1ahRXZ/tW9pWceJn8isICAKCQLwQ8DN2si8Xp9CDGRLeyDfbwBI759FKbvMU8oVbk0FK/vjj
1rnbq1Ytt30SYf3Xt29/OhaT3433tWnTinzxgOHBGe6YkzmEew+ngVoqxnDM/RxQBsZv2KebYc+eveqZZ6xTYk5mr++8XjQFOCZOzms4esamCtYzUIF3C1xmvNagfr4Nt3qzyW2fPj3VbbfdmiEJ1nWQWIc7Acbr+s/Pt4jvZ/ToMZqHWRLEP4GXgf8l4G4G+GeAn4ZQGhCQxkPL+YMPPrTXzMiP8qCByzwT4qL5biKtX1EOr9FM/iWWvFpmaAJ26OhrFvChvhxKac/6bdq0DPKrgWdu4yOPv5zX7Rf4QhDEwUtfcJ5Iv74ZdS4Yg++uXbvJ/T2k4pdemmYz3ZzG7ReG93//fVQvnnPa6sWcDiob+fJZas0ch8YjOJm7f/75J0h1mtP7/c0Mcfh9p1u+Y8eO0aTI9hOcxq29obDhPH5/EwELbALB5AFSdScWftsVbT4sUg4ePETOJvLkCX3GZbTlpUK6RKCJSDge144csWsMKY/p5ChSPnkuCAgCgkBWIZDoY2e4NVkoTLD2wFgLKV7BggVDJQuKj+Y9GMP37NlDggfnmi+osCS/iTdNoL8gOYRmIJwalz5xdLAbjPFcg8YbB7f2OuPitf7bt28faSNjjRsLWoep8P79f9IpDE4+ytnGzN5nJa8WC5qAbyIw6+hLaOEWLlzYNiXObNujyR+LvsiZM6d/iXo0lUzGNLEgjmRst1udBQs3VE7uOKGJk7v/pfWCgCDgDwEZO/3hlsq54k0TrBZs2SxPi0q7LR74xxuHeLRJ3pE5BIQmAvhlWqIeKCo1roQ4Av0oWASwkCsLAaEJoQRBQBAQBLwjIGOnd8xSPUe8aAKnD8FMFUelIrz00gvqkUfKJgy88cIhYRosFYmIgNCEBZFvZ3IREU7iBEIcgc4TLAJYyJWFgNCEUIIgIAgIAt4RkLHTO2apniNeNPHyy6+S3T/8BMGrfrlyjyQUtPHCIaEaLZUJi4DQRAAekagHsKArIY4AIIJFAAu5shAQmhBKEAQEAUHAOwIydnrHLNVzxIsmYPMPyVwsbKCzok/ihUNW1F3KzBoEhCYsXOEHThh1B40JcQQAESwCWMiVhYDQhFCCICAICALeEZCx0ztmqZ5DaMLqYcEh1Snde/uEJizMRPXdhXaEOAKgCBYBLOTKQkBoQihBEBAEBAHvCMjY6R2zVM8hNGH1sOCQ6pTuvX1CExZmwqi70I4QRwAUwSKAhVxZCAhNCCUIAoKAIOAdARk7vWOW6jmEJqweFhxSndK9t09owsJMjmdzoR0hjgAogkUAC7myEBCaEEoQBAQBQcA7AjJ2escs1XMITVg9LDikOqV7b5/QhIWZ2Ki70I4QRwAUwSKAhVxZCAhNCCUIAoKAIOAdARk7vWOW6jmEJqweFhxSndK9t09owsJMVN9daEeIIwCKYBHAQq4sBIQmhBIEAUFAEPCOgIyd3jFL9RxCE1YPCw6pTune2yc0YWGmD2sQr+9O8hHiCCAiWASwkCsLAaEJoQRBQBAQBLwjIGOnd8xSPYfQhNXDgkOqU7r39glNWJiJRN2FdoQ4AqAIFgEs5MpCQGhCKEEQEAQEAe8IyNjpHbNUzyE0YfWw4JDqlO69fUITFmZio+5CO0IcAVAEiwAWcmUhIDQhlCAICAKCgHcEZOz0jlmq5xCasHpYcEh1SvfePqEJC7Mgibp3GCWHICAICAKCgCAgCAgCgoAgIAgIAoKAICAIxBKBXLlyBWzUY1mwlCUICAKCgCAgCAgCgoAgIAgIAoKAICAICALeEciZM0eAUS9a9BLvJaRgjj/+2E+tOvvsM1Owdd6aJFh4w+tkSC00cTL0srRREBAEYo2AjJ2xRjT5yxOasPpQcEh+Wo51C4QmAojm2Lp1ezpuhVG3QBHiCBCHYBHAQq7k+xAaEAQEAUHALwIyn/pFLnXzCU1YfSs4pC6N+22Z0ISFXJAzOWHUZcBwflDyoTgRkXuhCaEBQUAQEAS8IyBjp3fMUj2H
0ITVw4JDqlO69/YJTViYCaPuQjtCHAFQBIsAFnJlISA0IZQgCAgCgoB3BGTs9I5ZqucQmrB6WHBIdUr33j6hCQuz//77T2zUneQjxBFARLAIYCFXFgJCE0IJgoAgIAh4R0DGTu+YpXoOoQmrhwWHVKd07+0TmghgJjbqASzoSogjAIhgEcBCriwEhCaEEgQBQUAQ8I6AjJ3eMUv1HEITVg8LDqlO6d7bJzQRwEwY9QAWdCXEEQBEsAhgIVcWAkITQgmCgCAgCHhHQMZO75ileg6hCauHBYdUp3Tv7ROaCGAmjHoAC7oS4ggAIlgEsJArCwGhCaEEQUAQEAS8IyBjp3fMUj2H0ITVw4JDqlO69/YJTQQwE0Y9gAVdCXEEABEsAljIlYWA0IRQgiAgCAgC3hGQsdM7ZqmeQ2jC6mHBIdUp3Xv7hCYszMSZnAvtCHEEQBEsAljIlYWA0IRQgiAgCAgC3hGQsdM7ZqmeQ2jC6mHBIdUp3Xv7hCYCmIlEPYAFXQlxBAARLAJYyJWFgNCEUIIgIAgIAt4RkLHTO2apnkNowuphwSHVKd17+4QmApgJox7Agq6EOAKACBYBLOTKQkBoQihBEBAEBAHvCMjY6R2zVM8hNGH1sOCQ6pTuvX1CExZm6enpmTtHfdiaP9XyTw+rX3ceV//+m+69J7IwR65cOdSFhXOrMjeeqp4vVTDqNwlxBKCKFRYHDhxU33//vdq6dav67bff6QXnnnuuKlq0iLriiitUgQKnB14qVwmNQKxoIqEbKZUTBAQBQSDGCMjYGWNAU6A4oQmrEwWHFCDmGDdBaMIC1LeN+tbfjqkWE/eqHT8fi3HXZE1xl1ycRw2sfq4qVihPxBcIcQQgyiwW//zzj1qxYpX66aefAoW6XF1xxeXqwQcfULlz53Z5KlGJhEBmaSKR2iJ1EQQEAUEgXgjI2BkvpJPnPUITVl8JDslDs/GqqdBEAGlfqu9P9v81aZh0biqY9fmtLuTbkL9CHAFoMoPF8ePH1cyZs9Tvv/8RKDDM1fnnn6+efrq8ypUrV5hU8ii7EcgMTWR33eX9goAgIAhkFwIydmYX8on7XqEJq28Eh8Sl0eyqmdCEhbwv1Xeou49atD+7+i5T76332JkR1eCFOAIQZwaLFStWqm+//S5QWBRXN954g7rnnrujSClJsguBzNBEdtVZ3isICAKCQHYjIGNndvdA4r1faMLqE8Eh8Wgzu2skNGH1gC9GPRppeqFzc6viRfPRWzZ/f1T9tf/fsH1+ZsHc6kFtS35rWj61ZY9Wl/7siJbY/+OaJ3fuHOq2605Rd1+VX118Vm51at6cauvvx9SmH4+qFR8ccs3DkdFI1WNJHLAt2L9/vzrzzDNVzpw5uRpJ8+sXC9ihT5s2Paidp556inrggVLqggsuoPidO3epVatWq6NHj9rpIE2vXr2qOv10sVm3QUmwC780Eetm4NtatGiJwiBWrlxZlSdPZLOWWNfBrbzVq9foDarvVaVKz6izzz7LLYnECQKCwEmIQFaMnWvXrqM1xt13l1CFChU6CVFN7ib7oQkIQL755lt12WWXquLFr0s4APzMzX5wCNVwYmxy5Aj1OOr47Fq/J3r/Rg1gJhPGkiYyWZVsz+5Z9f22F7eHdBxX5OK8qnPFs9UNl1hMOlrXbeE+NWftXyEbWunBM1S7chkXtO9+d0S1GLEnQ76P+hRVOUN8gweO/qfqjtqjtvz0d4Z8iICDuU29irg+48jMEgfsst9+e6WaNWtOkET5xhv/pwfV4uq55yq5LuA3bfpITZo0hasR8rdixQqqZMm71YEDB9Srr3YKmc7tQa9eb6h8+QJ945bGjPOLxbp176rPP//CLgpMeqVKldRpp51qx+ECTubA0P/9d6C/SpS4U91yy81B6eQmcRDwSxOxbsGxY8fUvfc+SMUuXbpAFSwYvcPIWNeFy8MG1eOPP0W3NWpUU40bN+BH8isICAInOQJZMXZWr15bbdny
gxo0qJ+67bZbT3KEk6/5fmhizJhx6q23xqjKlZ9VLVo0TbhG+5mb/eBgNnz58hXqvffeV5999rnas2ev+t//rlfXXnsNrbfPO+9cM2nE6y+//D81atRY9eGHGyntqaeeqtfu11JZd955R8T8SLBt23bVr98AO22PHq+rU07Jb9+Hu/Dav+AdmjdvTUXefntgDMiRI6e6+OKL1JVXXqFKlLhDwYFzMgW/NLFr1y71xhu9wjYVvFj9+nXCpkmUh74k6je32Zah/qeellONbXK+uuL8vBmehWPUHy9ZQHUufzblOaa9xn/5yz/a4Vtuddaplp3y6q8Pq7aj9gaV+bFm1LFZ9tvBf9UuLakH037lBXlV7hMC67+Pp6sHu/ysDh/6Lygf32zuW5QvXX/9EgcK27dvn3rxxVcUPvRQ4ayzzlI9enSjgcRMs3TpctWlS3czyvW6TZtWqmLFp7T39N80U/C0a5pQkatWLVMYdKINfrGYOHEy7fLzex5++CF11VVX8m3Q7//931dqzZq1dlzhwoVVhQoWs2NHykXCIOCXJkaPHqsg/UHfli//RKbb42cxkOmXRigAfhkqV66mfvnlV9W9e2fSIImQRR4LAoLASYKA37EzHDzCqIdDJzbP9u7dq1q3fpH854wePSKmfnT80IRXRo5RaNmyrfYZ9Lt65ZV26pprrubomP/6mZv94ICK4139+w9Sc+fOd20H1tu9e7+hrrvuWtfnzkhoxLVvHxCAYb18+PBhOxmYuzp1atn3bhf//vuvatSoaRAf8Pbbi/XpRgXckmeI89q/0fAOaAc286LFIUOlsiHCL01gswb4hwvQQOrTp2e4JAn1zLNE3Y1Rv6xYXjWzWWFq2D+aUV777RFV+jqLIQzHqK/scrE6+7RcCkz6Q91+UQf++peY8AWvXqQu0urwWqtV3d1hhzp6JMB0P3j7aeq9L44ExeHFPeqca79zyOo/1ZjF+12BzipGHR9n3boNbSk6JOd33XWnuv764urXX3eqjz/erObPX0i736jYhAlj9NFkl9t15I8tLa2YHihesuOdF2BkoVKL90H9yQxw3Nau3SsUhY8SHyeHHHp34+qrr/Kkgu/3Q3nzzZEKTAuH2rVramn6aXwb9Hvw4EE1btwEOy5//vyqXr3k2OmyK30SXfiliW7d3lCLFy+lXcxIE100cPpZDERTbmbTgO4PHTqszV3OyGxRkl8QEARSCAG/Y2c4CIRRD4dObJ79/PMv6plnnqPC1q1bFVMzKz804ZWRYxTKlXuShElDhgzUWos3cXTMf/3MzX5wQMUXLFhkS0+xAXH//ffSuvenn7aqzp270XobQqJx40ZFbKfZz2XLliZtBWjqHTx4iNaokydPpTImThyrLr/8spDlTZ8+Uw0YMJjqwUx+vBh11I0DNpg2bPhQrVy5mvod/MCKFUs88QBcVnb8+qWJNWve0ZtRHdTDDz9IWhBudcemycVa2yAZQswk6kUvyaumNLlAjVq3X41ftl+rWORRS1+0PKyHYtShJj+vlcXcv7l2vxq58E/CLE+eHGpdt0tUPm2LjmA+o4gQ/2DnvqaDBfyGH46oJsP2uKbMKkZ96tTpesdqKL2zdesWemCvkOH9UI2tVKkq7dA98kgZ1bFjezsNM+pQ2RkxwirHfhjlBT7MJ56w3rty5dKQzHGUxSm/H8qQIcOCXtG06fNB984br+md+eU+fgj4pQmvjDo2osKdAOBnMWCihMEPf159R0Sql/mOWF37rSs2DeTIw1j1gpQjCGQOAb9jp/lW5/gTLaPuzGeWGeraT55QZYWLD/eecM/cyvSaHmXAFhmCDPy5BZOBS1RGHW1ACDefeWXUw+ESDmc/c7Pfb6NhwyZkZtmuXdsMmnpg1qtUqUG4LFw4J6IPB16jXHTRhWry5PEZzEQhBFu3bj35xOnQwRKIUeHGP2jTVaxYmWIgyX/hhZfpOh6MeijewcTBKSA0qp5wl35pYs6ceVqLop96
/vmG5PMq4RrmsUL0HW7dul3LrZUqWvSSqLK7SdSdGc8/PzKj3krbslcvYamClO35q7YrOUbF9Kx7rnr42oAk+Acd/4x+HingVK+NPS21+KkbD6je092PBcsKRh0L6fLlK5JtjJMBd9b73Xff0+rx1se7YMFs225EGPUAUpEY+0BK62ratBkkrX3kkbJ6YK4U9BiDKzQaGjSoR7b9/BC+BBo0eJ40EwYPHqCWLVue6TK2bt2m+vbtr7+lIqpbt878KvqdMGGy3s1cqe2q79FS5bpBzzBBwIFI8+ZNEt7O0OvguWzZ23rSm0p9gN1l7OpeeKG1QffWW8MVNCg4QEMEZhAffriJ8MBO+B133KZKlbqftEE4HX7DLQZgq/bmm29R8hdeaB1kZvLeexvIlg1q+PCNAJszvAMO6ZxMbdu27dTu3Xv0znxHtWnTxwpqcfC9gDYgT5MmjRUmdTO0b99Jbd++nbRioMGCAAkIdnnDhapVn1PYxTdDtHWFJk3Llm1oY6N//95q6NA31fvvf0C76FOnTlTFihU1i5VrQUAQyAYEvI6dXMU//9yv56Ylav3699Snn36uzjtPr5G0OVnNmtX0YrR5SBt1bNxj/P3oo4/Vxo0f0Vh1++230ZiHecgtwKRu+fIVOv0mygPV4ZtuupHGppIl7w7K0rfvAF2fz/Q42IjKNB+inFatXiDp85gxI+1HY8aMpzG+SpXKehw9hXz5fPTRZoX3PPZYOa1WXJMYZgg+FmlnoWB8ihS5RKHeeI85X3Ch3333vXZMu0aPeRsIi1tvvZnqg/WA06Gn+f6zziqoZs+eR7iiLDA6DRvWUzffHJA216nTQP31119UD6RJSytG4+wTTzzmKoxBGi/BD02YEvUHHyylpkyZRnMm3nvTTTcQw2qengPV8M2bP7G1OUE/Z5xxBkmFO3V6laq7YcMHatiwEdTXaNvYsRPUBx98SM8wR7Zo0ZTobqWWzsKXEtYrTBtgiMx58Ngx7/5j/OAABmbUqDF6DfefqlbtuQyq5UeOHNXmZ9acCsb70kvTqD2h/vFGBlSioRrtDFgzosw8ebQTa70GcAbwAZiH8a1V0CZ+oNcHHihDyWLBqMPxcteub9D6AhJ97rtoeIdIbXO2JRHu/dAE6j1q1FgFU8uOHV9RGANSIcRE9d0JRDSMei/NkD+kGfL/9DbBrW23URGXp+VVM5pai/jD/6Rrj+45yBa9dKefna8Iui9WJK/q9sw56toL81J86R6/qN/2BlSvzcRZwahjoqhZ02K+Ro16M6IdCCYxBAx0LDWM5mMz2+F2nd0S9Z9++kkzQRvUn39a2hFudYwmDrjcffddUTMYcF7Xrl17srsyFwXAme34y2lGzNwFBcOF3VhMMLNmTdM7pZkvY5/2UYABEcGp0QBNiu3bd1CfL148z969x+B/330PUZ5582ap888/j64T9Z/XwXPWrLm0eeHWHtNnwldffU2mI27pEAfbQDiH4RBqMQD6A4ON8IpWhXv88Uc5C5me9OjR2743L5566km9gdbGjFJPPlmBNt+wqMVOujNgsp49e7p2ZHem/Qg7+NjBHjp0oL3gw4IWziXDBacWDsxkoq0rnKc89dSzVDwWqVj4cpgyZQItLvlefgUBQSB7EPA6dqKWGOeaNm0Z5JyVa4+NTJiOgZl1OpMDc9+w4fM053B685d93ZhxYEjr128cMk/durW1WVptO0trbbcNBq9Ll06kZmo/0BemZHHDhnX2I4xpGNvuuON221mX/VBfPProI6Rx6LaxWaLEndpBVy8zuTLXXkEP9A3wGT58sHbidYr9iN+PslB3t2Cqhj/4YNkgG2VO78SC473++qEJZtSxoYDNbVavNt8NaS4z69g8xiazM8DB8fDhQyh6+fIV6rXXuhLzDjqAQzYzYLPk0UfL6fQjzGi6BuMP1XKs2xBCzc30MMQ/PziEKMqOZntzzNORVL7xHT38cDnKu379GtKy2LLlB3IKV6jQ
OeRd322TyH6ZvmAMgcOMGVP02j5nzBh1MOkQOmETAHQ9YEBfe90RiXfYvXu33rx5hqq6ePH8DJtXZhsS6dovTfTq1Zd8FgCjG264XmFtCZ7kyiuvTBp1d2c/ZBujPrblBeQdHs7fSrTbTvVim/WlXxymo9eu1+rx5nOz8h2rnaPuuvwU7Xgup8qjvblzWPz5IdVhvMUIc5z5mxWM+urVa7UErSO9ZvXq5UETg/nucNf8seG4jSFDAt4inXnCeW3fq3fQs1P1fdy48WTP46yzn3vs+MJzdjThwIEDqnRpiyEzmT9IE2CnhOAcrCHhxqSDY7RatmxGXvQzWwbegx34r7/+hgZSSF0RcBTd009bjBTuzd3dL774kiT7aWnF9M54wFYf6RIx+B08oTUQykYdGg9Q4cSC44EHSpEdO7QStm3bTt5tMeGi/yZNGqcKF76AYHFbDGDhhcUjAnabTQk1pErNmrWiZ9BcwIIwb968WmqwUb38siVVaNOmpVZbCzhoZEYdmZDn8ccf0+ry/ylI4wcMGEz1dS7Y3Bh1eqnLP97EwGbRuHGj9bGEp1Eqr3U1GXUU0L79SyTxhx0WxotQKp0uVZIoQUAQyCIE/Iydffr015uBc6lGr7/eVUuKbyf/LxiDXn894AzJZNRh8tK4cTNyZgUGCxvU11xztdaw+VMtXLhYjRw5isrr06cHbYjjBnmaNGlBGwIYj5AHa5E9e/aqJUuWkpQK6UzV2cww6ijr2Wcr0liPMWrOnPl6TB2EaAqYk5988nESZMyYMVuviYZR/Pz5s0myixtsxNeoUZc0h6DJCN82F1xwPkl7O3R4jTYL7rvvXnLeS5n1P2bUcV+69EN6Tniejs+Fh+8RI0aR1NmpQpyoqu9oA7x8t2v3Am3wb978CWlvQesCElfTXhlpy5ULbaPOTCbSof1dunTUOJ9HvpUgBOHNAGDWqlVzksjD7xJ7Gzc3a9zmZpQbLvj5NpzlHTp0iBhs0CzmUMzRCDjtqGTJYG0QZ16s2bB2A25t27bSkuvXbS0KTgvtBXwXbmvw/fv/ojUecOrZ83XSnDxy5EhMGHVo/qEPQKP4jgcO7BukPcC8A771UVpQyAF1wkbOW2+Nps03aJXWrm2ZAnCaRP71SxMwN4D2UePGDdX48RNt2kVbsanUtGlj0khK5LY765ZtjPr89hepS87OrfZrR3GlXt2hGj1RUDW470wFSfr92oFc//rnqrs1I25K3M3KT3+xcAYv84s+O6g6TvjdTJbhOisYdV5w40OZPn1y0DthywMG2i0ULFjQVuXij80tnRkXbiMguxl1nkzN+mbm2osKfOPGTUkt0FywgEkHs47BDQMxdn2xG4nQsmVbGvhMNadYlMG73VDhYxV3LI6wqOJ6tGzZXG8QVKR6wEP+MK1yhvPjoUKW6MHv4BmOUZ8+fRYt0tLSiumFxkj7mwAW2EnGBAoptYmbczGAs8uhdobQvXsXPUHer68C4aWXXlXvvLPOFedFi5boPD0yLG6YUYdUHtJ5M0C1CipWkFpAesEhWkb9888tjQ7kMzducO+1riajjgUW1GIlCAKCQGIh4HXsBPNcsuQD1Ag3NU7ebEYCc96DWnKtWvUo37Rpk8gUi25O/MNchDkJmkJgKhDMPG7OslgqW6NGVVoAI09mGHXMwxjrTZtq1jrDZm337p3xCjswk2luLkBLCdpKYCwh3MiTJ4+dHpu8OIEDYfnyRcRY4poZdazVMO7mzp0b0RSgidSsWUu6fv/9d+wNzkRl1LF5vVDbXuOXg9nuNWveDppLGUNTY4DzLT8hUce987hTqM7PmDGL3gMVbtYCRVqe12G6BQYIwTk3Y50bKXj9NtzKc2o/gCl77bUOpM7vlt6Mg1lix45dSPsMaw0EaBxACguJLJ/kBE0MnNqETX4zsBTX3BiKBaPeqFF9LUjoQNofqA/Wq07HzNHwDhBCPP10+aDvzax/Il77pQkWlqFN0G7A0XTp2iwB
Qpl9+/ZRUzG+YJxJhpCtNuqzX7pQpWmncwf//k890XunWqkZdxy11nj8XvXh54fV0OfPUyUuC82o33zdKerKwnlUoQK51E1F8tEfQMexbZUH7VJ//H7ctQ+yhlG3JgzsQMPu1gzmwGnG49pkKMyPzRx4nXkWL54XNPiaz09mRp0XLXXq1KJdehA3VJlgh/b8841I46FJk0balqlK0ERibnzEogyWkJu78rzIAW1AtRBqfwMG9KGu48XO4MH91a233mJ2Z0Je+x08eUKvXz/j8Sa8oYKJHhO+M0zWNu5DhgwnCTnbZZmLAcSx5oS5kDPL4UWKuVnDzzF44zmCubhhRr1r19fUQw9ZC2bOA6YfDDUWnKZH2WgYdVMS5DZheK2ryah7sYXjtsivICAIZD0CXsfOH3/8SY+HNaliUMc1GSREQmJWtuxj9Nxk1HljGExFv37BquJIDKYD8xAWsUuWzKf87D3bnJvoQZh/PHeZ0lROHkn13W1jmudJMBUVKwY0m1BmixatSe0XjFeZMg/Ta3CcLdZN5gYuvx+/PBabjCkz6mBaXnihtZmcFvE8D5jjaKIy6m5HTGHdc/fd91O7nP5JeF4x8WAAmFF3zmd4Pnv2XM0g9g/a2OF8vGaC9sNLL71A0ebc7GT6OZ/z1+u34cyPewhfoLaPNfdhLdlGAI1jfXDHHZZ2I0W6/INPhEGDhtpPhg0bFMTg83yPBJC4V6jwlJ2WvydEmBofmWXUYY63Z88eMieF5kSos9hN3iEtrZiuhRWgYQDtAg7QMIS2nbk5xc8S8dcvTTCdQ/sD5oy8sQGhKTZj2Axk9eq3oz7bPrvxyTaJ+lvNLlC3FMtHR7P9tPcYnYW+6aejquGQ3YTJxNaF1XUX5SUJ+z0vW6rx4cC6/qp8anwDSy1287a/VT3NrLuFrGDUWeUWDPaqVcuCXgv7EOyImQHSXQRzkc4fm8ngmXmiuT6ZGXWo+NSuXZ921+E1n1WZKld+ltR9ypR5jHZIYZOFs9vr1WtEamMDB/azoY1FGZCC4F2YKDAQ5NGORyAVSUuzVNsh6YD0wnyGCqxdu8JVpQrPEin4HTzDMerwkoqFnandYLaZ7c6hkgl/AgjmYsBMazLaHG/6KuC4UL+Q8kDzAYEZ9VGjMvqdYPV0p4ohLw6HDg3YqJvvQr3Z5hSOD5s1a2I+JnVO9qsQ9MDlhutqMuqmPahLFokSBASBbELA69jJUj435ombwItSk1Fn6Z4bM4x8pj0uMxY9e/ZR8+YtcNU44nc5fzPDqGPcczp+ZUb95Zdf1CZ81gYEvxN+RzAPmIw6zxucJtRvixZNtXTdMj1jRh3aelWrVg7KAhXj+++3NgGSgVFHm9A2Z2DJMjQGTAdqTCvhGHU35p+9aIPxgWNVM0yZMl0NHjyUzBSym1HneoEhgxR8yZJlRNOId2LBafmX7dlx77ZRhHi0E+2FJJY1PvAurOm2bPlBbxQ0I1NKpEXILKNulWL9f/XVl8lcz4zj63C8A2gaTgG7dHmd1qQ4jQr+cJIheB0vuU0rtcPDY8f+oX5ymilg/VW27OOEhdt3wGUk2m+2Merda52rHrk+oLJz7F+l7n9thzpy+D/CaIGWsF+sVeP3HPhXlX3t56hwY+Ye57Lf8aI7c58VjDq8PFeqZKlZLVu2KOwZyviwYQsNRs7ctQv3sUXVeJ3oZGbUWYLODPLMmbO0DfpILbnuS7uprNYOpyILFiymQbd58yZB5yzGogz01auvdiIvtPAmnzdvHnJaB9ugBtpGiFXj4Z0bdvh16zYkD6NgUpMh+B08wzHqvIAwF5smFps2fUS2cKYEKBSj7nbqgikRwc50jhw5zeLpGvZfkCZBqg/mG4EZdacjOzzzy6jDbm769JnkaA62Zs7dbT91FUYdPSJBEEhsBLyOnYsXLyXV4nCb9zxGmWMnj7WsXeZEBeZEpUqVpuhZs6Zqh6oXacajp/ayvlg7xa2uoG4bTchuRp3nDdQV
Y7cz8JgOSSL7K2FG3W2jQBj1rq5rkWRj1JkO0rW6MzbFN2/+hDZq3DY1OC0LdnDvpnWHeBYYmAK5uXPnaxv4vmQWAMeFpvkF1qIQCCG8+eYQ7YPmdHJenD9/fooL9Y/XiM7nbiYpSBMN77BixSotTe5M9YzkWM/53uy69zpeRltP3hBs2LCe3mRJDpv9bGPU6z9eUDW+/0wb207z/lAL3z1g33/Upyipwv/fL/+o6v122vHhLgY2PE+VvPIUShLqGLmsYNRNpiFS55u2YKZaUDQfW7i249nJzKij/awKB2kmHMPADpglrJNPqE/Dscj8+Qto0HUb+GJRxqJFls0z1Lz/004WYM8MST5sjFhN6rnnKumj+QqRupVTlQptSdTgd/DkxaOb6jvbZDs9nzMGM2fO1iqcA/Xi8j5t69+Vos1vrmfP7qQW2rbtS/TMqQppqgNO/3/2rgLeiqKLHxoU6UYQFPhoUbpEUrqlJESUECUVFAQxEFBCaZAS6ZDulBQRpEQEpENaSiT0ffOfZfbO3bf3vrf77nvvcj3Dj3dnZyf/e3Z2zpyY2dPEcT9ZVdV+f9UiOFCMupKQYcMBtJc6dapw7bvpKzPq4WDkBEYg6BBwOncqDS8MZOvWjeHsS8EIQHqKoDPqymeOboMuMz38c/iwcUoNGA61aFfMmJ1EVS+rxxWjbieB1Nc5upaPP0ZZLaAjK1FX51pbT/fQ+2iN+2ufGfVHi1GHptz+/b9Ix2pFijxvfdTy+uuvJ0nhSER0Da/g1arVlmXGjx9NBQrkD1efkrrD9h2aKAiDhUnAfGEaENngS9NOL68z6jDNhEo/1rJod+rUyeEEgZHhHXRhon4stN5usMWdzpfo/61bt4UDxF3ShwBMf+yCWm9aBXV2eYMhDRtOscaoJ0senzb2zSxxsErNixZIQuNeTSfvWRl4X8DFEfbt6z55klIkiUf3hCf5Eg89yVvzRwejjjbgXXDs2K/ljhW8d9sdswVp+ocffiylrboaL8pH5mVDPn/hv86oK0YI6j1g7nRHX1BNgmdx2FItWrRE2i4tE/b+cUA4WghEHeo4DByfAjuh06fPSIc2kJ4qjQrYzmfPnl16p3TCPGpdjZWom8kTHVWMOjz9wmusHr596FAPeMFWX3cwBLzgWRa74nC2B3VOBJ1RVxteOCMXHk4RrGpNcBIEZ0FWJh55wRzDHAIBR8ApW9BAMuqK/tAGfBXAn4Wv4LSvzKj7QpLTGYHgQcDp3KlLvnVGXI1IrRlwrd9XflLAiM+dOzPccUwTJhjnDOv26MocDGWmT58ivKdnQLVmwEb3zp27pJ0y7LsRlLq83YaAYvyRL7oYdbXmQvsDB/YP9y3HnAv1Y5wgAu01hKgy6ssCfLyVU5rAGBQj51b1fcCAT4WK/wuoygyrVhnHs9kxtOpZVgky1XfYoiuHgb40WdVmEo4VbNOmtTleuwjWh6CZluK0oQ4d2obLonzp1K1bW3hhf0feh237xo2bwuVFAtYoyswV5nSQtmNTK1eunLb5VaL1+eo+bfDODhkyyFyjoIyaB/xp3qgTkCAksFv3qraD6dfNuwEfBTA9RbA7mhZzao0adaVGszIdDKYx++zLiROnwvA/suG5bifC7P4Xf/9k2Isfn5H/W4w7b1Y3av01M71Er5NeZY9dvGfmaynKoN6SvU+F3b77r0wXKuxhhd/xtDd87bWwoxfuhb0z65JXermPToftPvm3WdePx+54taP318zkI3Llyp9h+O80/PXXnTCxIxdWokTZsAoVXgoTH7UwwQCY1QgmOkwcsSDvI4/wQGjeQ0TY08h7bdu+6ZXu5EI4njDrF3ZoTora5nWKhbDhCQvkf9tO+Um8fPmKOX5gLNSSzNx4Fur54F7//gPNe3okEHWgvkaNmpl96d27r95EmJjwzXu1a9f3uvfzz3tlWaGmFCaYVPPepUuXw958s1NY+/Ydw/7444KZvnr1Gpl//PgJZpr4QIT16tU3
rFWrNmFCgm+mByLilCZUm8OHj5RjbtCgcdj169dVsvw9cuSoiceoUWPDxOJKpuN35Mgx5j3kU+HevXtm+rVr12Qy8OrR432Zjnfw/HnPPARawHPH/02btpjYiok7THi1lemgD9SrAp4N8otFrEoyf4U6vrzXvPmrZhoiTZu2kOm7du0208XHIwzjRl1i991M9xVx2leMU43NV52czggwArGLgJu5U5/Pfv/9mBwAvmV79+4z33m8+z/+uNMcHOZNNd/gmyGOZZP3MD+uW7fBLCc2rM0ymAfVN0v4egnD9wYBZTZu/N4sg++TCoKB09L3mHOqYE7kGshuThow4HNZZvr0Waoa8xffLJTR+6Vudu/eQ94TDIdKChNSe7P98eMnhgmJuLwHfATzYt47duy4TMcff+0DA9VnzNkq4Hul0jF/o/5ABTc0MXHiZNmfL78cYdsNfPvQX0UvKhO+VUjH+kP/zuE+cMU94GwNGDPuYU1iDXiOuAdcVUDdSMN/9W1W93z9usEBdYFW0U6nTl0FzV4yq8czEhs5Zj/0b7g4LSAM33bBZJv5EREb+WZ+oRVp3rPWJY5jNu/5i9y8ecusT6cnf2Vwz+75is03sy6skfTgi3dAv4G/cBRpvo/i2Dm9aFDH3dJEr17GPGLMYx6awLzYuXN3iSPeEeFPyhy/L5qwSwdv1bXru2HCrCHs2LHjZh1btmyTc+gXXww10wIVCZhE3WpzbrczcOTCPWr8uUeNPW/ORDS1XQap4o78OJotSYI4YmfUKD1szZ/07crrZlVdG6aiFiWfMK9hi46gn6OOtMr9z9KN68Lo3SZEl0QdTR0UDixwJrPytIjd6bx5c8szDFUa8tk5eVG7YrjvL8BTqdrRtuYTE1WsnqMem8ezKSyUszZcK/s7dU852cF1f5sjvFS+QNQxWhy5BkkxQt++vYRalaGiiGtlL4Q4vNtil1WF3r0/NL1S6jt+4mMp1ayQr7PmHEcdaYN0qDEmFXZQu4X0uWNHw2FIzZo1hKdP76PFkNdtcLPLibbEx0163lftZs+eTZgEjDe9bupeVZEHduJHxe62CjgSBUefqGAnUcc9qD61adNWvnNwwgTbMGUThqN8xMQrq8C7qc5qhwopwuDBnnOFcR0oifpXX42kWbPmoEqfkvT69et40YiTvrJEXULLfxiBoEbAzdx58+ZN6SQVzjYRoP56//4D6aFcj+sSdeSDFhe+Y2puy549G124cNG8tpPGnjt3XtrUisU9qpBzMNJUHboTLdzHmgZtqPzoDyTXmLehMaj6HF0SdfRBqSMjjgDJpdjINvvUsqXnODncHzjwC6lR58RGHeX07yykkrDlb/zwiFXcdxvc0IRV4mpt25czOes3pUyZUqZzuFWPoEQd44YpYY8evcznjXVDihTJxVr8kEm3kKS3afOqqXGh8EF5q0mJMn3EPdBzvnx55bG/isbt1u7IaxewFqlcuZq8pTsntMurp/l6vmIDS9Iv8uon0USWd8C7ATPBtGnT6s0FbdzNu4HBiA0q4aS3q0kT8EsEbQb4GFDBqtXoiybs0vU5R59flB8stAGnx5gDAxHEhkvgVN8/Fc7hqmvO4ew6ePiPe9REHMWmh9w5EtHkN9JTovgeFWRh1ktDV12jGWtu6FnphcKP0Yd1U1HKx+J5pasLeHvvOeMyXblsfzQb8kUno4768WHFuZN4eawhe/Zs8mgHqPhag9jRNI+Yst7Tr/3ZM+vereF9HsxIVILTFyUYGHVlk4RzUq1n2gtJqlBZ6iUh8aUqhZuBqEM5QEN9S8RZp2nSpEFUBt0eysocqkkAHxww6so5CWyM2rd/m2BHh0lGeXOdOnWadJqn22+LnUO5gDp16nQ45lP1we2vU5pQ7YidRaGKNEuex6o2rdasWSE2Fh5XWYQ/ge+lp9YtW7aaaTBfqF69qrRPNxNFBN711RnD1mepq8TpampCOiTUOmfKCXvfvv1mdfAd0KFDO3ligJko
IsqrsL5hou7v2vWzcFTTOdzxbEp9Do5lChV6VmZX5xarsna/aB+TvgpO+ooFeN26DWVRfVGs6uJfRoARiH0E3M6dmPtxDBbmR8U0w/6ye/fOYvPzQ3mKCByXFrHY6R4+fEQ6rty+fYe5aIV6LBg0HIOpmxgpdMBkz549T8yR28wyYFhwVBRUgq1lkB/zm1LxRT1g6OHRXTnS0ucktVmubzartpUTVmwsY4NZD++++740E7M7Cg7fTHj41hfiWCS3bt1Kfjt08zZ/7d+7d09sBleSzVoZKyz+haRTPgNk0Bfoej+dxt3QxOTJU0lo0ElHuJ2EQ1xrUMyF9Xg2IdUVY5ginAYul3QExg3fNgQlPNDNBVW9CxculmYOdo5asQGNjWjQB47CQvD3bVZ1Wn/d4KDqgIBqyJCvhGnGT+b7gXsYX7NmTcIdrTp8+CiCyjoEXtYj+rBOwXhBJ3rAOwChijK90+/5isPssVIlg1G3rnV8lUG6v+erzE2Qb86cGZQly5Pkj3dQmw0w6YNJqNULOuoJ1hAVmsCaqF+/j+Umiz4+0AR4KOChB180YZd+5cpVeu21N6RQCE6hMaciqI0UrCdHjRoebq7U23MadyxRLyq8qQttKKftRJg/Ver4VDRXIvrt7H06ceqe3/yJk8Slp59MQJlFmfji8PVjF+7T8TP36N5d//2KFy8O7fzcvyOpqBCH3mnsgmBH+cyZM9LZBaR3Tzzh0QbQ8wZr3CkWwcCoByuWTvp1//596Q1cX2CgPJg3BGVDLS/EHzDv1gkY9If8itFXeaP665QmrO2hX+gvxpY4sb33U3zoYecPO0nrWK31ub3GRxQbJthAsWLnts7oKvco9TW6MOB6GYFHHYFAzJ3CzEVIDFOYZwNHBhMwH5hPkydPLjSYDGe7EZVTZXAGcWTWLbD9FGZj0kFmZNuIqA9O7+O7AWl68uTJItVnp/UjP5h5tINvl3XTwk19UaUJN21iXYDnlTBhwoCvD9z0B2UChQPoXKicC4exWeT4fPXHbs2k58UaDBv+2ATInj1bOJ8Nel6ORw8CgaAJCK2wYYn5DL4B/AkvfdGEXbqvtTjmB7xXgQzou2NGvc6wc3T6zP1A9iPG6soimPtFXf2rIwSCOGJsQNHckFMspkyZKs9oDUS3sDiAehmH4ELAKU0EV++5N4wAI8AIxA4CPHfGDu7B3CrThPF0GIdgptLY6RvThIG7K0Z99IY/acJSj9147DxCd62+XjM5vVk+hd/CTBweeJxicfz4cdq27QdTZc5Tk7NYmjSpqUSJEpQt21POCnLuaEfAKU1Ee4e4AUaAEWAEHgEEeO58BB5SDHeRacIAnHGIYcJ7BJpjmjAekisbdRR9FKXqkZGmY2xMHEDBCIyFQoJ/FQJMEwoJ/mUEGAFGIPII8NwZeaz+KzmZJownzTj8Vyg+8uNkmvBg5Vj1HUVPXL5Pnb+99MiowINJ/6pFWsqWJoFn5D5iTBweYBgLDxYcMxBgmmBKYAQYAUbAOQI8dzrHLNRLME0YT5hxCHVKdz4+pgkPZq4YdVUcavCr9vxF584/iBYHc6odN79wHJcpY3x6qdBjEaq76/UzcXjQYCw8WHDMQIBpgimBEWAEGAHnCPDc6RyzUC/BSg10jwAAQABJREFUNGE8YcYh1Cnd+fiYJjyYRYlR91QTOjEmDs+zZCw8WHDMQIBpgimBEWAEGAHnCPDc6RyzUC/BNGE8YcYh1Cnd+fiYJjyYMaPuwULGmDg8gDAWHiw4ZiDANMGUwAgwAoyAcwR47nSOWaiXYJownjDjEOqU7nx8TBMGZq6dyTmH/NEpwcTheVaMhQcLjhkIME0wJTACjAAj4BwBnjudYxbqJZgmjCfMOIQ6pTsfH9OEBzOWqHuwkDEmDg8gjIUHC44ZCDBNMCUwAowAI+AcAZ47nWMW6iWYJownzDiEOqU7Hx/ThAczZtQ9WMgYE4cHEMbCgwXHDASYJpgSGAFGgBFwjgDP
nc4xC/USTBPGE2YcQp3SnY+PacLALCwsjJhRt9APE4cHEMbCgwXHDASYJpgSGAFGgBFwjgDPnc4xC/USTBPGE2YcQp3SnY+PacLAjG3UbWiHicMDCmPhwYJjBgJME0wJjAAjwAg4R4DnTueYhXoJpgnjCTMOoU7pzsfHNOHBjCXqHixkjInDAwhj4cGCYwYCTBNMCYwAI8AIOEeA507nmIV6CaYJ4wkzDqFO6c7HxzRhYMaq7za0w8ThAYWx8GDBMQMBpgmmBEaAEWAEnCPAc6dzzEK9BNOE8YQZh1CndOfjY5owMPNi1DNmTO8cyRAscevWHTmqpEmThODonA2JsXCG138hN9PEf+Ep8xgZAUYg0Ajw3BloRB/9+pgmjGfIODz6tBzoETBNGIjGjRvX40yOGXWeMKwvGr8oVkT4mmmCaYARYAQYAecI8NzpHLNQL8E0YTxhxiHUKd35+JgmPJiZNurMqPOE4SELxsKKBV8zTTANMAKMACPgFgFeeLpFLnTLMU0Yz5ZxCF0adzsypgkDuThx4rBE3UpETBweRBgLDxYcMxBgmmBKYAQYAUbAOQI8dzrHLNRLME0YT5hxCHVKdz4+pgkPZixR92AhY0wcHkAYCw8WHDMQYJpgSmAEGAFGwDkCPHc6xyzUSzBNGE+YcQh1Snc+PqYJAzN2JmdDO0wcHlAYCw8WHDMQYJpgSmAEGAFGwDkCPHc6xyzUSzBNGE+YcQh1Snc+PqYJAzNWfbehHSYODyiMhQcLjhkIME0wJTACjAAj4BwBnjudYxbqJZgmjCfMOIQ6pTsfH9OEgRkz6ja0w8ThAYWx8GDBMQMBpgmmBEaAEWAEnCPAc6dzzEK9BNOE8YQZh1CndOfjY5owMOPj2Wxoh4nDAwpj4cGCYwYCTBNMCYwAI8AIOEeA507nmIV6CaYJ4wkzDqFO6c7HxzRhYMY26ja0w8ThAYWx8GDBMQMBpgmmBEaAEWAEnCPAc6dzzEK9BNOE8YQZh1CndOfjY5owMGPVdxvaYeLwgMJYeLDgmIEA0wRTAiPACDACzhHgudM5ZqFegmnCeMKMQ6hTuvPxMU0YmDGjbkM7TBweUBgLDxYcMxBgmmBKYAQYAUbAOQI8dzrHLNRLME0YT5hxCHVKdz4+pgkDs1hn1B/cuEkXF66i66s30L2zp+jBzauyZ/GfSE0JM2ehFFUrUtq6L1H8pI87f8ouSzBxeIALFBa3bt2m33//nU6ePElXrhjPOE2aNJQly5OUI0cOShqDz9czOo65QSBQNOGmbS7DCDACjMCjigDPnY/qk4u+fjNNGNgyDtFHY49qzUwTxpOLNRv1+5eu0Kkvv6Y/Vy6isH/u+6WjOPETUspqdejJzq9TwjSp/eYNxE0mDg+KUcXi3r17tGHD93TixAlPpTaxHDmeoXLlXqD48ePb3OWkYEIgqjQRTGPhvjACjAAjEFMI8NwZU0g/Ou0wTRjPinF4dGg2pnrKNGEgHSsS9avrttDxnj0p7MFdR887boIklG3gAEpVsYyjck4zM3F4EIsKFg8ePKAFCxbS1avXPBX6iaVLl45q165J8eLF85OLb8U2AlGhidjuO7fPCDACjEBsIcBzZ2whH7ztMk0Yz4ZxCF4aja2eMU0YyMc4o3527Ld0fuxXD597mPiN45gGMnboQpnbNXdcLrIFmDg8SEUFi/XrN9CRI0c9lUUiVrBgASpZskQkcnKW2EIgKjQRW33mdhkBRoARiG0EeO6M7ScQfO0zTRjPhHEIPtqM7R4xTRhPIEZV3y/MWUqnP+v38NmDSTdCkqx5KWXdGpQk19P0eJ4cMvH2r0fpzm/H6OqCJfT3md9UVvM3S+9+lP7lmuZ1ICOBJI6wf/6hv4VEOXGqlBTnEZQUu8UCdujz5s33eiyPPZZEqrdDco5w4cJF2rjxe/r777/NfJCmN23amB5/POZ8EpiNcyRSCLiliUhV7iBT2L//0sFvZxImsTxNG1G8RAkdlI6+
rEfFnHVp3356tsMb9Fi6tNHXENfMCDACjxQCwTJ3Rha02JpjY6vdyOISyHxuaOLinn2E/6nz5qGMxQoHsjsBqcvN83ODQ0A6a1PJ3T+vU/wkSYJmTWHTxf9EUqBo4sGdv+nBnTuSD3sUgYsbNy7FOXHilOSaM2ZMH21juH3oKP3atCmJVbVoQzHphjQ945tdKXPbV+zbFovxUyMm0cVvvib69x9PHtHxvLNn02M5s3vSAhSLKnE8+PsuHZm3gH6dPIWunTxs9ipN7mcp7fPPU6G32tsu4E9t3Ez7ho8y8/uK5H29NT1d/SX6+9qftLZNe1/ZbNOrTp8iJqDEtvfsEt1isXXrNjpw4BezSjDpDRo0IPzqAU7mwNDfvesxgyhevBgVKvSsno3jQYSAW5oI9BD+Ef4PpuTNLatttn0nJUkb/f4rIhrD7T8u0KwyJWW2vC1ep5If9oqoCN9nBBiB/wgCwTJ3Rhbu2JpjY6vdyOISyHxuaGLnoCG07+tRlLtRCyr92UeB7E5A6nLz/NzggM6ua/cW3bl0mXI1a0K5Gtb12f8Tq9bSgXETKG2RwlS817vh8p3dup0OTppKl37eRXduXJb3U2XPQ+lLFKfC73ShRMmThSvjNMHpGh/13xYCrQ0dOtk29XjmTJS28POUuVRxSpkrZ7g8P48YQ2eFjyhriJswIaV5tiCle74QPVm2NMW3rMut+WPr2i1NqP7+vmQF7R70Od3446RMip8wMeVp8Ro993YHSuDAgTVw3P3VF7KO9AWLqeoJOKb8Xy5KXSC/5MmiE8cYYdQPtelKt3ZtFgP0ZtLViCNSZ7+xez8dbvPaQ0bfKJWsWHnKNd4AT9UTiN+oEAcmjFUtWtOVox4m1dqnRI8npwpjx1Gmkp4HjjyHZs6jrX16WLOHuy7asw8VfKM13Tr/B80uWyrcfX8JLX7eRwmfSOovi9c9t1jMmjWHrl+/btZVoUJ5ypnT0JYwEx9Gfv31EG3aBNowQoYMGahOnVrqkn+DDAG3NLFz4GA6uXIl5W7VivK3bhHlUblZDES50Qgq+Pf+fZpTuhzdvvoHlfviK8pRj+k4Asj4NiPwn0HA7dzpBKBAzrOxNcfGVruRxXlZw2Z05/JFKjt0MKUXzE5UghuacMuoB7Lf/sbs5vm5wQF9mPbsc3T3trHWbLh2AyXP9pRt1w4IJnyH0OjNVLQsVZv5jVeefV9Ppp2DPvFK0y8eT5WBKk+dTKlz/09Pdhx3usZHA9dPnKR5lcpH2FbZQcMoV4M6Xvk2vN2Njq1Y6JVmvUiTqwBVnTGVEqVIbr0V69duaQId3zvma/ppyADbMaTN+zxVnz0t0kLLyOCITZ0q0ybT4+kNjWHbhqOQGO2MOjy8763ykodHl8x6nHBdztixG2V+o1m4dJVw8vPRdGnGJHUpzdsLrltLCVOl8KQFIOaWOMLgPO2lmqYUPXfjlpS1SiWhmlSErotjyc5u2kqHZ8yg62eOyV7WFiqyaQvkM3usXuJkGZ6issOGmOnWSLKnskiJPNq7IBhvPfx18RJt6NxBJlWe8A0l1FXIBeT4qMQR2giRDW6xmDhxMsGZnAotWrwipOmPqUuv39u3b9O0aTPMtMSJE1OrVlFn5MwKORJQBNzSxPqOXej4qsVik6kjFe3ZPcp9crMYiHKjkagAzPq9G7coceqUkcjNWRgBRuC/goDbudMJPoGcZ2Nrjo2tdiOLs2IOq0z+lrIIiWRUghuacMuoB7Lf/sbs5vm5wQF9UGNCHExn7cXzKY7N6UG+GPVLe/fT4ocM7lPlq1GR97pLZh/q0pCyb+nxrtwIACNWb80yNOM6OF3joyGdUa8+bbbJXKJ/p9ZtoOOLFkrBAPJWHj+ZslYoh6gMisF8ulo9yv/6qw9Tia4d+Z2OLVpMZ7dvlGmpc+SjuiuXmPeDJeKWJi7/8istqlNDDuPpanXpua5v02Np09KR7xbRD5/0kekF2rxJxd5/J1JDVThmeL4U
Fe9raEnC5PLqwUN0eu16Orlhhawna7mXqPLEMZGq00mmf4VmebQz6mfHTaPzY740+5UoYw5KJSRN50cPM9NUxC+zLjq7r2ojunfxhMpOGd/qTplfFyr1AQxuiWPPyLG068vPZU+KvtubCrZrE65XUI2dJ6TLD+79Tdkq1aCKY0eYedRLjMmmzvJFZrqTyK3z54WU3fhwNN+9lxIle8JJ8XB53WIxbpwwVdBCu3ZvaFfho07zh6+BU2IKAbc04XQBiY0ouw+uGqebxYAqK3/FRAtbOqe+IyLql1cbgbpw2VdsGsRNkMBRL5yMz0ledMJpfkcd58yMQJAj4Hbu1IcV0TvkdJ7V67bGncyxEfXLWre//L7a9VfGWj+uneaPbBnFHAYTo45vGYI/QYzTfsPHkqwvTnjBmj9sfT0/2UEff9y+G2pMqtpCwpS2cLe31aX564tR/+mLYbR33Ajyxaxe+fU3Wlirmqyn/vJVtirmZiMRRNys8XVGvdWBXyl+4kRereAZLa5Vny4f3k856jSickMGmvcVg1nw9Tep6HvhmVKo4q95vZXM33jzVkqaMaNZNhgibmliXfu36cTaZQRNiJc3b/TyNbC9X386OG2iHF5keaSIcDwgNux29P+QoFrfct8+v+tWN7jGiNf3X+q2ojsnPKrgmQUTm/GVenT26xl0ftTQcP32x6yfmzKHzj1khlEwydMFKN93k8PVEZUEV8QhFtIzCheXti1WBtzal2PiZd/QyZB66y+Hm5fYWvd/lVHfM2ocHZ07l54RdvCwP9HDildepdtnz9DzYmcUtv0qwJfA4hp1hOuDB1Rj3hz6bfbcKNdx7fBh2tbrA0qe/RmqPHmcakr+7ho6go4vXkRZKleh4r29TRywuLr6ywEq1u9DyvpiWa9ywXbh9P04NGs+HRg7hm5fPCc3qDCZPZ4ukxxW3eVLvOyjLv68l35ftJTOb94sNVNSPpWLMpYtS8/UqUnpnvP2W+BvMXB85RraNXCQbKPUgM+8zExgq3Zy1Ro6vW41/XP/LmUsXoYyv1iO8jRrFI6pXd6kJf31xzl6cfRIOvv9ZjqxZKn8IGIMGYuK3dWP+oRTtVvdui3dOH6MXhBaMarPkICcXGHsuvp6nvnbd6DcTRp43Y5sX2HHtqJxM4obLz5VmzWNtvf7lM5t+V5KAuotW0mphB2VvwC8Tq1ZJ+zZ1ss5DIuWtIWLCNu8zpQ4pbfGEt6bXUOH07kNG+nqcbFwEFikzl2Q8rzakp6pXT1cM+d/2El7R42hy/t+lv2BxlC64sWFnWBP1joIhxYnhDICTudOhUVk3qGI5llIgBbVqC2rfHHkcEqTP6+q3vzd8HZ3urJ/L+V7/XXK07wJ+ZtjUQjSyKMLl8h5A5qC6fIXoczlX6TcTV+29cOD9cnOzwbTpV076ebFM5QkWRpKU6Cg+DZ3pzT58pj90NttvHkb/SLUls+uXy+/CZg/MpQqTcU+6GkriIBJ4OG5C0T+DXTxwE+U/MmnRZ8qUM4GdW3H7GQcW4TZ4R87tptakeh/wmTJhJ1qbqo4bqTZfycRNzShS9Sfrlub9ov59fzObbLZtAWLCPOy5vR0tSpmNyLT75Nr1tPO/p9R+mIlKE/LZrR7yJd0fscWWQe+kaX696OkmTLR4fmL6MDoMfJZwIwz3fNFqfiHvb2+g/rzi6z/GDc4oHOKUc9U/AU6t2OT7G9t0ce0zxaQcfXHF6OO9SHK+bP3h4PYe7du0ZPlylCyrFlUlY5/3azxI2LU0YlDM+bS1r49JWPa5EeDDpAeEYOJPAo/O9V53I/NEFWasBsTfHvNLl1KrkWhoZCxRNEIhxgRjn/+fozmv1RJ1vPKzt3h1kwRNhBBhhhh1HcVKe11ZnqO8V9TimLPya45Zdavbd1Jv3f0MGJxEz5Gz/9ovJwRjDXSt90Qh64+U0PYmmco+rzf9vAxQXg8bRpz98XNS2xtJLYZ9ZNCxf+HH36kP//809o1R9cp
U6akEsKJR9ZIToq/L11JG7u8SVb1JN2OP3uVWlRh9FdmP85t/5FWtGhiTm6BqAM+CmY89D1g3a2bU+oFuTjBx635nt1i69vYpQbj801+Y5HSSDCDT2TObPYxGCNO3w9/9l+6z4Q/ftpNy5o09DnkGrPEe1XE8175WgyAuV3Xsa2sp2TfTymvWHSocGDKNNrxaV916fWbo/bLVG6owdyrGzOeLyYZV30RoO7hF0xqo01bKEmaVGby3DLlpfOSl0RbT5YpJdM3i83JwwtmmnnsIlYtHCd9vXn6DM0p/4KsFotlLFBVqLt0hV/buv0Tp9KPA/qp7F6/2JGuOnMapXjmaZl+7+YtWla/sWTQkfBEuiclTatCVrOG32bPpy2935W3gVWipClMNT28B3VXLg+6XXw1Fv5lBAKNgNO5E+1H9h2KzDwLG+U/9vxA+V5tRyUEo6uHv69co+nFC8ukOouXUxrhqNPXHItM+ppHrwdxbLDWWjTfy2HT5QMHaVmjhnKBjDzWuUNX29Xb9TX3wjlvjTkzvDZ6sQhfLDYjsAlgDZh/ai1YGG7T0sk4sAl7evNaa9WEvtRZuiBcemQS3NCEYtRha3vt6EETU7298iPGmsx6ZPqt6AwbG/du3DAdqqk68byeafgy7bHRhMWGRb1Vy4VD1zQyu/78YopRx/d27/DR9MfubZK26q9d5UUbvhj1nZ8PpX3jR8pNo1pLFkTr+svNGj8yjPrheQtp83vdSK4t9/6sHlmkGHW1xin10QDK80pjs2wwRNy8G/dv/0VTn80vu99k6w+2NuNL6zehC/t+pNIfD6LczV6OcKgRMeoKf2wivrxlQ4T1Oc0QI17ffxKLXXqoloMOFhKSnviaxz0nzPr9y1dpbyXPTiHFjU9Fdv/gdNx+87shjqMLl9L37xieGVvu2e/1gfLbmHZTvcSQZtVcMFe74x3157U9thn16dNnEDy5ByIkEzvVOKotMgEf6OkPN0d05g9Shq0feBiFlgcOmKphkHDjo/O/l5tTmQEfSy/6Ua0DfV1QuYZkZPSFx41Tp2muZjtUV3ijTJ3nf3JokJYsb96Yousljwx+TvK4eT9Qvz+VzOsnT9HCalXlgiNL2UrCTuwdSiW8mF49fIR+Ek7osDjCQqveylXmjrbdYgBSgbUdXpfDKf3pF14S6tPfb6HVbVrKe4W79BATdGOhRpaYTgoHNBu7vSXTi/b4gAq2fU3G8Ud9xBBHmTwtmkl1+WNLltPOgZ/K/lptnewYdZS3C2pxDYa43pqVpmdZp33VGXW0U6rfZ5SlYjlKnDyFYdNmo7qIfPqmRv7W7Snfay0pSapUdE5stm1/v5dc8GYu+SJV/XYSsktpyuaeXeWipvq82ZTi6ewE9XpsKihnPPqO8qxipSRjjrphDwY1SsxRyxs0lnUj3apdIhviP4xACCLgZu50+g75m2fVghLzTZMdW83NYkCtpHJgsuuvWynRt5tjcQMb4AurVpMaMtAgLPL+u5Tsycx0UUjYN7R/U77z+ryBMt93f4+OLppDsPOsNGGMlIZj42/zO+/RiXXLvVSP9XZRtuzAocZJN8JBLU7UUR6YdWYUqtjLGjWXC3Awm+W+GiYl6NfFt3f7B/2k1BQMZX2hSaW0hNyMA/1RUsjYVn1HX+CFuuywzymZ8AB+est2+nnwULp8aK/UJGi40XtTwV+/FaOOOmF+WWH8KMm0nha+ldZ3aGtuBsCOu/RnH0ttKNxTqtN4Rsrruv78YopRrzZ1BiXLno3mV6wg+2qVkPti1PWNGqwxcr3cjLLVqEbpChWgeMKrdyCDmzV+ZBh1tRGToVAJoR06w+xyRAwmNs8W1a0p80dWumxWHgMRN/OlPqbXDh811/t6d7/v1pOOLp5L+Vq+QSX6vq/fso0rHHPVa0pFH54YEPbgH/rr8mU6I7zq7xn5paS5QMwHdh2IEUbdKlEvuHolJUxn7LypTkWWWb938TLtq1JVFRPS6ERU+Cfx
wQlgcEMcasGNXcdG27wl/PiAKAm6tZtJUqc2d/3US2zNY732txEQ24y61dbc2nen1xHZtuv1LapZT36g4ERPqY/j6A4sAiBph5puLbHzmK5QQVlMSRcqjhpP2V4y1FYCUYfa7daZkIPiI7L94w/MfhTp/r48Zxsd2TVsJO0RJiB5X3mNSn70gT6koIy7eT8wEH8LSOWhE5sV9VYvN98JlHvw1x1aUKW6lFLruFkXA5f2H6A1bVujCJUbPJxyPPwAyQTxZ6U4lgPOU+xw/nXaLNrWr1e4xY1i1OGMpfyIIaoq+QvvyvsmjA7nRTayjLrS6EBl+sYNrp32VWfUywwYIjaf6qGaCINS+7Mz14EZwpKH9TT74SepNbBZbGQc/m6GkNSEx+O00Cz4587fQpWsmNxwgDr+rNIlZB8arttIyZ/Kavbn1rlzdHn/QSmBiarXZLNSjjACQY6A07nTzTvkb57VpU1W9WCY+Zz/aYtw9GmcKgMorXOsOgJz3/hJtPPzTyVDV3PhXC+G5prYXP3uoYnZKz/uMs8uVhpl1g1U9Oncth+krxDlCEtv9znhh+j5Lh29nuxa4ZAUDpx021vFcIHZqr9mjZdkFG0sqFxVbg7qzL2bcaAj/hher45G4sIpTaBKtcbAWJvu+NHrJB8d/1b7fvH6lvrrt86oW5lrqM7/Nn+6rQ2uorc8TV+lUp/0lSPWn5+1Ll+QuMEBdakxgVHPVKoE/S60QdTGu74W9MWoow5s1q9tH96nFDZBcjRsQNlrVrU1s0BZJ8HNGl9n1PEdhXAB4d/79+jM91vp2IJFUksGaeWHj/Ey71QMJjYtCnX2vEM3Tp6m40uXC4dy38nNNmxsNdywxmvjDvXFdnBDE+r52/FiajxKSGfdTFT3rb8KR2u6usZ7WEH45YqqY0lVn/UXZkvR7kxuT7ka9OD6BbPtp4cMo1QVw9vhRoZZv7xmE514t5tZV/wUGanQxsB6K3RDHGrCt3NIoU+cZscfRnSGQn+J8eB9hVd+2uU1+er5/suMunr58r/WQZ6TCScbUwsUkOq2RXr1khoP6qOvf0j0jY9A1KEk5LpTQLXjWV3YEi0Xqjb6ziekAFDXqiJUkLMIG6hgD27eD4xJfdCt6tG4pzZUnnv7HXq+85tI8gq7vxpNP48YTNkqVjdtAfVniMWf0pyoMHIcZa9a2as8LtQHXd+sUZl0kwV9caMYdTtbJ5zRubFrR6nmqSRQqC8yjLouybE7xs1pX3VGXZdoq/H5+v0mb165ExzZ3XTlNAX14V3K1bi+b9V18XH5Jl8+WT/eheffe5cylywezg+Ar75xOiMQagg4njtdvEP+5lngubHzu/T7svle6u9/XxVq78UMtfem23aY9uX6HKszXMpZk75xqj8rNQfqEqY1bTrQqe9XSfXcYn0+pKeqVPTJ/Ojt1hDmMxkKG6aSqo3dgiH5efgXBOluJXGeOILabIWqfLXpU5DkFba835d+mzvN69QRN+NApWp+1sfn1ZiDC8c0IepWjHrGImWo+qypXq3BqdykXDlkmtU/ib9+K0Zd16hQFe//egr9OOhjssNWrZmeqdGAXnx41rT+/HS6UfXZ/brBAfWoMSlGHWnqHYAqeEPhdyVxqpTkj1FHmTtCWxf+Fo7OnmOadiEdAevxKpO+iZQ9s1HC/q+bNb7OqNvXaqTaSYcjYjBREgxtxYlfmxqe/tqI6XtuaOI34Z9iy/vCcz82HywaJar/yvE3tHtqzJmmkn3+6jhCmKTCX1cvyPWNui7ctScV6thOXQbsN0Zs1H9t1Ylu791mdjrdq+0paxdDPdVMfBiJiFk/OXgsXZo2wSyWtHBZyj1xmHkdiIgb4lAqt3ihWx086NWNm2fO0NrW3g8P0l0EfZGuXmKdwfOqKBIX/2VGXUkAFX5/7PqZljVuIJ2EFH6ni1yIKFuyP4TDh2VNG0q1sZrfzTKRDUQdUAX+9tln5Qvccu8BIW1IQJOFmrtSbf+u
YlXpiEW/hw60EhJGf2YNZidjOeLm/UCX1cfTjlFX6p26doM+TKWiLVU2HzpL0RcDel6d0VbpYIxnlzXsxVWar9+ac74zz8ZVjLqd3wmlnm79IKhFqm6jrreFfi+t+7J0Spe7cSsqLbyF6sFNX3VGvc3RY3p1PuOYl+a8+IK83/ynnyN1jipUVVc0bSE1V1TFeCZZKlWmnC/XN53nqXtqd1td4xcfx6fr1KKna1YzVf31+xxnBEIVATdzp9N3yN88C1whvV4h/Hbo6u9q7WE9Y1qfY3WGS83XET0nfeEKgcWylxtJCZ4qB023rNWrCXXjuvTEk0+qZJ+SfJVBbRhmLVvZdNq66Z336cjC2SqLz199jG7GgYoVcxjbjLpVvVsNWm3AWjW1/PVbMep2zP9+oaX448CPvDZGVFs/jxgjTRGCiVG/e/0GzS9fSdrZqyOzImLU1Xjwi/IXhEbZSeGIVfct00D4v1H+WvT8kY2r90ytUSNTTmfU8c7GjR9fFrt3+6b5LtmtT5BJMZjgS5Kk8GgxKx8OeBeqfPO1l0ZMZPoUU3nczJdqbY8++loLqU07X++PdXwKR12DR+WB2e0BsYmzd4zh/6rckBHC+75xNJzKE9XfePHiRb9E/eyEmXR+pEdtNP4TaangukUU14f9hy9mPUPbt8U56jPpn1uXzXFn6vQOZXqtiXkdiIgb4rh25Ch999DL5is7hLqXnzOUoQo/tWBBycjpi3k3L7F1vP9lRl1J0HH0HZjgfeMmCBv0L82zJZVaO5iSg9/OkB8X2B0Xequ9CWMg6kBla8TGzKnNa8Qu7FSKlyChdFqnHPio3fBKYydK5ggO1Ow+jmangizi5v3AEPwtINUCQldV04etjhHRnaXoi0g9r50a95/HjtN8IcFBgDqb3RE2cLIETYfi/T4wvQObjLrFkR3qccuoq48EHAHVnD8znITZTV/dMOr6AkDXKsHY/IV/7t6jE6vX0ZE5c+nCrh+8dpTlh094B1aOElEPxnNkznw6sXSZNF9QdeNZVps9y6+jO5WXfxmBUEDA7dzp5B3yN88CQ0hcZxYpIZkYpf6u1N5fHDrS6+QGfY7VGXU1X6M+zJnWoObSZ4TasH6SBST3R4QH7WMLFnht9qG8rl3oq13Vjh2jvv7NznR8taFdadenywf30BMZswqP8aVMFW2341DlmFEnoekWfIw66OSc0AxZ0bKpJBmYg92/eZN2fNYvnKmaoilfv0rgg/vFevalAm+86itrhOlu1vj6d1o/nu2vi5doZqniss1nO3SmIt07h2vfF4O5/aPPxBp4gtQUaLx1m+mzIVwFsZzgZr7UnWL60i5c0bQVndu5WWje9qP8wi9PRMEXjno5ZfcOH0tVJo/XbwUkHu2q738dOU4HX/b2rJe6/iuUvW9XnwPwxayLz4woY3jLRuF8380XR7R5VBF8VujghhviwOJ1Sr7cspVn23eiIkKC6ytc3LOPljSsK2/rHz83L7G1jf8yow4slCobNkB2fTZISi2VhFWpT8OO59DU6dIer45wAmg9piYQdSg1PEiPw/4No/0TR5NSzTov7Pag/p67cUt6PGMG2iWOG4zqB8BKB9F57eb9QH/8LSCVTbbV87kax75xE2nnF/0pS+kKYgfY0KjRF3Mvfjla7DTHo/VvGZorVpVMXR2w/orVlDKnoRqo6vf1G2hGXallgUmtt3qVrUdSN311w6iTplZr9ajvCw9rOja2sIN9QGyKYWMKwZ8aPdQLj3y3kPaNGiGlAboJiLVuvmYEQg0Bt3OnjkNE75C/eVbVozaLsXkMVU04UYXU7ZVdu720uvQ5Vl+rKN8W1lM1VP2R+YXvEdgG7xsxylQ1fvWXQ/LMY1/tqnrtGHX1jdDNo1R+X79ux8GMugfRYGXU0cMfPh5Av0z9WtI2nMQdnD7Ji1EHnZ0RTmYRsohjceMmSCDj1j+LazekSwd3k93JMNa8/q7drPF9MepoR50Bj3gT4Ujw
8QzpETWDLwZTP6LMzmePWUEsR9zOl0qjRBeEqqFAUDq9cFG5/oAgLcsLEZub+sJR1Ylf5YdK1/rU70clHiM26nIQTdrRX4d2efoqvBDnmjCRkhU2HHt5bnhiZ8dNo/NjvvQkyBgYdYQ49Hi+4pRnumGfZKQF5q9b4vhpyFdS/QEfPKszE9UzEAkcoWBRa32gbl5iVa/6/a8z6ooRylW/mXR6pau5KW+QUNGCjZ6UzmrHpCkMA1GHUiuG1PTB7Vt08/wparF3r/wQKI0KHFWV/JmccmfPCfOo+hlbv27fD7WA/F+DV6jMoE+8uq8c6gGvWgtmS8dCKgPwWiKO08CHslDHblS4q+Gh3W4xp45aQVmrtGNJ3Uby2DIrE4+8iuFEPEOR58wjEwPJqCv6QxvwVZDxoU0orq3BaV9dMeqiUeUfwU4FDA7fvhcaJzixo9LEsVJN/epvh4Va6n1KKY5ri/9YEk+3BdM/q3hp6e252HsfUoHXW8lTFG6eOStPwEiRPZsnr4gp79N2pkJeGfmCEQghBJzOnVhQO32H/M2zCkqltYM1SKEuXeUZzDnrNqYXBg9QWeSv3RyLG2qtI22Wp0320qDBfcx198XpLymFrTTsg2EOduXQYaFoE8c4L107hUJfs9RZtEze99Uu6kawY9SVBBXf9Zc3bwpn/4458tbZ8/RYxvSmY0un4zBa96i+WzUQ1H0nv05pAnWrjRa7eRv3FaPiS/Xdrt+hpPoODBBAR4uq1ZamhkYKhWPUpxUqJLXCdDNUlVf9KkeIUdmYQl1u1vj+GHW8Y7NLl5FMp90mgj8Gc8+occKRsXEcrdXZqxp3bP+6eTfQZ+UoWpm66uPQj72129zQ86q4Pxw9ebrTsRULbM1DVB63vzFio47O3Tlxmg42eFksiB+YfY0TLz6la/UGZXlLeGoWx/bYhbPjheTTPLfRw6THiZeA8oszDxNlymBXLEppbokDnkVnlyotXxosQCuMHW94AXz4UYLt6ZYevaTnaXRQP74L125eYpTTg/7Rs57jreeLbNwpFrHp9R1j0r3k4tpLtUUwE9MKPS+fD+7Zea5GeiDqQD1qckdct6XDtZLaI44jY5rt/hFRGWBDuOWdHpQqXwGqOHa4ybTe/uMCrWsLCf0/wsPkSNOrLTYW9g4bRk/VqGkedYWF0VqR99apU1Rq4GeU4eHRdaqNqPw6pQnV1tbeH9Gh2d/IDaq6K5eZ3oBxX2discNbtGd3yQhC6vKj8K7+6wyxGBRB14CwW8xBGg3p/Lkdm+Quev1Vq+mJLIbto7JRQz3QqoDDOajAPxCeyn/4+DPpaAiLvKY7d5g2W4Fi1LHgXvhSdS9GFv3wFZz2NSJG3RdNqTkH/cA5qrmFT4c4wgYOKqqrX31Dbo7gXPZaC+fIrirzEdiZV58xxdzQAAO/oEZVmUep057evJVWt24h06p+M50yly4p49h42dSjtzwexc45kczEfxiBEETA6dzp5h3yN8/qkC6qXkdqnKk0O00YuzkW+ZWHdcThvLVwt07CG3UiseMZJub470zHnnWXimNIc/+P7v55naaJDVCEQm92FfnflnH8UUeoYs3UfM8eOff6alcVsmPUsf6aW/ZFqdIPTZ2KX482VXrhe2b1q6/Kb3/xDz6m/K82l1U5HYdqf96Llej6mWMEM6sXvxoitQDUPae/TmkC9btl1P31OxQZdWB15dBvtFD4Q1FBF94gbVufj+nXmVOk4Kb0oM+9HNFi/bG1z0fyWEHkbbRhk7mewLXToL63bm3UddV31ba+VqgjPN6nyWto9uK+PwYT6545pV/wsuNXdeI9Xv9WV7r8824qOUAc9frQyTHely3iOMXEadJQ5UnjTO0bbHgdmz+fCrz5JuVpHjhTZDfvBsage/HH+56vRVO5jsY6ZXmjxnIeyF6lFlUYbdiVm+P2EfGFI9YyN8+dp18mfytNCVA8Os6jjzGJOgbwx6zFdGbgx4h6hSRZ81DKujUo
yf+eocfz5BQSnH/o9qHf6c5vx+iqsGf6+8whLb+h9p5FvFzpG1TX0gMXdUsc6MEfP+2m9W3bS+LHNT4+qXLlp5unTphpSLdTN1EvMe77C0piZZfnv86oAxPlrA3xBuJcbV2ap+xIcE+3icO1HgJRx/YPP5WqVqi39MeDxLndHvMPJVHEPZzNWFaodaugPMTjWndspjyvIl130qNvCCinYGe2bKNVDxcjT1erK44WG4piAQlu34+jwszg+3c6mX2Ac726q5ZRgscfk2nKi7rKACdtWAyp8OKwUfRMLc8H19diDo5gFlWrKY/igQfb2mJDT0l/N7/b23QOg3czWebsdOPscdPO2uoxPlCM+tZeH9KhOd/KoeBkCLuQu2VLLxpx0teIGHVfNIV+bO/Xnw5Omyi7BEweT5fJC3dd+n985RrTvAB50z1XjP4VZj+wSUXQmXp87BdUqWmqteJ5Js/+DF35Zb/csEB+Xz4JcI8DIxBqCDieO128QxHNswrTg9/OpO0f9ZaXUrvPcq46bviaY3HvqFibff+uxy4WjuFuXzxnboTnbd6GSvYz6kd+/XuIzem0zz5Ht8+foytHf8Fterbd21Tk3a4y7q9dZLBj1JEOX0GL69Q253N8Y7CxrZxnQcJWY94sY1MBBURwOg6Usc7NmUuXN73P476T4JgmROVuGXV//Q5VRh3PQh2fjLiVUYfm2MpmLc1vHjbrk2fPSf/8fcekTZSzc4KLdCfBzRrfn0QdbUMoM6d0OflNtXox98Vgqj6rTTJc6995XXCi46XTT6UxE+ipyuLMesHwf1Mgr6wS2DXf+7OqPsq/bt4N2aiYN9e2fUse4YhrzDePpUtvPk/0s+7K5b5PrLH0XOFoSQ53ibV22S8GeM0v4TK5TIh2G3W9X2dGT6U/xg/XkyIZhzTdYNIzduhCmdsZO6KRLOwom2vieNgKpGdb3+9DJ9YuC9cuPhwlB/anJ8uE90CtvzThCmoJ/uyZdY/RLX7e53W+plZFpKNOsYhtiToGtuOzL4QXxjHy2AnrmfbHlq2iDZ07yPH7c/oXiDqUAzQ01njzNjEpeLQ/7ly6QjNKFpX9sDKHauEAxqbequXmbj0WIcsavkz/3L8rFhvfmTunu4YOl07zdPttqEQtqFJNLlCs9ctGo/DHKU2YTYnJE8fqHPpmirlp1XzXHi/P30eF6uORWXOkOYAqhw9FziaNwnnSxAcK3vQRrM9SPxLxmZoN6UXhBwABO6C7h4+mM2vXeUmTsIAr2vt9ylSymMyn/iivwPqGibqnpF3W42yU1EKXIm9421CLUmXtfq2SJid9vXn2LM0pZxx5aefp1BdNoR/QQtg7ejydXLnKy8ETmO4yn/cX6qti81QLOH5we+8+XuqE+BDmbNRI+uaARF4FSCR29B8kFsNzzMUz7gHv0mIetPqHUOX4lxEIRQTczJ2O36FIzLPAFusU2KYj+HJG5W+ORTnMK4dnz5X+XnCNAKb/2c6dKU+zRt4q8aJf2BzY8+WX5vyP/FgTPde9G+WoVwuXMkTU7i9CQ+eHT/rYqple2L2HDoyfSOe3bzE3DbCpmLfla8JxbAdpiqPaUb+OxiEKQePop0FD6fcl8+W8hk2KemvCr/dU/f5+3dCEsk2Gn5vS/fuFq16pvluPZ/PXbyU80BkzVbGS2to5alUq1LrqdUTPT9Wr/7rBAeXVZnq1b2eF+36r+vGNW45jcMWGsp0Wl5Kcn1ix2Os7hfKg56J9+ngJCVS9Tn/drPFvnDpNcyuUk00pHw7WdrFu+r67oaWifCEhj1p3+NpkgMnfvHKVpJNXfZMda4+F1erITXYcfascQqo1LTCpK9amiVOmkF1Rjhzzt25vanVa++jm2i1NoC34Dfvho/6mgES1j7XHC8O+iLSfIpRTOKo69F/MX6nFKU+ZXyhL/xMaidER4grNzxhl1DEInIV+8v33xaL5rqMxxU2QhLINHCDOYC/jqJzTzFEhDr0tTA7X
j5+g68dOUMLkyQVhPGMStp4vmONOsQgGRj2Y8Yxs3zDJ4Fg33Xs2ymICRdCZIVxjV9N6tBvo71+RP56P0xVQzk1wShPWNtAvfBihdq4k3dY8+NDfEPbNyYXaunWs1rxur+/euEl3rlyhpBkyhMPObZ3RVS4QffVFU3qfcfzanatXhaZBpghxh9Tr5tlzlFjMbbBDjSjAS+39O3ciVXdEdfF9RuBRRCCqc6eTdyiieVZnAKJqoyrna2EDnjhlykitcTDP/HXpkjH36r4uAvlQxcbADdEnfP8eT58u3LfUrimn48D3+J74lkHt3+13Nqo0YTeOiNIC0e+I2nB6PzZwCNdHQTPXBWN8/ffjYk2QhFILNfJEyZN5ZQPtQgrvJCRM9kSkpbdO6o3uvA/+vhtOOozvPo6Is56cY7cGjWr/AkETmDOvHj4iNCTuUrKsT4YTPES1jzFVPsYZdQzsrnDq8Xv3ft4O5vyM+LHchemZIf0oUeaMfnIF5lYgiCMwPYn9WpxiMX36TLp161ZAOv7EE09Qs2aBs3cJSKe4EvF870gUkiZNwmgwAowAI8AIRBKBYJk7waiteeNNOr15ra1UOpLD4WwBQCBYaCIAQ4lSFY8KDkrzwMlgM5d8kap+O8lJEc4rEHhUaCK6H1aMOZPzNZBrW36kG+L/bXHu9f0/ztKDm1dl1vhPpKYEGTLR44UKULLSRSllGW+VVF/1BSKdicODolMsTp48STt27KRr1655KnERS506FRUrVoyyZs3iojQXiU4EnNJEdPaF62YEGAFG4FFBIBjmTpz0cPnAblPF1+oZ/FHBMlT6GQw0EQxYPio4QP17V/+BjiDLUKoklfzoA0dlODMz6joNxIpEXe9AsMUflQkjJnBjLGIC5UerDaaJR+t5cW8ZAUYgOBAIhrlzYo6nJRiwqy752ScBPREkOFB+tHoRDDQRDIgxDsHwFIKrD0wTxvOIUa/vwUUCvnvDxOHBhrHwYMExAwGmCaYERoARYAScIxAMcyd8VcSNH8889tP5KLhEIBEIBpoI5Hjc1sU4uEUudMsxTRjPNtZV34ORxJg4PE+FsfBgwTEDAaYJpgRGgBFgBJwjwHOnc8xCvQTThPGEGYdQp3Tn42OaMDBjRt2Gdpg4PKAwFh4sOGYgwDTBlMAIMAKMgHMEeO50jlmol2CaMJ4w4xDqlO58fEwTBmaxcjyb88cVsyWYODx4MxYeLDhmIMA0wZTACDACjIBzBHjudI5ZqJdgmjCeMOMQ6pTufHxMEwZmbKNuQztMHB5QGAsPFhwzEGCaYEpgBBgBRsA5Ajx3Oscs1EswTRhPmHEIdUp3Pj6mCQMzVn23oR0mDg8ojIUHC44ZCDBNMCUwAowAI+AcAZ47nWMW6iWYJownzDiEOqU7Hx/ThIEZM+o2tMPE4QGFsfBgwTEDAaYJpgRGgBFgBJwjwHOnc8xCvQTThPGEGYdQp3Tn42OaMDBjRt2Gdpg4PKAwFh4sOGYgwDTBlMAIMAKMgHMEeO50jlmol2CaMJ4w4xDqlO58fEwTBmZso25DO0wcHlAYCw8WHDMQYJpgSmAEGAFGwDkCPHc6xyzUSzBNGE+YcQh1Snc+PqYJAzOWqNvQDhOHBxTGwoMFxwwEmCaYEhgBRoARcI4Az53OMQv1EkwTxhNmHEKd0p2Pj2nCwIwZdRvaYeLwgMJYeLDgmIEA0wRTAiPACDACzhHgudM5ZqFegmnCeMKMQ6hTuvPxMU0YmHmpvj/1VBbnSIZgiatXr8tRpUqVPARH52xIjIUzvP4LuZkm/gtPmcfICDACgUaA585AI/ro18c0YTxDxuHRp+VAj4BpwoNonBMnToXhkhl1AxQmDg9xMBYeLDjG7wfTACPACDACbhHg76lb5EK3HNOE8WwZh9ClcbcjY5owkGOJug0FMXF4QGEsPFhwzECAaYIpgRFgBBgB5wjw3Okcs1AvwTRhPGHGIdQp3fn4mCYMzJhRt6EdJg4PKIyF
BwuOGQgwTTAlMAKMACPgHAGeO51jFuolmCaMJ8w4hDqlOx8f04SB2b///kus+m6hHyYODyCMhQcLjhkIME0wJTACjAAj4BwBnjudYxbqJZgmjCfMOIQ6pTsfH9OEBzNm1D1YyBgThwcQxsKDBccMBJgmmBIYAUaAEXCOAM+dzjEL9RJME8YTZhxCndKdj49pwoMZM+oeLGSMicMDCGPhwYJjBgJME0wJjAAjwAg4R4DnTueYhXoJpgnjCTMOoU7pzsfHNOHBjBl1DxYyxsThAYSx8GDBMQMBpgmmBEaAEWAEnCPAc6dzzEK9BNOE8YQZh1CndOfjY5rwYMaMugcLGWPi8ADCWHiw4JiBANMEUwIjwAgwAs4R4LnTOWahXoJpwnjCjEOoU7rz8TFNGJixMzkb2mHi8IDCWHiw4JiBANMEUwIjwAgwAs4R4LnTOWahXoJpwnjCjEOoU7rz8TFNeDBjiboHCxlj4vAAwlh4sOCYgQDTBFMCI8AIMALOEeC50zlmoV6CacJ4woxDqFO68/ExTXgwY0bdg4WMMXF4AGEsPFhwzECAaYIpgRFgBBgB5wjw3Okcs1AvwTRhPGHGIdQp3fn4mCYMzMLCwqJ2jvr4HV/R+kNL6MLls/TPP/84fxLRWCJevHiUPk1mqpC7FrUt3jnSLTFxeKAKFBY3b96iI0eO0IkTJ+jy5SuygbRp09JTT2WlnDlz0hNPJPU0yrGgRiBQNBHUg+TOMQKMACMQYAR47gwwoCFQHdOE8RAZhxAg5gAPgWnCANS1jfrJa8ep5+I36NyFUwF+NNFTXab0WWlQ7a/pqZTZI2yAicMDUVSxuHfvHq1Zs46OHz/uqdQmljNnDqpYsQLFjx/f5i4nBRMCUaWJYBoL94URYAQYgZhCgOfOmEL60WmHacJ4VozDo0OzMdVTpgkP0q5U35t8U+mRYdLVUMGsz2q1Vl36/GXi8EATFSwePHhAc+fOoytXrnoq9BNLnz491a9fl6AJwSF4EYgKTQTvqLhnjAAjwAhELwI8d0Yvvo9i7UwTxlNjHB5F6o3ePjNNGPi6Un2Huvu070dF7xOKptqbl+sYoRo8E4cH/KhgsWbNWvrtt8OeyiIRK1ToWSpTpnQkcnKW2EIgKjQRW33mdhkBRoARiG0EeO6M7ScQfO0zTRjPhHEIPtqM7R4xTRhPwBWj7kuaDklo+YI1qWCmwvRk8mx0694N+u3iL7R072y6ft2/VDVZshRU9n9V6fkni9OxK4dpw2/LIpTYJ0yYkIrmKkels1egv+7fpvWHl9Gvx/dSWNi/PukrMlL1QBIHbAuuX79OyZMnp7hx4/rsV7DecIsF7NBnzZrtNazHHktCFSqUpwwZMsj08+f/oHXr1tPff/9t5gMNtWjxCiVNyjbrJihBFnFLE4EeBt6tpUuXi/c9jKpXr0oJEiQIdBNcHyPACDACAUPAzdy5fv0GseF9hBo3fplSpUoZsL5wRcGBgBuaCI6eB7YXgcJBrScTJ04c2A46qA1rEqz7sY51Y84JAdehQ7/RM888Tfnz53PQcmhlDRRNABXJ7MaJ88gC5Fj1vfyQPOEcx7Wt9C41e64NxY0TnhkNozCav386DV/xsS1I9Uq0oK4v9Al3b+vJjfT+7Lbh0pGQ/cn/0cQmCyh+XG+b5st/XaKmEyrSXY350ysAI7ih+696Urh4VIkDdtmrV6+lefO+85IoFypUULx0+alp08a2H9ydO3+iadNmhOuPNaFhwwZUtmxpunnzJn3wwYfW236vP/98ACVKlMhvHv2mWyw2bdpM+/btN6sCk964cWN6/PHHzDRE4GQODP3du3fN9JIlS1Dhws+b1xwJLgTc0kSgR3H//n164YWKstoVKxZTihQpAt0E18cIMAKMQMAQcDp3YsO7Vq16sv2WLZtThw7266GAdZArinEEnNJEjHcwhhqMCg537vxNU6Z8Qzt37qJffz0kewwGt1y5stSsWZMoCclOnjxFQ4d+
aaIwcOBnlCSJ/SbA1avXaMKESbRgwSIzP9b9b77ZngoUyG+mRRSZNGkKff31JGrSpBF17vxWRNmleelrr71BFy9eIrQHIaYKqVOnpv/9Lxc9//xzwnFzDpX8SPxGhSYwwFWr1tDWrdto7959EpuCBQtQ3rx5JA+WLl3aRwIDdNKVRP2Fz3OFG+DXrRfQ/9IaOz/X7lylczdOU5YU2ShZouRm3uFbBtK8bZPMa0SqPF+fPqg0UKbd/+ce/XJhH2VL9QylSGzsHG88tpr6zvMm1HRpMtKc1hvkpgA2AQ5fOkhJEjxOWUV7CGi/zogSMm73Z1MP/+rYUSGOa9euUY8evejAgV/smpZpKVOmpIEDPyUQjR5WrFhFH3/cX0+yjXfv3pUaNqwnvKdfFh/x+rZ5fCWuW7eSHnvMm1n2lRfpbrH49tvpckdR1V25ciU5Wahr/feXXw7Shg0bzaSMGTNSgwb1zGuOBBcCbmli4sTJtHHjJvls69atHeVBMaMeZQi5AkaAEYhBBJzOnfDz0qRJczp79hz17/+R1EiLwe5yUzGAgFOaiIEuxUoTbnGAwKpbtx7mmhvr27/++sscQ5UqlYRA631XGnc4yap9+7fMulHp6tXLxClFT5j1qwjW/m+80UG+qypN/x08eBCVLl1ST/IZd8qoY35o2LCJz/rUjfff70G1a9dUl0H/65YmsDYcNmy414aJPljwYF98MYDy5curJwd13LFE3Y5RH9j4a8Eyh9Hnq96na38ax29h1Nky56KpryyVAIARrzjEe1dp4dvbKVWS1IR7tceWoNu3b1EcoZ4wq/06yvjEk7LOKsOf9ZKQ92swnCo8U1XW2WlBS9pz5AcZf7fWZ1QrT0MZ77G0Pf1wcL2MW/9EF6OOl7pNm3amFB2S81KlSsidtHPnztOuXbtp0aIldPTo77JLU6dO8trhUox69uzZqHfv96zdNq/ByEIFDu1BPUYPcNzWs2cvmTR8+FAvphy45s79P0e7i25flLFjxxMWGSq0bt1KSNMfV5dev7du3RK7oVPNNKgsvf76a+Y1R4ILAbc08emnA2jZshXiY/Yavfbaq1EeFDPqUYaQK2AEGIEYRMDN3Inv6O3bfwnzuWQx2FNuKqYQcEMTMdW3mGzHLQ4fffQprVy5miAh/eKLgXJNjXcGZnGffz5EDqFv395UrdpLjocze/Zc+vLLEXIdrZh/X4w6tFvXrdtAYALfeaeL0HotQ7//fozGj59I27cbPMqqVUspWbKI3+OoMOpffz2GlNr/7du3Jd+xdu16cfLSCTn+GTOmUvbs2WQ82P+4pYnFi5fSgAGfy+H16tWTXnzxBfkMgQHoBTwYtAymTJkQ7BDI/gVMou5vtFPfWEnZUj4ts1Qb9bxkxnGROcNTNLPlGpk+4ccRNHXjCBmPL2xNV7y9ixLFTxzuHpjNtd32U4J4Cem3S7/QG5M9klddqm+9Jyt6+Ce6GPWZM2fT8OGGk71u3TrTyy830GOYmd8AAEAASURBVJuVcaiyNW78itzxw8SBCUQFxahD0j5unDtnfZcuXRI7Zka7a9eu8MkcqzYj+nX7oowcOdqr6rfeetPr2nrhNL+1PF/HHAJuacIpo46NKH8nAESVUcfkh/9OfUdE1K/IPolA1WPXntux2dXFaYwAIxAYBNzOnZFpXZ61K9ZHWCNZg9O5xml+a3t8HXkEAkET0fm8QFdOv5GRH70npxsc0LfKlavL9fTEieOkWrOnRiK15oBU/aOP+uq3IozrUmpIX999931Zxo5RP3XqlFjXN5f3Ib2vUaOaWT/U4WvUqCOv27Z9nVq3bmne8xWJCqP+/fdrvVTf0QZMA5o0eUWqfyuNXF9tB1O6G5pA/9u16yhNb3v2fIes2ptg1ps1M57BkiXfUZo0aYJpyLZ9kXP7iROnwnD3qaey2GayJtpJ1K159Gtd0l1rXHGhEn1N3m5f5T1qVsiQnNabUIauXL0o0z+s/xVVzOEh9ONX
j1KrCdXlPdimf9NsiYyP2vYFzd7ytYyXKVCFPqs2Usbx58G/D6jC4LzmtR6JDkYdC+O6dRvKF8HKgOttI75581ahHm+89IsXz6e0aQ1bCWbUPUhFxNh7chqxWbPmSGlttWpVxUvY2Os2NAyg0YBJsqyw7VcBvgTatn1TaiaMGPGl2JVdFeU6Tpw4SUOGDBPvUlbxkfhINSV/p06dLs6UXyvsqssIqXIbr3v4oMCBSKdOHalo0SJe94Ltwunkid3u6dNnymeAXWmopmXKlFEOS9/9RQI0RGAGsWPHTokHdj2LFy9K5cu/KLVBdCz8MeqwSxo71pgb3n23m5eZydat26XdEtTw4RuhRInisg04pLM6fnnnnZ504cJF+ZGH/RscO8H3AsaAfnXs2IEyZ86kd8tvHDT3zTfT5A47bOlQT/78ealevbpy11cVhgnMkSNH6ZVXmlLVqlVUsvk7b94CoZ2zmJ57rpBQ++tspmPnfsOG72nLlm0EVTxs+r3+emuh9pdQ0iXwxEKCAyPACMQ8Ak7nTvSwd+8PCYwAtOygEYeA93z06HHy/Ycq6+TJU+mHH3bIe5jPYNcKCeNaIUmD3xt8WyDpw3zx5pvtbOeso0LKBN86W7ZsldK3rFmzSAd2lStXFGU6ybonTRrvSoVYFuY/tgi4oQlUBKEM7Jhhf3vq1Gn5vKHK26ZNa+mEDHnAoLVt2wFRYVb5oa0kVX1rYMutpM5g/CGR3r59B33//Sbze9ehQzv57YRmXL16deRxurLyAPxxgwPei27dekjG9NtvJ4fb2F+4cDENGjSYQMuzZ0+PdC+xnu/SpTv9+ONP0lSvY8f2wuzEkMjbMepq7Y52ILG2Chjwfo4fP0GsGYoJCf3gCPvhi1GHo7xPPhkg54McOZ6hDz/8QNalbyrYMerIhOeMfsJZM+aARyG4oQkwtfAT8M8//1Lz5k3DmSngnahQwVhTTZ/+DT39dPZHAQoKiOq7v5Eqifq9f+5SpSEeu2ylwv5v2L/04he5ZRW6qvxf98WiPsFjBAdx9UcaDJbOkLeaUYuOn/mNIIFf2Wk3JYyXiG7cvW7axZf74n9SYmbtW3Qw6ocPH6FWrQzma8KEsRHaPsC+HAEfT/VSq5f9UZaoHz9+XEzk2+nPP/+0wu7oGriULl2KsmV7KlLl4LyuZ8/elCdPbsJiQgXdjh+MWJ8+hlkA7oPhws4bGK1582ZRIOoAc1S9urF7atVogCYFPqgY27JlC03JB5i3cuUqyS4vXDiP0qdPp7oflL9OJ08wldi8sAvr1nl8Jhw8+Ks0HbHLhzTrjrkvRh30BwYboZdQe6pVq4aM4w9MTwYO/MK81iNYePTo0V1Pojp1GsjNN2yubNq0xeseLsBoz58/WziySx7unjUBmxRvvdXFdHaDjzroQQXdJACL61GjxkrHMGPGeDYgVV5FS71796SaNY3xYVHep08/lcXrF8w+NkyiMrd4VcgXjAAj4BgBp3MnGoD0B1KgUaO+kg6hkLZq1Rrq1+8TwmL9xo0bco5CugqYW2rUqE5jxoxTSeYvGHiofOI7pMJJ4TDrtdfaetn2qntq7sD1hg2rTbVadZ9/o4aAG5rApgrWLvimIFi/JV9+OURuJONely7viI3vH21NznRnhbNnTxP1ZEURoUI+lL77bqGM63/wvXvuuWflGu81Yb6Gb1agghscImp7zJjxNHXqNOnboX9/b8GJv7Lq/cI7MmfODLFGj+uXUQcTDmYcWrT6xrlqY//+A1IohPqWL1+kkn3+2jHqYNIhdMLmATbc8YzVuiMiRh1rJfAnmEfgH6tcuRd8th1MN6KDJiBsweYnaHnNmuUxoi0SCEyjlVEvmvsFGlLbsANY//tK6jff2JlFx0e1nE0FMjxHdx/8TZWHFpRjUTbrq48spczJslC+9MI+XbvfqHQbequ0sQivPa4E/SmOfVMM/8XbF2j+vmnUoaSx2K4l7tsdCxcdjPr69RvFw+8rx7B+
/SrhFTKJjDv5oxh1eKscOdLjZdJahz+v7bGt+g7Pm7du3bZ22dU1bHng6TYy4ebNm1SlisGw6MwfmBPYpCBYX0xIuLGQwbE3Xbq8Lb3oR7UOtIMFD6Sl+scSR9HVr98It2XQd/LUJJ49eza5G2vkCN6/bidPpYamM6RqlNB4aNGitVx4VKhQXi4AoJWABSSkBphc8fymTZtCGTNmkMXsGHVImrDDjoDdZl0a/dNPu+jtt7vKe9BcgHpawoQJhSTqR3r/fWNnunv3LsIpi8dBo2LUUQhlatWqKTb//qWNQhr/5ZcjZH8hwYDUOqKgGGkslEeNGk5PPplZ+nFYuHCJuZGhdushxYeGDoJVPUtX3VL5D2sbhdiQgk8IaC0cO3ZcSt6UjRwz6hE9Jb7PCEQfAm7mTn+MOnqKd/rjj/sKiWo6aY+KDWvFwEHlt2vXTtIuFj5yOnXqJgcH6Sok5QjI27Lla9IJFhh/bG7mypVTOKu9IjYhFxAcw6rAjLpCInC/bmhCMdIlS5YQEtYPpYkjniPSsebBGhLaaghr1qwTJpYf2UqVlcRZz6/SUBaaGfiGwr8QHCRjc+jixUu4Jf3MBDOjrgtp8H7AoXFkwvXrN+RaDXgOGvSZ1IC8c+eOX0YdjB/WKG+/3TGcRifa1L/n+vrUV3+sjDo0//BeY8MFwqivvhriJSnWGfUlSxaIk50Mr++wUT99+gzNnDlHauFAoj9kyCBTOOir/WBJd/NuWPsODCBlB91iDYh1GwJOwCpb1qNhay0XbNfRxqgnSpyYlnXcKSXdkI7XGFGE/tEcjM1ot0act/6UlILX/KoovVq+M71WVOwSIu/wwvRZw3FU8qkXSJe4d6rWlxoWMJi38kPzUtYMz5iq8E2nVqZ84gx35UX+9dn16fDJA+Hwjg5GXUkNsbNpVbGBGhEYaLuQIkUKc4daMep2+fQ0fxsBsc2oW23N9X67iTtRge/Q4S3as2ef8BMw1FQfB5OODxcmNzDPkCRgNxJB7TTr3jgDUYeaZF97rZWp4r5kyTL67LNBZj+6dOkkNggMRgwLodFCjfFRUUlyO3n6Y9Rnz54nJtDhUjUPGhGJxdyhAnaSsfkBBlXHzcqo46xhqKsh9O//sfiwvihinvDeex9INT47nJcuXS7KDJQSKqjQqaAYdUjlsYDVA7zYT5gwmcqUKS09iOr37OJDh35Fc+fOlxJ+a10//bRbquE/+2xBcfbq47J4587d5O45pPyQ9qsAx4vjxk0QG1Meuzv0A/3Bgmv06OFe6qn4UAE/SO+ZUVco8i8jEPMIuJk7I2LUrUdTwtvxnDnz5MYmNvKUxh5Gq+ZgmNS89ZahEq02MLEROmPGN0KjK70JTFhYmFSZxTcUgRl1E5qARdzQhNKosm5GQ613z549QkoYz5So4/tZvry9qu/bb3cRzMtueu+9d4X2WC05Jkjq9+3bL82uFI2owUKSjw11hGCWqIOxxaYUxoFv3tixI00NRjUWX79wQIfj1SB1hvQZISJGXT0Pu3UHyoMHKFOmPKLC9G2i3AiTFz7+qDUkjmdr3/4NIUjoIxltHL2G9arVMbPOqPuoUmredevWyZUQ0Ved0Z3u5t2w9qlixarmxiXuQVDSr18faQZkzRus19hoiBZGHc4nZrRdQ5mEVBze4NvNaUSHTuz1wkGpxN+6d4uaTq5Ii9pul0eudVnUmnb/tpU+bzKJSmQt48Wod3jpfWr6rDFRVB5ekGa/sUF6jV98cA4NXvoBVS/SiN6rYLxcSjXeq1FxET2M+ndip+pLr51M1S6kgjhixS7oL7bOqOOj6StAbVpnZPR8/2VGXUnI1QcExA1HIylTppDnWELjoWPH9sJupRnpTJ6+8RGIOpSEXGeK1I4rdrlxhIduq9StWw85CY8YMYyKFCmsP86gjLudPNUiEbvweEZ6UBsqWBhgEWkN04WN+8iRY+TuvrLL0p8h0pTmxODBA6XZhLUOmCTANEHfrFF5kI77
CPpiVDHqn3zSjypVqqCyy1/Y7oH5x8ZPZLyH6pIK0OFLL1U2/VN4VfzwYtUqQ70V559C7VUFtXAfNuwLaV+PdLXp9M47XW2PNlQffp0mVX38ywgwAjGDgJu5U73vmAMwFyCoucFu7pk/f4FYzA+TkkBIBPWgvm9gysCcIUybZpjZQJPJTj14166fBVPfWebV50aZwH+ijIAbmoCEHJJyqFJDYwKnC1mZN71jA4W516JFS6TgAAIEBJwQVLNmXRlXmlnwll62rPGdw4Y1NCysQQkz1DrLet/ttRsc7NrCug+Sf4XPpEnjKEMGQwvPLr+eBq0BrM8QFi2aL5k6xCNi1NU6wZeUFhtepUqVQ1VS0wEb6v6C+l5jg/7ixYvS1KBYsSJi48D+DHedUQcjqmjh3r17XsfFYb7A+uhRcKAGfAJBE1gbwTwIfBi0JBDw3mDNWLx4UXn9KPyJFkZ93KvzKU86wx7907U9afXuBeGw+Kr5NHouUzF5NNuJa8coZ5rctOvsDuo6vYXMq+qAhL3qsEIyrV6JFtT1hT4yvu7oCul0Dox+zeFFpHrDKy90oHYlDPVW3cO83nh0MOpK5RYMNlRb9HDhwgW5I6anQbqLgA8jPpAIilGPymL6v8yoHzr0m1D5fcOUGgJjSBKxKwlPmy+9VNO0+cXZ7a+/3p4w+X311VCJP/4Eog587NAWJoX161cL6WZ8+fHLnt1QbX/11delcx/9HtreuHGNUFlKhGhQB7eTpz9GHWeA4mOjazfoICi7c+VPAPd0Rl3Pa7eY1NXg9Lx28UlCop8nj+EzQ32AJ0wI73dCSaKwmNGl8HZ1Ig300L17D6n1ofJgPGXLlpFMu3IUpe7piwOos6VJk5rgrLBp0xbyQwOVeCUtU7vGI0d+RYULG4t5VQ9+N27cJNX7ozK36PVxnBFgBJwj4Gbu9Meo41xmzJl6gG0xVKB1jRt1f8aM2TRixCgpPVWM+ocffiydyNltoKLc1aser9V2c6uqm3/dIeCGJsB0dOjwttx4Vq3im1W+fDn53HWtCNyHpiEYbF3jE4w7GPiKFcubjm/V9wVlNm1a56WZhTQEpbERrIy66h/66muzAfesAVJvrM2gNQBTSJhEqqB/i9WmhrqHXyVs8eVRXRcEWDVg9HpUXDHq6hq/Vm/y+j2dUbc6k8O4MCasv/CbPXs2aUIYE5789T66ibt5N3y1AxwOCj9Iy5evJAhNEKZP/w87kxvUeIJUWQcQ434YRtM3GbYyuNbDB/XExyRnTTPpwb/3qfqoIvS3sAdBmNl+rbBTz0qwPW84qqxMK5anHA2uZXhzlgniT4d5TeiXY7vlZdcaH1G9fIZEzpd3+uhg1PXjGVauXOr3zFMQDGyhsXCHmupzzxmbEMyoqyfq+XWi+q4k6IpBnjt3nrBBH2/aiqudYDiQWLx4mVywdOrUUTA+jc0GA1EHKlNnasKbfMKECaTjF2wWtBWe59UkDIko7PDbtGknJMDhF1xmp4Is4nby9MeoQ5qNj5lutqAPe+fOn6QqG3ZClTMWX4y63akLZ86cFY5ejHkBmzNx4sTVq5dx2H9B0wFSfSVJUIy61ZEdCjhl1FEGfd62bbvYlFtNGBNoVQVsKIEe48SJo5KkuQTMJtQxI9988630Zm9V34c6Ij7Cuu2pWYmIKNMcZtR1VDjOCMQsAm7mzuhm1EcKTaXpQmPJzpkm0MG8otSdmVEPPL24oQn0AlLCdes2iE2WNV6bv7ina2riGuuaevVelna6ijlRau/65jjqhJABQZcoy4SHf3r16iNPFglGRl2trdBVX5vW+lj0ONTdofYOYduYMSO8NinwnYZgBwFq9EmTJpVOiBMnNkz0hg8fJezAZ9uaC6CMEgDZCfJw3xr0cej3fG08+GPUVXndTt5XPSpvsPy6fTf89R/aDXDqu3v3z1KIBz8Mj0IIqES9d90h9FIuw9Zl7r5vacTKT3xi0KLcW/RGcY9zuU/Xvick79+Z
+Te+e0iqwv96UXjnntJApqdPm5nmtt5g5tl+chP1nP26ea3U6e//c48qDslvpuuR6GDUdaahXbvXxc6ccU6f3q6K46gU7Nwh6LtrzKgrhDy/Thh1lFJHUEBNEHa8sFFSiwssRrAogXoSjraClNZuwgpEHUuXGjbPkFL8+2+YtB8eI7x3w8ZIqVdhgyBt2jSCOR0lvJTbqyx7kAiemNvJ0x+jruzH4TEVnlOtAbbdsPGGxOCzz4w5RX/nBg3qL6XL77zzniyq27IjAQuV0qVflPd077Yywc+fQDPqelPoEzQ7Zs+eKxdcuDd+/GgqUMAzb/388x55NJJSf1eLdivdKru6Bg3qSVrS20Fc4cuMuhUZvmYEYg4BN3OneufxTbOqvttt8DqVqENFWDkbsztaSm3yASX1LY05xEK/JTc0YUUFduhwigoP50pb0yoRnzTpG6F2PVEKC2D6UKNGHcmUrlq11OtYUqXdpmt7qvZg+12vXiO5qR5sjLpOp0OHfk4lhaM9J2GwMBeZPz+85q+vOvT3ERJaHAOnvtPWMthsh4+iyH5/dUYdpplQ38ZaNl26tOIZTw4nCIwMo44+Kc07O1M+a5+D4drNuwHtyf37f5HO9ooUed52GHBQDIzt5k/bArGciM2FgDHqb1X9gBoVNBjUVYeXUP+FhmMnX2N84olktKzDT/K2LjVHQqGcJWh4vanynpWBX95lNyVNmFTavlf+sgDBDgMhQYIEtKbLfsncWxl4meHhn+hg1FG1knZh1wwfPLtjtiBNh6oZdkJ1NV6UZ0YdKHgHp4w6zinv2/djyeyBudMdfSnJAD5SUPtKKaSzsPfXJZhoPRB1wNyhbt2X5cQNZ17wvKk+iKABaFTAdj579uzy3FonzKM3QjF/5WbyRC8Vo96oUUNpV6f3XDnUw4cOtvq6WhbwgmMY7IDi/E9IkxF0Rl1teKnFCO5bd9SVBMHKxCOvYpoRz5s3j6lSHkhGHWqF6HOWLE96+ZjAJAxpPz62777bzetsWl0SAu0MjMFO1R7OnnzZ6CtJAcYW2YUC8nJgBBiBwCLgZu6Mbkb97Nmz4qQLQ9sI/ls6djQkhxi5kgQqFJhRV0gE7tcpTcC0Do5VsW555pmnvdYvuunjtGlTzPPU0VulVQb192bNmki1d92poBoRGEowlljHYkM4U6aM8ha+Rcr2GwnBxKirNRv65ctOHPf8BUjEYSJmF/DdVhsgMDEAr9G9exfTKZzyS4SyOgOPa/An+L5fvHiJ7NY+yGMNilGHlh0kvmA+W7ZsIzdI7Dy3R4ZR1/PMmjWNcKpOsAen7wbGA7MQ5RPMl3Zzt26GXyic1tOmTetgh0H2LyCMevNyHaltccPhCCTgPRa8YTv4mzf/lItidVNJwHHdbm4j+vX4HoK3+EUdfpBnqD/494GUjIeJI5FU6Fi1NzUuaDjEmLb7axq/9gt5a2yreZQ3fUEZbz2zNv1+2rADV+XUb3Qx6vC42aBBY/kyYZKDIxfYiypGEC/b4MFfSs/T6It+fBeumVEHCt7BKaOuO0hBTUplGHEwRDVq1JXPB9d2nryRHog6UI/yBIp4xYoeOzBcK6k94tglhZqZCrAnGzBgEMFmGQ4vFNOK43KwyfPvv/+ID2ZfcyMIH6kJEyZLZ2dvvNFGVoOP+YcffiKYv7OS+cuXL6+qPsq/biZPNArbSNhIYoNqkrADh9q/CmoTBddgxOHwBmplkBRMnDhFOjzCPV2SbMeoYzGBo9Y2bdoiFxrTp08xHcmoXW/Ug485dlOBLaQEMJGAZBubN4sWzTPV3tww6r6enzK9wE7/F18MMDcDdLtAO4+wyrs86AQfezs7OIy7T5+P5BExGF/+/PnkIg0Se2CLcV27do0ZdYDDgRGIJQTczJ3RzagDCl0aifkZG6ZY2GNzVM0dyMeMOlAIbHBKE7du3ZJOctGLDh3aCQbO2LjGtdqwxfpz5col5ncM9xCUR3f1LZk6dRLlzJnDuPnw
L9Tf4dsHzx8B38nkyZMLh7c75DdE0UOwMOrKPxT62r+/x+cTrn0FSM6hfQCtRjDDEQUcOVy5cjWZzc5GHTeAGZh5OGzD9z1t2rSSScf6BW0hRFYgY2XUUVZpYiJuNX3TmXCrjToERQcOHBS+mEbIDR48v+XLF6GaoA9O3w01IPUsYObYp08v03keeAAIhbDeQ9BNGn2t24Jl3R0QRl2df66A8vXbZ2Vn+n7fCvN2rqfy0/hG86QUHIlwHJckQRKKI/4hjNw6iOZsnSjj6k+SJI/R/HabhVT9CZkENXfYnMaPG19e7zm3kzpN80xeqpz6jS5GHfXDWQEYBSyoETBh5s2bWx6NpNKQbn3RkKYYdcT9BavUTc+r76iuXbvC9P6o53ESd/qixObxbGpcylkbrufNmykYw8zqlrRBgoQRwWrHZWYSkUDUMVocuYZJAaFv315UrVpVGccfpW6IeMOG9eXuLOIIvXsbZ3IiDoYWO7gI+LgMHjxMxrHLqj4w+oYA7O+TJk0qF1gdOxobZzVr1hB19pTlAvHHKU2oNtev3yj60VddSqcmEyeOF8eFGHZeyou6ygDJMZhMFXBUCo5MUcGOUcc9fFTbtDGOI8NHEzZlYPoRcDLDvHmGeQ3eTXVWu7IVHzzY22O8G0bd1/PbunWbUEt/T/YDbUMt6+7de/JsVCTiGlJza4A2RqNGzczkZcsWUapUKc1rFcGmxiefDDCZdZVepUolYTLwIjuTU4DwLyMQSwi4mTtjglEHHGAOpk+f5eU3A98eOJ1r1crYAGZGPfCE44Ym9LUFmG6YS8EGGcwcAnzhwCeONSiTPKTbaWap/PjmQCigpMgqvWPH9nTz5i3JeAYDo37tmue0FtVHX78LF84zhRtKBRx5t27daApDfJWNDKOum7SiHnzPDx48ZL5PEKIoj/u+2lHpdow67ikHgIjr6us6o457vgLWHfCNBM26RyG4eTcwLrwHPXr0MoVyoPUUKZJ7PQ9I0tu0edUUpPpatwXDuhuCmIAw6t+9tZXSPJYWGPkNvVa8RVv2G2dyqow5suajMQ1nUqL4xmIa6Tg7fcSWgTR/+xSVzes3iSC4qa+uoPRJDbUc3MQxcFB5f2+2vTRfVRCdjDrauHnzpvSMCcbbGrJnzyZtSLFjbQ1qN9Sabr32Z8+se7det26l3Ciwlndy7fRFCQZGXdmf6B5O1Zg3bdoipOy95KUvtRjcDEQdygEa6oOH7jRp0iAqw59//ikY99oybmUO16/fIJl1TC5g1KFmhQCHhe3bvy0lwDjm7emns8t07NZih1C3374jHDJis+HUqdOCufdmPmWhKPxxShOqKexmzpgxS57xqzat1qxZYZ4bjnwbNnwvvXJu2bJVFZPmC9WrV5XjMxNFBFoD6igZ67PU1Z/q1q0tNStQFmr08FXwf/auAk6Km4s/HIq7H1qKux9uxd21VKGUGqXUC3yFQltKi2uRolfc/aC4ux9yuFOsePnyMpeZ7Ozs3mZ2725vefn97iY7k2SSf/6T5CUvL2ifAPd8CYe2A3B1wtyBif168oSJiCOOLTIfkeSq/jAeqsjhZAF26sLhQKtx44asvjrrq+zimbiKlZAqVSpxTR1x3+qKGgI4g/7o0UM2EZEDsmXLyrdX9O79Ja2oWwFG9wiBaELATtspDEWigavixYvxnIqJXnlrlyjCggXaflkro5qzZoWwlbURLg3H4YDw5MkwuHLlCj9PPV++V9nE533dwFhsOZlEYBEbrnY4gX0pCt0TJvyhLwphWXPlysmMnnXVTxLCe7KTV+OttoDJYdF/585dLvTEjx+fqXnn5doVQjMOBR18l6+cHRyQp7hn3hMnC+rDhmnG35o3b8o1DiOLj6vStWppK+rmMYscF8cUAwYM4uMu+T5qhnbo0Fa+5dY/adJUZq9mAl/x//DDHg5hcS88fuPoQkJm8K10ly9fYVvmrHFA4RwncvBbbtGimT5Z4ZCon/6wwwlRFFy0
HDLkdyejvQXY5CNu/ahVq4YIyq+uxm3+Mu5WFtSrDynAB7wOpfTBj1Qp00CJnBUh7NoROH/5tEcpopp8idwV4QE7ou3w6V0OavVWCeBxRqG9jlo90u95Qw49EebBTg9nui5cuMANG+DqXfLkmhaAHM6f/apY+IOg7s94epo3XC3GzlFsmxDxUNBEJ47lEvdRODMf7Yb8w/BC0Bdhvb2qcsL8PswX5hfLlph9v1YOhXDc558pUyanslqFt3MPO1+cMMEJFDN2dtKT47iqPxEGn+P5qNgeyFsAxHPVK66A4L5F1E4oU6a0U3Qx8YSG+tBgHzlCgBCIfgS8bTujIse4sLB3736edLlyZZzaQjHhbDXxHRX5ednS9JYTqA12ix2hly5dWpf9qQqmOJl8+/Y/rO/NqO/BluOLSWM06oqLA75y3uKgmg+rMZNqGlbhccyFfTFqOKRIkZxPnqCWIzl1BHzFCRxLoiYItmEJEyZ0mRFX47aYHnfjxJyyoN52Si24dPWcy8L684MsGYNgVpc1brPoK3K4fUkseaiKxeTJU/kMvC+Kh0JMly6dfJEUpeFDBFQ54cNXU1IuEDhx4qSunooz8Gh/AQcHOFGIewuHDNG2TPhau8JFdug2IUAIWCDgj20nbplB2y0o8NWpU4sfT4n7a3E1dffu3XwFHjWgrAyPWRSRbiki4G+cECesYDHQjlKJEsX4ogH2MaglGhIyh5cQjeP6YpJZwOVvOIh80TXmECBOaNjbEtTHbf8dpm0YGXO158WbZaN3rpIhchjIqGJx5swZdk70Nn1viJGSmg9nh8uXLw85c+ZQi0ihoxwBVU5EeYboBRwB2UAh3kCVNxx8C4fnJONqOmpqkCMECIHoR8Bf287Fi7XjowQi5rYD1UV/+mmgw/YtEZau3iHgb5zAPqNr13cc1LfNfPjqqz58Mti7kjvG9jccHHNHv2ICAeKEhjpqoSqvqGPU2Liq7slqOpaNyIEoaI6wEEjQVSBAnBBI+N8V7VwsXLgITpwI40I6WngtVKggM8JXGRo2rO9/GaYcEQIvEQL+3HaiIVzcG4snRaCRLnRoS6dEieLM+G17J5X4l6jaorSo/siJJ+xIMTSEu3HjZt2eCp4GgH0J7usuVkw7XcmXwPgjDr4sH6WljgBxwsDMlqAefvsM9Fn0TqxRgUchfXDj8ZAjtWaAyyi+s4/IYWBCWBhYkE9DgDgRO5iAx0UKi/qxI8eUS0IgsBGILW0nqsPjXk5xNGhg10rMls7fOYGrebh319d2XMyo+zsO5vzS76hHgDhhYGxLUBfRUQ1+3bHFcPXGxSgxMCfeY+eKBrcypssKNfI30s949yQdIoeBEmFhYEE+DQHiBDGBECAECAF1BKjtVMcs0GMQJ7QaJhwCnenq5SNOGJh5JagbyQSOj8hh1CVhYWBBPg0B4gQxgRAgBAgBdQSo7VTHLNBjECe0GiYcAp3p6uUjThiYkaBuYMF9RA4DEMLCwIJ8GgLECWICIUAIEALqCFDbqY5ZoMcgTmg1TDgEOtPVy0ec0DCzbUxOHfLYE4PIYdQVYWFgQT4NAeIEMYEQIAQIAXUEqO1UxyzQYxAntBomHAKd6erlI04YmNGKuoEF9xE5DEAICwML8mkIECeICYQAIUAIqCNAbac6ZoEegzih1TDhEOhMVy8fccLAjAR1AwvuI3IYgBAWBhbk0xAgThATCAFCgBBQR4DaTnXMAj0GcUKrYcIh0JmuXj7ihIbZixcv7J2jrg557IlB5DDqirAwsCCfhgBxgphACBAChIA6AtR2qmMW6DGIE1oNEw6BznT18hEnNMxoj7oFd4gcBiiEhYEF+TQEiBPEBEKAECAE1BGgtlMds0CPQZzQaphwCHSmq5ePOGFgRqrvBhbcR+QwACEsDCzIpyFAnCAmEAKEACGgjgC1neqYBXoM4oRWw4RDoDNdvXzECQ0zUn234A6RwwCFsDCwIJ+GAHGCmEAIEAKEgDoC1Haq
YxboMYgTWg0TDoHOdPXyESc0zBwE9cyZM6ojGYAx7t9/yEuVLFmSACydWpEICzW8XobQxImXoZapjIQAIeBrBKjt9DWisT894oRWh4RD7Oeyr0tAnNAQjRs3rmFMjgR1ajDMHxp9KGZE6DdxgjhACBAChIA6AtR2qmMW6DGIE1oNEw6BznT18hEnDMz0PeokqFODYdCCsDBjQb+JE8QBQoAQIATsIkADT7vIBW484oRWt4RD4HLcbsmIExpyceLEoRV1M4mIHAYihIWBBfk0BIgTxARCgBAgBNQRoLZTHbNAj0Gc0GqYcAh0pquXjzhhYEYr6gYW3EfkMAAhLAwsyKchQJwgJhAChAAhoI4AtZ3qmAV6DOKEVsOEQ6AzXb18xAkNMzImZ8EdIocBCmFhYEE+DQHiBDGBECAECAF1BKjtVMcs0GMQJ7QaJhwCnenq5SNOaJiR6rsFd4gcBiiEhYEF+TQEiBPEBEKAECAE1BGgtlMds0CPQZzQaphwCHSmq5ePOKFhRoK6BXeIHAYohIWBBfk0BIgTxARCgBAgBNQRoLZTHbNAj0Gc0GqYcAh0pquXjzihYUbHs1lwh8hhgEJYGFiQT0OAOEFMIAQIAUJAHQFqO9UxC/QYxAmthgmHQGe6evmIExpmtEfdgjtEDgMUwsLAgnwaAsQJYgIhQAgQAuoIUNupjlmgxyBOaDVMOAQ609XLR5zQMCPVdwvuEDkMUAgLAwvyaQgQJ4gJhAAhQAioI0BtpzpmgR6DOKHVMOEQ6ExXLx9xQsOMBHUL7hA5DFAICwML8mkIECeICYQAIUAIqCNAbac6ZoEegzih1TDhEOhMVy8fcULDLMYF9Wd378G1BSvhzqpQeHLxHDy7d4vnLH7ytJAwa3ZIVbcmpG/6OsRPllS9lm3GIHIYwPkKi/v3H8CpU6cgPDwcbt7U6jhdunSQPXs2yJs3LySLxvo1Skc+Owj4ihN23k1xCAFCgBCIrQhQ2xlbay7q8k2c0LAlHKKOY7E1ZeKEVnMxtkf96fWbcO638fDPioXw4vlTtzyKEz8hpK7XBLJ99DYkTJfWbVhfPCRyGCh6i8WTJ08gNHQDnD171kjUwpc3bx6oWrUKxI8f3+Ip3fInBLzlhD+VhfJCCBAChEB0IUBtZ3QhHXveQ5zQ6opwiD2cja6cEic0pGNkRf3W2k1wpk8fePHssVJ9x02QBHIO+hHS1KykFE81MJHDQMwbLJ49ewbz5y+AW7duGwm68WXIkAEaN24I8eLFcxOKHsU0At5wIqbzTu8nBAgBQiCmEKC2M6aQ99/3Eie0uiEc/JejMZUz4oSGfLQL6hfH/AmXx/weUe8v2DWOMgcyd/8Ysr7XUTmepxGIHAZS3mCxbl0onDwZZiTmga9o0SJQoUJ5D0JSkJhCwBtOxFSe6b2EACFACMQ0AtR2xnQN+N/7iRNanRAO/sfNmM4RcUKrgWhVfb8asgTOD+wbUfcopGsuSVBBSN20ASTJlxuSFsjLbz44GgYPj5+GW/MXw6MLx0VQ/Zr9676QsVVD/bcvPb4kx4vnz+ERW1FOnCY1xImFK8V2scB96HPmzHWolldeScLV23HlHN3Vq9dg/foN8OjRIz0crqa3a9cGkiaNPpsE+svJ4xECdjnhUeIKgV789x8c+XMmYCNWoF1riJcooULsqAsaxtqs6wcOQrHu78ArGdJH3Yt8kPL9S5fg4Ng/IGP5spC7Xh0fpEhJEAKEgCsE/KXtdJU/q/v3L1+Bg2MmQIYypSFPw7pWQeieFwjY4cS1fQcA/9IWLACZy5by4u1RE9VO32wHh6jJPcDjf+5A/CRJ/GZMEVXl9Pd0fcWJZw8fwbOHD7kc5u9ltspf3LhxIc7Zs+e41Jw5c0arMD659+BYGBxt1w7YqJqlJ4R0bTU98/ufQNZ3O1i/hw3Gzw3/A65NGQ/w33MjDMt4wdmz4ZVXcxn3fOTzlhzPHj2Gk3Pmw9FJk+F2+Ak9V+nyF4P0
JUtC8Q+6WQ7gz63fCAeGjdTDu/IUfLsr5K7/Ojy6/Q+seaubq2CW9+tOn8waoMSWz6xu2sVi8+YtcOjQYT1JFNJbtGgBeJUdGplDgf7xY2MbRLlyZaF48WJyMPL7EQJ2OeHrIjxn9g8mF8zPk22/dSckSR/19isiK8ODK1dhVqUKPFjBTm9Dhe+/iixKjD7f0OsLCFsYwvPQae8BSJg8WYzmh15OCAQyAv7SdqpgvKnPt3B87nQepcPOPZA4dSqV6BQ2EgTscGLn4CFwYPxIyN+6EwQP7BfJG6L/sZ2+2Q4OWLK1730AD6/fgHzt20K+lk1dFvbsyjVwaOwESF+6FJT7qrdTuIubt8KRP6bC9b274eHdG/x5mlwF2CR2OSj12ceQKGUKpziqN1TH+Jj+A7agFdr9Q8tXJc2aBdKXKglZK5aD1PledQqzd/houMhsRJld3IQJIV2xopChZHHIVjkY4pvG5ebwMfXbLidEfk8tXg57Bv8Ed6+E81vxEyaGAp3ehBI9u0MCBQPWiOOe33/maWQsWlYkD4hj6tfyQdoihblMFpU4RougfuytT+D+7o2sgI5CuihxZOrsd/cchBNvvRkh6GuxUpStDvnGaeCJdHxx9YYc2GCs7NQVboYZQqo5T4mSpoQaY8ZClgpGhWOYYzPnwOZvPzcHd/pdhnWcRd/pCjjTPbtyRafn7m6oDsbtYjFrVgjcuXNHz0qNGtXh1Vc1bQn9ZoTn6NFj8PffyA3NZcqUCZo0aSR+0tXPELDLiZ2DfoHwFSsgf5cuULhrJ69LZWcw4PVLI0ngv6dPISS4Kjy4dQWq/vw75G3m3zw+MnUGbO3/DaTMlhtarFsFcdgEKDlCgBCIGgTstp1RkxvPUj024y/Y/F0fSJEpB7TcsMZrzcAzK1bD7kGD2eC2GFQfPsSzTARwKDucsCuoL23ZHh7euAaVf/0FMjIhLaqcnb7ZDg6Y/2nFSsDjB9pYs+WaUEiZM4dlsQ4xIXw70+jNUqYy1Js5xSHMgfGTYOfg/znck38kTZMJak+dBGnzvybfVvarjvHxBXfOhsOcWtUjfVflwUMhX4smDuFCe34Kp5cvcLhn/pEuXxGoO2MqJEqV0vwoxn/b5QRmfP/o8bBryI+WZUhfsCTUnz3N40VLT3DESZ060yZB0oyaxrDli724GeWCOlp431/ndUNG58J6HKcsZ+7xKWR9p73TfXEj/KdRcH3GH+In395edO0aSJjGtzO8dsnxAo2nvd5QX0XP36YzBNWpxVSTSsMddizZxb83w4kZM+DOhdO8DI2Zimz6IoX08oiPGDvEykNdd2ApcmTnK/L4vqtsFUx2/167DqEfdee3ak+YAgllFXIGOTbOKoNxu1hMnDgJ0JiccJ06dWCr6a+Inw7XBw8ewLRpM/R7iRMnhi5dvBfk9ATJ41ME7HJiXY+P4czKRWySqQeU6dPL6zzZGQx4/VIPEkBh/cnd+5A4bWoPQsd8kIesfU6cOiXEoRMXYr4yKAcBjYDdtjOmQfFlG3F0+mzY8v2XgBqGTZbMj+mixfj77XDCrqAuhNo6k/6E7GwlNaqcnb7ZDg6Yf1Em9KPQ2XjRXMu+zJWgfn3/QVgUIeDmqF4PSn/Riwv7qC6Nq+ybPu/NJwJQEGu2eim+xrZTHePji2RBvf602bpwifk7tzYUzixcwBcGMGztcZMgqEZV9HInBMzc9ZpB4bffiLgLcPvkKTi9cBFc3Lqe30ubtxA0XbFYf+4vHrucuHH4KCxs0oAXI3e9plDik57wSvr0cHLeQtj2v2/5/SJvvQ9lv/zMo6IKHDOVrAjlvtO0JHHL5a0jx+D8mnUQHrqcpxNU9XWoPXG0R2mqBPqPaZZHuaB+cew0uDz6Nz1fiTLnhTRspenyqKH6PeFxK6yzzB6o2xqeXDsrgkPmD3pB1reZSr0PnV1y7BsxBnb/9hPPSZneX0PR995yyhWqxs5hq8vPnjyCnLUaQM0xw/Uw4iPG
xqbJsoX6fRXP/cuX2Sq71gB33LMfEqVIrhLdKaxdLMaOZVsVJPfee+9Iv5y9quGdU6A70YWAXU6oCuo4EeVOeLQzGHDAiDW0uJdO1XZEZPlyeIevftjMK04axE2QwOtcxEiZvc41JUAI+BcCdttOuRSq36JqeHyX7XbDg3aKBHW5NgHscMJKUMe+DJ27hRgh1HoqqKONJZ5eHOeFNXe8stM328EByyvKhH50xdlW2lKf9tR+SP9dCeq7fh4K+8cOB1fC6s2jx2FBo3o8pebLVlqqmEuvceu1M8aXBfUuh45C/MSJHN6BdbSoUXO4ceIg5G3SGqoOGaQ/FwJm0bffhzJfOAulqIq/+u0uPHybjZshWebMelx/8NjlxNpuPeHsmqWAmhCtNq53sDWwte8AODJtIi+epzJSZDgeYhNf2wd8D6ha3/nAAbfjVju4RovV98NNu8DDs4YqeFYmxGbu0Awujp8Bl0f+6pRvd8L6pckhcClCGMaISXIXgULzJjml4c0NW+RgHdSMUuX43hazAG7Oy2n2sYd+qK16yx+HnY/YnPbLKqjvGzkWwv76C/KwffC4/0R2yzu8AQ8uXoCSbGYU9/YLh7YEFjVowkwfPIMGc0Lg+Oy/vE7j9okTsOUrpkqcKw/UnjRWvIpfd/86HM4sWgjZa9eBcl87bnFAIfbW4UNQtu/3EFStskM8f/uh+n0cmzUXDo0ZDQ+uXeITVNiYJc2QhRer6bLFDvujru3dD6cWLoHLGzdyzZTUOfJB5sqVIU+ThpChRDEHKNwNBoR6JUao+ONAh20muFctfOVqOL92FTx/+hgyl6sEWatVhQLtWzsJtcvadoZ/r1yCaqNGwMUNG+Hs4iW8Q8QyZC7DZlf7feukareq67tw98xpqMK0YkSecWAVvlybdXUohPSjcLfukL9tC+kOgKd5xX1sy9u0h7jx4kO9WdNga98f4NKmDXwloNnSFZCG7aOycuf/3gTbv+sL6YqXhGpSu4phsS3ZOfAXuL57J9y7dgGSpEgH6YoUZd9RL0hXqIBVcnSPECAE3CCg2naKpC5v2wn7R46GGwf28m8ate4ylCvH9tr2sdTcwW1xJ0LmwaUNf8PVAzsAt9tlKFkG8rZq4dAHYvp6GzdiGIQtWAzn2fYk3NNZ7pv+UPiNjnBh0xbY9s13TqrqO3/6FcKXLYPC73WDtIULwM4Bg5kRzV08y1mDq0PQ67Ud9gzfPHYcQrv1YLZ1buqqyrjlBl0wO3Y3c/ky3P+y/bPDCVlQz920MRxk3Li8cwuHLn3R0mx7WUcH46BoZ+DK9q26Nie25QlTpGD7a/NDzbEjeLzw1etYHQ6EjGXLQ4HO7WHPkN/g8vZN/Bn2kRUH9IVkWbLAibkL4dCo0bx/Frwq9/3XDv2gu77ZVf3awQHTEoJ6lnJV4NL2v3nyjVke0xcr4vAqV4I6jg8xnrv9/mgg9sn9+5CtaiVIEZTdIV2VH3bG+JEJ6vh+sT0FBdO2OzQe4P3IBEwMI/CzUp3H5zHpvOWEVZnQttfs4Ip8LIoaCp60O5Hh+M+p0zD39Vocqqiw4xEtgvru0sEOZ6bnHTceUpUtwQulKqzf3rwTTvUwBLG4CV+Bkju0j9NXhLJDDll9pgHba56pTEm32cGOFF3S9On02Rc7H7H5JTEtqIczFf9t23bAP//8Y86a0u/UqVNDeWbEI8jDRvHUkhWw/uP3wayeJO/jz1WnEdQY9buej0tbd8DyTm35rBs2br5IA20UzIiwPWCerQupWIULPNi5ddy3h019a7PUOGEwhQ100LVmwmDyrFn1PPqjR/X7cLf/S7aZcGXXHljatqXLIjeYxb6r0sZ35WowgMLt2h7v8nQqfPcDFGSDDuEOTZ4G23/4Tvx0uOZt3Aqq/jrY4d6MkmX55Js8CJADoMDemgm7SdKl0W//Vak6H+i+zt6VrVJFfn8jm5w8MX+mHsbKY9bCUcnrvfMXIKR6FZ5shsKl4dohbcCM
N5ouWe5yb92pRctg/acfAMZptEAzKodxbhw6Aktbt+SdGf5OniEb5y760ZlV7LS79J8QIATcIaDadmJax2fPhU1f9+bJYnuTKFkqXdUV+5KmK5Y5rIThKTOLGjZx+F7lPJlVPkUbh/s2rx9h/VKEK/d1P25LRPSLZlX19R/1hlNL5zIV6lpwfuMaEc3hWrrXl/z0C7wpj5EcArEfuE3P3yeozXn21W87nBCCOtbZ7bAjejst56n68DG6sI6Tx1Z1JNep4BlOnjy5e1c3qCbSxD4gT8tWsM9CExYF/2YrlzGDrul4cFd9s0jL6moHB0xHCJrY3+4fNgqu7NnC+6vma1Y6LAK4EtRxwunAuBF8IrrR4vlROv6yM8b3RFA/MWcBbPziUz4h13H/Xh3eyARMDCi+/4r9foQCHdrocf3BY4cTTx/8C1OLFebZb7t5m+We8SXN2/IJzOD+gyF/+1aRFjUyHAX+OIHaalNopOmpBogWq++72GAXItRyMIPF2UpPfMninoqw/vTGLdhfq45RzrjxofSebcZvH/jskCNswRLY8JlmmbHzvoNKFgVFlsVHjCo4Def/JW47Xd1ZbY9pQX369BlMleuBU57t3EjBZnzxqDZPHM6STY+YHJGFP1zN3fyNMcjpfOiQrhqGK9zY6bzWqiNU+rE/t6LvbRqY1/m1G8CtM0cdhJm7587DX9LeoabMGmXaAq/xouFqybKObbixnqj4yD3BTyWMne8D03en+n4n/BwsqFeXDzhw4FeaqWmlYVZMb504CbuYETocZOAgtdmKlfqMttVgAFcF1nR/mxcn+IefHVaoz2/YBKve6syflfr4c9ZAt2FqZIkhnBmgQWEVXZnPv4Gi777J/fhPdGLoxzgFOrXn6vKnFy+DnYN+4Pk1D3ytBHWMb+XEJAbOhDdbvUK3LKuaV1lQx/dU7DsQstesColTptL2tFmoLmI4V4K6sAaPe7JqTRjNt9A8uXcfNn72BZxdu8ylmiCmSY4QIASsEbDTds4qW5EL5oW7duN7KlEVGfv5ZS3acGEc7wsNLVRZX9ykJdf6wTalym9D+fFd9y5dhuMzZsOBCaN4xmT7OHIbV6B9VyjQsS0kDwpi/WQciMesGkcmqGOC/F2//8a0jErC/StX4MCo8XAs5E/+LnnCEm+Q6juHRf9nhxNCUMdE0Ap15aE/QQpmAfz8pq2w95df4cax/dxAaMv1jhMoQqi1Un0Xgjqmidsva4wbyYXW88y20rru7+qTAbiPO3hgf67Jgc+E6nTlQb/qGhRWfTOm687ZwQHTE2WqxwyjpsiVE+bWrMHzal4hdyWoyxNIOMbI16o95GxQDzIUL8L57y7Pqs/sjPE9EdTFREym4uWZdugMPVuRCZg4Ib+waUMe3tPVZT3xaPDY4YRcpjdPhOnjfTm7Gz7tA2GL/oJCnd+B8t99KT+y9Asc8zVrB2UiTgx48ew5/HvjBlwI3QD7RvzGOWf1XVkmqHgzWgR184p60VUrIGEGbeZN5NdTYf3JtRtwoE5dEY2tRieCUrs267994bFDDjHgxlnH1lscV/hxL49YQTfnL0natPqsn/iIzWHMv91NBMS0oG7ea27Ou+rvyPa2y+ktbNiMd1Dy7Dwe3YGCBa60o/DciM08ZihelEdDC6hX9m2DmiPHQc4ItRVfpCE6UXkAJaxri3zIKw27h46AfWwLSMEOb0KFft/IRfJLv53vAwviTlAXFjpxRrLZqmX6N4Hxnv37EObXqc9XqWXczIOB6wcPwep3u2IUqPrLMMgb0QHxG+zfCnYsBxpPscL56LRZsKXvV06DGzGIRWMsZgvFaMUeB75mK7KeCupCowPzJ0/c4G/VvMqCeqUfh7DJp2aYTKTOlaAutD/Mkx04W31pyza+r182WhPpiygAIUAIKO9Hxi0ts4LLc+Rarl0PKXME6Sjev3QJbhw8wlcxhQVvPFt7ccQRVU3Y4kG6wgX18OgRA/qCHd+CCn2/5s9E
G5enYUun7S8YwBNBvQnTzEkXcVQmxsH90svbvwGXd22CXK83hhojDRtFJKgjQoaz05+KMQYKlu2273A4VvM2m9yeF7HFr8uBww59qRBqrQQKWVA3H3cqjujD95n34Ip+vUC7N6Di/zRtNXPf7MnRqXZwQBRFmVBQz1KxvD75jM/ksaArQR3D4WT9mm7ONqVwEiRvyxaQq2Fdr+094XvsjPFlQR3bAFxcQPff0ydwYcNmOD1/IR/H4r3qw0Y7bG0RAiZOWhT/qAcG4e5u+Hk4s2QZMyg3j29DQS2KlqGrdS1PES6mr3Y4IcY0VrKYKI9YpMtaoRrU/fMPcdvlVeDoKgB+FzWYXa6oMtCIhuui3JjcvqoN4Nmdq3oZcw8ZCmlqOu/D9URYv7H6bzjb+1M9rfipMkPx9b61VmiHHAfG/QE7f/rBcqVJbjj1jEd4ZIFC/oix4l25Drt2OzS+criXWVAXH1/hN7vzczLRyMbUIkW4qmDpr77iGg8lmPHBkh/3ALkjkSc+fJGGWCGXjQKKAVJ9dtTNMqZqI898Lm3dkatr1Zk4FbKzPVD+7ux8H1gm0aFbWX0XEyolen4GJT963wmCPb+Pgr3Df4GcNevre+rkOkSBUmhO1BgxFnLVre2UhujQ5ckaEUjesiAPbsQg1mqvE57Ruf6THoD76JuvXSGSAk8EdZy4W1C3Hu8krY5xU82rLKir7JESnZpZ9X31W93h3IaVXJWu7LffQ446NX0yUNFBIg8h8BIioNx2sgHalEKF+GoN9iclv+gNWSuUc7KlIaA8PGU6t2os9y/imauraOPkCWs5bGSCOmrdNAiZJkfhfmGLx6wOSoK6I1TKnGDRhaCeuXQlqD9rqkOCOEnyR768/J7ZPonoV9wJ6ub+DBM6OH4y7BjcH3ALWL3pk/GW7sSYKU+DFlAt4qxpuW82C/16RJPHDg6YhCiTENTxnhhr4NaQlqHrIHGa1OBOUMc4D5m2LtpoCJsdwhd18J5wOB6v88cUj/YzizhWVztjfFlQt0pT3LNaHY5MwMS4KNDWnDhe1/AU6fnD1Q4njv81HzZ9ySz34+SDSaNElEkY/nbVdolw4irjiO2ZcP/euqprmuC9Up/0geI93hOPfXaNlj3qR7t8CA/2GwYOMrzRDYI+1tRTzSWJTFgP/2UMXJ82QY+WrFRlyD/R2Xq8HsCGxw45hMotftBdjhxxeOu9CxdgTVfHysPVXXTyIF18xLKA55CQBz9eZkEdDZEtZiuJAr8ru/fC0jYtuJGQUp99DNPLltKPhLmyk+2HbteSq401nDdLR9YXaaD64Z/FivEPuPP+Q0x9KgFMYmruYsAyr2ZdbohFfoYZ6MJWR9wwYaiIAAAU6klEQVRta9AzGcMeO98HZll0nlaCulDvdDVYFPvOZWMp8mBAhkQWtMV92VaBuOfq2pAZYRIrVGIQa2V3QqinmzuEyAR1zPeSpq24emr+Nl0gmFkLlZ2dvMqC+lthp+Xk3PpdCeo4ubi0VWvd6BMmgtogQfXrMdXAppA8Wza36dJDQoAQcEbATtspvlE5NRxg5m7SCHI3rKdvl8Hn69n2nFNL5lhqDcnxZb/explsgIgwkQnqVhpKGFc2sCS3ySSoC2S1qx1OCEHdrN4tUp5SsCAff5g1tYRQ605QtxL+DzIbAjsG9QNUe681fqR4Db/uHT4a9jAB3Z8E9cd37sLc6rX4PntxZFZkgrpcKIx/lY0nw5khVtm2TAtm/yZVntxyUCW/nTG+LKjj+Cdu/Pj8nU8e3NP7Z6vxCQYSAibKJUlSGVrMaBwWHWoD1pky3ucq/jxxH/yz822IsT2+3tVYaNOX38Hxv6a5NSAoZ1/gaGU9H7fdHmKTOPtHa/avqg4Zzqzva0fDyWl4448XL17Ur6hfnDATLo8YouczfvL0UHTtQojL9j9ZOVfCeqZ3e7Jz1GfC8/s39GhZPvwMsrzZVv/tC48dctw+GQbz6ml75zts321piVXk
DVXhpxYtyhtSef+WnY9YpCmuL7OgLlbQ8eg7FIIPjJ3A9qD/pu8VF2rtHXfthSN/zuCdC+47Lv5BNwEf+CINTGw1m5g5t3E1m4WdCvESJORG6wq98R6U/6aPPhtea8xESJQqJTegZtU56pnyM4+d7wOL4E5QFwMIWVVNLrY4RoQb4oswluJKULc6deGf02dgLlsVRofqbFZH2OA2CFyJKtf3G11l1N0g1q6gLjoJNATUcO5Mp9UxO3n1taCOOKFRqpPM2u3p+fP5lhK8J5ysCSTu0ZUQIATcI2C37cQ24WTIXDi7ZCnfAiTegu1hvdmzdGORYkBptbom4piv7to4DBuZoG620SHSl9ukjrv36RMKJKgLhLSrHU6QoG5gKMYO8oo6Pr20ZTss79yOB8TtYE/v3YPtA/s6bVUzUrL2iQUffFq2z3dQ5J03rAN6cNfOGF8W1OXj2f69dh1mVizH31qs+0dQutdHTjkQ7YFZwNzabyAbA0/gdn/abN4CiVOncorrDzfsfBuPbt6G6eVK8ey70i5c3q4LXNq5kWne9oXCb3aOtKiucJQjin3vaGOpzqRx8iOf+KNc9f3fk2fgSCtHy3ppm3eAXN994rIAroR1tvuJxdGsZWPkQvPmsiPaDFUElwkqPLBDjuePn8DkQvn5W4p1+xBKsxVcV07eRyarBdn5iM3veJkFdcRCnJ+IEyC7Bw7mq5ZiNl+oT+M+nmNTp/P9c1b7+HyRhtjzjKvHL/57AQcnjgLRkVzesZurv+dv0xmSZs4Eu9mxWN52AGYeROVvO98H5sedoC72ZJstn4tyHBg7EXb+PACyB9dgM8CaRo0sqFf7bRSbaY4H6z7QNFfkveyYhqwO2Hz5Kkj9qqYaKNJ3dXU3iLUjqAu1LBxgN1u10tIiqZ28yoNiV7PIVmUUq3Vm1XdzWLQTgPv4DgwfqasFvnH4mMP5pOY49JsQIAQcEbDbdsqpoIruyXkL4MDI4XxFTVZzFyufKhO/7to4fG9kgrqVOjTGE/t+ZS0ovE+COqJgODucIEHdwM+VoI4htvX/EQ5PHc8FUjQSd2T6Hw6COo4hLjAjs+iys2Nx4yZIwP3mf4sat+QnIlidDGMO6+63nTG+K0Ed3yPOgEd/W2ZIMGmmjOjVnSsBUz6izJVGjJ5IDHrsfBuYXaFRIi+EimLgQun0UmV424kLadmrRL7d1BWOIk28CjtU5vZODmPXHy171Hkh2r4H/x7bbeSTWSHON2EipCilGfYyHhi+i2OnweXRhhES7QkK6ujiQNJC5aDAdEc1HO2Zd//tkmPXkN+5+gOqmTRfvdrymAckyRomvOFqq7lC7XzE5pK+7IK6EITyNW8PJ+Yx4yJMtafezCkcJmENElW08FgZvjorHZMmsPRFGrjdIaRaFcBV02cP7sO9y+eg0/79vCMQGhV4zE7KPK/ymT0V4VHkM6audr8PIai/1qIDVBr8P4fsC4N6iFej+bO5sTIRAPFazI7TwKODivf4lO0D0iy0y4K6mPASR61gXLN63+KmrfmxZWYhHsOiJgWqTKHLVLqEfmSiu0GsqqAu+IfvQFsFmdlWDFdONa++FNRx68bNYyfY6YFxtPPSJYvxcvvSZOFSOk/dVQXSfULAAgHVthMH1PcuXOSnyKTKldMhRXEkkLzdTqh98jHIylWQPLvjFhWcrL68cRME1X0dirzdhafnro3DAJEJ6viuFmvXsSPiMjnkTxzfFlS5NtSeNFZ/JgR1d8ae9MAvgUeVEwiJt4J6tV9HQJ7G9R3QFcbkrCZ5xARQbFF9FwXDMcLCeo35VkNxTx4T4vNpxYtz7VZ5G6oIK67CuKr5uFfx3NOrnTG+O0H9KTthaXZwJS50Wk0iuBMw940cC7uHasfRmg1VelqeqA5n59vAPAlD0fLxgyKv8rG3VpMbIpx8dYejCBfasxecXj7fcnuICGP3Gi171DFzD8+ehyMtWrEB8TM9r3HixYcMXd6B7B8wS83s
yBErd3EcW/nUz200hPQ48RJAYXbmYaIsjp2DVRqq9+ySAy0iz64YzD8a7LxqjBmnWQGMGOji3tNNn3/FLU9jnsxnEdv5iM1lkwfS5nO8zWE9+a2KRUxafcfyyFZy8beDagszzDOteEleP/jMypI33vdFGpiOaNzRbx6siFV7fIZnkLbfswO93KFV7U2ffQ5pChWBmmOG6ULrgytXYe27uEL/nFmYHKFPBOHEwv6hQyFHg4YOx/SsYWHvnzsHFQcNhEwRR9eJd3hzVeWEeNdmdi7vsdlT+ARV0xVLuYEX8UwWYnGGt0yfXtxgIq7k7mDW1Y/OmMSDyhoQVoI6rkbj6vyl7X/zWfTm0mBV7FHDhFCrAg3OoQr8s4eP2Mz7QL5nCSdv2u3cru/ZcjeIVRHUccC94PX6/Jilsl98rw+SRfnNV9W8Riaou+KU1Yr643/uwDQ2WYGu+PufQKlPe+rZE8cdYvvWcd8+HSc9AHkIAULAJQKqbef5jZuZpfZOPL26zFBc1uAK3I+Tl39//jU/Ykhe0ca2bF7NOvzYNrQpUYdZNMZVNpyIPL1sFTd+iQkI7S70u2vj8HlkgjqGQbswdaZM5BboMW+H/5wJ2yNsb8jHdmHY00yjKbSntt1Mbs/x2cvoVDmBGNkV1OdUqwV3LpwG3B5W7fchDhpRgSioI1Y3jx2HBcyWg3CyoI73tnzbH47OnMwXboIH/+RgiBbHH5u/7QdhC0N49NahfztNfol0PbnaGeO7E9TxnfJYwXz6gjsBE9uKkOAqDvv49TKwsfK6Dz6BG3v3QIUf2VGvEUaO8Si7TeyI1sTp0kHtP8bqNpVwkfL03LlQ5P33+fGOejpeeux8G/hKoc2D/nLf9IdCndrxcfSt4ydgWes2XAbIVacR1Bil7SvHcO6cKxyxrcOjLw9P+pNvJcA0ouI8+mhbUccCXJm1CC4M6o9eB5ckqACkbtoAkryWB5IWeJWdO/AcHhw7BQ+Pn4ZbbI/kowvHpPCa2nt29nFlbOE4IygF8sprlxz40iu79sC6d7tx8uNvHNCmyVcY7p07q9/D+1bqJuIjxufunLuB/ssuqCNuwlgb+luwc7XllQixjwSfudtn64s0tn7/A1e1wncF9x/Mzu02tn+I1RB8hmczVmZq3cIJC/H4WzZsJiyv4n3ZuqQ8IYD773Hf+4VNW2DlGx0xKJuQaMqOFvuV+33xz+73EcaOC9rw2Yd6FtC4XtOVSyFB0lf4PWFFXQRAI204qBCu2tCRkKeR0eFaCeoYFg3BLKzXkA9W0YJtYzahF/+VJDyZjb2/1o3D4LeZImsuuHvxDJ9RxwBmi/HuBrEqgvrmr77XzxVOm7cQz4v5X/7OnR04opLXyAR1V5yyEtQxXzJ3cSIpfbES8ODyJbgZdphnu9h7PaF070/MRaDfhAAh4AYB5baTDZjn12mobzfBNjFlrjxw8/BBPumHrzLb9bgTfg4WN26iT0hjnAfXLultnHkPpbs2DtOPTFAXR45iWGzTZUvI5r4Nw9y7eBFCqhqn/qBmYRV2BnuWCmXx8UvnlDnBELIrqJv7lKzB1XXjcIEqqCOhxPHJ6DcL6njM4Yr2nfWxBk7Wp8z1Kjx/9FDv7zCelRFcvK/i7IzxIxPUUQMuJLgqbw/MVsxdCZgiz2LiHX/LWn7ywomMl8yfWqMnQI7a7Mx6JvBPKVKQJ8m1VCNsCIl3eHO1823w97F2c827H0B46HL+E8cwr2TIqNcn5rPpimVMCyizR9kTOEYWGMfalX/+kR2hlyiyoMrPo3yPupyjC6OmwpVxw+RbHvpxNV0T0jN3/xiyvqcJIR5GVgpmmxwRb8HVs81ffgtn1yx1ei92ZBUGDYBslSo6PZM/GqeH0g13+5lli9Gd9h5wOF9TSsJjryoWMb2ijgXbPvBnNss4mh87YT7T/vTSlRD6UXdefndG/3yRhjCAhi9rs3GLg2rgw+s3YUaFMjwfZuEwjE1Obej9ET9eotnK
ZfqsNxosXNqyFTx/+hgazJmnn1u7+9dh3GievH8bVaLm16nHhVVz+vylXvxT5YT+KtZ47kH7AFMm65NWspEhDBfG1KlPzgrh2wFEPOwoXm3b2smSJnZQaE0fnbku5SMR5fOBcQZ0z7BRcGHNWm6/QLwDVaTKfP2l02BRWKOXJ0xEHLHaZT7ORqxayCtgQi1KxLW6mlevVfIqD36t9qi74pQrQR1YXR1hq2L7fvtNryvMM7ZfJXp9CnmbNbIqAt0jBAgBNwjYaTtxVW/7gMEQNj9EF7bxFdhmBbOxhPmsdHyGA+2DzJjqxb9DdYEdB6v52rdnGjIfOhjTdNfGYVrimDXcltR40Ry8xZ1QbS/5UW9IHhQEO/r109sKbCeC6teHMr0/ttz3i2keGDZCHziLQb9I+2W62uGE2JuMdm6CB/R1gkvs0TUfz4YGQncN/hVOLZ7LuYSTLM1Wa+NUsXggC2YiYbFqa2WoVahQy6rX7vpmkab5agcHTENMNNX7c5ZT/y3egZp2y/AYXGYwVtZAEc/FyvnZ5YscvjF8jhNJZb791mGRQMRTvdoZ4989dx7+qlGVv8qVXRgcN23opWm+ydoyYtzhapIBNW3mVK3FDVTKdmpw7LGgXhM+QYhH3+Zv24K/X4xpEZOmbGwqjNCte/8jOLNqMRTu2k3X6lTFxiq8XU5gWmg3bFu/AfoCiUgf280qQ3/22E4RxhM4ijTkK7Z1adkpT1mrVIbX2ClTUeHiMs3PaBXUsRB4Fnr4l1/Ci2ePlcoUN0ESyDnoR3YGeyWleKqBvSGH/C5sHO6cOQt3Tp+FhClTMmLk0Ykth/NnvyoW/iCo+zOenuYNGxk81o1tFHaIgg0oujgRR3SIhziraT7aDfn3Hwsfz8XpCiKu6lWVE+b0MV/YMaLauVjpNofBjv4u25uZku2xNJfVHNbu78d378HDmzchWaZMTtjZTTOq4vkir1acQmF8a7+vQe6kzWV4cu8+/Hv9uoZThGaCOQz9JgQIgcgR8LbtREvPTx8+ZJpAWTxrF9mEm7bHPZnPxx6yoF6ipzb5jQsFbG8WJMuSJXIwWAicUMY+TmhVeRQpwAJ5ywk7cOA44gnrg3Hlz9fjAzv5wTgxgYNTXtn3cocJxndOnWFjgiSQtmB+/bQCERb7Q1yFV3EJUyT3ePVWJd2oDvvs0WOn1WHUZMQj4swn51iNQb3Nny84gW3mrRMnmYbEY0gRlA1S52Na27HQRbugjhg9vngZTvXq62hgzg14r+QvBXmG9IVEWT1TVXCTVKSPfEGOSF8SSwKoYjF9+kzW4N73SemSJ08O7du39UlalIjvEFDlhO/eTCn5EgHswELZTDiuMriy1+DL91FahMDLjkAgtZ1WgvrLXr92yh9InLBTfhEntuAgNA9Evj25Zq1QDeoyexHk1BCILZxQK5V66GgzJucqa7c37YC77O8BO/f66ZWL8OzeLR40fvK0kCBTFkhavAikCC4DqStF3/4lIodRW6pYhIeHw/btO+H27dtGIjZ8adOmgbJly0JQUHYbsSlKVCKgyomozAulbQ8B2aATpvAyq57aQ5BiEQLqCARS20mCunr9W8UIJE5Ylc/Te7EFB1T/3j1gkKfF4uEyVawAFfp9oxSHAvuJloWfVESMrKj7SdktsxFbGgzLzPv4JmHhY0ADIDniROyvRNwjuvGzTyBlUB4o+PZbkK9l09hfKCoBIeDnCARS24l2XMKXLPa5pWc/r0KfZy+QOOENOISDN+gFZlzihFav0Wr1PbZQichh1BRhYWBBPg0B4gQxgRAgBAgBdQSo7VTHLNBjECe0GiYcAp3p6uUjTmiYxbjqu3rVRX0MIoeBMWFhYEE+DQHiBDGBECAECAF1BKjtVMcs0GMQJ7QaJhwCnenq5SNOaJiRoG7BHSKHAQphYWBBPg0B4gQxgRAgBAgBdQSo7VTHLNBjECe0GiYcAp3p6uUjTmiYxcjxbOrVFb0xiBwG3oSFgQX5
NASIE8QEQoAQIATUEaC2Ux2zQI9BnNBqmHAIdKarl484oWFGe9QtuEPkMEAhLAwsyKchQJwgJhAChAAhoI4AtZ3qmAV6DOKEVsOEQ6AzXb18xAkNM1J9t+AOkcMAhbAwsCCfhgBxgphACBAChIA6AtR2qmMW6DGIE1oNEw6BznT18hEnNMxIULfgDpHDAIWwMLAgn4YAcYKYQAgQAoSAOgLUdqpjFugxiBNaDRMOgc509fIRJzTMSFC34A6RwwCFsDCwIJ+GAHGCmEAIEAKEgDoC1HaqYxboMYgTWg0TDoHOdPXyESc0zGiPugV3iBwGKISFgQX5NASIE8QEQoAQIATUEaC2Ux2zQI9BnNBqmHAIdKarl484oWFGK+oW3CFyGKAQFgYW5NMQIE4QEwgBQoAQUEeA2k51zAI9BnFCq2HCIdCZrl4+4oSGGQnqFtwhchigEBYGFuTTECBOEBMIAUKAEFBHgNpOdcwCPQZxQqthwiHQma5ePuKEhhmqvv8fAAD//4Ko3RgAAEAASURBVOxdBbwVRRc/IOlHKS1KKCFI2ICUpICClHRKd0mHonSndEp3d4mUAQoKKqiESTcIKO+b/6yzO/e+ve+93Xvfe/ddzvDj3dnZyf+enZ0zJybe6dNnw0iELFmews9DHy5fviYxePzxlIwFY/HQ04A3APx+eCPC14wAI8AIRI4Az52RY/Sw5WCaMJ444/CwUX7k42WasDCKx4y6BQZiTBwWHoyFhQXHDASYJpgSGAFGgBFwjgDPnc4xC/USTBPGE2YcQp3SnY+PacLALCwsjJhR96IfJg4LEMbCwoJjBgJME0wJjAAjwAg4R4DnTueYhXoJpgnjCTMOoU7pzsfHNGFgxoy6De0wcVigMBYWFhwzEGCaYEpgBBgBRsA5Ajx3Oscs1EswTRhPmHEIdUp3Pj6mCQOzBw8esETdm3yYOCxEGAsLC44ZCDBNMCUwAowAI+AcAZ47nWMW6iWYJownzDiEOqU7Hx/ThIUZq75bWMgYE4cFCGNhYcExAwGmCaYERoARYAScI8Bzp3PMQr0E04TxhBmHUKd05+NjmrAwY0bdwkLGmDgsQBgLCwuOGQgwTTAlMAKMACPgHAGeO51jFuolmCaMJ8w4hDqlOx8f04SFGTPqFhYyxsRhAcJYWFhwzECAaYIpgRFgBBgB5wjw3Okcs1AvwTRhPGHGIdQp3fn4mCYszJhRt7CQMSYOCxDGwsKCYwYCTBNMCYwAI8AIOEeA507nmIV6CaYJ4wkzDqFO6c7HxzRhYMbO5Gxoh4nDAoWxsLDgmIEA0wRTAiPACDACzhHgudM5ZqFegmnCeMKMQ6hTuvPxMU1YmLFE3cJCxpg4LEAYCwsLjhkIME0wJTACjAAj4BwBnjudYxbqJZgmjCfMOIQ6pTsfH9OEhRkz6hYWMsbEYQHCWFhYcMxAgGmCKYERYAQYAecI8NzpHLNQL8E0YTxhxiHUKd35+JgmDMzCwsL8O0d92ufjaOcP6+jcxd/p33//df4korHEI488QunTZKJSz1aiFgU7RrklJg4LqkBhcePGTTp58iSdPn2aLl68JBtImzYtZcmSmXLkyEHJkyezGuVYUCMQKJoI6kFy5xgBRoARCDACPHcGGNAQqI5pwniIjEMIEHOAh8A0YQDq2kb9zJVT1GNtc/rj3NkAP5roqe6J9JlpWOXplOWxbJE2wMRhQeQvFvfu3aNt23bQqVOnrEptYjlyZKfSpUtRggQJbO5yUjAh4C9NBNNYuC+MACPACMQUAjx3xhTScacdpgnjWTEOcYdmY6qnTBMW0q5U32vPLRNnmHQ1VDDrixttV5c+f5k4LGj8weKff/6hZcuW06VLl60KI4ilT5+eqlWrQtCE4BC8CPhDE8E7Ku4ZI8AIMALRiwDPndGLb1ysnWnCeGqMQ1yk3ujtM9OEga8r1Xeou8//dFL0PqFoqr1+ibaRqsEzcVjg+4PFtm3b6ccfT1iVRSH2/PMF
qGjRIlHIyVliCwF/aCK2+sztMgKMACMQ2wjw3BnbTyD42meaMJ4J4xB8tBnbPWKaMJ6AK0bdlzQdktCS+d+i/E+8RE+mzEo3712nH88fo/VHltC1axFLVVOkSEXFcpWnF58sSL9cOkG7ftwQqcQ+UaJE9ErOElQkWym6ff8W7Tyxgb4/dYTCwh74pK+oSNUDSRywLbh27RqlTJmS4seP77NfwXrDLRawQ1+8eInHsB59NCmVKlWSMmTIINP//PMv2rFjJ/39999mPtBQgwb1KFkytlk3QQmyiFuaCPQw8G6tX79RvO9hVLFieUqYMGGgm3BV386du8QG1UmqVesdevzxx1zVwYUYAUYg9BAIlrkzqsjG1hwbW+1GFZdA5otrNBHIset1BQoHtZ5MkiSJXn1QxR8m+vYH+IeJJiLDybHqe8lRucM5jmtRphvVfaEpxY8XnhkNozBa8e0CGr/pQ9u+VC3UgDoX7xfu3r4zu6nXkhbh0pGQ7clcNLP2KkoQ39Om+eLtC1RnRmm6qzF/egVgBHd1/V5PChf3lzhgl71163Zavnylh0T5+efzU968ealOnVq2C/gvv/yK5s9fGK4/3gk1alSnYsWK0I0bN6hv3/e9b0d4PXz4EEqcOHGEefSbbrHYs+czOnr0W7MqMOm1atWi//3vUTMNETiZA0N/9+5dM71w4UL00ksvmtccCS4E3NJEoEdx//59Kl68tKx206a1lCpVqkA34bg+bFBVqlRVlmvYsD61bm0/fzmumAswAoxAnEcgWObOqAIZW3NsbLUbVVwCmS+u0UQgx67X5Q8Od+78TXPmzKUvvzxE33//g6w2b97nqESJYlS3bm3HQrI7d+7QlCnTpfPjJ598krp166J31SP+1VeHxLp9kRQQNm/eVKzxn/O4b3fhhr4HDBhImzdvldW9+urLZrVJkz5KuXLlpDx5clPBgq+Y6aEQCSaaiE08XUnUiw/PGa7P05usolxpDQK9cucy/XH9V3oqVVZKkTilmXf83qG0fP8s8xqRci9Wo75lhsq0+//eo2PnjlLWx5+hVEkMSdTuX7ZS/+XtPMqkS5ORljbZJTcFsAlw4sJxSprwf5RZtIeA9t+eUEjG7f7s6R6xOrY/xHHlyhXq3r03fffdMbumZdpjjz1GQ4cOpPz583nk2bRpC3344SCPNLuLrl07U40aVYX39IuCKahml8Vn2o4dm+nRRz2ZZZ+ZxQ23WHzyyQKpSaDqLlu2jJxM1LX+e+zYcdq1a7eZlDFjRqpe3WB2zESOBA0Cbmli5szZtHv3Hvlsq1Sp7Pd43Hzs/G40kgrgl6F27fr0++9/0KBBA6QGSSRF+DYjwAg8JAi4nTudwBPIeTa25tjYajeqOHfq9J7wvXOJevfuQblzPxvVYrb5YoImbBsOskS3OEBg1aVLd3PNjfXt7du3zdGVK1dGCLR6RVnj7ocffpQCMHzDEbJnf4Y++WS2WZ+KgEanT58l7i1QSTRixJAomW66oe+KFd8m8BcRBayrunbtFDJOmYOFJiLCPKbuOZao2zHqQ2tNFyxzGA3f0ouuXDWO38IAsmbKSfPqrZdjASNeelRej3Gtbn+AHk+amnCv8pRCdOvWTYoXLx4tbrWDMiZ/UtZZbnwBDwn5B9XHU6lnyst6OqxqSN+cPCjj3SoNpkq5a8h49/Wt6ODxnR5tqYvoYtRxPF3Tpi1NKTok56+9Vojy5ctLf/zxJx06dJjWrFlHP/30s+zKvHmzxNFk2VW3SDHq2bJlpT59eprp3hEwslCpRXuYVPQAx209evSWSePHj/ZgyoHrs8/mcrS76PZFmTJlGoFpUaFJk0ZCmv4/denxe/PmTbEbOs9Mg8pSs2bvmtccCS4E3NLEwIFDaMOGTdS8+bv07ruN/R6Um4+d341GoQLQ/a1bt4W5S4oo5OYsjAAj8LAg4HbudIJPIOfZ2JpjY6vdqOKsmKaJE8cJ7b8XolrMNl9M0IRtw0GW6BYH
JWlOly6tYJSHyjU1vsEwixs+fJQcZf/+fahChTciHDHU0RctWkoTJ34s8ymG345R//XX36hfvw/Mtb6qOCYY9XbtWgvJ+auySbwnEHR99tle+uKLr2Rajx7vUSAEIWpMsfkb2zQRm2PX2w6YRF2v1Ds+r/lmyvrY0zK5wqQXJTOOi0wZstCihttk+owvJtC83RNkPIGwNd3U/hAlTpAk3D0wm9u7fEsJH0lEP144Rs1nW5JXXarvfU9W9N+f6GLUFy1aQuPHG072unTpSO+8U11vVsahGlurVj2544eJAxOICopRh6R96lR3zvouXLhAlSsb7W7fvsknc6zajOzX7YuiJjtVf7t2bVTU9tdpfttKODFGEHBLE04XkNiIiugEAH8Xc5j88N+p74jI+hUdD8FtX7FgcXrkoZPxOckLXJzmjw4suU5GILYQcDt36v2N7B1yOs/qdXvHncyxkfXLu+6I8vtqN6Iy3vXj2mn+qJYJRkbdyVidfk+c1G33HKKS5ubdAHNdtmxFuZ6eOXOqVP/W21LvAqTqAwb012+Fi69evZaGDRsp05s1a0KZM2cW6/MBthL10qXLyzYzZ36KBg/+SGhW9KOzZ3+NEYn6hAlj6eWXw5uGoq84CrlEieJSYzfcAONgQmzTRLBABjoPiEQ9ogHpku5KUwsKlWhDfaNVuZ5U93lDclp1RlG6dPm8rOb9auOodPYKZpWnLv9EjWZUlNewTZ9bd52MT9o/gpbsnS7jRfOVo8EVJppl/nnwD5Uamce81iPRwahj4qtSpQadP39B7tzpDLjeNuKffbZPqMf3kslr166gtGnTyjgz6hIG+Scyxt7KacQWL14qpbUVKpQXNkm1PG5DwwAaDS1aNJO2/eomfAm0aNFGfswx+W3evMXvOk6fPkOjRo2hLFky08CBA1RT8nfevAViIt0u7KqLCqlyU497+KDAQ36HDm3plVcs+yOPTEFy4XTyhF3VggWL5DOAShp2qp94IqMczfTpk0l3+gINEZhBfP75lxIP2F7B7qpkydelNogOga/FHPLs27df2pghDvsy3cxk374D8j7U8OEboVChgrKNisIhnTdT+957PejcufPyIw/7NziKg+8FjAH9atu2NWXK9ASaMUOfPu+Lj/ZZqRUDDRaEWbPmiHF9auaxi9SrV4fKly/ncSuqfYUmTadOXeXGxpgxI2jSpCm0f/9BqSq3aNEnlDVrFo96vS+AF/Lv3btPzmGws8ufPy81btyQkidP7pEd783cufPpwIGD0h4QWOTNm4eqVq1Cr79e3CMvLr799jvx/BdL3KC6ly1bVnrhhefl+8haB0CIw8OCgNO5U+ESlXcosnlWLFHEO9daVvn++30lA6LqV7+DBw+T7zQcYb71VkWKaI5FmRMnTgqHsLvE3HFAagqCgcB8iu+wnSNNCBKgLnzkyFHJ2EAK+txzeYQmYhN65hlDmIN69XbXrVtJq1atEXP2AflNyJYtq2BUXqKWLZvZCiJgErhly3Y6ePAgffXVYTnO114rLBi60rZjRntRHceYMePp8OGvTa1I9D9FihSyXmDqJrilCbTlZN5Gfmi0bd26TaiJH5eMJuZ5rEdgx+29KR7VZ4V6AxHc4IDvLNTe4Vga6uneY1DMNxjqJUssFXW7/i5Zslx81z6RjDf8SeF7DQbcTqJeuHBxeZRw+/Zt5PoFwrdAMeq+1i5qc8gXow4NgkGDhkoT0zlzZtgNMc6lxTZNBBNg0c6oK4n6vX/vUplRll22UmF/ILy0vz7CsPPRVeVv3xeL+oSPEhzEVZtYRGKmM+SNFlaiU7/9SJDAb+5wmBI9kpiu371m2sWXGJFLSsy8wY4ORh0TfaNGBvM1Y8YU+fHxble/xscEAfbqanIJBUb91KlT8oN69epVfbiO48ClSJHXImUwVMVwXtejRx9pLzZr1jSV7GHHD0asXz/DLAAZwHC1bNlWMlrLly+mQNQBRgQTKoK3RoOazDG2DRtW
SxMP5APjU6JEGURp9erllD59OhkP1j9OJ8/ly1fJzQu78eg+E44f/16ajtjlQ5r3jrm+mNOdyWFBBwYbobewIaxU6U0Zxx+YngwdOsK81iNVq74tNtC66kn09tvVJeOKxcyePXs97uECTOqKFUuEIzvLF0fdug3p1KnTglkeRy++aKhFjho1VjqXDFeBluCtheOkr3/99ZdglGvK2rBYxgJVhYUL50nmWF17/2IRrFQEve9hE2LMmJH01FNPylvYaGnXrpPpsAcLICxQVPA2a9iyZRt98MFH8jaweuyxVNJ+Hwl4D+bOnWFuVKo6+JcRCFUEnM6dwCGq71BU5lnYVn/++RfUpElDuVGm43zt2nWxUfiWTFqwYC49/XQ2D4ZZn2ORSV/z6PUgjg3WyZMnUNKkSc1bPwmTP3xvMYcgeM8dY8eOkpufuKfP7b7mXjBTmJv0jd4bN24Is7lWHnMS6kPA/INviPempZNxYBMWm7XeAX2ZPNkSFHnfj+jaDU2gPifzNvKPGDGaVq5cjShhg+H+/X9Mm2d8pyZMGGNqlzl5VrLCAPxxi0NETU+ePI3mzZsvfcXAZ0xEAWNWGy/IFxGjju+rLtVWazt/Vd8jWrtExqjjGw6awCZbp07tIxpqnLkX2zQRVECdPn02DP+jGooNyxEW1f9d1jQ1q93x0yaPckf/PCzv/X3/jpl+6fZFmbblxLqw7/76Jtz9CXuHmvVVmlJQlkO9COdu/hX28f6R5v23/rvv3Vczg4/IpUtXw/DfSdixY1dYoULF5H/xIXJS1My7ceNmWV58aMLEERM+/5sFbCLnz583+yFsv21yOEtyisXs2XPCJkyYFJD/Ynczyp29fv26Oe5bt26Z5cTmh5leqtQbYUKFy7wnpILyntgll2mBqAMVNWnSXNZ78OAXZltCom/2A3Ty88+/mPfEhoG8V6dOAzMtmCNOaUKN5aOPBstxCmdHKsn8FY5bwvB8gE3v3v3DBKMbJtR95C+ukY77wFEFscEh03FPbJDIZCHZMdPw7PUgTlUw7y1cuDhMHJsYJry7hokPspm+bNkKvUhY5crVzHsoI04pCAOdrF273uyvkBB5lMFzRJ+ETwqPdLuLZctWyrzVq9eSdas8Tvv655+e9LVu3YYwzAUYH3D0Ffbu3W+Ob9q0GWFCihIGXNF+zZp15T2xAWYWF6p1Mg24/PrrbzJdLKrD1DgwbuCjAsaFNNSt3j30S9WNdA6MwMOCgJu50+k7FNE8u3Xrdvk+ok7veWHjxk3ynhA4mI/Dbo7FTcwTFSpUlvmFjXAY5m+832KzNUz1V583UEYwEjJ/587dwtTaBN9qsZEn07HuUUFvF/MH+o25DO0K7RyZH+nCNlcVCRNmPmEdOnSW9+rXbxwmNNRkn3777bcwocEo0zFv6fOTm3GgQTV2wbCZ7buNuKEJp/O20AozMcM3EgHPX2g2mOlffPGlOQQnz8os5GfEDQ4RNYlnCxox6GdbRFlt7+3cuVuWBS1FFtT3TKfHiMro9B2VtQvqUjS3Y8cuScOg4wsXLsp3DmtYjBN5kBYqIdhoIjZxjTaJemLhFGxD2y+lpBvS8TcnvEz/ag7GFrbcJs5bzyKl4G+Ne4Ual+xI774idlyRd/xLNLjGVCqcpTjpEvcOFfpTjXz15UZHydF5KHOGZ0xV+DrzytJz4gx35UW+2ZJqdOLMd+E2RaJDoq52s+1UbMQHjMSkEa4fSMCRUmpHWEnUbTNqiTt3bvHYqdZuyXZi00bd29Zc75ubuBMV+Nat29E33xwVfgJGm+rjYhEhj7TInftZKQWEShB2+xGUdGHkyGFCel9YpgWiDqg5Q73v3XcbmSrugnESKlXDpMQfx4d06tRB7HzWkG3Ca+jHH0+V58e3adNSpgXzH7e7nAMjcCYHtbOxY8dLyS80ItQ7ARxwLuq777aQUmodN13qAmkPzi6H+jfCoEEfil3010XMCj179qVPP91ji7NSG/NWc1MS
dUjlIZ3Xw0zhxX7GjNnSyyt20lWwk6ire/qv2KCREiakKQmWuu+0r7pE/cMP+ws1T0NDQ9Xn6xdmIdAU8PaXgfwwQxCbTrLopk3rpNbA6NHjSGxmSC0FbzwgZYApQYEC+SlZsv8Jr8iXhfpsFVkeWgfK3AEJMBE6efInqR6Ld5MDI/AwIOB07nTzDkU0z+IYq1KlDPOauXNnUs6cOUzYoYUEiZ46VQY3vOdYrFcQcPQstIRgUjRx4lgPj9pnzpyVJ18g35Yt66VqOOJK6ggVcd3EB3365ptvhDT3EVuJetu2rah+/bqowgzvv/+hPAIXtsRQm0dQknFIzqFFpGumoY3GjZuGU092Mw60paSbseVMzum8raTDkBivWbMCQzAD5mHMx9myZTXnaCfPyqzIz4jTdyOi5vAd6tChi9SaBI1OmTLR1GCMqJx+T2HmvSbQ86i4wsutRD2ytQvaUTSn2vT+xXcU656MGTN434qz18FGE7EFpNhUix4bdThoWthiGz2R4inpub3l0pr0w+kjHuNUKvE3792kOrNL05oWB+SRa53WNKHDP+6j4bVnUaHMRT0Y9dZv9KI6BYyJuez4/LSk+S7pNX7t8aU0cn1fqvhyTepZaqBsR6nGezQqLqKHUTc+XLD5gd2tHvQPl56OuM5Q6Iw6Pja+AtSmdUZGz4cNgYeVUYcN+OTJU6VHcajggrjhaATqtm3atBI2w/2FTbHx0dcXIPrGRyDqgD0hbN91p4B9/lOZA200b2547Rw71nBc0kXYWMHeF6pnsL0L9uB28oxoAak2VODRFLba3mGBsHGfOHGyXOApW0D9GSINdSCMHDlUmk1416E+dPpmjcojdrVNk4Vdu7aa75di1D/66AMqU6aUyi5/wfSDocbGj24TFhVGHaYvDRs2laqHUMkrVaqkR91O+6oz6lu3bghnV+5RuXahnOJMm/axPJ1Cu2UbXa053MG79MYbZX2qrovdZ4FZBanqincBfhmgIpogQQLbujmREQh1BJzOnW7eoYjmWeA7VJj+wKxGV38X0jnxLhtq7xs2rDHty/U5Vld9xzGyWK/oG6f6s1NzoM7IKmdXMHnp3LmDPBHH10kwervYuM0tmBA9wOcFhALlyllOwtRmK1Tlhw0brGeX8bFjJwg75WViHrJOHXEzDlSm5md9fOEajGKCU5pAtU7nbZhi4ZkgYNO5Zs0a0icAnDPbBSfPyq68mzQ3ONi1g3UfzK2E9pc0r5o1ayplyOCceY0pRj0qaxeMU9Ec4tmyZRV/DWeJuukZ3q2BAz8wTe6QJy6HYKOJ2MQyWiTqUxuvoNzpDHv0gdt70NbDq8KNcVz9+fTCE6/Ko9lOX/mFcqR5lg79/jl1XtBA5lV1QMJefszzMq1qoQbUuXg/GRcq79LpHBj9t8a/LBmzesVbU8tCneV93cO8TPjvT3Qw6mC0wHCBwYbdrR7OnTtHvXoZfVbpkKoi6It0xajrDJ7KH9Xfh5lRVxJAhR8whiS2du2aclGChYiyJcORFrBle/XVl2ncuNEmvIGoQ6jgyUXPbWGLt3PnViFtSCCc2JWSkyt2+hs3biad4uj30IHdu7dR4sSJzb4Ea8Tt5BnRArJGjdrSdlnXbtDHr2y3YDMNfwII+mJOz6sz2iodjHGlStXUZYS/+sJQMep2fie++uoQtW/fOZyzGbVI1W3U9QbRb9h5Q6IOx4ft27fVb3v4VfC4YXOh+qoz6gcO7LHJGT4J81KVKu/IG9u2bRRS8GThM3mlgKa7du0uNVfULTyTYsWKSqZdOc9T93bv3iPmPk8nS4ULFxILzZLSO20yIXnnwAg8LAi4mTudvkMRzbPAGVpn0BzDe7ts2SIpaVRrj6JFi0jP1ep56HOszqir+Vrl8/XbsWM7+f3F/TNnzop225t20UgDA16yZAnJcKdPnx5JMvhqV91XG4aYRwYOHCCTR44cI/yFhF9nqjLqVx+jm3GgHsU0xQaj7mbexobPhAkf
i+PHligY5FoVmxrY7Hj11VdMX0nI4ORZmRX6GXHzbtg1CYd/S5cul7fgYA4ScTchphh1vW92axd1X9GctzM5PFtoRAiTU+nYEfmxRsL7HddDsNFEbOIZcEZ9WK0ZUmUdg5p6cAwt2OMpYVaD7Vt1NJXLYeziIu2fB/ep4qSX6e87d2SWRa22U6YUmen8rXNUY1IxmfZq7hI0spLh6V0miD+tl9emY78YzpM6vzmAqj5nSOTszntHmehg1OF9slYtQyV/8+b1EZ6hDFX4cuXelJKmjz8eLz0go1/qY6kYTaQ5DQ8zo64k6IpBXrZsuZCwTxMq1YaTGqXWDqZk7doNcmLr0KEt4bx7FQJRB+rq2/d9OWliUk2UKKFUcVYSDKUaD+/c8BjbtGlLqXoPJjUuBLeTZ0QLSPUR0s0WdCyEzbRUZcOO8caNa+QtfTGn57VT4/7tt9/FcYnGvIDNmXjx4utFZBxOlnA+KaT66uOuGHVvR3Yo4JZRV5IdOPAZN25UOAmzm766YdSFXSlhoYqga5XIhAj+APf9+w+I+Wor4bngfVMBm2J4p3RJDcazZctW+T5AsqMCniU2M7Jly6qS+JcRCGkE3M6dTt6hiOZZgItvXNWq78jFvVJ/V2rvQ4YM9Di5QZ9jdUZdzdeoT53pjLgKai6Firuu5g7J/Y4du6TncWwY6EHXLvTVrspvx6gL23zxbTCEJHZ9gqYbnGJCaw1zPILbcahyscGou523MV5ggHUmNn+uCC0yFSDAwPpD13CI6rNSdfj76/bd0NtVayuk+ftsYoNRt1u7qPEpmvNm1NV9/KqNp/fe60zVq1fVb8XJeLDRRGyCGFBGvU+VUfRGzkpyPMuOfkITNhsef+0G2KBEO2pesIN5a+D2nkLyvtK83t3tB6kK//15Ycs5p7pMT582Ey1rYnndPHBmD/VY0swso9Tp7/97j0qPymum65HoYNT1D0vLls3ksUZ6m3ocx3BBqoqgf/yYUddRMuJObNRRQqmygQGYOnWGlFqqXcoF/6lPDx8+RKj+rZX2eHY7roGoY/1646iM5kLNTvhsEd5mZ0uvsPggfvfdMan+jg2CtGnTCJv6ScJLedyZWN1OnhEtIJVNtrfnc0URsIuGfTSkLzi3FEF/54YNGyQlAu+911Pe81bJxOK0SJHX5b0lS+bLM1LlRSR/As2o43i+/v0/lCp5oL3UqR8P1wM3fXXDqOtqtXYbEeE6ZpOAvkI7BSqlWIAjRKRGf/XqNXEawg5xXN1cuVDEglqZgNhUz0mMQEgh4Hbu1EGI7B2KaJ5V9SiGBpvH+A5BcABtQGyC6lpd+hyrr1WUjbT3qRqq/qj8wveIcLgqvXIrDcM9e3ZIe3df7ap67Rh19Y3AxoAyj1L5ff26HYdimvxlBtEvpzQRiHkb7f76628E3zmfCB85CPCP06BBPRn3/hPRs/LO6/baKQ7e7Sg/UUgfPXo4QXPLnxBTjHpkaxc1BkVzETHqyoSwWrUq8lhaVTau/gYbTcQWjnjnA8aotyvfl2rmN+xghNd2GrTa86gj70EmT56CNrT+SibrUnMkPJ+jEI2vOk/e82bgN3Y6TMkSJZO272XH5pPHWyFjQnFM27ZO30rm3puBlxX99yc6GHVULbyUy7Ob8cHzdmai2oc0HY5QsKjV1Xhxnxl1hZL165RRV4zQO+9Ul06vdDW3n376WXyImogjtypJGz1I9GDvr0v/0HIg6lDqaZCaCs+28qMIxzoJEiSQ57ZjYQTb+WzZssmzq50wjxY6sRNzO3mqBSTs42CjqAcsFj4WDvWAl35MDPLgnYFjmMOHv/ZYTNgt5sAATp8+U1btvYhq376TPLbMm4lHZsVwIp4nT25TDTCQjLqiP7QBXwXwZ+ErOO2rG0YdbXfpYvhHgCQcaqp6gDrdUGHPCmygXgo19dOnz8gNEkimkiRJYmbHhwQaC5D2dOvWRZ4xe+PGDfrrr3OCAUgq5rpMZl5EYD8IO0g7
UyGPjHzBCIQQAk7nTjfvUETzrIJSae1gDdK0aWOxwT1YSuCwYawHuzkW99VaB6rTQ4cOCvcNxVwnvLRTliyZpdYYzMGgTYNvLc5L17+5uhbg/Plz5H1f7aq+2THqOJu9Vat2chN02bKFHtJhlMMceeHCRbE5mtp0muZ0HKp9xTR5ayCo+05+ndIE6nY6bwuv7wQJOcbufb698PAuj/UqJfykDBo0gJw+KydjjSivGxxUfWrNhmsIYooVK6Juuf6NKUZdbYBFtHbBIBTN+WLU8c4ID/XSYWKvXt2FrypLW9k1CLFcMNhoIlbhCMTxbFMPjhVrNSMcP3c0DEej2f0XZ5ubR7Hh2LRTl39WxcJaLH1H3iszLl/YrXvGEVv3/70fVny4Z5nFR+aYZT45NM2s79hfR8z0xgsrmekxcTwbGr59+455hEIpcZTUl18e8jgCRXyQwnBkiToyQj++C+XV8WzCERkuXYXYPp4tUEezqXqcgnDx4iUTX+AszpU0qxAMh/l8cG/QIOuoPzOTiASiDtRX87/jrdBWnz799SbCxM6n2U8cGaOHr78+IssKRsY80gr3xSIjrE2bDmGtWrUNEwyQWWTr1m0y/zTtqCsxactjznDUjpDgm3kDEXF7ZMb48RPlmHF8D45G04PwPGviMWnSFHkUD+7jSB7hRM68h3wq2B1xIph68ygevIN/imPLVAAtqHdvz569JrY4ClE/3gT1qqCOZxNSY5Vk/qoj1LyPb7E7nk0sksxji1asWGXW4SvitK8YpxqbXZ2+aErNOSgrtEDkEUcoj+cDWkN6u3YdzSpBe0jrLI5YEgs6M10sws32cSwSAo4uUn36+mvjqE2ko9ywYSPlPRybxIEReFgQcDp3unmHIppndZyxzlDvJ36Fzwz9tozbzbG4gXdclZ02bWaY8LIt8+MbKwQO5r1ffjkl08WGg5k2VxyLqgeVH/O1mnt9tavKqflR/65i/aXm644du3ocwyYk9ua3XzD5qhrH41AFMedj/PiOqz6re05/ndIE6nc6bwuNPtlfHN+F9Y0K+C6p42SFXbdMdvqsVF3+/rrBAW3u328dybpjx84odUN4+5e0Imz2feaP6ePZIlq7oJPqeDbwFXrAu4f3UR1BCLoU2hJ6ljgbj0ma8LVGsqMVHC+JNRCOlPzll1Mmvjg2Eev+ESNGm2mBigREor66/QHpfT2yHYd+mzvSp0c3mdlyZslL02oul1JwJMJxXNKESSme+Icwcd8wWrrPkJDJBPEnadJHaUXLz4RUPblMgpo7bE4TxDe8CX/zx5fUYb69Cg8KRJdEHXUfP/69dJ4EaRQCJEZ58jwrd7lUGtKhYuR9FJeSqON+REFJrOzy6LvT27dvCrerbFcmojSnO1qxeTybGody1obr5csXeUjz1O4x7uk2cbjWQyDqgIRYqZX1799bHIFV3mxCSRSRUKNGNeGgq5N5r0+f94XNsKFGrJyF4aZg8IQd2RiZD9JPSEER1NEgiCunYIeF9Llt245IEkdkvSk83veQ8UD8cUoTqk3x4ZOe99V1tmxZhUnANPE+G5JZ5UVd3YedOCQzKgwdOlA6IFPXvqQuN2/eElKiFvKdg0d2HM2ipL84UkhMvLIKvJuQ+MBxzu3/7KxHjvT0GB8oifq4cRNp8eKlsl1fkvRq1d72oBEnfY1Mou6LptAhnJSA0w4QgAmOUdNx16X/+/btF2YahnkB8r788oviSLZ7BJtUBFxjxx9BfKCk/wWl1ornCbzhsBGSdwRfPgnkTf7DCIQYAk7nTjfvUGTzrIJ0/foN4hto+EXRHcup+/j1NcfiHr5RmFdUgGM4aNBcuWLYPjdsWE84j7OOG9W/hzgmLF++vARJr9hIllW0aNFMOn3FRUTt4r5gtoVn95HSKSW0fVSAr6AmTVqY83m2bFmlRtbZs7/KLDA9g/PYRIkSqSKOx4GC3nNz0aKviVNH+pt1Ook4pQlVt5N5G+vC2rUbmLhAcy15
8uSmjxFoF86bN5PSpEkjq3fyrFR//P11gwNorWLFt6PU9OrVy80j+5TXfBTct2+3OBowvM+amJaooy8RrV0wTvVuIa+vEBF/4KtMsKbHJE34WiPZ0Yo+9+nznPKDBTwD6dAPWo0BYdRXtttHaR5NG+nz7r2pHe39dqtHvuyZn6PJNRZR4gSWKiXOTp+wdyitODDHI6+6SCoWifMab6L0yTKqJKkKD5X3nkuMs3/NG16R6GTU0RTU1eB5Eoy3d8iWLau0R8ZE6R02b95qHjHlfU+/jsieWfduDe/zWEz7E5y+KMHAqOMMc9jgZc78lLCdNRgQhYGQpBLs0hAicvoXiDrgaAsq2wjr1q00P4K4vnr1qmDKKiMa7jgxNQmAsQGjDpMOBCxCWrVqL8+qBvP09NPZZPq8efOl0zzdfhtqh9hswALFm/mUhfz445QmVFNYdC5cuFh6ZFWbVtu2bZIq1SoPPo5wCLR37z6VJM8pr1ixvLRPNxNFBCp68KaP4P0swXzXrm04d6xSpbJ45u/JfGLXWpxZvkj6JxASJJmGP1jAYVEJR456UM5Z9A0Tdf/Qoa+FU6KO4Y5ng3kFGN3JkyeIegvI7IMHD5P2gKqs3S/ax6SvgpO+YsFbpUoNWfSAjdd3XzSFAvgILBE25nv2fObhzR1M93vvdZHMteoTfuGQCAtV+NpQAQtvqNo1btzQNBvAPdg2CombNDVRmyFIB95duxoe83HNgRF4GBBwM3c6fYeiMs8Ca6xTYIKF0KpVc2rUqIGM638immORD/MK5mucyqECmP4mTRoJBqq8h4o7+iW0dmjGjFnSkZ3Kny1bVnECSxOPIyoja3ft2vU0ZMhw6bHcm0HGxiDs1YXGosnYYB2EEzbq1q0tNoaTqqbNXyfjQKHr16+LTeY5cjyY17BJgW+Em+CGJtCO03kb34jRo8eKeX6v2U3ggk2GLl06eThAdvKszMr8jLjBQd+gjqx5nVGHXyB4wI/IllsJDryPX7Vrp644+g6mHb5OrfEuExF9+1q7KKGBd124xuY/TlwpV65MlI5ZtasjGNNikiZ8rZHsaOXSpcviRKnmcmMFTqHVunHNmnXSXBDrm0mTxttuALnF2TGjXnJUbrlL6bZBX+VSpXycXsj6Gv10/jj9+ucvvrJ5pCdOkoReePo1uiWOaDv2y1dy8vLI4HXxyCOP0K6u33ulel66IQ7PGowrTKSQHP32229y9xLSJOxixqXgFItgYNTjEr6++gqJQgJhz67b8iEvmDcE0LEehPqThxMg3AP9Ib9i9PX8/sSd0oR3W+gX+ouxJRHvr13Ahwx2/hkyZAg3Vrv8btJuCd8B2DCBFEF3oOSmruguE4i++qIpve9YdMJZVfr06SLFHfUJUxs5p+H0gsjC5ctXJOMelbojq4vvMwJxEQF/504n71Bk8+yff/4lGBVDK2vFiiWm3bYbXDFfQ5qeMmWKKK1xMM9gLGnSpPb5DXDTD70MmE30Cd8/OO30/pbqeVXc6TjwfcVGCiT0br+z/tIE+u5k3gZdCDM6wUTEE85sIxeuxcSzwhgCgQPqiWqwWzNFtSznixkEYpomfK2R7GjF11r83r17Hho7gUAKc5ljRr323DL0x7mzgWg/xut4In1mWtxoe4TtxjRxRNiZWL7pFIs5c+aJXaabAek1NjXsdvkDUjlX4hoBpzThuiEuyAgwAoxACCEQLHMnFpk4fQJSJEjhvKXSIQR50A8lWGgitoFiHGL7CQRf+0wTxjNxxahP+3wczf90UvA91Sj0qH6JttSioGG76ys7E4eFjFMsTp06Jc5YPmiqnFk1OYthp71QoUKUNWsWZwU5d7Qj4JQmor1D3AAjwAgwAnEAgWCYO7t06U7wkA5JKcKCBXNNM6o4AGHIdTEYaCIYQGUcguEpBFcfmCaM5wEtGMcSdRSNi1L1qEjTMTYmDqBgBMZCIcG/CgGmCYUE/zICjAAjEHUEgmHuLFy4uOww7KrhxPS5
5/JEfQCcM+AIBANNBHxQLipkHFyAFuJFmCasB+yKUT9z5RT1WNs8zqjAg0kfVnk6ZXnMcMBlDT98jInDwoSxsLDgmIEA0wRTAiPACDACzhEIhrkTdpjwcWLn6dr5iLiEvwgEA034O4ZAlGccAoFiaNXBNGE9T1eMuioONfidP6yjcxd/Nx1dqXux/YuPUfo0majUs5UiVXfX+8rEYaHBWFhYcMxAgGmCKYERYAQYAecI8NzpHLNQL8E0YTxhxiHUKd35+JgmLMz8YtStakInxsRhPUvGwsKCYwYCTBNMCYwAI8AIOEeA507nmIV6CaYJ4wkzDqFO6c7HxzRhYcaMuoWFjDFxWIAwFhYWHDMQYJpgSmAEGAFGwDkCPHc6xyzUSzBNGE+YcQh1Snc+PqYJAzPXzuScQx53SjBxWM+KsbCw4JiBANMEUwIjwAgwAs4R4LnTOWahXoJpwnjCjEOoU7rz8TFNWJixRN3CQsaYOCxAGAsLC44ZCDBNMCUwAowAI+AcAZ47nWMW6iWYJownzDiEOqU7Hx/ThIUZM+oWFjLGxGEBwlhYWHDMQIBpgimBEWAEGAHnCPDc6RyzUC/BNGE8YcYh1Cnd+fiYJgzMwsLC3J2j7hzyuFOCicN6VoyFhQXHDASYJpgSGAFGgBFwjgDPnc4xC/USTBPGE2YcQp3SnY+PacLAjG3UbWiHicMChbGwsOCYgQDTBFMCI8AIMALOEeC50zlmoV6CacJ4woxDqFO68/ExTViYseq7hYWMMXFYgDAWFhYcMxBgmmBKYAQYAUbAOQI8dzrHLNRLME0YT5hxCHVKdz4+pgkDM1Z9t6EdJg4LFMbCwoJjBgJME0wJjAAjwAg4R4DnTueYhXoJpgnjCTMOoU7pzsfHNGFg5sGoZ8yY3jmSIVji5s07clTJkiUNwdE5GxJj4QyvhyE308TD8JR5jIwAIxBoBHjuDDSicb8+pgnjGTIOcZ+WAz0CpgkD0fjx41vO5JhR5wnD+0XjF8UbEb5mmmAaYAQYAUbAOQI8dzrHLNRLME0YT5hxCHVKdz4+pgkLM9NGnRl1njAssmAsvLHga6YJpgFGgBFgBNwiwAtPt8iFbjmmCePZMg6hS+NuR8Y0YSAXL148lqh7ExETh4UIY2FhwTEDAaYJpgRGgBFgBJwjwHOnc8xCvQTThPGEGYdQp3Tn42OasDBjibqFhYwxcViAMBYWFhwzEGCaYEpgBBgBRsA5Ajx3Oscs1EswTRhPmHEIdUp3Pj6mCQMzdiZnQztMHBYojIWFBccMBJgmmBIYAUaAEXCOAM+dzjEL9RJME8YTZhxCndKdj49pwsCMVd9taIeJwwKFsbCw4JiBANMEUwIjwAgwAs4R4LnTOWahXoJpwnjCjEOoU7rz8TFNGJgxo25DO0wcFiiMhYUFxwwEmCaYEhgBRoARcI4Az53OMQv1EkwTxhNmHEKd0p2Pj2nCwIyPZ7OhHSYOCxTGwsKCYwYCTBNMCYwAI8AIOEeA507nmIV6CaYJ4wkzDqFO6c7HxzRhYMY26ja0w8RhgcJYWFhwzECAaYIpgRFgBBgB5wjw3Okcs1AvwTRhPGHGIdQp3fn4mCYMzFj13YZ2mDgsUBgLCwuOGQgwTTAlMAKMACPgHAGeO51jFuolmCaMJ8w4hDqlOx8f04SBGTPqNrTDxGGBwlhYWHDMQIBpgimBEWAEGAHnCPDc6RyzUC/BNGE8YcYh1Cnd+fiYJgzM/GbUZ341kXaf2EjnL/1O//77r/MnEY0lHnnkEUqXOhO9nrMiNX25XZRbYuKwoAoUFjdv3qKff/6Zzpw5Q5cuXZYNpEmThp566knKnj07JUv2P6tRjgU1AoGiiaAeJHeOEWAEGIEAI8BzZ4ABDYHqmCaMh8g4hAAxB3gITBMGoK5t1M9ePU19NrahP8+fDfCjiZ7qMqbLTIMqfkyZU2WNtAEmDgsif7G4d+8e7dr1KZ0+fdqq1CaWPfszVKJEcUqQIIHN
XU4KJgT8pYlgGgv3hRFgBBiBmEKA586YQjrutMM0YTwrxiHu0GxM9ZRpwkDatUS9wcKKcYZJV0QFZv2TuhvVpc9fJg4LGn+w+Oeff2jVqtV0+fIVq8IIYunSpaPKld8iaEJwCF4E/KGJ4B0V94wRYAQYgehFgOfO6MU3LtbONGE8NcYhLlJv9PaZacLA1xWjDnX3RXunRO8Tiqba6xRtFakaPBOHBb4/WOzcuYtOnvzJqiwKsfz581HhwoWikJOzxBYC/tBEbPWZ22UEGAFGILYR4Lkztp9A8LXPNGE8E8Yh+GgztnvENGE8AVeq776k6ZCElsz/FuV/4iV6MmVWunnvOv14/hitP7KErl0z7JJ9PfgUKVJRsVzl6cUnC9Ivl07Qrh830B/nIlarT5QoEb2SswQVyVaKbt+/RTtPbKDvTx2hsLAHvpqhqEjVA0kcDx48EGO/TilTpiAcWh/XglssYIe+fPkKj+E++mhSqd4OyTnCuXPnaffuT+nvv/8284GG6tSpRf/7H9usm6AEWcQtTQR6GA8ehNHGjZtktW+8UZYSJkwY6Ca4PkaAEWAEAoaAm7kT30hseNeoUY0ee+yxgPWFKwoOBNzQRHD0PLC9CBQOf/99V3YsSZLEge1gNNZ28eIlOnjwc0qePLlYIxeLxpbiVtWBogmMWjK78eLFLQD+6y14x3inT58Nw3XGjOmjNIg3JhQI5ziuRZluVPeFphQ/XnhmNIzCaMW3C2j8pg9t669aqAF1Lt4v3L19Z3ZTryUtwqUjIduTuWhm7VWUIL6nTfPF2xeozozSdFdj/vQKwAhuaX9ETwoX95c4YJe9Y8dOWrlyDZ04cdKsH9Li557LQ7Vq1bD94B46dJgWLlxi5vcVqVbtbSpS5DW6ceMGffDBQF/ZbNMHD/6QEieO+gTmFot9+/bTd98dM/sAJr169eqEXz3AyRwY+rt3jckV9woWfJWef76Ano3jQYSAW5oI9BDu3/+HypQpL6tds2YFpUqVMtBNcH2MACPACAQMAadzJza8q1WrKduvX78ONW/eNGB94YqCAwGnNBEcvQ58L/zBAcKeefPm06FDX9MPP/woO5cnT24qXryoWG/XFEIy9wza2bO/0rhxE80BDxo0gJIkSWJe6xEID1avXkP79h2QyQMHfkBJk3quefX8Kv7NN0eoY8eulDVrFpo7d6ZK9vl7+fJlatGiLV24cIHAV0BoqcLjjz9OOXNmpxdeeF44an5GJcfJX39oAgPetm0HHThwkI4e/U5ilS/fc5Q797NUs2YNSps2bZzCxDGjXmZs3nADnN5kFeVK+5xMv3LnMv1x/Vd6SjhuS5HYWjyP3zuUlu+f5VG23IvVqG+ZoTLt/r/36Ni5o5T18WcoVRJj53j3L1up/3JPj+3p0mSkpU12yU0BbAKcuHCckib8n+koDu2/PcG3+vT2Tt959MH7wh/iuHLlKvXu3Y+OH//eu1rz+rHHUtFHHw0gEI0etm7dRoMGDdOTbOOdOrWnqlXfJuzCVa9eyzaPr8RNm9YKZvlRX7fDpbvFYvHipUKT4JpZX6lSJSlHjuzmtR75/vsfaM+ez8ykDBky0NtvVzKvORJcCLiliTlz5onnvFfSbqVKb/o9KGbU/YaQK2AEGIEYRMDp3Ak/Lw0aNKE//viTBgzoR6+/XiIGe8tNxQQCTmkiJvoUG224xQECq+7de5trbqxvb9++bQ6hTJlS1LNnN1cadzjJql27TmbdqHT9+lVS8m028F/kypUrNGTICPr88y/MW+vWraIUKZKb174iThl1zAd16jTwVZ2Z3q1bF3rrrYrmdVyLuKUJrA0nTJhEa9assx0yeLDBgz8ibObElRAQRn1oremCZQ6j4Vt60ZWrl8yxZ82Uk+bVWy+vwYiXHuXJ5K9uf4AeT5qacK/ylEJ069ZNguH84lY7KGPyJ2Wd5cYX8JCQf1B9PJV6xpCkdVjVkL45eVDW363SYKqUu4aMd1/fig4e
32n2Q49EF6OOl7pVq3amFB27NoUKFaS8efPQn3/+RYcPfy1e8o3imLJfZHdmzpzqseOlGHXsqvXo8Z7eZY94xowZpEQe7f344wmPe3Dc1qdPf5k2evRwj9084JorVy5Hu4tuX5SZM2cTFhkqNGhQz+cGwa1bt2j+/IUqq9ytbNQo8knILMCRGEXALU0MHTqCNm3aQu++24gC8XyZUY/Rx86NMQKMgJ8IuJk78R0F45EiRQo/W+fiwYiAG5oIxnH42ye3OAwaNJS2bt0uJaRDhnwk19R4ZzZu3EyjR4+T3erTpweVK1fWcReXLVtBEydOlmtXxfzbMepffPElvf/+R/I91TcKYoJRnzx5gqkliz6Cz9i5c7c4aemMHC8k9OAp4mJwSxPgs0aMGC2H3L17V6lZgecCTEAv4MFy5sxB06dPjhOwwIQ6IIx6RKOd13wzZX3saZmlwqQXJTOOi0wZstCihttk+owvJtC83RNkPIGwNd3U/hAlTmCol+j3wGxu7/ItJXwkEf144Rg1n11VlsEfXarvfc/MJCLRxagvXbqcJk2aIpvq2LGdUFmrojcr41Blq1+/sXyhMXFgAlFBMeqQtE+caEww6l5Ufy9evCik7LVl9o0b1wpb76hLz+3acPuiTJ063aO6li2be1x7XzjN712er2MOAbc04ZRRx0YUTFV8BX8Zddgs4b9T3xGR9ctXf73TA1WPd724djs2u7o4jRFgBAKDgNu5Myqty8WcWB9hjeQdnM41TvN7t8fXUUcgEDQRnc8LdOX0Gxn10Vs53eCAvr35ZhW5np4yZaJUa7ZqJFJrDkjV+/Xrrd+KNK5LrbEB0KuXYZ7rzaifOnWaGjduJusrVqwoNW3a2LyOCUZ927aNHqrv6AhMAerXbyLVvZUGLtLjWnBDExhju3Yd6dtvj1HXrp3kSVL6uMGsN2pkmBCtWLGE0qRJrd8Oyjjm9Ghn1HVJd6WpBYVKtHFcV6tyPanu8+9KYKrOKEqXLp+X8ferjaPS2SuYgJ26/BM1mmGob8A2fW5dQ51h0v4RtGSvwRAWzVeOBleYaJb558E/VGpkHvNaj0QHo46F8Tvv1JUvhjcDrreNOOy3e/c2pN4rViwWhJJGZmFG3UIqMsbeymnEli5dQZs3byE4FatV6x2P29AwgEZD06aNpW2/uglfAm3bdpT+FsaMGUFbtmz3u44zZ87S2LETKHPmp4T/AGNiV+0tWLBY+i4oWvQ1IVVurJLlLz4o8GfQtm0reumlFz3uBduF08kTu90whcAzwI4vdjahFYLw8ccThAaF5TMBGiKffrqHvvzykMQDu56vvPKSdEKYK1dODygiYtT37z9IM2bMkvk7d+7oYWZy4MDn0m4J5hZ3796jV199RbZRvnw5SpDA0+dFz5596Pz5C/IjDx8ScOyEDwDGgH61atWCnngio0e/IroAzc2fv0iqx8GWDvVA/QqmHrCnUwEmMD///DPVrl1TSALKqGTzd9WqNbRu3QYqUCC/sG2zTIOgdvfpp5/R/v0HCGY42PRr0qSRVPsDXQJPqAFyYAQYgZhHwOnciR5CUvfrr79KLTs1B+I9nzp1hnz/odo6b94C+kJI9RAwn7Vr10pKGCFZW7RoiZxLoe5ZoEABatmyme2cBSnT9u075dyBxexTTz0p1jTVqVSp16X9LOqeOnWSKxVilOVgj4AbmkBNEMrMmjVX2N9+K+jjN/m88+R5VjCJDenpp7PJxsCwtWnTQcb79+9DdpJV61vzjil1BuMP7beDB7+gzz7ba37vWrRoJujjoFwn4ZsVSBNFNzjAfrxHj96SUZ01a1q4jX18I0eOHCNpef78ORKHqPzBev6993rSV18doipVKovvfHMqX94wx/Rm1LFua968NXXp0lEyhVeuXBHmfYZfiUAw6spkEP6dPvpogNCmTSVNYZTqux2jjjHiuYKnqFu3tnznozLuYMvjhibgJ2D27DmETRysn5In9zQ9wDvxxhtv
yaHOmTODsmXLKuPB/CdGGHUlUb/3710qMyqfiYdSYX8Q9oBeH/GsTNdV5W/fF4v6hI8SHMRVm1hE3tcZ8kYLK9Gp334kSOA3dzhMiR5JTNfvXjPt4kuMyCUlS2aD/0Wig1GHV9ZmzVrJFqCKEpntA+zLEfDSKalhKDDqZ86ckZP71atX5fjc/oF3W5gNgOGNSvjss33Ut+/79OyzueRiQpXR7fjBiPXq1V3dkgwXdt7AaC1a9In4IPlfB5ijKlUM8wtvjQZoUuCDime+atUyU/IB5q1sWWMjatmyRZQuXXA7uXA6eYKpBJNoF3SfCfBVANMRX8F7x9wXow5GHAw2AtSe3nzT2vTTVaK828GiAx9bPdSoUUduvmFzZe9FS1/jAABAAElEQVTe/fotGQejvXjxJ+JUB8sXR7hM/yVgk6Jz526msxsshEEPKugmAVhcT5kyXTqKmTBhjMpi/ipagolMxYrlZToW5QMGDDTz6JFy5cpI9UAw7m61dfT6OM4IMALOEXA6d6IFSH/AOI8bN8p0sgonSQMHDqFnnnmarl+/IecovTeYWypUKE/Tps3Qk2UcTpSg8onvkApgeFq2bCs3UlWa+lVzB663bNngsbGq8vCvewTc0AQ2VWA7jW8Kgve3ZOTIoWIj+WV5r1u3nmIT5ytbkzPdWeEnn8w211tQGV+zxhCIyUr++4PvXYEC+cRG9+fSfA3frEAFNzhE1vb06TPlxjh8O8DHQ1SDer/wjixYMFdqFPhi1K9evSY2xa+YDB8cvQWKUYcJKTbhgPvYsSOF6aohrNCl/XaMOtZGzZq1lPPGwIEfUDEh6Y+LITpoAsIWbH4C0w0b1jgyBY4tDKHREq0S9VeeLU6jKhsfi50/b6YPVhi7exjwpIZLKF+GF+juP39T2dH5JQbKZn3ryfWUKcVT9Fx6YZ+u3a9ZpCm1K2Koi1eeWoiuimPfFMN//tY5WnF0PrUu3FXWVUnctzsWLjoY9d2794iHb3i137x5nYdtuOxMFP4oRh1MPl5KXyEir+2xrfq+YMFCgif3QATY5OGotqiEGzduCKcZhhmEzvxBmgubFATvFxMSbixkIDVo16619KLvbx1oBwseSEv1j+Vff/0lJP31cVsGfScP3vEh2cdud1Q8fqo6YuvX7eQJrQFfNuqQtr/7bgu58MBHFQsAbNJgAQmpASZXPL/Zs6cRnA0i2DHqkDTBsQxCnz49PaTR8AjbpYshTYbmQvnyb8ideEii+vX7QJbxNllRjDpuogyYfuzU7tmzV9quYaEECUaTJg1l+Yj+KEYaC+Vx40ZSpkyZpB8H7PqrjQy1Ww8p/jvv1JHVeatn6apbKr++UYgNqYYN6wmthSfo1KlTUvKmHNwwox7RE+J7jED0IuBm7oyIUUdv8U7369dHbvAePvyN3LBWDBxUftu3byvt27/+GvOfsVEN6Wrp0iXlYJG3adOWUkoHxh+bmzlyZCcwcdhkXbhwsQkKM+omFAGLuKEJxUjjhJz33+8rTRzxHMeMGS83ZLGGhMAIYceOXfThh4NspcpK4qznX7t2PY0aNVaWbdu2tfyGwoTy2LHv5ebQhQsX5D34mQlmRl0X0vTv31vQeynZ78j+XL9+Xa7VgOegQR8SNunv3LnjU6LuXV+gGHWse+bO/USueyZOHCs35VRbOqO+cuVSsY4xjqZFn3/77XdasmS51Np79dWXhfr/IFMYqMrHlV8374b32G7dui3WbP+KzcyL4lSAw3LdhjyDB3/ooWHrXS6YrqOVUU+cJAltaPullHRDOv7mhJfpX83B2MKW28R561mkFPytca9Q45Id6d1XxK4u8o5/iQbXmEqFsxQnXeLeoUJ/qpHPYHhKjs5DmTM8Y6rC15lXlp4TZ7grL/LNllSjE2e+C4d3dDDqSmqInU1vFRuoEYGBtgspU6Yyd6gVo26XT0+LaCMgthl1b1tzvd9u4k5U4Nu37yzVwEaPHm6qjytHI5C0g3mGJAHq
vwhqpxkTWeHCBWVaIOrA5IpJtlGj+uJD1ljWu2HDJho+fJSU+KMf7du3kefi4iYWQlOnzogzKkpuJ8+hETDqy5evFF46P5abFVOnfmy+E8AH56K2bNlG7g7ruHkz6idPnpTqaigzYEB/ev314oiaoW/fD6Qan50q2KZNm8UHbaT8GEKFTgXFqINBxwJWD1BJmz17Hr32WmHh7fUj/ZZtHEe8rFy5WjL73nUdPvy1VMPPnz+vWHT9T5bv2rWHVL2DlF9XMfzkkwVCrX+2OJrOsrtDP9AfLLjGjx8j1FMtFX58qIDfr0J6z4y67aPhREYgRhBwM3dGxqh7H005fvwkWrFilVzgYyNPaexhgGoOrlOnpjTbQZrawMRGKDaQ06dPh2QZoAI8eDBUaLfLa2bU/wMmgD9uaEJpVHlvRkOt98iRo1ICrCTq+H6+8YZxyoouIMAQoOGFb4/uHVzZ9+o0ooYLST421BGCmVG/e/eusE/uLrUm8c2bMGGsqcGoxuLrV22CQAo9cOAHMltMM+pqDQmJ/rhxoylLlsyyH+qPzqirNO/fihXLU4cObV0JDb3riq1rN++Gd18rVKhsap7gHgQlffv2EtpJhnDYO38wXmMejhaJOnYAFrbYRk8IqTi8wbdcWpN+OH3EAwOlEn/z3k2qM7s0rWlxQB651mlNEzr84z4aXnsWFcpc1INRb/1GL6pToImsp+z4/LSk+S7pNX7t8aU0cn1fqvhyTepZylD/VKrxHo2Ki+hg1LEAx0Jc35lU7UIqiCNW7ILOUOiMOj6avgLUpnW7Xj3fw8yoKwm5+oBA8glHIzhfGww/NB5gawQpvc7k6RsfgahDSch1pgiqNpAKY5e7dev2wo7wZeGV0pD0QwIMiefo0SPEBsML+uMMyrjbyVMtErEL7+31XW2owOYbCwTvsGjRUqEKPk3u7mNxgqA/Q6QpzYkhQwYK5rmQdxXSJAGmCfpmjcqkmyzoi1HFqL8vpBalSr2usstf2O6B+cfGT1S8hyrpBQqDDsuWLW36p/Co+L8LpX73/PMFpNqryqMW7iNGDJH2qEhXm06dO3eQNnUqr/pVH36dJtU9/mUEGIGYQcDN3KnedzvVd7u5Z/XqtVKyCkkgJIJ6UN832LWDOUNQZja+1IO//vob6tTpPZlXnxtlAv/xGwE3NAEJOSTlYOSgMQEzwYgcB8MD9vr1G6XgAAIEBF3yqzSz4C29dGnDlAob1tCw8A5KmKHWWd733V67wcGuLaz7YBai8IFfhfTp09tlDZd2/Pj3cn2GG8uXL5JMHeIxyahD8g+VdzzbSZPGSc079EEPOqMOxlM9+3v37knNGJUX8wPWQ2nSpFZJceo3EDSBtRHMg8CHQeMAAdhizag2s4IdlGizUZ/aeAXlTmfYow/c3oO2Hl4VDotx9efTC0+8Ko9mO33lF8qR5lk69Pvn1HlBA5lX1QEJe/kxz8u0qoUaUOfihq3Jjp82SadzYPTfGv+yVEmtV7w1tSzUWebVPczrjUcHo65UbsFgQ/VaD+fOnaf+/QfoSaadqn42qmLU/VlMP8yMOhyRtWjRxpQaQnINNfSaNauLjZL6VKlSVdPmV03IL7/8klDzss6uD0Qd+NhVqlRNTgpbtqyXDsrw8VOq7XA8Agck+j0Qx9atG8xjNjyIJcgu3E6eETHqcIyCj4+u3aAPW9mdK38CuKcz6npeu8Wkrgan57WLT5v2sWkLphh1O78TShKFxYwuhberE2n4SPTo0Udqfag8GA8W1GXKlDbbVPf0xQHU21KnfpzgrLBhw3flhwYq8UpapnaNYTLzwgvGXKnqwS9U9fsJ9X5/5ha9Po4zAoyAcwTczJ0RMerQBMOcqYc1wrYYUkFd40bdX7JkmXDgOVWerawY9Y8+GiydyNltoKLclStXxObfO7IKu7lV1c2/7hBwQxNgOjp06CwdhqpWoTVYokQxqeKta0Xg/tGj3wqGvrOH+jsYdzDwJUuWIOX4Vn1f
UGb79k22jgOVxkawMuqqfxiDr80G3PMO0HzF2syw/28tTSJVHv1brDY11D3vX30DxKkzOb0u77Wpfk9n1L1t1DEOjAHrLfxi3Tl79vQY8dyv9zEQcTfvhq92gQv8IG3evFU64kU+bw0TX2VjOx3rvIBL1IfVmiFV1jG4qQfH0II9k23H2bfqaCqXw/C+hwz/PLhPFSe9TH8LexCERa22Czv1zATb8xqTism0V3OXoJGVPI/+ar28Nh375bC83/nNAVT1OcO2s/hwTy/RMoP4Ex2Mui41X7duZYRnnoJg3hL21Fi4Q61FqWAwo66ekPXrRPVdSdCBK5jg5ctXCUnnTNNWXO0Eb9iwWjiR2CQXLLA7rlnTcP6GVgNRB+r54IOPaNeuT4VkY4T42CWSx0XAbrhp0ybS7giq8ZCIwiNlK+FAzW7BhXqCMbidPCNi1OGAD1Jt3WxBHztsi7p06S4Z1NWrl8tbvhj1cl7HHiLz77//LkwLDMc3+ADGjx9Pr17GvxAOd6DpAKm+kiQoRt3bkR0KOGXUUeb+/fvC2eLn8oQBjAm0qgI2lNq0aeWhogdzCdCqOmZk/vyFgqZnhTOTgDoiPsq67amqF7/KNIcZdR0VjjMCMYuAm7kzuhl1aCpBY8nOmSbQwbyi1J2ZUQ88vbihCfQCUsJdu3bLTRYw4nrQNTWRDk/YNWsapxIp5kSpveub46gTAg0EXaIsE/77A6ETThYJRkZdaY6hq742rfWx6HG1wQVh2/jxoz02KfCdhiYkAuzFYZ72xBNP2Gq2BopRR1s9NGexuFYhIkZd5dH93DjZsFDlg+HX7bsRUd+hRg4NoW++OSKFePDDEBdCQBn1PlVG0Rs5K8lxLzv6CU3Y7Nt2s0GJdtS8oOVcbuD2nkLyvtLEbHe3H6Qq/Pfnv6WWc6rL9PRpM9GyJrvMPAfO7KEeS5qZ10qd/v6/96j0qLxmuh6JDkYdC/AyZQzv0s2aNRES3Hp6kx5xSFOxc4eg25cxo+4Bk7xwwqijgDqSAmqCOJ7r22+PmZ5qlfr04MEfyh01SGntJrBA1KFsniGlwEcS9sPw3p0/fz46/p96FTYI0qZNI9SbpghbMXuV5fCIxH6K28kzIkZd2Y937NiOqlWrEm6QyrQEEoMPP3xf3tcZ9YEDB0jpcq9efeU93ZYdCXgGJUuWlfd077YyIYI/gWbU9aawKQRagH0+NnUQoOqWN+9zZrZvvjkqjkbqIr09g6bVot2bbpVdHY6SAS15B4UvM+reyPA1IxBzCLiZO9U7b6f6brfBqxiOqErUoSKsnI3BmanS0lGoqE0+XDOjrlAJ3K8bmvBuHXboX3zxpfBQvsjU1vSWiM+dO1+sd+ZIYUGlShWllgSYUgiWEiSwfJoo7TZd21O1B9vvWrXqyU31YGPUdTodPnwwwdGekwBHfDAbiWrQ30e9jL+M+qpVS2nZspWmE0c7bb6oMOrok9K0szPd0/scrHE37wa0J48dOy6EYMnoxRftTUmVoz67+TMYscDmQsAY9Xbl+1LN/Ib34y0n1tGg1Z7Ol7wBSJ48BW1o/ZVM1qXmSHg+RyEaX3WevOfNwG/sdJiSJUombd/Ljs1HsMtASJgwIW3r9K1k7r0ZeJnhvz/RwaijaiXtwuSHD146m2O2IE2HqhkW5roaL8ozow4UPINTRn3Hjp1i0TFYMntg7nRHX0oyAPu89es3SumsfkyaajkQdcDcATvYsC/Gbiw8caoPotKogO181qxZ5bm1TphH1c/Y+nUzeaKvilGvXr2qdHKi91851ANe0EKIL3xcqAC8unTpLndAW7ZsJqXJuKcz6mrDSy1GcN97R11JELyZeORVTDPiuXM/ay5WA8moQ60QG3pPPvmkx048JuG6dRtK1f8uXTwdx+mSEOCCMdip2uunG3jb6KuFO8bGjDpQ4MAIxA4CbubO6GbU//jjD+EXxFi3wX8L/GeooEzB1DUz6gqJwP06pQmY1uHkD9it4rx0
/Kqgmz5C3Vmdp477SqsMDo9xvjTU3u0cxiktLqxjsSGcMWMGWT2+RQMHDpa230gIJkZdrdnQLwhiihR5DVFHYenS5cJE7DPbMlhrwJQSASYGcNbasWN7eTqCdwF/GHVlHok1D3wXfSXOcYc99cyZ06Tpm2orKoy6nicurS/VGPHr9N1AmahoNyu/UDitB6f2BHsImI16/RJtqUVB4wxiSMC7r7Imex2EGzeuykWxSlMScFy3XFaTvj/1DcFb/JrWB+UZ6v88EM4thGQ8TJy1rkLb8n2oVn5DjXX+4ek0bfsIeWtKo+WUJ31+GW+yqDL9/OsPqojHb3Qx6vC4Wbt2fbnbiElu0KAB0l5UTaTY6Rk7doL0PI0OjRxpnXWJa2bUgYJncMqo65MkalIqw4iDIapa9R3TrsvOkzfyBaIO1KM8syKu24HhWkntEYczEKiZqXBUqLHhY4kzM+HwQjGtOC4Hkg8wlf369TY3gvCRgtfvUqVeN73M42P+0UdDBPP3h5SwwslhoIKbyRNtwzZyibCRxAYVPLunSJHc7JLaREFC3bq15SIADhMhKYA2AhweIeiSZDtGHYuJfv3el2ee4x2cM2e66UhGP3YGH/PChQtLFXhICaBOvmzZCvlRXLZssfwQoz03jLqv56dML7DTP0R4iVeSK90ucMaMKeE+/sq7POjkwoULQm2rvaDjt9E9M2DcOEMdDgsR8LyxSINNFrDFxx6mBcyom5BxhBGIcQTczJ3RzagDBF0aifn5BeHn4vff/5Cbo2ruQD5m1IFCYINTmrh586Z0kotetGjRjOrVq212SG3Y4tu3du1K8zumMiiP7upbMnPmVMqe/Rl1W/5C/R2nhIDRQ4DUMWXKlNLhLb4hih6ChVFX/qHQVzstAKR7B0jO589fJM0eawqTs8jCrVu3qGJF45sbnTbqilFHf65duya0H1rJbz6+22PGjDKfp86Ee9uo45SX48ePy2PIsKGD56XMBSMbZ7Ddd/puqP6rI5Jh5tirV3fTmR54ADjUhEksgm7S6GvdFgzr7oBJ1NX55wooX7/9NnekT49uMm/nzJKXptVcLqXgSITjuKQJk1I88Q9h4r5htHSfAapMEH+SJn2UVrT8TEjVjYU+1NzjxYtPCeIb6jvf/PEldZjvW/U8uhh19A8L4379BsiXC9eYMLED9+uvv5lpSAczAumgHhSjrqfZxb2lbnoefUd148a1pjdIPY+TuNMXJTaPZ1PjUs7acL1o0TxpS6TuKRVhXHvbcak8+A1EHThyTZ1B27t3D3FEiqF6jfp37DDUDRGHqjdUvlVQHuJxrTs2U958kQ67GvWB0TcENmxYTcmSJZMLrI4dDY0WHNMBW6dABac0odrdvXuP9LyvrvFRgkfWJGJjDkF5UVf3n3nmaclkquuBAz+gYuLIFBXsGHXcw0cVEzXeOXg9xdEs6pQEdUQa8uHdVGe1Q+sBwVsa7YZR9/X89u8/KD4ahmo+2n7xxeelNhBs4xGgpgWpuXeANka9esbGJO6tXr1MfHwf884mNzWGDBluMusqA1RgS5Qozs7kFCD8ywjEEgJu5s6YYNQBB+x7Fy9e5uE3A2uX997rTM2atZKIMaMeeMJxQxP62gJMd968eQhafDClQoAvnIYNw6+BlUke8uD7io1vu4BvDjQ/lRRZ5YG2BTYKwOQGA6Oun9ai+ujrd9myRaZwQ6mEI++uXVtNYYivsrHBqKMvJ0/+ZL57ug8JnVH31WekY50xfPgQuUEfUb5gvefm3cBY8B707t3PFMqB1rHZBHpWaz1I0hs3bmBqpPhatwXLujsgqu8r2+2jNI+mjfR5997UjvZ+u9UjX/bMz9HkGosocQJjwY6bODt9wt6htOLAHI+86iKpIMB5jTdR+mQZVZJUhYfKe88l9tJ8lTE6GXW0cePGDeGM4mMpIVdtql8wJ7AhhYqvd1C7od7p3tcR2TPr3q3hfR4vqj/B6YsSDIy6sj+Bitf8+XM8
hr93734hpe4v0yJy+heIOpQDNDQGD91p0qQ2+3L16jXhvMfYyfVmDiEVxaSByQWMLEw6EJSn17t37wnp9HjKli2rSIXJxSK5Q6jbb9+5c0duNoBZ9a5fFvLjj1OaUE1hVxALQZzxC8kwwsaNa6RjFpUHTmrglXP//gMqSZovlC9fTnq0NRNFRD9KxvtZ6upPlSq9KRebKAuVMvgqOHjwoPRfoOqD7wBIJ7BzrQdlr6dvmKj76tgi7yOSfD0/lPvuu2PyGEf4qVABC6233qog/VooKbu6p36VJAQe4r2PXFJ51C80BPChunPnb3H+6lPyeBfg2atXP5aoK5D4lxGIBQTczJ1w5AatmPHCwVWBAobGoNro1U271HCU5pCdU82lS1cIPxiTfTqOg7bWTz/9LJi+c4KpSSe0e3JIxkw5GIsrJ5MoLOLCrxuawLd048bNUpNOfUsxVqwvoc6Lo/bsgi6NtzMB8y5z/fp1Ye/7vbBhf0RI3rNL6azSjAOjg7YCFdzgADqtWTP8hoRdn3RGHX6BoOquM792ZVQapNQVK1aWl95rFpVH/eqbB5FJ31WZI0eOCnPALrabJ7paf//+vaVX/7/++kv4CjCO2VN1qF+s+bFxg+cFzTs7E1yVN9h/3dCEGhOElmPHThROfz2d9mLzsVatd8Idt+tr3RYM6+748eM7t1F/Y0IBueBVgATqN1XKx+mFrK/RT+eP069//hKlaqEm/8LTr9EtcUTbsV++8lCrt6sAC+Et7Y/Y3TLT/CEOsxIRgToqVI9hGwTv3mAc8RuXglMsgoFRj0v4+uorbJkTJEhg7vapfGA0EbwZOjBniRMnVtnkL+gP+RMmtBzFeGRweeGUJrybQb/u3v1basEoSbd3HjDh58+fl2rr3mP1zuv2Gh/fa9euCtuv1OGwc1unKufr+Vn3/xGbFeeF9kNyDxMAdd/pLyQgUHNLmjQJvfTSi+GKq40nb+2NcBk5gRFgBKINAX/nzujoGAQLR458K6t+5ZWXws2FasPZbuM7OvrzsNXpL01AQogj9FKnTmNqjvmDITaTIUjAEW85cmQPV5XaNIZTVwgHAhX8xcFpP+zWTE7r4PzRi0CgaALaJtikwhyWKFEin532tW4LhnW3Y4l6g4UV6c/zZ30ONphvZEyXmT6puzHCLgaKOCJsJI7cdIoFvI7ihQhEwKYGTAQ4BBcCTmkiuHofmr05qanItW3bSuz+l5cmEFCRgw3f2LET5MADrV0RmmjyqBiB6EEgGOdO+AGB7xYwfDCTad26hdD+SiOO/7pOhw9/LWxdp0gNKDvHY9GD0sNVa7DRhDphBU9h5MihUosjQYKEQg37pPBRsE1qxOFeVM8IR96ohGDDISp95jzRiwDThIGvK2dyM7+aSIv2ToneJxRNtdcp2oqavmzZA9s1w8RhoeIUizNnzgjG4Eu5w2vV4jyWOvXj4kzrV6UNsfPSXCI6EXBKE9HZF67bQkB3UIhUqMApeyxcQ82vQ4e2HkfxIJ0DI8AIxAwCwTp3btiwSTowVSh4zx1QF4XJjW6+pfLyr38IBBtN4JvRokUb6eNFjcybHrp37yoc2hnHEas8/v4GGw7+jofL+48A04SFoWOJOorGRal6VKTpGBsTB1AwAmOhkOBfhQDThEIi+H7h52Ldug3SzhQLLnh8zZ07NxUvXkScqVo++DrMPWIEHiIEgnnuhCPcefMWCIe435tOmOBLB//r1q0VTiX+IXps0TrUYKQJHHkMR7j79h2gE//5U8FpADhNpEqVysLXSd6AYxKMOAR8kFyhIwSYJgy44JPCFaN+9upp6rOxTZxRgQeTPqjix5Q5VdZICYWJw4KIsbCw4JiBANNE3KAEHBepPOrHjR5zLxmB0EYgrsydUIeHLWf8+NYZ3aH9ZGJvdMFOE/Apc//+vWjfqAl2HGKPQh7elpkmjGfvSvVdJxuowe8+sZHOX/o9WhzM6W05jcMJVbrUmej1nBUjVXfX62bi
sNBgLCwsOGYgwDTBlMAIMAKMgHMEeO50jlmol2CaMJ4w4xDqlO58fEwTBmZ+M+rOoQ/+Ekwc1jNiLCwsOGYgwDTBlMAIMAKMgHMEeO50jlmol2CaMJ4w4xDqlO58fEwTBmaujmdzDnfcKsHEYT0vxsLCgmMGAkwTTAmMACPACDhHgOdO55iFegmmCeMJMw6hTunOx8c0YWDm2kbdOeRxpwQTh/WsGAsLC44ZCDBNMCUwAowAI+AcAZ47nWMW6iWYJownzDiEOqU7Hx/ThIEZq77b0A4ThwUKY2FhwTEDAaYJpgRGgBFgBJwjwHOnc8xCvQTThPGEGYdQp3Tn42OaMDBjRt2Gdpg4LFAYCwsLjhkIME0wJTACjAAj4BwBnjudYxbqJZgmjCfMOIQ6pTsfH9OEgRkz6ja0w8RhgcJYWFhwzECAaYIpgRFgBBgB5wjw3Okcs1AvwTRhPGHGIdQp3fn4mCYMzNhG3YZ2mDgsUBgLCwuOGQgwTTAlMAKMACPgHAGeO51jFuolmCaMJ8w4hDqlOx8f04SBGUvUbWiHicMChbGwsOCYgQDTBFMCI8AIMALOEeC50zlmoV6CacJ4woxDqFO68/ExTRiYMaNuQztMHBYojIWFBccMBJgmmBIYAUaAEXCOAM+dzjEL9RJME8YTZhxCndKdj49pwsDMQ/U9S5annCMZgiUuX74mR/X44ylDcHTOhsRYOMPrYcjNNPEwPGUeIyPACAQaAZ47A41o3K+PacJ4hoxD3KflQI+AacJCNN7p02fDcMmMugEKE4dFHIyFhQXH+P1gGmAEGAFGwC0C/D11i1zolmOaMJ4t4xC6NO52ZEwTBnIsUbehICYOCxTGwsKCYwYCTBNMCYwAI8AIOEeA507nmIV6CaYJ4wkzDqFO6c7HxzRhYMaMug3tMHFYoDAWFhYcMxBgmmBKYAQYAUbAOQI8dzrHLNRLME0YT5hxCHVKdz4+pgkDswcPHhCrvnvRDxOHBQhjYWHBMQMBpgmmBEaAEWAEnCPAc6dzzEK9BNOE8YQZh1CndOfjY5qwMGNG3cJCxpg4LEAYCwsLjhkIME0wJTACjAAj4BwBnjudYxbqJZgmjCfMOIQ6pTsfH9OEhRkz6hYWMsbEYQHCWFhYcMxAgGmCKYERYAQYAecI8NzpHLNQL8E0YTxhxiHUKd35+JgmLMyYUbewkDEmDgsQxsLCgmMGAkwTTAmMACPACDhHgOdO55iFegmmCeMJMw6hTunOx8c0YWHGjLqFhYwxcViAMBYWFhwzEGCaYEpgBBgBRsA5Ajx3Oscs1EswTRhPmHEIdUp3Pj6mCQMzdiZnQztMHBYojIWFBccMBJgmmBIYAUaAEXCOAM+dzjEL9RJME8YTZhxCndKdj49pwsKMJeoWFjLGxGEBwlhYWHDMQIBpgimBEWAEGAHnCPDc6RyzUC/BNGE8YcYh1Cnd+fiYJizMmFG3sJAxJg4LEMbCwoJjBgJME0wJjAAjwAg4R4DnTueYhXoJpgnjCTMOoU7pzsfHNGFgFhYW5t856tM+H0c7f1hH5y7+Tv/++6/zJxGNJR555BFKnyYTlXq2ErUo2DHKLTFxWFAFCosbN27SyZMn6fTp03Tx4iXZQNq0aSlLlsyUI0cOSp48mdUox4IagUDRRFAPkjvHCDACjECAEeC5M8CAhkB1TBPGQ2QcQoCYAzwEpgkDUNc26meunKIea5vTH+fOBvjRRE91T6TPTMMqT6csj2WLtAEmDgsif7G4d+8ebdu2g06dOmVVahPLkSM7lS5dihIkSGBzl5OCCQF/aSKYxsJ9YQQYAUYgphDguTOmkI477TBNGM+KcYg7NBtTPWWasJB2pfpee26ZOMOkq6GCWV/caLu69PnLxGFB4w8W//zzDy1btpwuXbpsVRhBLH369FStWhWCJgSH4EXAH5oI3lFxzxgBRoARiF4EeO6MXnzjYu1ME8ZTYxziIvVGb5+ZJgx8Xam+Q919/qeTovcJ
RVPt9Uu0jVQNnonDAt8fLLZt204//njCqiwKseefL0BFixaJQk7OElsI+EMTsdVnbpcRYAQYgdhGgOfO2H4Cwdc+04TxTBiH4KPN2O4R04TxBFwx6r6k6ZCElsz/FuV/4iV6MmVWunnvOv14/hitP7KErl2LWKqaIkUqKparPL34ZEH65dIJ2vXjhkgl9okSJaJXcpagItlK0e37t2jniQ30/akjFBb2wCd9RUWqHkjigG3BtWvXKGXKlBQ/fnyf/QrWG26xgB364sVLPIb16KNJqVSpkpQhQwaZ/ueff9GOHTvp77//NvOBhho0qEfJkrHNuglKkEXc0kSgh4F3a/36jeJ9D6OKFctTwoQJA92Eq/p27twlNqhOUq1a79Djjz/mqo6YKnT+/AVavnwl5c+flzfIYgp0buehRSBY5k4nD+DixYu0dOkKyps3DxUvXsxJUc4bBQTc0MR33x2jnTt3y+9e9uzPRKGV4M/iBoeYGJVkkuLFc9QUymDdj3WsG3NOvHP79h2gFClSUMmSJRy1HUqZA0UTMMHFejFJkiRxFh7Hqu8lR+UO5ziuRZluVPeFphQ/XnhmNIzCaMW3C2j8pg9tQapaqAF1Lt4v3L19Z3ZTryUtwqUjIduTuWhm7VWUIL6nTfPF2xeozozSdFdj/vQKwAju6vq9nhQu7i9xgCi2bt0uF8C6RPn55/OLj11eqlOnlu0C/ssvv6L58xeG6493Qo0a1alYsSJ048YN6tv3fe/bEV4PHz6EEidOHGEe/aZbLPbs+YyOHv3WrApMeq1ateh//3vUTEMETubA0N+9e9dML1y4EL300ovmNUeCCwG3NBHoUdy/f18sHEvLajdtWkupUqUKdBOO68MGVaVKVWW5hg3rU+vW9vOX44qjqcCIEaNp5crVsvYdOzbTo496vp/R1CxXywg8lAgEy9zpBPwxY8YLRn25LLJ16wbh+DW5k+KcNxIE3NBEjRq16fff/xAbrPlo6tS4qd3qDYsbHLzrUNdnzpyl0aPHqksaOnQwJU0adSYNG9jLlq2g7777jr755iilS5eW8uXLS4UKFaQ336xA8Xww7pcvX6EZM2bRqlVrzLax7m/TppUsbyZGEjl8+Gtq27YjZcuWlRYunBdJbpLmpe++25zQb7QHIaYKqVOnply5ctKLL74gHDdnV8lx4tcfmoAAcPHipfTll4cIeCJkzvwUlStXhrA2CxbBTlQehCuJevHhOcPVPb3JKsqV9jmZfuXOZfrj+q/0VKqslCJxSjPv+L1Dafn+WeY1IuVerEZ9ywyVaff/vUfHzh2lrI8/Q6mSGJKo3b9spf7L23mUSZcmIy1tsktuCmAT4MSF45Q04f8os2gPAe2/PaGQjNv92dM9YnVsf4jjypUr1L17b/GCH7NrWqY99thjYuIYKCdZPdOmTVvoww8H6Um28a5dO1ONGlWF9/SLgimoZpvHV6LTxbhbLD75ZIHcUVT9KFu2jJws1LX+e+zYcdq1a7eZlDFjRqpevap5zZHgQsAtTcycOZt2794jn22VKpX9HlQwMurwy1C7dn25iBo0aIDUIPF7oNFYwbp1G2jw4GEEqczcuTPjpNZPNMLDVTMCAUXA7dwZ0E44rGzTps1iXTJYMg3z58/xe47Yt28/TZkynXLnfpZ69+7hsDehl90NTWCdiPUihD4dOrQNCVDc4GA3cJw+1apVO481uJMNpiNHjlKXLt3p9u3bdtVT2bKlqVevHuEYf6z9mzdvLb/9dgVHjhxGRYoUtrsVLg2MZdu2UWfUsWmDzZvIQq9e3aly5bciyxY0993SxJ07fws+rCd99dVh27Fg42LkyKGUJk0a2/vBmOhYom7HqA+tNV2wzGE0fEsvunLVOH4Lg82aKSfNq7dejhuMeOlReT0wWN3+AD2eNDXhXuUphejWrZtyt2pxqx2UMfmTss5y4wt4SMg/qD6eSj1TXtbTYVVD+ubkQRnvVmkwVcpdQ8a7r29FB4/v
9GhLXUQXo44JomnTlqZdNibR114rJHfS/vjjTzp06DCtWbOOfvrpZ9mVefNmeexwKUY9W7as1KdPT9XdcL9gZKFSi/Z++OFHj/tw3NajR2+ZNn78aA8JGXYBn302l6MPrdsXZcqUaQSmRYUmTRoJafr/1KXH782bN2nOHGvXEOopzZq965GHL4IHAbc0MXDgENqwYZP4mL1L777b2O8BBSOjjkGB7m/dui3MXVL4PcaYqODq1atSSsZOHGMCbW7jYUbA7dwZ25gFco7ANwDfAkj+Jk+eGNtDi/X23dIEtLfSpEkd6/0PVAfc4uDd/pIly2js2Aly7auY7agy6lCPfuutqgSmGxtJYGyffjqbZNphajB06AjZXPv2balu3VoeTUO7dceOXQRB3HvvdRJar0Xp559/oWnTZtKBAwaPsmXLeqnO7lHQ5sIfRn369MmmivetW7ck37F9+05x8tJp2RIk9NmyZZXxYP/jlia6detFe/fuk8P76KMPxAbJa8IE4RGhHXGEevbsK58nNCP69u0V7BDI/gVMoh7RaOc130xZH3taZqkw6UXJjOMiU4YstKjhNpk+44sJNG/3BBlPIGxNN7U/RIkTJAl3D8zm9i7fUsJHEtGPF45R89mW5FWX6nvfkxX99ye6GPVFi5bQ+PGGGlKXLh3pnXeq683KOCbXWrXqSUKpUOEN6t+/j5lHMer+qDNduHBB7JgZ7W7fvsknc2w2GknE7YsyceLHHjW3a9fG49r7wml+7/J8HXMIuKUJp4w6NqIiYh79ZdQx+eG/U98RkfUrOp6E275i08CNjZz3GGJjzN594GtGIK4j4Hbu1Mft9F10mh9tuZ03ojJPMaOuP02iQNCEZ43GVVSehV252EoLBA66ZHnEiCEEhg0hqow6NGEhFUfYtm1jOF9J8+YtEJtLU4UJ63MEhliFs2fPinV9fXkJ5g9MoApQh3/zzbflZYsWzahJk4bqls9ffxj1Tz/d7qH6jkYgYa5du55UjVcauT4bD6IbbmgCgr+yZSvKUYAGvJ1Tnzhxkho1airvQ4swZ84cQTRi+65gAykgEnX76o1UXdJdaWpBoRJ9Rd5oVa4n1X3ekJxWnVGULl0+L9PfrzaOSme3CP3U5Z+o0QwDeNimz627TuabtH8ELdk7XcaL5itHgytYu7P/PPiHSo3MI+95/4kORh2TYpUqNeSL4M2Ae7f/2Wf7hFqGMYGsXbuC0qZNK7Mwo24hFRljb+U0YrBFwQKgQoXy4XY6oWEAjQZMksWEbb8K8CXQokUbqZkwYcJY2rx5i991nD59hkaNGkNZsmQWUoMBqin5i0kenvCLFy8qPgbGRKEygImFPwOosb3yyssqOSh/nU6emzdvpQULFslngB1u2EE/8URGOTZ99xcJ0BCBGcTnn38p8YCKUsGCrwiHKq9LbRAdkIgYdaVeifzdunXxMDOBkxbchxo+fCPA7gxtVKxYPhxT+957PejcufM0YEB/aesER3HwvYAxoEzbtq0pU6Yn9G4JbZj3CR9uaMVAgwVh1qw5YlyfeuTzvqhXrw6VL1/OIzmqfYUmTadOXeXGxpgxI2jSpCm0f/9BKRlYtOgTypo1i0e96gKqYePGTZALjx493lPJ8hebftOnzyKoAp49+6u003vuuTxCa6gJPfOMsfHqUYAvGAFGIEIEnM6d/2fvOgCkqLnwExFBUVGposCpKEVUFBAF6SAgRYo0pffeRKpU6UV6Paq0g0N674iIoPwKCgoIghUsgIBKEf58GTOTnZvd25nbg73lRbnNlGSSb94keTWqsgMHvhRj6EI59kDbFxWVjfLmfVbOaU6WO3CLW79+I+3Zs1f8+1Rq+XA/xhd9DkT9aozr06eXCFC2VY6L0L5169aFKleuKP074acODaNuqj5jxmw5VtepU0u6zkyZMo3+978vUKVYHL8kF8gwE1YJdfbu3U/606IPSCoQ2ttvd3blw6vqjIRfLzSBWEZ4v1AG2U2Z
sQ7asGGjMP0+KBVCYCqx5sB7CiT4vtlYesFBbzPW4JgDQe9wnWzduoVwPXtF3hIso47YSl279pRzemzsQr16mcd8jO8lffp0wjp2iXldrd3hAw2NtR3nmTPnCM16tFgzFBDa/hFmOX+ZQIy6ciHEGgQutNDg6wIKJ0Ydz1HuEgjW3KpVc3+PDqvzXmgC1gtwXQA+ELY4KWIaNWpGhw59LQNXJxUsEp1RVxr1y/9eolIj85iEoEzYr4ko7cWG55DndVP5v66IRf0ddxECxFUdbzBYOkNef35FOv7DNwQN/Lp2+yjF7XfSn5fOmX7xRYc/KTVm5gP/yyQGo35Yk9JER08mLGgDJUykSPjI1EetPvakrFE/fvy4jFYJU7mEJOACcxV/DIa9bjXAYjExY8ZU87Luxw9G7J13DLcA3ACGq3nz1uagHIo6sAApX96QntotGmBJAYYHfVu9epkZkAQCg6JFS8k2L1sWSxkypDfbH44Zt4NnbOxSKbxw6svmzVYAs4MHD0nXEaf7cG769CmUK1dO8/KVK87B5NRkihuxsKxY8VWzzPLlK03zNfPkf5kqVSoLAVpnn9OVK1eTwjcsdHbs2OlzDQeYDJYsiRGB7O4zr9WpU0+amU2YMEYGcMGFkSNHy+CS5k0OGbsVjpu2/vLLL1SlSg1Za758z/n4ZgUydYOwonv3XoQyEFapdFS45+DbUKaDWICAdlUaPXqkFFSoY/5lBBiB+BFwO3aiRjBkffsOkJVjvLn//jSmHyzmktmzo01hP276888/pVZQ/15l4f/+QNDWpElD85Qa4xBsCgyCShDcIZaImhftpuowA8YYhZ1cwOA7pQ4d2glNY3V5SV8j2e+Fm164C6jtbQ7VsReaUAH+mjdvQg0aWBpaPTgomMkrV65KYS3aivc7btx7joxLqPqSkHq84KA/T30n+CYWLZov1tXJXDPqCEBWvLghLLdrWyEIGDhwiFTm1Kv3hggUazG7YMLBjENwgnncniBog1IIbVuzZrn9cpxjf4y6eg7GgfHjR0vhGQrHx6hfEWslaJEhLANzX7RokTjPDMcTXmgCQQBHjRoTx+pB79+LLxr9Bw7AIymkRGXU8+coQiMrRUsctny7jvouaWdiMqFeDOXJmJcuXf2HSo96Wp5XPusbjqyizPc+QrkzCP907XqNQo2pTSEjAEmlKQXprNj2TTH8py+eoiX751LLF43FdkVx3WlbuMRg1OG/0rNnb6OfW9aLQBOpzH4Gm1GMOiSg+Aj9pUBR22+26fusWbPpwoWL/pru6jy2pkB0xmDS+fPnRTRHgyHTmT9oc/v1Mz5EDG66hE2ZMWEbrQ4d2soo+gmtA21V0jqdmcFWdFWrGowU7pk3b7b0fUJeDeJRUcFF+ESZm5m8DJ5obyDTd1g81K3bUDKGWPjBjx1WCSdOnJRaXSwE8f7mzp1FmTJllN3H5GOP+q6kqbgBGiJdQ/3pp59R27YdZVlYLsA8LUWKFLR79x7JrOJC584dRFAWK0CjWsTiGspUrFhBbv8IBnf06HGyvfaFrxOjjvJOSQkxoJWfNWu6MLUz4ji4bavOqOM5PXt2k4w0IjRjvPAXpdYfo64WfC++WJAGDOgjXWjAtOM8vimMUbrpn1Pf+BwjwAj4IuBl7FQRvhs1qi+tWaAhwjzfpk0HKTzDeWWhBZP11q3bSyE0xhQIpvGtnj79q2AQ1oqI1DNlg/T4OPoY9+abdYSPbnk5xmLMQGTk+Bh1VIhnwY0vd+5cMsjtwoWLZcRlXNMFljhm03egYCUvNOHEqOMd410ijRo1jDB2g7nE+gJCV6RwFoh4wUF2Svw5d+5Pub7CHDV06CBpQfD333+7ZtRRHwIhT5w4RVbdpk1LYfXxuHDXvSh3ctq+fYc8HxMzV0QQzyLz+NOzZx8prHLyXcd1WOXB4hZJX5/KEw5/nBh1WLfBMg/rIET6V9YoKK4z6itXLhVzvhH1He3+/vsfaMGCRdJPHhr9
kSOHmspBh0eH1SkvNKHW0+jItm0bHXe5Uox6VFTSWHOjL4nGqN+ZMiWtbr1XarqhHX91XD76V0wkKs1vvlHst55VasErjMlPDYq3p0b5hRYH9459ngZVn0IvZi1Cusa9XbneVD2PwbwVH5WLsmQU0Yr/M4WvPac05RZ7uKso8k1iqtLhE1+qx5m/icGoqwU3NE8xMfPMZyEDHzFMrE4pTZo0ZuAHxag73aef2xJAEHCzGXW7r7nebi95NybwLVu2kVtp6JMRmHQwFtC0w9Rl1qxoM/p8hw5vCRPrPSL6oxWNMxR1YDDFoKovoFauNKJrq3bomgY1MSQVkyQvgyfefSBGPSYmVjC+Y6VJJywiUoqxQyVIuSH8gDRYx83OqGPvcpi+IQ0c2F9M0sVEzkoIIoKJ1gnnVavWSGk5Jr/33zcWsyipFrHQyutmn7gGEzQsfOEDBV8olYJl1PfvNyw6UE4X3ODYbVt1Rr1//97CR8uw0EBdgZI/Rl1Zf9iFHfB1+/zzz4VW5nbWqAcClq8xAg4IuB074dJSocJrsiZY7iiXIZwAY3bkyFEZWBbzChLcpxo0aCLzGMf0xTxOKoZC1wiqMQ7ac7v7C8oEw6jbxy/4VMK9D9ZNEIjqQZuYUQeqVnJLEyjpxKjDtapHj3fimGXjftAJ6CUqKpsPDeFauCQvOKi2Dxs2Um6JpmtIvTLqqFO3ylPPwG9UVDYp7Ehrixau5kundQfKgQcoXLg4snJ3lfj8ou2MulpTQiM/ceLYOJamOqMuH+Lwp0KFV4W2v50nJaJDdTfklBea0NeFsHDo2LGdj6JCjyWGTuzatd3n+g3pmMuHJJqPOqS+85ttpIeEVhzR4JsvqkFff2f4L6k2KpP4C5cvUO2ZJWl5s4/llmsdljekfd98RMNqzaCCWQr7MOotX+lOtZ8xzLZKj32aYppulVHjVxxcRCNW9aLy+WpQtxKGBlWZxqvnqd/EYdQ/kOatTz0VV9MErSC2bHJK+oetM+qQmvlLq1cv82Fk9PtuZUZdacgbNWogNbIgbgSVgKkg9rGExUPr1i0IWgP9Y9YFH6GoQ0n0dBcGtUCCFhLBSnRfpU7CnwaaYJil5cv3vP46wzLvZfBERwIx6kqgAgk2fLXtaZ7wcR8/fpLUkIN5RNLfIc4py4kRI4ZItwl7HeWFS8KZM2d8hDXqHpzHdaStWzeY35daxCJyaKlSJdTt8hdMPxhq+NFDAKRSMIw6XDLq1Wss2zNwYNxt3Ny2VWfUg/XHQ3v9MerwJd24cbM01cNEh90r/O3aoPrNv4wAIxAYAbdj53WhES1Vqpy03MF8As05TND9BYhcsWIVDR48zGd+CdwiSxipC6z1MvEx6tDcQoNrTyoWT1SUr9aKGXVfpNzSBEo7MeoQZGPuQYJguUaN6jKWiD9rKnljGP3xggOarweAg984TP6RvDLqSukmKxF/oHxDEGho65Eg/MIaI0OGDPIYf9Q6YdiwwXFiQOA6vuOXXiqKrLREA58QKO3bZ23PBqE7TN7BpE+dOpEefjhznKI6o47+q7kabpW4plJS25bMK00gnodSGmJLvJdfLizHTCjm1LoGaz7wWbBwSAopUTTqUxosoZzpDX/0dzd1pQ37lsbBYsybcynvQwXk1mzfnTlG2dPmoM9+/IQ6zqsr71V1QMNe9r1n5bkqBetSxyLvyPzmo2tl0Dkw+hXG5iMwZm8UaUnNCxrmrXqEef3hicGoK5Nbpxd/6tQpYVprtFm1A9pdJH2Rrhh1ncFT9wf7eysz6ghE1rBhUxk4DKZBwBia2Fq1ashIm6+8UsHcEgZ7tzdp0oIKFMgngmmNMuENRR0wP8SzMLBv2bJBmA8mFwNFCSmNhb8wNB7QfOjX0AB/Zjpm48Ik43XwDMSoK/NOf4tFJeGGiaUK8qIz6jo0OqOtzuuxCtQ5f7/Q6CsNlZqAo6Pjxp1Q5ul2LXx8jDraDbNVaNSxxQvM5fTkpa06o/7xx4Z5
nl6nv7w/Rh3CxZYt25r+jSgPTIoXLypcTEr5LFL81c3nGQFGwBcBL2On+kb1msAclyxZXPqapv7PXQbXhw4dQcuWrXC0GtLL63k1xtljgKh74mPUnSyUUBYmtzVq1JHV6GMyM+oKWePXC004MepgBseNmyjMnGPMB2A9ivgqGLMLFMgf1ibPXnCAphrrqaNHv5Xui3BjVMkLo44gs2+91U1WgTgOUBqkTGlY90EQAmUPfjHnY02g3FCVssVfRHUwhRC+I61du0LEtEkj8/7+KEZdv25fq+rXdEbdHkwOGAEfrL/wGxWVTboQQpka7skLTag+YcvnKVMsBYo6j3cHOkG8AcRtgGtOUkghZ9SH1oyWJuvo/JTd79G8HdY2BjogvaqMojLZK5inrl67QuUn5KN/hG8J0oIWm4SfehaC73n1CS/LcwVyFqURFafJvPrTMrYWfXVsnzzs+Go/qpLb0Mg57feOmxKDUde3Z1i3blXAPZTx4cAXGowczFjy5jWEEMyoqzdq/boxfVcadMUgL14cK7bSmCpMqo3AV8qsHX7qK1asFpPaBOl3jP3uVQpFHahL7amJAF0pUtwhfcSwLUezZk2knxFM4xGdG374jRs3FxrgF6UJvmpHOP96HTwDMeqYxDCZ6W4LOgZ7934q3lUnn2As/hh1p10XfvjhRxHoxRgXMOHddlvcSQrSVlg6GH5pj8nHB1rEemXU4dseE7NYThJjxoyMox3z0tZQM+roPIJSbd68VUYQ/vzz/frrcHQt8LmBDxgBRiAOAl7HTowJ69dvkN8jGAWVoGXDQjMqKps8NXDgUFq1arUIHFWXWrRoKs/F9yfQGIey8THqjRv7BqdTz9PHpI0b15rxN5hRVwgZv15owolRV7XCog9rSQh4MKeqBEsMCMKVtlWdD5dfLzgsXbqcYPYOgcSkSeNkTAXVH6wDoYxBmjx5vNxqDYJ+xXir+/RfRHSHUqBatSqCYTcUfvp1zIlQwiBhbZcv33MyP3bsBCkgAWOP9YM9KQWQkyLPfi+OnRh1nO/Zs6twhXkVWZ8UiFFXN+p+8k5uMeq+cPr1QhN6+xEbCnunY+ca7I4BiwK4R8DyCHSDeESdO3fQi4RtPqSMes/XRtIrT1SUnV28/30at86IVurU+7pF21DTF6zgcu9u6iY07x+Yt27r8rU0hT90WvhyzjKCZGRIl5kWN7QijH58Ygd1jTF8slBQmdNf+fcylRz5lFmXnkkMRl1nGpo3943EqT8bed2PTJeuMaNuR4rEoNcq7skAZ9QWFFi8QJoGraWS5s/7z3wa5knLl6+QA7LTgBWKOlatMnyemzZtJCw9rkt/5kmTxkuNvjLVgoAgXbq0gjmdICcFTA5JIXkdPAMx6son2x75XOGhInlCoztokDGm6N/c0KEDpbZAScN1X3bUAQFMoULFZHX2QDDypJ8/gRaxXhj1jWJ7vt69+0uBA2jvwQcfiPNkL23VF8Wh0KjbG4U4AQi6N2fOXGmpgus7dmz2WRjZy/AxI8AI+CLgdezUazl79hxt2rRZCHxnS0ZMd6P64INlMuCjG8FvoDEOz42PUYfGFgG87Gn3bmEd2bGLuauKus6MukLC+PVCE4EYdb12WDWsXLlaBkjDeWxFBQuIcExecBgx4j2x60pca11//cO6EFpUf6lkybJSgaa7pNrvVcGCdTc9WLHAmsWfhhbvYNCgoaa1p71O+7HOqK9atUwK9hHLCMnJui8YRh1lVf+cXPlwPdySF5oIpg9KkebPAiKYOm7kPbCWCRmj3qZsL6rxtOEjs/7wShq4zAjs5K9D99xzL61u+am8rGvNceLZ7AVpbJU58pqdgV/TYR+lTpFa+r6XHp2H4IeBhAilGzsckMy9nYGXN/z3JzEYdVQ9e/b7QnI3TUr3YOLstM0WtOl9+vSXknHdjBflmVEHCr7JLaOuGCEEkQBzpwf6gtkPIotjb9jly1dKZgn+/nYfrlDUAXeH1157XQ7c
F/+LvLl+/SqpPVUWFfCdj4qKop07PxIDsW8UUV8UwuvI6+CpGHX4zsHvWU+YhCaKSKuY6OxbyAAvaNMxeekLDZ1RVwIvLF6nTZsuqx4/fgw9/7w1Kbdt20FuW2Zn4nEzmGO4QyBhCzi1ZWKgRaxbRl3RH56BWAWB/NTctjWUjDpcN6C1w3fxmNgvXf8+dNeauSICP65zYgQYgeAQcDt2nj9/nn755ZRYU6QSDK+vbyp8LRFLPIUO1QAAQABJREFUQtfSKZcunJs3bxZlzJjRp2EQVu/d+5k0h65a9TV5LdAYhxviY9TxLMxf9gBbQ4YY27fBRP/dd/uZ7VCMOnx/7YF3zZtuoYxbmgA0Tow6NKbQ+D744IMywKAOIbSH0D6XKFFculvq18Il7wUHmPnDcsApYX2gXEzhtgX+ANrTQIHc1LzrTzOum9OPGmVE1sezVVwi5O3CAPAnsOZDMD+ntQ/K2JNi1KOijPgOWAN16tRF7hF///33C4H5DPG9PWgWC4ZR1+9ZuHCu3FXHrCBMM15oAtbNUPxgDQerVfu4BO16ixZtZI/h858nj7NCN9wgCQmj/mbR1tTshfayb9CAv73U2ezq/PmzclGsQFAacBw3X1yDDh3/nBAtfnnL3XIP9avXrkrN+PXr11QRal22J9V8ur48nrtvGk3dNFzmJ9ePpVwZnpb5hgsq0bfff22W0TOJxagjInK1ajWllBuTF6TMYBTUQhe+pyNGjJaRp9EeZZKt2saMukLC+nXLqOtRclFL167GXrDIQyr16quvmeZgTpG8cV8o6kA9KhIo8vbFitLa41r69OmE4GAJsjLBzHjw4KGUI8eTcosx5UuEgCYQ8ly79i/17dvbFARBsBAdPVMGO9O36enTZ4AIJPIjdenSSW6bo+pP6K+XwRPPhKvB/PkxUsMCP3CY/aukM7GQ+CNiPkzUoMmdPn2W8KmaL2/VLSCcGHUw3NgXfMeOnXIBO2+etVhVUm9UBKsKaJ2A7aVLl6SLBMzRMQkuXx5raooDLWLdMOpYcCN+AiZLvA+1SEZbnJLbtsbHqPujKeX/mk+Y8Kl91C9cuCCDMKJd2Cu2Xj1LA4MdFBC0D+PbunUrTZyc+sDnGAFGwBcBt2PnZ5/9T1iVGesqmPY+++wzskIs3EeOHC2ZL12jjbGsXr1Gcts2MCcY57Cgx7j44Yc75QIWFSjrLuQDjXG4Hh+jjnsQV2eI2I8Y4yfaBg0iNIxI/fv3EeNJSZnHHwimu3TpLo/18dy84RbLuKUJwOPEqOuRwXVrLcw97dt3lkyrP4u1cIDcCw6B2o1tgkuXLidvsQdY9beWgnZ+hNDSI0HbWrVqZXPfeazf33mnr9xZCNftMYWUph3m1dgFJl26dFKJiPULLNGQglXI2Bl1lIUlTf36jSTDj+9t/PjR5vyrM+F2H3Uoir788qCIxTROCuDxja4JYi93PPNmJ680od4FrI2wC45aa+IdQukDRQRcQTAOquRvjeTvPNazO3fukmukYsWKyGrgojRgwCCxdr1TPLdvQBdo9dxgf0PCqKv9z+N76Dvr2tP2/WvN257I+hRNrRErteA4icBxqe5IRbeJ/5DGfzSUFn1kaMjkCfEnVaq7aEnzD4VW/R55Cmbu8DlNniy5PP78p73Ubq61uFTl1G9iMeqo/+DBQ5JRgPQMCQvaXLlyyIlTncN5pwAsilHH9UAp0EJf13ht2rQ2wf5Ibj8UFWkxUPvdXHPLqKNuFawN+djYBT6aCCVZxrVA5k2hqAMaYmWu1Lt3DypXriweK5PShuCgus1PpmdPY09OXNMDm+mTSPv2bWSQPNyjCwTgf586dWofHyf4NMG3KVTJLU2o527Zsk0GY1HHUVHZBBM+VXzPRrAWFUVdXUfQDzDwKmEhCP8ilZwYdVzDBN24cTP5zWHShH+a8kvD4jY21nCvwbep9mqHPxvSiBG+EeMDLWLdMOpjxow39xX2p0nHokCnETdtjY9R90dT
Tow6cNBpF4IkSJ2hsfnyy69wWcZaQMwFTowAIxA8Am7HTgiXG4sYJkoziDERYxZ8XrE4R7LH9fjpp5+lb+6ZM2fkdZTBOTXG2bWqgcY4VBAfow6BgGpfVFQ2OU6oZ9nnNtSn+8riGJaF2IMdzMetmNzSBDByYtSx9qtVq675nmGdds899wgLik/lOUMTOz2OhjFcMPeCQ6C2B2LU/a2loDEHo4Wt7pCwRsD3A2EHmDuV4H4HNzw96S6tOA/h98GDX5vvA0oUKCCCSU6MOsodPnxEMOuNZRUQ9oMXQNIZdXnCzx/0B1rmpPKteaUJ+KW3FMFwkdDnZ555WghcbpPururczJlTRUT/LPIe/PG3RnI6D4a/YsWqsizGP6zTkZSwDHmMaYiXFIoEQWtIGPUP2nxEae8ytkUI1LAea9vQzgMbfG55PEtumlR9Ad2Z3Fiw4yL2Th+3cwgt+XiWz73qIJUAf06DtZQhdSZ1SprCw+S9W4yzNl/dmJiMOp6BjxoDKRhve4qKyib9kTGI2pPSVtnP248R5KJaNWd/Zp2ANm9eJ4nUXt7NsdsPJRwYdQRqwwfjZFoHTWvXrj0kBIGC/oWiDhUADQ9bufIDnwny7Nmz4iOuJNthZw63bNkqBw1MEBgAYLKFBJOeFi3aSg0wTKcffTRKnoe0FkHzdP9tTDgQNpw8+X0c5lMWSsAftzShHoVF5/z5C2nRolgpFcZ5PcgQjjFBrlmzTmpdcIwE94Xy5cvGmRhhoo1o+kj2d3lCRC1XWyLq+wND2wPzTwSM2b//gCyLP5CuQntsn8BUNHpdYKIKKW0XhAH69mxwr4CAQdeAwT8NWqZAya69dtNWffHr5KPuj6b8Mep4V4izEB09w3xXaHtUVDbBBDSUJpQ45sQIMALBI+Bl7IRV0dSp06W7lmKA8USMWdD6YZ6wJ4w/MTGxYpzbZVqQQeBWpUploXF/09QSolygMQ7X1TZrWLNMmDAGp2RSpu3YtSJTpozCQnCsOVZERWUT43UxudtK8uSGAsUoZfxFnZi3lOBv1CjLjFi/71bIe6EJJ0YdWGEeGDVqtLQoU9iBUSlc+CVhOt0hpBo+VX+ofr3gEOjZ0CRja0Mk+zrD31oK92LuW7hwkVyH4DtSSTF88E1Xay91Tf1iTYFo4lh36QkKpzfeqKWfCphXjCa+bVhH6Em5ZuIctMWlS5ciBE2rWrWGfpuZR7shaIfJP/gGJ5dc8+YwyySEJo4dOy6DOusCFnQPwYQ7dmxP2bJl9emtvzWSv/OdOhnbKuuulBBYYkcf7AYwY8aUOK5HPg90eeCaUS8+Mqc0b3L5nHhvT3PfA5Q320t09PRB+v7nY/HejxtgJp/30Zfootii7atjn/qY1TtVAL+FrZ0POV0yzyWEOMxKRAZSEEi6fvjhBynZhCQcEs6klNxiEQ6MelLC119boS3GAke5Taj7wLwhKR9qdR4mj2qrEHUO9If7FaOvzif01y1N2J+HdqG96FtK8f06JTDh8POHj6W9r073ezmHiRwCE/gw2bHzUl9ilglFW51oChGiESlaN3239wPMwR9/nJEmtP7el70MHzMCjEBcBBI6duI7BOOOxXYw4yKYDoyjiPQd6rWHzqhjm0kkKAoQPBVCgWASBMokrCeVVVUwZSLtHi80oVznENkfEf7tCXPsr7/+JrWIMMFOCskLDgnpl7+1lF4nvjWYM+P7yZgxQ5z1mH6vyqNeMIcQmtx77z1SuJ06dWp1mX9dIBAKmsCYeeTIUfktPPTQQ9KCx18TnNZIuNff+csi/kCKFCl8qguGrnwKBHGAcdw1o15rdin66dTJIKoPv1seypCFFtbfFLBhoSCOgA9IQhfdYjFL7F0IH9dQJCwsnCahUNTNdXhHwC1NeH8Sl0xMBDCBYcH3idiWzl+8hsR8PtfNCNxqCETS2OnEqN9q7zMU/XVLE9AGN2zYTJpU9+nTi8qWLROKZtz0OtzicNMbzA1IdASYJgyI
PTHqUz8ZQ3O3T0j0l5QYD9CD3vmrn4nDQsYtFsePH6ddu3ab5nZWTe5yadM+SAULFoxjnuKuFr47MRBwSxOJ0QauM2EI6AGdUNOtbHqaMCS5NCMQPAKRNHYyox78ew90pxuaGD9+knTdUvXZXerU+aT46waHpNg/brN7BJgmDMxgIeNao46iSVGrHow2HX1j4gAKRmIsFBL8qxBgmlBIJN1f+Ij27TtA+tphyxg9KnPS7RW3nBEIbwQiaexEHBfs547AuBUqlA9v4MO4dW5oYsKEybR69VoZoLhx44aUUwSyipTkBodI6TP3IzACTBMWPp4Y9RNnjlPXFU2TjAk8mPShlaZR1vuNAFxW9+PmmDgsTBgLCwvOGQgwTTAlMAKMACPgHgEeO91jFuklmCaMN8w4RDqlu+8f04SFmSdGXRWHGfyWr1fSqd9+TJQAc+o5Xn4RbCVD2sxUIkdFc4/3YOph4rBQYiwsLDhnIMA0wZTACDACjIB7BHjsdI9ZpJdgmjDeMOMQ6ZTuvn9MExZmCWLUrWoiJ8fEYb1LxsLCgnMGAkwTTAmMACPACLhHgMdO95hFegmmCeMNMw6RTunu+8c0YWHGjLqFhcwxcViAMBYWFpwzEGCaYEpgBBgBRsA9Ajx2uscs0kswTRhvmHGIdEp33z+mCQMzz8Hk3EOedEowcVjvirGwsOCcgQDTBFMCI8AIMALuEeCx0z1mkV6CacJ4w4xDpFO6+/4xTViYsUbdwkLmmDgsQBgLCwvOGQgwTTAlMAKMACPgHgEeO91jFuklmCaMN8w4RDqlu+8f04SFGTPqFhYyx8RhAcJYWFhwzkCAaYIpgRFgBBgB9wjw2Okes0gvwTRhvGHGIdIp3X3/mCYMzK5fv+5tH3X3kCedEkwc1rtiLCwsOGcgwDTBlMAIMAKMgHsEeOx0j1mkl2CaMN4w4xDplO6+f0wTBmbso+5AO0wcFiiMhYUF5wwEmCaYEhgBRoARcI8Aj53uMYv0EkwTxhtmHCKd0t33j2nCwoxN3y0sZI6JwwKEsbCw4JyBANMEUwIjwAgwAu4R4LHTPWaRXoJpwnjDjEOkU7r7/jFNGJix6bsD7TBxWKAwFhYWnDMQYJpgSmAEGAFGwD0CPHa6xyzSSzBNGG+YcYh0SnffP6YJAzMfRj1TpgzukYzAEhcu/C17lTp1qgjsnbsuMRbu8LoV7maauBXeMveREWAEQo0Aj52hRjTp18c0YbxDxiHp03Koe8A0YSCaLFkyK5gcM+o8YNg/NP5Q7IjwMdME0wAjwAgwAu4R4LHTPWaRXoJpwnjDjEOkU7r7/jFNWJiZPurMqPOAYZEFY2HHgo+ZJpgGGAFGgBHwigAvPL0iF7nlmCaMd8s4RC6Ne+0Z04SB3G233cYadTsRMXFYiDAWFhacMxBgmmBKYAQYAUbAPQI8drrHLNJLME0Yb5hxiHRKd98/pgkLM9aoW1jIHBOHBQhjYWHBOQMBpgmmBEaAEWAE3CPAY6d7zCK9BNOE8YYZh0indPf9Y5owMONgcg60w8RhgcJYWFhwzkCAaYIpgRFgBBgB9wjw2Okes0gvwTRhvGHGIdIp3X3/mCYMzNj03YF2mDgsUBgLCwvOGQgwTTAlMAKMACPgHgEeO91jFuklmCaMN8w4RDqlu+8f04SBGTPqDrTDxGGBwlhYWHDOQIBpgimBEWAEGAH3CPDY6R6zSC/BNGG8YcYh0indff+YJgzMeHs2B9ph4rBAYSwsLDhnIMA0wZTACDACjIB7BHjsdI9ZpJdgmjDeMOMQ6ZTuvn9MEwZm7KPuQDtMHBYojIWFBecMBJgmmBIYAUaAEXCPAI+d7jGL9BJME8YbZhwindLd949pwsCMTd8daIeJwwKFsbCw4JyBANMEUwIjwAgwAu4R4LHTPWaRXoJpwnjDjEOkU7r7/jFNGJgxo+5AO0wcFiiMhYUF5wwEmCaYEhgBRoARcI8Aj53uMYv0EkwTxhtmHCKd0t33
j2nCwCzBjPr0T8fTtsNr6PTvP9K///7r/k0kYonbb7+d0j+YmYo9UZ4a52sT9JOYOCyoQoXFhQsX6dtvv6UTJ07Q77//IR+QNm1aeuSRh+nxxx+n1Knvth7KubBGIFQ0Edad5MYxAowAIxBiBHjsDDGgEVAd04TxEhmHCCDmEHeBacIA1LOP+smz31HPNa3o59MnQ/xqEqe6TOmz0MDyEylLmmzxPoCJw4IooVhcvnyZtm7dTt99951VqUPu8ccfo6JFi1Dy5MkdrvKpcEIgoTQRTn3htjACjAAjcKMQ4LHzRiGddJ7DNGG8K8Yh6dDsjWop04SBtGeNet355ZMMk66ICsz6+3XWqEO/v0wcFjQJweLq1au0dOky+uOPM1aFAXLp06enSpUqECwhOIUvAgmhifDtFbeMEWAEGIHERYDHzsTFNynWzjRhvDXGISlSb+K2mWnCwNcTow5z9wU7JyfuG0qk2msXbhGvGTwThwV+QrDYsmUrHTly1KosiNzTT+ehF18sGMSdfMvNQiAhNHGz2szPZQQYAUbgZiPAY+fNfgPh93ymCeOdMA7hR5s3u0VME8Yb8GT67k+bDk1o8acr0NMPPU8P35eNLlz+k745/RWt+iKGzp0z/JL9vfh7701DLz9Zlp57+AU69vth2vrNavrpVGCz+hQpUlD+J4pSoagS9NeVi7Tl8Go6dPwLun79mr/HUDBa9VASx7Vr10Tf/6T77ruXsGl9UktesYAfemzsEp/u3nVXKmneDs050qlTp2nbtu30zz//mPeBhmrXrkl3380+6yYoYZbxShNh1o2QNuenn36ir746RPjNmjUrFStWJKT136qVHTx4SLrOlC1bhh577NFbFQbud4QgkBTHzt9++52WLFlKuXLlpJdfLhQhbyJ8uuGFJiJxXPSCg9Nb/OefS/J0ypR3Ol12PIf4Wn///beIlZTa8bq/k1euXBXl/qJ7773X3y2Jfv78+fN01113ebJEPXz4COHfo49Gye870Rvr8gGhogk8VjK7t93msgXhcTt4x9u+++7kdTQnU6YMQbXqlXHPxAkc16xUF6qTtzEluy0uM3qdrtOSA/No7Nr+jvVXKViXOhZ5J861j05so+4xzeKcx4moh5+k6bWWUvJkvj7Nv/31K9WOLkmXNOZPrwCM4Pq2X+in4uQTShzwy968eQt98MFy+RGoB0BbnDt3LqpZszrdf//96rT5+9ln+2j+/Bjz2F+matXKVKjQS4QPtG/fd/3d5nh+0KD+dOedwQ9gXrH46KNd9OWXX5ltAJNerVo1MaCkMs8hgyBzYOgvXTIGV5x74YUC9OyzzyDLKQwR8EoTYdiVkDQJ322nTm+bdRUokI+GDx9iHnPGOwK1a9cVwo+fKU+e3DR+/BjvFXFJRiAMEEiKY+fYsRMkow74Vq1aSvfcc08YIBk5TfBCE5E4LnrBQVEBlD1z5sylzz77H3399TfyNARLRYoUFuvtGkJJFpdBA+O2YsUq2rnzI9qz51NZBsGN8+fPR40a1fdL59euXRfC4600b95CESD5mCx3//1p6JlnnqGmTRvRww9nVs0K+Hvq1CkaNmxUwHvAL6At9vTjjz9KXmHfvv/J+RHXsW6uWPFVV8K02bPfpxkzZlONGtWodeuW9sfEOdbXOvnyPW9eB74PPfQQZc/+OBUsWIAQKDoUKSE0gedv3LiZdu/+hCDYUuuInDlzUpUqlWR79TauXLlaKA536Kcc8/fck1rwXXH5VcebQ3jSNaNeavRTcR4/reFSejJdbnn+zN9/0E9/fk+PiMBt9955n3nv2J1DKHbXDPMYmTLPVaVepYxF7ZV/L9NXp/ZTtgceozQpDUZ227EN1DvWN2J7+rSZaFHDrVIoACHA4V8PUqo77jYDxeH5lcf5N5/e1OFLnzbYDxJCHGfOnKUePd6RhGGvVx3jox4woJ9cfKpz+N2wYSMNHDhUP+WY79ChrSC0ygRJd7VqNR3v8Xdy7doV
Uvrm77r9vFcsFi5cJCwJzpnVlShRXH7E5gktc+jQ17Rjx4fmmYwZM1LlyhXNY86EFwJeaWLWrDniPe+UtIsJJVJS/fqNRbDEE1Iije8S0mkER+SUcAQwHmJcrFGjulhItEh4hVwDI3ATEfA6dt7EJtP69Rtp0KChlC1bVpo5c1qCLQN37dpN0dEzKEeOJ+nttzvfzK6FxbO90EQkjotecMALhMLq7bd7mGtuaJf/+usv892WKlWCunXrQnfccYd5Dpau48ZNFMq0ZeY5vRxofeTIoXEYTjD3EFz5K4fKhg0bJJlms2I/mQMHvqQ2bTr4uWqcfvHFF2jIkIE+90A40Lnz2wReAwn8hMrjuG3bVlS9elVk401uGfVgeBTgCOwgKElo8koTUJaOHj2OVq9e69gEtPHdd/vS888/Z16fOHEKxcQsNo/9ZVAWfNSNTiFh1IfUnCZY5us0bH13OnP2d7MP2TI/QXPeWCWPwYiXHOnL5C9r+zE9kOpBwrVKkwvSxYsXCI7zC1tspkz3PCzrLDP2GR8Ned9qY6nEY2Vlne2W1qPPj+yW+S4VB1HFnNVl/u1VLWj3wS0yb/+TWIw6zGdatGhjatGxuCxY8AV66qlc9PPPvxCkX6tWrTGlcNOnT/FZ0KuPAINE165v2ZttHmfKlFFq5PG8b745bJ5HBoHbevbsLc+NGjWMUqWyNNjA9cknn3SULvpUoh14/VCmT59JCCanUt26b/gVEFy8eJHmzp2vbqWUKVNS/fp1zWPOhBcCXmliyJDhYoBbLyXEkfJ+YWb3yiuG0CE6erJfYVR4vcGk1Rq40Tz44ANJq9HcWkbAAQGvY6dDVTf01Nmz54SGMbUn81p7QzEHYC6AheG4ce/ZL99yx15pItLGRa84DBw4RAhzN1G6dOlo8OABck2NteeaNeto1CjDCqtnz65Upkxpk7bAwA0bNlIe9+vXmwoXfkmsi2+nAwcOCKa+l2T0y5cvG2cdjufgeUitWjWnqlVfkwIArO9HjRotNfNgnBctmk9wzQ2Utm//kHr37kclSxYXWv/XHW9NnfpuypzZ0tBDm1+16uuSMYflXrdub8u58cKFCzRhwmTZZ1S0YMH7QmOcybFO/WRCGPUZM6aaVf32229Cc71HWBpsk20DM7t69XJXvIZZmZbxShPQjo8YYYwt7du3IQhr4NZw4sRJSRP79x+QT9m0aa0pwPn1118DBr5eunS5XL9CADFp0jitlYmfhWApJIx6oKbOabqOst1v+BeWm/CcZMZxf+aMWWlBvY2yaPSecTRnm9H55ELytbbtZ3Rn8pRxroHZ3NTpAN1xewr65tevqOnMKvIe/NG1+vZr5k0ik1iM+qJFsfJjwbNAHPiI7QmD65tvNpADAQYODCAqKUY9Tx7vZp74YKpVqyWrXLNmhfD1vktV7+nX64cyZco0n+c1b97U59h+4PZ+e3k+vnEIeKUJLM7cMuqYcP1t2QdBFVxZ3CY35eK7V7dq2bJlfbztia8+f30JhAPKyIFcjI0YH+3J7TPd3q+ej1+nOBzQQoj/Ezxp2/uljhO7fvUc/mUEEoqA17FTf67b79Pt/XhWfOON3h49b3yL1x3HAXUfM+oKCeM3FDThW6NxlNTGRS84YN579dXX5Hp68uTxlDNnDh8o1JoDjNo77/Qwr4FBBqMMZtvOJCsLEjDcH3yw2Jy38B3VrPkmgaGzr91RMbT4derUk4wqNPjlyr1iPs8ps3z5Ssk0Nm/eRJQz1uxO9+nnoE1v1KiZVHotW7bYx40VWNSo8YZsX5cunahChfJ6Uce8V0bdH48Cy0JYGCLZFZGODYjnpBeaQJVdunSTQpM336wj3RH0x+jKFSgzda26fp+ex7sFX4XfYC0m9PIJzWNdl+iMuq7prjjlBWESfUa2u0WZblTn2UYyXyW6MP3+x2mZ71N1DJV8vJzZt+N/HKX60QbRwTd9dp2V8tqEXcMpZqfBEBbOU4YGlRtvlrl67SqV
GJHLPNYzicGoY1B8/fU6fj9i/fnw3+7Rw9B6L1my0DSvYUbdQik+xt6608gtWrSE1q1bL7SbpeMMvLAwgMSzceMG0rdflYV5TOvW7WW8hffeGy5M/DYluA5I7GBykyXLI3H8WObNWyhjF0B626hRA9UM+YsJBUE9WrduEdTA4VP4Bh+4HTwhhYYrBN4BBjpIW2EVgjRx4jhhQXGnkGT+QW+91U0yusOGDSYIbuBbBJOuOXNmiABtWeT9f/55nmBC//nnX0jLFNSVJ89TUmJapkwpeY/688kne0Q90cJ37GkxyFah2bPn0t69e2WdsFrBuQoVXjUnYlUOdDF37gJCefi74RmQosIdAz5vSFjINmvWinDv99//IM+pYGdFi77sYxEC4RkwgP/cp59+JiXd+fPnowIF8ktJviz8359gcND7hcl4zpx5YlLaK2tAnW3atJDahS1btgnJeoykKyw64EOHRYGTpB0Ch02bNos+75WWP+gLzO7gsqL6pdqpPx+Ywxpm585dEifdJAwL8k2btkiTRLx3YPjyy4Xk9xmsgAXth58ZhJ72hUco6ld94l9G4EYg4HbsVG1CvJeFCxeLuC9fmuMXxrUmTRo6BrLC94zvBuMN/qnvv0yZkj5zIOrv1q0nnT79q1AadJPBXXfs2CldedRiH2MtzH3tpuqzZ88VzM4OqlXrdeHu86g0Z//iC0NT9dJLBemll16U2kLVByzi+/cfKMd6ZaqrxpZOndoLy8Pc6tZb6tcLTUTiuOgFh5Mnvxda7x5Sew0Nr31eUZpV+J7PnTvLpKumTVuK+EgXhPlzvzjzm85sghlW8aTgF16njuEv/sEHixytvBCQ7sqVK5KBji8W1MyZc+RapkePrnLdajYuQAbriJkzZ1PevM86uo3g+9q8eatc27Rr1zpATcYlf4w6mNnBg4eJtc33Eh+MDUjB8CivvVZdjlEw2ccaIiHJC03gecodsXfvnj5jkGoL3j/W2+gX1jDxJazdp06NlmMgBEJOCpH46kjI9RvCqCuN+uV/L1GpkXnM9ioT9mvXr1Gx4YYkTDeV/+uKWNTfcRchQFzV8YVkOZ0hrz+/Ih3/4RuCBn5du32U4vY76c9L50y/+KLDn5SR/swH/pdJDEYd25A1adJCPgFmEfH5Z2AiRcIEqgaXYD4CWSjAn5utUT9x4oQ0gTl71vCfCdDUgJcwOMJtAAxvMOnDDz+iXr36yA9pypQJZhFd44nI0d27W0G/Dhz4SjA17SXjAlOhUNSBBQgGKiS7RQMsKb7//gf5zpcuXWx+7GD2Spc2BFGLFy+g9OnTme0Px4zbwRMmQxBeOCUwdmCEEVgF0mCk557LK5lFdf/s2dMJjPUvv/wiXUvUIk9dV7+NGzekevWMOnAOC9V33x0sJxow+JCE21OjRvV9mGowlB07djED0mCCxztTSd2PybhUKUuYqK7jF0wlLGqQEKcBwiC9Dnnhvz8q3oQ6FwwO8fULbS5XrqycWFS96hfmgdOmTZI0qM6dP3+eWrZs69hGvBtMTEpQgjLq+XgOFjvqfeBexajD5BAaAyQ88+rVK+Z9CBT53nsj4ghI5M22P2AQlixZKhmSusJ9RqVQ1a/q419G4EYg4HbsRJvU94Y8vrE0ae4zA0hh/RAdPdkU9uMejHWtWjl/z7jeoEE9atiwHrIyVa9eW46N+C7BlKv01lsdZXAqNS/aTdWHDx8lXfmKFSsqGXxVTv9tq/nL6msk/R7kR40KTrNlLxcJx15oIhLHRS84xPf+p02bLoXuoNF+/YILAAahd5cu3eW3tmbNcnOdBgE1fOERtA1aVcx9R48eEwqIn824NGotH1+7cF3NYSNGDBHKhjxizfE1Yd2cPfvjPubuwdSl7mnevLVcu3Ts2E6sQyup035/nRh1MOlQbkHA98QT2YUJ+RCxa5URayw+HuXUqdNiHVdHPk8XcvhtQDwXvNIElAfTps0Qu0y9TAj8lkxETVcJwp26dRvK
w5UrlwpB5z3qkuMv3jOUsDdLm45Gof2JqlHPn6MIjawULQHY8u066ruknQnGhHoxlCdjXrp09R8qPeppeV75rG84sooy3/sI5c4g/NO16zUKNaY2hQxz8UpTCtJZse2bYvhPXzxFS/bPpZYvdpZ1VRTXnbaFSwxGfdu2HdSnT3/53HXrVvr4hpsdjiejPgIw+aNHj/B7dyBJ3c1m1OfNmy8jufttvIsL2PICW7UFk8BsVKhguEEo5g/lIIFUPkVY5Oh+M0pK9vrr1QTD3lIGJUloHXimGiwxwEFzigQmE2ZTKs2aFU1RUdnkIbQlYObAjIIpDffkdfCE1YA/03edQUX/u3V7S2KXOvU9QjqdQk6WY8aMl0FcwCAOHNifHnnkEfr999/EueUiAupCCduGDatNczB9gYsyAwb0lVunYUKEhh+mb0iYjNV2gNBC9+v3rmQux4wZISdMaM8hmVfCBj3ysS4I2rp1ow/ziXJt23aUGmU8H5LzJ598QkzG52SQE8RxQBo8+F2hfSoo88HgoPcrT57cwqSvpxTu7Nv3uRRWYUJBgrlf27atpcbtf//7nxmZXpcyw5wPiw9MytBuQZCFX9ArfN6gKQejPXPmVDMKrv58XOvatbM0OUyePLmMLwGBCBb/SMpMDBZHX355UArGcD7YhbnTgjSU9aMtnBiBG4WAl7FTRfiuX/9NwWTXl2MM5vkOHd6SwjWcVxZaGHM6dOgsfG2/kgJojDlYT+CbWbdug/iO58iu6mapilHHBcy35cuXpYwZM4gxN5nw30xuCrD9MeooByudHj26iWflkEFusZMLrNyQxowZ6bOLC5u+S1jMP15oIhLHRS84mCA6ZPS5uXfvHkKzWsLhLt9TMB+H0BqWdAh4C2GVSqDnCRMmSYuwLEKBhPlRzbW4B+vLN96oJVxbDUZVlfP32737O7Rr18fCMq+JtErT68K82rJl06DarOr/+ONPpHUMjhcsmCO+yYfUJb+/dkYduy9B4bVnz6dS6YU1rL7Lg+JRsJ7R/bT//FNsxf3NERFBfpYck+xKE78NiOeCV5qAhVCnTl1kWyBYgZIOW2QfO3ZcRrkH1rCQhCVPfElhBIuim6FNR/sSlVG/M2VKWt16r9R0Qzv+6rh89K+YSFSa33yj2G89q9SCVxiTnxoUb0+N8rcWe6KLe8c+T4OqT6EXsxYhXePerlxvqp7HYHiKj8pFWTI+ZprC155TmnKLPdxVFPkmMVXp8Ikv1ePM38Rg1JXWEASsm9jgoVgMY2J1Svfdl0aa/uKa+gic7tPPBRIE3GxG3e5rrrfbS96NCTyYov37D/gwASrQCD4yDL7QJkJKiKT8WHQTnVDUoT5sfQGlgpeoduiaBjCZU6ZESz+l5s2beIHphpbxOngOCZJR9zepYvEJ6Wb27I/5aJBgbla2bEWJAbbwyiOYVySdoYyJmSsWnxnlefy5ePEvsSA1JM5Tp06UDDTOK2HAq6+Wi2Natm/f/8Q2gpdFEKSnTMZeXwzYGXWYVsHECun992fGsQ5BMBvQReHCL0nBA+7TGXV/OOj9Wr58idSwoSySWsBh0QCBgi7hV/jXrl1DWCYY214eOWJYAuF+CInSa9Yc2PIGVkKwBhgsgvTAlBVJf/6iRfMoQ4YM8rz6o4LkYLERG7tAnZa/R49+K5iG36RQSrk/+NxgO1D9gYlv3f806qGs3/Y4PmQEEhUBt2MnXGGqVKkh27Rw4VzTZQgnwHxDo/fAA/eb45c+5sAUGEI3PfXpM0Bqv8FQgEFAUoy6nTFR5eLTqOM+XfCMYwS96tGjF4F5gK8ufHZVYkZdIWH8uqUJlIrEcdELDr5IWkdgOBEZHWsGrAfGjRttasatu+LmJk+eJt3FMB/Onz/Hx/JsxIj3pMAegq+DBw9Jxvy5554VJvAPSgtAzJNIEJph7RdfUgod3AfLGDCU+G727jXc83AeVgCwBogvYeuxxo2bS8EBAvXC8i+YpNaqNWpUE3N9
IyH07ydd/iCUw7rYHuMqGB4F1oSvvVbZR2kRTFuc7kkITcAy4K23DBqw1925cwcpiInPhB0CiIoVjQj6uuLNXl9iH0PRkSgadUgA5jfbSA8JrTiiwTdfVIO+/s4yq0LHlEn8hcsXqPbMkrS82cdyy7UOyxvSvm8+omG1ZlDBLIV9GPWWr3Sn2s8YZgulxz5NMU23yqjxKw4uohGrelH5fDWoW4l3JW7KNN4OYmIw6tiuAYt8fMS6pAnP1k0t7G1BxMlixYrI0/pHgIHCX4LZNPx6ndKtzKgrDbkaqCAdRaARmAqC4YfFQ4sWTaXW4MqVq0LjWFZCqAs+QlGH0pBjglB7P6sFEmgDElt9r21oNGFWNWrUcOGfntfptYbVOa+Dp2IUlfm43imdQdU11vo9TnlokP76628h5X5DTlLQtIPxRVIMJbQ9cG2wJ+XHpAtqVgrNOSZkJNBK6dIlfYQC9joCMepKOKNM5exlMdmDFjBJL1sWKy8Hg4PqFwROEDzpadmyFcKsfKwP86+uK9qGrzf8T5HWrl0nJuQRjvfj+vjxk2jx4iVy4leR+gM9H2V0Hz8IPBALAFvWxTcpoqw9OS1IQ1m//Xl8zAgkJgJux04s0MqXryzHNswnYAKwiIb1ilNatWoNwSRdn1+c7tPPKUZdHwf16/Ex6v7GNxWLx24pxoy6ji4J4fPf8kTq1NYOPb53xD2KxHHRCw5xkTECq8LlbfPmrXJuhSukXZjsVG75ciO4G64pSzD9PsRygOAJCWsK7FiQNm1a85aJ2vZeTkIy88b/MsqXG5ZvnTp1MJliKPb69x8kBWq4df36VdJSzV5eHYOZbNWqnRSow20Qbb9DuAMHkxSjDu3y6dOnZf/y5XteKA76OT5T51HwXasExcevQnCoUpkypWTEfH/jlLovvl+vNAEBJywWoJxDAj+VXighsHZQCcoKKC0CJVg9IgaQE18XqFyoryWaj/qUBksoZ/o8sr3vbupKG/YtjdP2MW/OpbwPFZBbs3135hhlT5uDPvvxE+o4z9iaS9UBDXvZ956V5asUrEsdixi+JpuPrpVB58DoVxibT0Y+fqNIS2pe0DBX0SPM6w9PDEZd+a+AIJSfpnom/DYQZVJPioB0iZn6CHQGTy8TTP5WZtSxVR0CfCn8gDGklpAW1q37ppCMVTG3hFFMEgYl7PmoUijqAPMIKRzMazDIYrAqWbKs1CJCa6kCWejX8HzdbFu1Jxx/vQ6eQ4YEZ/q+ffsmv93es2cvwTz9iy/2m36a+s1OjLq/hSTiE0Dirpue45117dpTWmaoejEpg/kvVaqkqblS1wIx6soHrU6dWjKImyqjfmEdAEESEjTP0EDrjLo/HBSjjEAtWFzrSS02MPnrUW5xT0zMYhG8b4oMyqYYdTD1YO7jS9CmQ6uOFOj5uA7mAs/BLhgqYVw0MCxB+OZ0Tb+6x+nXaUEayvqdnsnnGIHEQsDL2Lljx07xLff1aRLGtOLFi8rglsptBzcorZ+/Mcenkv8OFKMOs86cOY1YQfp98THq/p71ww8/ClNgQ7O3fv1qU7nAjLqObugY9aQ+Lnr5NnyRNI7UnIGjYBhm3Ldt23ahyDHmN38R2/W5Mjo67laswB9abURmD8ZHHOsYxLqBos7uzgpFUqVKxhoSbrAIHueU/v77b+nShvUsrGfGjh0ltyFzutfpnGLU9Wv++o97AvEosGLA+mzQoGFy7avH6tHrd5P3QhN2l8M+fXqZ2+aijXCVnDx5qmyG7gZob5ceb+pmatPRLqyXQq5RH1ozWpqs4wFTdr9H83b4an1wHqlXlVFUJnsF40D8vXrtCpWfkI/+EcSHtKDFJuGnnoXge159wsvyXIGcRWlExWkyr/60jK1FXx3bJw87vtqPquQ2/COLDHtC3eLzmxiMuq41X7nyA8dIrKoRkJjBFxpMwZgxo4T/luGfH+gjUGXj+72VGXWlQVcMcmzsUqFxnC6DYcBXXJm1r169TJoc
g5lo3bqFYOSN4G/ANhR1oJ6+fQeIPSW3C+3mcCHdTCF9c+vVe0MM5A2FifH70k9m+PDB0v+nRYs2MjqmnelCPeGYvAye6EdCGfUZM2ZL7BQmmJiwvzZ8qDYL6TmSE6PuxNDiXidGHecxee4WEeexC8Bnn+2T3ynOI0Ho06pVC1M7HIhRV/1VFh5GDdZffZuQBQsMn7Ibzagjsuu6dRtko6CFsyf4lT/8cGZp6aHM5eNj1FUdsCxBjIgdOz40A8nhmj+zOlVO/1WLLt30XV0PRf2qLv5lBG4EAl7HTkScxneHOUXXCsEaZ/TokVIIjPYPHTpC7qXstC2Rv/4llFG3B6dTz9HHMj0OCDPqCiHj1wtNROK46AUHXyTJXFvhfCAGVy+HOb5TJyPIMNxB4BbilPTtl+1ubur+yZMN03kI0RDELCFJWWE6zX2oF+uUHj3ekf7kEPJD0JY27YOuHqnWovZC/gQcwfAoWIsh+jyE83pMKPszgjn2QhOHDn0tgw6j/iVLrF219OfB1RQup3nyWFav+nXkg7nHXiYxj0PKqPd8bSS98oThL7p4//s0bp0hpXLqQN2ibajpC+3MS+9u6iY07x+Yx9u6fC1N4Q+dPkDNZ1WT5zOky0yLGxqLcpz4+MQO6hpj+FrhWJnTX/n3MpUc+RROxUmJwajjo1FRoP19WKohhw9bvqu6j2kwH4Gqw9/vrcyoA5OBA4dKqR8C2ERHz5AaUyXNX7BgkZSkDRrUX/oawYzJaUAKRR3KpBhm3vA7wpZiMJUCk6K0+RAQpEuXVgYlCUYC6++d3+jzXgZPtFExrvGZvjtpknWfcgg7qlSp5BPkBHuLQpIdCkZdxxOCG7yv2NgP5CIZ1yZMGGNuJRSIUVdxK6BJRrvsSfcPVxOavrh1wgF1BGKU3WrUlcsOTNV69uxmb6LjcaDnOxYQJ6FdW716rRn0r3nz4PaODbQg1Z/ltX69Ds4zAomNgNexU28XdpLAYnjOnLlSAKabuavv359wUq9H5RPKqPsb3/YI7VoXET3b7nrEjLpC3vj1QhOROC56wUFHUs23OOdkuq7fq/JqLYZjfwJ1da+yLMHxpk1rHc3LVYwb3b1Mldd/L168KP3aU6RIIX3T9Wsq36tXXxHIcWccZRKuQ9k3YMAguSaBsG7SpPE+8StUHfH96ow6XEARtwlWhmD8Z8yYEkfhGAyPoist/THK8bVLXfdCE2oMfOSRuPHCVL1qbMLx1q0bZLA2dQ2/+rpu1KibvyMFrDVCxqi3KduLajxtbPux/vBKGrjMiL6uA6Dn77nnXlrd8lN5Stea48Sz2QvS2CpGhFI7A7+mwz5KnSK19H0vPTqP3MsYZeCXsbHDAcnc2xl4XFcpMRh11K22BIAkyR6YST1b/8DsE1gwH4Gqx9/vrc6ob968Rfr3wOwGTIhusgtGDgwdBtFVq9ZI/yV9mzSFaSjqgLsDtql4Vmx5Aw0/GAlYWsAMXllUwHc+W7ZsMvKnU7Ax1Z5w+/UyeKIPilGHz7J9j8/4GNT//e9zGeUY9dhdBHQTpYQy6idOnJSS6ocfftg01cQzMVDWqVNPmtsjUih8upD0Ad0uZVexCjAezJ8/W9Db/bKM+qP2UdUX2vHhgLKBGGU1SQVr+n7gwJfCsqCD/BbmzZtj+smpNqI9CP6GgDkq+Fug56McIq7Cbw7WDvY+K3eAYLfLcVqQhrJ+1U/+ZQRuBAJux87z588Ld5jTQjuVKk4UZ11zpdztFOOBMWfWrGlxfHMhrN63b59wQylkjmEJZdTxLMxfaW3aPLV9m127qBj1QAvpG/EuwuUZbmkC7Y7EcdELDuodqjUbjqGIKVTIiFOjrjv9Hj/+nfTtxvoMux0gJk2gpM/1sIYsUCC/z+2XL1+W+6z/+uuvwtU1cJR5bKEIV0wk8Aq6vzfOwdquSpXX5dpRD3aLa1iLwAwfcz2YdCgOMmfOjEuuk2LUa9SoJgQCLeV6pkmT5qYAEFaeupta
MDyK2mkJbXNaX7tppBeaUGsaPGfFiiXm1nL6c1W/neL84D5E918kovznCaBx1+tL7HzIfNTfLNqamr1ghLqHBvztpc5Ef/78WWlerDqmNOA4br64Bh06/jkhWvzylrvlHupXr12VmvHr16+pItS6bE+q+bTh+zR33zSaumm4vDa5fizlymCYkTdcUIm+/f5rs4yeSSxGHVGSa9V6UxI5Ji8EZIBvCUBGwoc+evQ4KSXDsd3vIZiPAOUCpVudUdej5AKnzp07CF8fw70CAxwGP7Xvs1Nkb5QJRR2oR+2bjrx9saK09rgG6aUeHXv//gNCIjxS+kNDy5ksWTLcJrYi+0OaFEHLC//j9P9F6MYkBaavRIliMtgQ7oWfzoABgwVj+ZP0l0IwjFAlL4Mnng1Xg5iYxVLDMmXKRJ/9K+NjUPVJUvcrAkMI8y9IgZESyqgr9wj4gMInW01SYODr1Wskn6H7p+ntsjPqmGwbNmwimXsIbPr37y0nDbw/+J2q7RzhLw7hEVJ8OOCeQIwyJm8ww8Ey6hiz3nyzoQwEA4EBsIUrARLiNXTt2kN+L3obAz0f5dQkiIkaFisPPPAATsvtD996q5sM8ILIsBCmIeG7hMkuBBsdO7b3CajotCB1W798CP9hBMIAAbdjpy6ghP/pM88Y6xsIe6G9w/eua7Thgwk/2e+//0FurzRo0AApMMOYg+Bu0NIhKesu5BPKqKMOLGgHDOgnGQe0bc2adWZQTn28xr27dn0sAz0h72TRhvO3UnJLE8AmEsdFLzgACxUfCnk95hOO/aWff/5FBHJtI+c2CN1h0ajW6f7K4Lyae7C+x/eYPfvj8nYw6TBVB20j6Zpkf+s5ZdqOnYCwbkmb1jBbx7rhnXeMLdLwHATXVesQ1K0CnCE/Z84MseVsFmQ9JdWfGjUMRh2VKGEf8nVs8XX88SiYw8+d+1OOMePHT5ICBvtuD6jPbfJCE0oRBgEMsMW6SwUTRDvXrVsvlEYjZFOcXB0gaMGYiBRMgGfE+Jk7d4F0oQWOSLAA7dfvXbnm6dr1LVMQAyteCAGef/45SXPy5iD+oN0h0air/c/je+Y769rT9v1rzdueyPoUTa0RK7XgOInAcanuSEW3if+Qxn80lBZ9NF3m1Z9Uqe6iJc0/FFp1Y0EJM/fbbktGyZMZkVA//2kvtZv7hro9zm9iMep4EPwjsMUBXjYSPjQQCyZOdQ7n7R8AzqmPAPlASdfo2e+71Rl14KGCtSG/4D/fX+SRlEYPeT3iPo71FIo6lI8L6sV+tq+8Utp8xObNhh8PToBhAeOikhrAcaxLUzEgQJKKBOmnGhR0gcDq1ctkMJHPP/9C1GlYtJQvX1ZG4JQFQ/DHy+CJx27btsNkTnEMKTIisqYUgrlgGNSOHbtIczGUhTXKvffeK5k+fGPYax0CmIQy6rt27RaLyF54hPx2sf0KJuE9ewzLH0RVRdwBlQIx6rgHlhSgJUwaSOgzNMLqWJ8gcT0YHAIxysvFwt0No45nwlQNQRdVm9BGTHYYs5DgroGAizDTQwr0fFzHGFS3biOzPggpwPwrf38w8NHRU8yFibJ0QVndAgbHTgtSt/WjHk6MQDgg4HbsxAKthYhhgsCoSI899qjc5hFCNGzJhDRqlK9pps6E4DrK4Jz6vu3WLAll1LG+Ue2zj2/2uQ3twfj3+uvGQhjHGMt79OgmmX0c32rJLU0An0gcF73goFvTxUc3ixcvMJUb+vouUDm7kAnfEAIWq7kRGllYjoEBU2nQIF+Nvr/13LFjx4VvfBdTcYQgq3fckdynLuwSpCtZdKGEep7Tr91a1+kenHNi1HF+1Spj9wjkEYytRIliIhc8j2IIH/qJOd6Kii8rcPnHC03gEcCpb993zTEP1juwYP322+PmOShj3nmnu6mYUE1TQQMhfFS7NqlrTr/lylUy61Rm9HpwQn0rTKUIQj3YiQjvKdgUEkb9gzYfUdq70sX7zB5r29DOA0bwInXz
41ly06TqC+jO5CnVKbkl27idQ2jJx7PMc3omlVicz2mwljKktjqKbeBg8t4txlmbr8onJqOOZ5w/f14MpBMl462eqX4xkUF6h8WrPSmTEft5+3Egf2adcYA5HJiYhCS3H8qUKb6B/hLybJR1s4+6epYKOuZkWrdz5y7hi9tb3hoo6F8o6tCDlCxZEmMyJnj42bPnhOmhIX3To47jmvrIscACIwuXDiQwVO3adZR7eU+cOJaiorKJs3C5WCCD5hUt+rKQHvaR5/4WARkxGWFCsdcvb0jAH7c0oR6FRefChYuFtHmpKbRSQYb0xZs/32xImkeOHO3zXWGi7CysJiZNmkoQTkALrvb7VsIQO/On2qMGTfu2RNDsQluFWBIqweqhQoVyci9vXbqtW1/YNeqq7JEjR6WPOyYPZc2BSQDtqlWrhmkxgfuDwSFQv1asWCUxKlOmtKDzrqoJ8hemXJDmQoMAYZ+esPBH8MW9e609XDF21KxZXfx7nVKlSmXeHuj56ib0Y8yYcYTvTSXU99JLBaVQCkIWlSAUaCW2l8FiHxYkZcqUUpccF6S46KZ+szLOMAI3GQEvYyfGPWjRsHhWzDa6AQFahw5t4+yVjmsQfsWKuBoff7zbHHMwhlWuXEEEy6rjs8dx7dp1JdOvC4VRh0pqmzWsWRD7RSVl2t6qVXPKmDGj0NJPNMd1rHOKFi0irJDecNxKDnVi3oL2DilYn2L17Ej69UITTow6MEnK46IXHHTBdnw0oTPqSjgVXxk7o477YYWG9QYUJ3rCeq158yZxfM79redQFu8Lvub79x/Qq5LKPXzbOXP67sKghOQ+NzscBMuoY+sxjC2Il9S6dQufmtQOEjg5b95sGVQ2EI+C8SVXrhyyzRDQ2SPZ+1Qe5IEXmlBVIwDn+PGT5bpQHzcxNsHi8I03avusvVAOVqtVqxpbtgUbjHDChMlyhxt9XYV1YbNmrYVm/aIY2wabQkglAMHYjUDiyZIZCmnVZn+/yZIlc69Rf2XcM1Lj4q9Sr+fT3PcA5c32Eh09fZC+//lYUNXATD7voy/RRbFF21fHPvUxq3eqAIvs9W2/cLpknksIcZiViAyCiMH0GAQDjRIYR/wmpeQWi3Bg1JMSvv7aiuCEyZMnj2OOBaYGSWcWcXxJmDzaB0bQH+6HlDaUyS1N2J+Ndl269I+0gkmZ8k775XiP0SdM0HfddbeUksZbwOMNV65cFQvP08JC4R4fM32P1UkTb+xVCiZVZ3y91pcY5SBMAbbYqeCBB+6PQ39un4l3DQ04JqT4pOuwXFBae/Uc5SLStGkjYaJfR502f93UbxbiDCNwkxBI6Nh55swZ6b+aXrg92ecApy7he8aYAyFZqNceOqMOYR4SFAVwU8SiPZgEgTJMjmFVdasmLzQRieOiFxxuJs2A+TsmtOJYe0FhEOj78reeU+3Hd3D06LeSf8me/fEEK9hUvUn9N1Q0AYEIFKiZMz+UKGON0/rb31rdaZ0TzHtyrVGvO788/Xz6ZDB1h909mdJnoffrrAnYrlARR8CHJJGLbrGYN28BYX/oUCQMfHAR4BReCLilifBqPbcmqSCgm+TbNe1JpQ/cTkZARyCSxk4nRl3vK+eDQ8AtTUTquOgWh+DQ5buSMgJME8bbgzDTNaM+/dPxtGDn5CT5/msXbkGN81n+wE6dYOKwUHGLxYkTJ4R/CMxnz1iVeMjB76dAgQLSH89DcS6SiAi4pYlEbApXHaEITJ48VfhwLTJ7Z3cdMS9whhFIQghE0tjJjHpoCM8NTUTyuOgGh9Agz7WEOwJME9Ybcs2oo2hS1KoHo01H35g4gIKRGAuFBP8qBJgmFBL8m1gITJ48TUZnRVCaBg3qSZ+9xHoW18sI3CgEImnsRByXLVu2Cl/PWlSuXNkbBWHEPccNTUTyuOgGh4gjAu6QIwJMEwYscGHyxKifPPsd9VzTKsmYwINJH1h+ImVJk82RIPSTTBwWGoyFhQXnDASYJpgS
GAFGgBFwjwCPne4xi/QSTBPGG2YcIp3S3fePacLAzJPpuw43zOC3HV5Dp3//MVECzOnPcptHsJX0D2amYk+Uj9fcXa+bicNCg7GwsFeACbwAAEAASURBVOCcgQDTBFMCI8AIMALuEeCx0z1mkV6CacJ4w4xDpFO6+/4xTRiYJZhRdw99+Jdg4rDeEWNhYcE5AwGmCaYERoARYATcI8Bjp3vMIr0E04TxhhmHSKd09/1jmjAw87Q9m3u4k1YJJg7rfTEWFhacMxBgmmBKYAQYAUbAPQI8drrHLNJLME0Yb5hxiHRKd98/pgkDM88+6u4hTzolmDisd8VYWFhwzkCAaYIpgRFgBBgB9wjw2Okes0gvwTRhvGHGIdIp3X3/mCYMzNj03YF2mDgsUBgLCwvOGQgwTTAlMAKMACPgHgEeO91jFuklmCaMN8w4RDqlu+8f04SBGTPqDrTDxGGBwlhYWHDOQIBpgimBEWAEGAH3CPDY6R6zSC/BNGG8YcYh0indff+YJgzMmFF3oB0mDgsUxsLCgnMGAkwTTAmMACPACLhHgMdO95hFegmmCeMNMw6RTunu+8c0YWDGPuoOtMPEYYHCWFhYcM5AgGmCKYERYAQYAfcI8NjpHrNIL8E0YbxhxiHSKd19/5gmDMxYo+5AO0wcFiiMhYUF5wwEmCaYEhgBRoARcI8Aj53uMYv0EkwTxhtmHCKd0t33j2nCwIwZdQfaYeKwQGEsLCw4ZyDANMGUwAgwAoyAewR47HSPWaSXYJow3jDjEOmU7r5/TBMGZj6m71mzPuIeyQgs8ccf52SvHnjgvgjsnbsuMRbu8LoV7maauBXeMveREWAEQo0Aj52hRjTp18c0YbxDxiHp03Koe8A0YSF623ffnbyOQ2bUDVCYOCziYCwsLDjH3wfTACPACDACXhHg+dQrcpFbjmnCeLeMQ+TSuNeeMU0YyLFG3YGCmDgsUBgLCwvOGQgwTTAlMAKMACPgHgEeO91jFuklmCaMN8w4RDqlu+8f04SBGTPqDrTDxGGBwlhYWHDOQIBpgimBEWAEGAH3CPDY6R6zSC/BNGG8YcYh0indff+YJgzMrl27Rmz6bqMfJg4LEMbCwoJzBgJME0wJjAAjwAi4R4DHTveYRXoJpgnjDTMOkU7p7vvHNGFhxoy6hYXMMXFYgDAWFhacMxBgmmBKYAQYAUbAPQI8drrHLNJLME0Yb5hxiHRKd98/pgkLM2bULSxkjonDAoSxsLDgnIEA0wRTAiPACDAC7hHgsdM9ZpFegmnCeMOMQ6RTuvv+MU1YmDGjbmEhc0wcFiCMhYUF5wwEmCaYEhgBRoARcI8Aj53uMYv0EkwTxhtmHCKd0t33j2nCwowZdQsLmWPisABhLCwsOGcgwDTBlMAIMAKMgHsEeOx0j1mkl2CaMN4w4xDplO6+f0wTBmYcTM6Bdpg4LFAYCwsLzhkIME0wJTACjAAj4B4BHjvdYxbpJZgmjDfMOEQ6pbvvH9OEhRlr1C0sZI6JwwKEsbCw4JyBANMEUwIjwAgwAu4R4LHTPWaRXoJpwnjDjEOkU7r7/jFNWJgxo25hIXNMHBYgjIWFBecMBJgmmBIYAUaAEXCPAI+d7jGL9BJME8YbZhwindLd949pwsDs+vXrCdtHfeonY2jL1yvp1G8/0r///uv+TSRiidtvv50ypM1MJXJUpGYvtA/6SUwcFlShwuL8+Qt05MgR+u677+i3336XD0iXLh1lzZqFsmfPTvfck9p6KOfCGoFQ0URYd5IbxwgwAoxAiBHgsTPEgEZAdUwTxktkHCKAmEPcBaYJA1DPPuonzhynriua0k+nTob41SROdQ9lyEJDK02jrPdHxfsAJg4LooRicfnyZdq4cTMdP37cqtQhlz3741SyZAlKnjy5w1U+FU4IJJQmwqkv3BZGgBFgBG4UAjx23iikk85zmCaMd8U4JB2avVEtZZqwkPZk+l5rdqkkw6SrroJZX1h/kzr0+8vE
YUGTECyuXr1KixfH0u+//2FVGCCXIUMGqlr1NYIlBKfwRSAhNBG+veKWMQKMACOQuAjw2Jm4+CbF2pkmjLfGOCRF6k3cNjNNGPh6Mn2Hufvc7RMS9w0lUu1vFm0drxk8E4cFfkKw2LhxE33zzWGrsiByzz77DBUuXCiIO/mWm4VAQmjiZrWZn8sIMAKMwM1GgMfOm/0Gwu/5TBPGO2Ecwo82b3aLmCaMN+CJUfenTYcmtPjTFejph56nh+/LRhcu/0nfnP6KVn0RQ+fOBdaq3ntvGnr5ybL03MMv0LHfD9PWb1bHq7FPkSIF5X+iKBWKKkF/XblIWw6vpkPHv6Dr16/5pa9gtOqhJA74Fpw7d47uu+8+SpYsmd92hesFr1jAD33hwhifbt11VyoqUaI4ZcyYUZ7/+edfaPPmLfTPP/+Y94GG6tZ9g1KnZp91E5Qwy3iliVB3A9/WqlVrxPd+ncqXL0t33HFHqB/B9TECjAAjEDIEwmXs9NqhS1eu0aTFh4TV223UsnpOSi5+OSUMgaROEwnrvVU6VDio9WTKlCmtysM899tvv9FHH31M9957LxUvXjTMW3vjmhcqmkCLJbN7W9Idr1ybvhcfmTNO4LhmpbpQnbyNKdltcZnR63SdlhyYR2PX9nd8w1UK1qWORd6Jc+2jE9uoe0yzOOdxIurhJ2l6raWUPJmvT/Nvf/1KtaNL0iWN+dMrACO4tfMh/VScfEKJA37ZGzZsotjYD3w0ys8++zQ99dRTVLt2TXrggfvjPHfv3k9p7tz5cc7bT1SvXo1efrkQnT9/nnr16mO/HPB42LDBdOeddwa8R7/oFYsdOz6k/fsPmFWBSa9Zsybdffdd5jlkEGQODP2lS5fM8y++WJCef/4585gz4YWAV5oIdS+uXLlCRYqUlNWuXbuC0qRJE+pHcH2MACPACIQMAS9j59iFB2nLnp8CtiFZstsodlgJoQxI3IXoiu0nqdk7H8q2vD+8GJV+IXPAdoXDRYVfzVcepdriX7glLzQRbn0IRXsSgsPff/9Ds2bNpr17P6NDh76WzXnqqdxUtOjLVKdOrQQpyU6cOEmjRo02uzhkyCBKlcpZCADlwQcfLKMPP9wp7x8yZKC4N5VZ1l9m377/UevW7SkqKhvNnz/H323mebiTNmrUlE6f/pXAV0BpqdKDDz5ITz75BD33XF4RqPlxdTpJ/iaEJtDh9es3CgHILvrii/0Sq6efzkO5cuWUPFj69OmSDCaeNOpFhj0Rp4PTGi6lJ9PllufP/P0H/fTn9/RImmx07533mfeO3TmEYnfNMI+RKfNcVepVaog8d+Xfy/TVqf2U7YHHKE1Kg5HddmwD9Y5t41MmfdpMtKjhVikUgBDg8K8HKdUdd1MW8TwkPL/yuIIy7/Rnx9uBzbETQhxnzpyht9/uQV9++ZXTo+W5+++/n4YMeZdANHpau3Y99e8/UD/lmO/cuSNVr15FRE//jSpWrOp4j7+Tmzevo7vu8mWW/d2L816xeP/9edKSQNVdunQpOXioY/33q68O0tat28xTmTJlomrVqpjHnAkvBLzSxPTpM2nbth3y3b72WqUEd4oZ9QRDyBUwAozADUTAy9jZesjHtGTNsXhb+eO2OnR7IjPq35w4S0XrrpZt2b2wMmV7KPwt3xR+7Rvmoe4Nn44Xxxt9gxeauNFtvBHP84oDFFadOr1trrmxvv3rr7/MJpcpU0ootLp7srjDTlYtWrQx60alGzasFrsU3WPWrzJ//HGG3n13MH388W51SjCKq6SW3DzhJ+OWUf/xx58ED1DLT23W6e7d36ZKlSpYJ5JYzitNYG343ntjaenS5Y49Bg82fPhgyp07l+P1cDzpWqPuxKgPqTlNsMzXadj67nTmrLH9FjqbLfMTNOeNVbLfYMRLjnzKB4NlbT+mB1I9SLhWaXJBunjxAt0mzBMWtthMme55WNZZZuwzPhryvtXGUonHysp62i2tR58fMT6MLhUHUcWc
1eX5t1e1oN0Ht/g8Sx0kFqOOj7px4+amFh2a85deKkh58jxFP/30M3322T5avnwlHT36rWzKnDkzfCReilGPispGPXt2U82N8wtGFhp5PO/rr7/xuQ5JW9euPeS5sWNH+TDlwDVHjiddSRe9fiiTJ08lBJNTqWHD+kKbfrc69Pm9cOGCkIZaUkSYLDVp0sjnHj4IHwS80gQmsdWr11LTpo2ENLhBgjvEjHqCIeQKGAFG4AYi4GXsVIxmlXJR1KhyXCUJmg+vuudzpL0hPbn4tzGv353K15rxhjzcw0MUfsyoewDvBhbx8m2gef36vUvr1m0gaEiHDx8i19RYe8ItbtiwkbIHvXv3pHLlXnHdm5iYxTR69Di5jlbMvxOjvnv3J2LN3kcKCHRBwY1g1KdNm0TKzP/ixYuSz9i0aYvYaek72V9o6KOissl8UvvjlSZWrFhFgwcPk93t0aMrFStWRL5DYAJ6AQ8Gq4NZs6KTBCQh06gH6u2cpuso2/2PylvKTXhOMuM4yJwxKy2ot1Gej94zjuZsGyfzyYWv6dq2n9GdyVPGuQZmc1OnA3TH7Snom1+/oqYzLc2rrtW3X5MV/fcnsRj1BQtiaOxYI8hep07t6fXXq+mPlXn4btes+Yb8oDFwYABRSTHq0LRPmWLUo64F+/vrr78KCZrx3E2b1vpljoOtz+uHMn78RJ9HtGnTyufYfuD2fnt5Pr5xCHilCbeMOgRRgXYASCijjsEP/9zGjoivXcG+iVDV4/Q8r31zqovPMQKMQGgQ8DJ2JpTRvPrv9ZviS+72uW7vV28kvnIJxU89J7F+vdCEvS2JOZfAnNvtHGlvXzDHXnBA20qXLi/X09OnT5Fmzfqz1JoDWvV+/Xrrl+LN61praF+7dOkuy9gZ9WPHjtMbb9SX14oWLULNmjU2j28Eo759+yYf03c0BK4AtWq9Ic29lQWubGAS++OFJtDF5s1bS9fbrl3fIrv1Jpj1OnXqSSRWrvyA0qa9MQLOhEAPOg+JRj1QI3RNd8UpLwiT6DPy9hZlulGdZw3NaZXowvT7H6fl+T5Vx1DJx8uZVR7/4yjVjy4vj+GbPrvOSpmfsGs4xeycJvOF85ShQeXGm2WuXrtKJUbkMo/1TGIw6lgYv/Zadflh2Blw/dnIf/jhR8I83vjoV6xYQunSpZO3MKNuIRUfY2/daeQWLlwktbXlypUVH2FNn8uwMIBFQ7NmTaRvv7qIWALNmrWSlgnjxo0WUtn1Ca7ju+9O0MiR71HWrFmEGVQ/9Sj5O2fOPLGn/CbhV11YaJUb+1zDhIII+e3atab8+fP5XAu3A7eDJ6Td8+YtkO8AUmlInB96KJPsli4NxglYiMAN4pNP9ko8IPV84YX8IsBKMWkNomMRiFGHX9LkycbY0KVLJx83EwRtwXWY4SM2QsGCL8hnICBd8uS+WqK33upKp06dlpM8/N+2bNkqJwD0Ae1q3bolZc78kN6sgHnQ3OzZc6V5HHzpUM9TT+WiKlVek1JfVRguMEeOHBUTfm0qW7aMOm3+xsYuFdY5Kyhv3meF2V978zzM7rZu3U47d+4iuOFA6NekSUNh9pdC0iXwhBkgJ0aAEbjxCLgdO9FCL4zmoeNnaeisA7T/m9/FuHtRrDFS0fNPpaPWNXNS/lzGwvSvf65SmZbrJAjRfQpTjmxxY3w0e/cjOvjtGWpdK5f07z534TK92naDLLMt+tU4AoAvjvxBy7aeED71P9M3h8/QI4/cQwXypKNWNXJQ7kfvl+X0Pz/99hfFrD9G2z/7hXZ/ekrMC3dT0fyZqFSBh+jVlx/Rb5X5Gl230E+n/6KpvQvRtk9/oeXbTtIXB34T42hyKpQ/Iw1o+Xwcc3wv+MV5cCKe8EITaA6UMtOmzZD+tydPfi81yrmFKW/jxg3psccMxRgYtmbNWsrW9+/fx1GzquYa+HIrrTMYf2ikP/74E9q+fYecpzDftWzZXM6dsIyrUqWy3E43VNB4weHk
yZPS7D1FihT0/vsz4wj2ly1bQUOHjqAsWR6hmJh5QTcV6/kOHTrTnj2fSle91q1biEDIhkbezqhj3dagQROxpu8smUKYwFeo8Jp8VigYdeUyiLUCXGZhtq0LEZwYdTwc7xU8BYIzt2rVPOi+h9ONXmgCTG109Ayxrr9Gb75ZO46bAr6JEiWMNdW8ebPp0UejwqnLftuS6Iy60qhf/vcSlRpp+WUrE/ZrIkp7seE5ZAN1U/m/rohF/R13EQLEVR1fSF7XGfL68yvS8R++IWjg17XbRyluv5P+vHTO9IsvOvxJqTGz9zwxGPXDh49Q/foG8xUdPTle3wf4lyPho1Naw0hg1I8fPy6jV549e9YOu6tj4FKo0EuULVvWoMoheF3Xrj0pZ84cNGPGVLOM7scPRuyddwy3ANyAYHeQvIHRio1dSKGoA8xR+fKV5fPtFg2wpMCEir6tXr1MunjgRjBvRYuWkmWWLYulDBnSy3y4/nE7eIKphPDCKekxEw4ePCRdR5zuwzm7xNwfow5GHAw2EsyeKlZ8VebxB64nQ4YMN4/1DBYemGz1VLlyNSl8g3Blx46d+iWZx+S5ZEmMCGR3X5xr9hMQUrRp08EMdoPFA+hBJd0lAEElJ0yYLAPFTJpkCSDVvYqWevbsKhYFRv82bdoi6LuvusXnF8w+BCYJsdbxqZAPGAFGwDUCbsdOPMAto7n2ox+oYfftfts2b0RxKikYYaSqb22mXXt+oQ6N8lC3Br7+27/8/jc9W+UDed+HcytQ9iz30W9n/6GnKi2R505urk0p7hA29/+lfUIoUF5YT/pLs4YUpbIvPWxe/v3cP1S21Qb6/vvz5jk907/D89SsqrEuVOfzVPtAMKh/U4nCmWnLzh/VafMXDPvemMr04H2GNSYuuMXPrOwGZbzQxFFhuou1izLHts8lo0ePlIJkdKFDh7eE4HuPo8sZLDwrVjSsUmNi5gqGNovs9fDho2RQNDsEmO/y5n1GrvHgvoY5K1TJCw7xPXvSpKk0Z85cudvQwIG+ipNAZdev30h9+w6Qa7VFi+aLNXoyv4w61rpgzhXDB/fTUDHqU6dG08yZc6SgZPz40XJ9i3bHx6hjbQR+BNpjMPfQ9CfFlBg0AWVLz559JKYbN665IdYiocA+URn1/DmK0MhKhh/Alm/XUd8l7cw2T6gXQ3ky5qVLV/+h0qOMSUL5rG84sooy3/sI5c4g/NO16zUKNaY2hYxFeKUpBems2PZNMfynL56iJfvnUssXjcV2RXHdaVu4xGDUt2zZJl6+YVqzZcv6oCI9mkD8l1GMOqJV4qP0lwJFbb/Zpu+IvHnhwkV/TXd1HltV1Kv3ZlBlzp8/T2XKGAyLzvyBOYFPChImGf3DhIZ70qQpwhXhdTGZtZVR9BNaB57TqFEzyYzpkyW2oqtatQYuy6RL8g4c+FJq9qOigov4adRw8/56HTyVGZrOkKpewOKhbt2GcuFRokRxuQCAVcKJEyel1gCDK97f3LmzKFOmjLKYE6MOjXKnTm/L63369PLRRn/66WfUtm1HeQ2WC6++Wk6ajO3evYe6d+8lz3fu3EEEabECNCpGHRdRpmLFCkL4d01q4+G7hoUSNBjQWseXFCOdPn06wYSPpYcfzizjOCxbttIUZChpPbT4sNBBsptn6aZb6v7Dhy1BIQRSiAkBqwWY5U2cOMUMcMOMenxvia8zAomHgJex0w2jee3adXqh7krJ/JYtnoUGt32eMj54Fx08fob6TNpHOz/5hQo8l55WjC4tOxm76Ttq0/8jqfneO6+ST8dnrjhM3UfspTy5HqSNk8vKa/4Y9ZOnLlKx+qvEeHiVir30EPVpnpeezHofHRDa+BFzDtDG7T+I8Ts5HVr5Ot0pmPsrV69R+TYb6MDB3+WzJ/Z8kfI+mVYKAuasOkqjpu+Xz5s5uCiVK2Qx94pRx8WerfNS/QqPE/qMaPR9x++Tz2/X4Cnq0egZWR5/3OBnFrqBGS80oRjp
F18sSAMG9JEujpiLcB5rHqwhYa2GtHHjZuFi2c9Rq6w0zvr96hzKtm/fRs6hiC+EAMlgXk+f/hWXZJyZcGbUdSVN//69hYm8oQyRjQ/w59y5P+VaDXgOHTpIWkD+/bcQDvnRqNurChWjDmuJGTNmyXUPXGEff/wx81E6o75y5VKxk1MKeQ0+6t9//wMtWLBIzvkvvFBArC2GmspAs4IkkvHybdi7BkygZQfdYg2IdRsSdsB6+WVDAWwvE47Hicao35kyJa1uvVdquqEdf3VcPvpXCzA2v/lGsd96VqkFrzAmPzUo3p4a5RdSQtw79nkaVH0KvZi1COka93blelP1PAbzVnxULsqS8THTFL72nNKUW+zhrqLIN4mpSodPfBkH88Rg1JXW0MnEBmZEYKCdUpo0acxAEIpRd7pPPxdIEHCzGXW7r7nebi95NybwLVu2oc8/3y/iBIwyzcdVoBFo2mFqjOARMP9FUpLmESOGCu39i/JcKOrA4IpBtlGj+qaJ+8qVq2nQoKFSIop2dOjQTggIDEYMEfLBTCUVEyWvg2cgRj0mJlYMoGOlaR4sIlKKsUMl7IsK4QcYVB03O6P+zTdHxHVDSDdwYH8xsRZTVcjfbt16STM+J5xXrVpDAwcOkZMhTOhUUow6tPLQzusJJmnR0TOpcOFCMoKofs0pP2rUGFq8eInU8Nvr+vTTfdIM/5lnnqbUqe+Wxdu37yRN76Dlh7ZfJQRenDIlWgimLL87tAPtwYJr4sSxPhFuMVEBP2jvmVFXKPIvI3DjEfAydipGE2bkuR+Paz6OXjSr9iS99HR6gr/2lr0/Sea16POZKNWdt5ud3H1ACP9ab5THJ4Q2HAzzX//8S4+WWSjPbZldnnJFWfVX6rCR9uw7TUO6iLVZRWPO9MeoTxR7q/cft08y3VunlaPUd91hPvfPi5epWOM10gRfadU/P/w7lW2yTt6zQ2jrnxDaej21Gfoxxa4+JjXn8wcVMy8pRr36q4/S+K7GnK0uDp65n8bMPCAFBQuHFFenI5JRVxZVdmE0zHo///xzoSW83dSoY/4sXtzZ1Ldt2w6CedlH3bp1ocqV/9/eecBbUVx/fIxYMNgxWKKCsQDGjuVvRQFBsWFBNLEFEQhIBGtQsRALil2CFcHYRQWpNkAE1KhYsBBj7xojKBi7/vc7y7l73r69993ddx/ceznDh7d7d2dnd357dub02d9jhqX+pZfm+LCrvn1Dt3kBE0s+CnVKOVvUCWnr12+A7wdz3nXXXZvzYJS+5NuSgI5s4VihsUZTFregLjwk3pfM53HPUi2o5+sHnnYDBvTLZDTM1+biPp5lvIw/Y7t2nbxBRY5jKDn33LN92KAcK/ctioYGEdRJPnHHCY+4dQOrONnge97T1c1958UaeIhL/MLvF7ojbmnnxp7wpF9y7aSxx7nZ/5rpLuk2wu20wa41BPXeHf/qjtgqHCg6XL2lu7vHVJ81/sFX73FDx5/l9m3T1Z2xV/hxiWt8jZsGPxpGUL8/0FxdWUOTKffFKtitW7JlWAsUWlDHepivTJgwpoYgo+stzYK6WMhlAoG4STSy+uqrBTE6vbzHQ58+vYK4lSOdFvK04qMUbYiFXAtFuNpgFUbL3aNH72AS3SEQTIf6VzdgwGle+3nNNVe4Nm2206+zLPezDp6FBHVRqMAYEJcdL7cHMe7XXjvca/dhTij6HXJMPCeGDr3Yh03E2yAkgdAErayROjpkYerUh3Pflwjqgwef69q330uq+y2xewj/KH6KyR6qLRXQYceOHXL5KWo0vOjHQw+F7neshzps2FW5KiRCQWlxxRWX+vh6TojS6ZRT+icubSgTv6bJXIO2YwgYAosFgSxjpwjqhR7wisAiXWiN8G+/D4wF87512x82xjfzyrhDcu7h/S55yt0z/k3XP3B/P32R+/unX3zjtjoodHufO+FQt9rKK/jr8gnqx58/w41/9F13es+tXP8/1FzZJ+m5bx3/b3fa
Jf90u+64tht9abtaVZ559T9u/14Pu9VWX8HNHRsqtKkkgvqwc3Zxh7RrXuO6cdPfcz3OesJttunq7vFFOY2oIPhVU9Z3LORYyhHk+vfv51cXyreqDhhcHIR7jR07zhsOMCBQtOVXPLPIlr7bbuE8h8JaW3H9RcEfMWYInyXH67vN8m0k3RO+D8u/4DNixPVu7bVDL7yk+voYXgPwZ5SxY+/zcf/sL05BHcs/Lu+82xtu+Lv3vOMZdNGCOoKnvHvCKDknBd4EfqgSEqbJM+ttKWgC3uirr77y3pl4SVDAFp5xxx2317cr6/0GEdSvP/Y+1+o3YTz63x493T08+4FaIFz1x9vcNuvu4Jdme2feW26Tpi3dcx8+7frffpSvK21gYe90xdb+WJedjnL9dz/b7z/2xiSfdA5Bf7+r23j3hj/s3tv13Km/P68zzPsDi/40hKAuLrcI2Lhe6/Lpp58GrrXhM8txrKoU4mZw9aWIoF4fZnppFtRJRHbccT1yVkMwxpLYrVvX4PjRgWC0Xy7ml7Xbjz++l9thhzbuqqsu9/jzpxRtMNlxLwaFKVMeDqybjfzk16JF6NpO4hESkOhz3HvatEcCF6aQIeJ3uZasg2chQZ01QZlgtHeD7r/EnUs+Ac5pQV3X1YK2HNducHIs3xaLfqtWLf1pEdRvuql23glxpYeZ0Vb4fO1CDyeffJr3+pA69Ge33Xb1QjtLJ+qimQPc25o2XdORrPCII47yEw0u8ZLfQrTG1157ldtuu210M35/2rTp3r2/PmNLrUbtgCFgCKRCIMvYKYLmfu03dEd1jtxf9Y1btljNNVujsT+EVf0fE/7tHnnqIzf7lc/d/Hnf6ap+Xwvqs176zB3c95Ea7u8jx73uzrj0GYf7/Mjzdstdn09QFwH61kvaur13Wi9XP99O/8uedneOfcP1PLKVO6/XtrWqkbRus33v9cefu6+LW2+t0HAh9xl3XcdcUjy5eNqzH7tuA6a4jX+3qptxy35yuCoFdYw/vXuf6BXP0lHmrD333MN7WjVr1kwO+y2ehgjY2uMTwR0Bvl27PXOJb2V+4aLp0x+r4ZklDbI+9T33jC5bi7o8H8+bT9kgfdFbPF/hzfAaIBSSkEgpei4WpYaci2+1AiRtMjndVpw31ee0oB5PJkc/6AP8FtsWLZr7kEGMp5VWsoyX+foILq8GeZAmTpzsMJpQdAhqvuvK5XjJBfUhh9/kXdbp4PVPXeFunx7GysQ7fFaXy93em0QD6o8//+D2HdbGfRvEg1Du7PVoEKe+gSP2/NBh4WSxQ6s93ND9b6zRVO/R3dwrb832x/p3Ps912Ty0yCWt906lhhDUyT55+OGh1Xzy5PFu1VVXqfGM+gcEQyw0jDtuLdtsEyohTFDXKIX7aVzfxYIuAvK9944OYtBvCCzXYWIV0QQTp/7ggxPcNdcM83HHrHcvpRRt0NZZZ50TKGymBve4MoiDXs4nfkFZcEKQeV6sm1hEicPv3r2nd71HSK2EknXwLCSoi7Vbhy1oLJ555lnvyoYmdOLEsf5UPkE9adWFDz74MFguMRwXmACXWab2pEXCHTwdsOqLJUEE9XgiOx4graDONTzzrFlPBkq5hx19glaloFDq169PDRc9wiUIm5BlRkaN+ofPZh9338cdkUmZzL4dOtS2UElojgnqgrZtDYHFj0CWsVME9WIswljOu50x1WdQl94RY776qsu7FZZf1seKc1wL6sR4b3XYAz5J29RRnV2rQOgXt3dxVZe28gnqLQ8c7RUCOlGdXJO07X3RLPfApLfdicf83p3ZPYonl7raJf/Juw5wLdZd2Z8SQX389R1dm1ZNpbrfLk2COh3GSgiP8fDDj9RQ/nJOe2ryG76mS5fDfJyuCCfi9q6V47SJkYGiLcr+wKI/Awee7VcWKUeLuvBWPGo+pbXui97H3R23d4xtw4dfU0NJwTyNYYeCG32TJk18EuIVV1xRN+H3SyWo05hOFqtvVEhQ
l3o6z00ahYVcXw7bLONlXc/9yy+/+KS+s2c/74145GGohFJSQf3Mgy5zHTcNY13ufekf7prJg/NicNQefV2PHfvlzv/t0TMCy/v9ud/TTp3rXeFf+yzIzj3yEH+82VrruXuPm5qr8+S7093pdx+f+y3u9D/89L1rd9nvc8f1TkMI6lpo6Nnz+EAzF67Tp+8r+7KcA78nTXowyBi9mj9lgrogFG3TCOpcJUtSDBt2lY/jJdZKLKy3L3KfJokES1thpU0awErRxvjxYcxzjx5/8vGCxA8PH36tt+iLexUKgrXWahrE1A8LspQnuyxHSJTPXtbBs5CgLvHjLDV22GHht657TGw3Md5YDC68MBxT9Dc3ZMgF3rp8yiln+Mt0LDsHYFR22aWtP6ez2/oDBf6UWlDXt+KZ8Oy4++57PcPFOVzdttgiGreef/6FIGyjnxP3d3F7j9OtxNUdckgXT0v6PuwLviaox5Gx34bA4kMgy9iZRlCf+uxH7ogBIX+EkN22zTpuxUBAp8xf8J1r2Xm039eCOgeGjHzJXTFijhvQfUt37AEbuy0PvN8nf/vX+MPcco0ipWY+Qf2YQdPdQ9PedwP/vI3rFyzlVle54f65btCVz9WKQZfrWOatY/dJ/hnemNg1iLlexp8yQV0QqrklDp2kqGQ4F2/NuEV8xIhRQejdzd5YQDx6584HeqEUq2+jRo1yDYp3m/b2lJPEfnfp0tVb8stNUBdlNM96+eWXOBLtpSlDh14RrOBS2/M3XxvwmMzL8VJfQX38+DGeJyB3ESXJm68YQZ1rxdMuKXSP8+VesoyXeE/OmfOKX5atTZva3jr0WRL1kZuqEgxkKBdKJqj37XSW67plKKA+9Po4d8GYMLFTPmJYeeVV3ITez/rT2mrOga032cld3eVWfy4uwE88abZrsnwTH/ve4cot/PJWVFxuueXcIyfN8cJ9XID3DS360xCCOk2LtQuN3B133Jq4zBbW9HPOOd8z5tqNl+tNUAeFmiWtoM465YMGne+FPYQ7negLiyOWRyYp3L6wzhLvv8wyIRMgdy5FG4Q7HHTQYX4gJ5kXmThlQhSPCmLnW7RoEax5PTMYmKOlUeQ5ynWbZfCkLyKod+16qI+r0/1jUvp7kFCPiY9Yfe2mBV4khkEDynqgWJMpWlAXhZcwI5yPa9TFghAX4qkrQjP7rVu3yrmUl1JQx62QZ15//d/mYuC5H4Mw1n4m31NPHVBjbVptCcE7gz4kudrr1Q2ISdtll51p2hexFPDDBPUQE/trCCwJBLKMnWkE9YsDgfvKQOBus/VabvzVYQIx6ef4IIb7+CCGmxIX1N/6cIHb+YgHvft7n24tvdt7jyNa+nXJ5Xq2+QT1q+58xV00/AXXuuUabvKwjjWWbcMV/8RLnnSfBsu9EQO/Y7Ce+9Mv/8cdGCzNRib4p+48wP1m9dBtX+514YgX3dUjX3Y777C2u39oOzmci1Ffmi3qhNaRowS+5XfBeumaf9Ghj7fdNjK3njoAilcZ7u9HHtnNu72TDyaeME68uOBjUQizegiFuUhiv/ldToK68Gw8V9Zs3nfeeXcQfjidJmoV5m1RgBBigKxx8sknuU033aRW3foI6i1ahOGR8DwDBpzqk8nCp9566wgf+iY3K0ZQ13Xuuus2xyo6lVayjJeEhUhOsHzezQMGhHmhWK2ne7BqTyWUkgjqf9yjjzthx7/4/mIBP+2BHol9X7Bgvv/g5aRYwPnd896u7rW3X3Bkix/b+ym/hvqPP//oLeMsiSSlT6cz3eFbhgkxbpt9o7vh0Uv9qeuOGe1aN9vS7x8XDP5vvh/Ggct1sm0oQZ2Mm4cccrjXNjLIsbQD8aIykKLpGTr0Sp95mmcRl2x5LhPUBYlom1ZQ14MkrYjLMPsIRJ07H5SL60rK5E29UrRBO5KZlf127aI4MH6L1Z59koHgZiaFeLKLLhriiFkm4YUIrax5ipLn559/CibMQTlFEJPUTTfd4pOd
9ejR3TfDZH7OOYMD4e9DL/xtvnndVg65f13bLIMnbRJqcMcdd3uXMeLAcfuXIkoUfiOIk/AGtzIsBTffPDKIsbrDV9WW5CRBHWaCpdamT5/hrQW33z4yl0iGuKQhQ4b6dpjM0aaCLVYCQiSwbDMpjh07Ouf2lkVQz/f+JPQCTf+ll16UUwbouMBRo26uNflLdnnohCVGTj65f7CEXBffD/lDv88++zyfsJBjZH+HicNiD7b0a968eSaoC2C2NQSWAAJZxs40gvoDU991vc+Z4XumXcaxUB9+ypRcvHpcUOeCjn0eci/O+Tzw8mrs3eAfGbGP22LjNWqglE9Qn/vOfNf26Am+7pEHbewF/F83buS++e4nd/EtL7rr73jNC+UvBvHmK/96eZ9tftdjx/lM8CgVRg3e3Se3+ylwwx/3+Huu16I+DDl1B3fM/pEwtDgs6vnGb+avGTNmBbHePV3btrv7viL8Dh58YTBXrRDM6ecWDHmsAaT6kZYmFi5c6JPk0gTPcvTRoeKa36Kwhf+cPHlcbh7jHEUyustcggC4ySYbhycX/cX9ndw+CHoU5slVV101SHj7tJ9DZC4pF0Fd8kPxrBdcEOV84ne+guUc7wO8Ggk5q6uw5HCHDvv4ag0Zo96iRbRE7/z5XwZrof/Jz/ko2K+99src+9RCeDxGHcPQyy+/GuReusYrdHhfEi5YVz/L7Xzab0OeH/pFsUKY49lnD8wl00MGwCgEv0fRIY35vvty4btLIqjL+ucCVL7t2ZP/4h5/aVLu9KYb/t7d0HW0t4JzkMRxjZdr7JYJ/lGunTnE3TPzZr8vfxo3Xsnd1/OJwKoexi3h5k7MaaNfhe47L3z0jOt3WzR4yXWybShBnfZJVoCgAENNYcBs3bqlXxpJjnE8HmPKMRHU2S9UTo1Z3XRdrVF99NFJuWyQuk6a/bQfypJcnk36Jcna+D169J2BYLienPIxSFgYKfE4rlylYKcUbWAhFvelQYMGun326ZS7BRlJydxKOfTQg712Vk6eeWaYIZ7fOrEZk8vQoVf4asTVyASjFQLE3xM/NXv2865Pn1BxxjIdxDqVqqSlCbnvlCnTfOZ9+d2iRfNgoLwhWD4kjPOSLOpyHssxQqYUlkphyRQpSYI655hUu3cPlyMj6ykxZRJLxsoMo0eH4TV8m2iZ0cBKrHjcGp1FUM/3/mbOnBW4pZ/hH59745b13XffO2LjKfzGah4veGN07Xpk7vCECWPdGmusnvstOyg1Bg++KCesy/G9924fhAy0tWRyAohtDYElhECWsTONoP7FV9+5Nl3HBOPZj76HWLgXfP2DX1e9RfNV3NvvfOWPJwnqt018w51y8dP+fDwhm8CVT1Dn/ORZH7hjz3hcqnrr+qtzv8j9Pv+k7dwJB7fM/X7zgwWuw/ETc8/K83362f9yv4/rupm7qG+bXH126iOo12go4ceH0450ywYu9knjt05GikWVeZmiY6IHDTozmOM7JrRc+FAWmtC8BUI34VLEJBNWRyEXDjlx4kVC8jie5Jkl9ZlzMAqIFVmO9+nTyy1YsNALueUgqM+bN8/tu++B8ngFt2PGjM4ZN8QlnAtmzpyWM4bka2BJCOo8y+uv/zsQ1kPjy8EHH+SNLhzXgjq/8xX4DHIhIehXYsnybdBPvoPTThuYM8pB66uttmogn83N8XpY0rt3PzZnSE367mnrvvuWPN+NIaYkgvr9fWe6piutRb8KloGT+roZcx6uUWfjDTZ3ww+9063QKErMwNrp18y42N335MgadeVH44AAbz12kmvWJHTL4TjLwOHyfsbdydZ8ubYhBXXusWDBguDjuNoL3nJP2bZo0dzHkCbFtog2VOrm2xaKZ9YTymOPTfaKgnztFHM87YdSDoK6xJ/oDKfS1+nTZwRW9oH+Zz63GE6Wog1JgEZ7ZOhu2jRKfjN//vxgUj+AU4HwXdNVmWXcGDQYXGAIcLOikLCwV68TvQWYZd422qiFP45m
GA2hjt8mSynKhvfee79W+/6ievxJSxNyq18CbeYdd9zlM8aK0uqRRybl1g2n3tSpj/usnIQDSCF8Yd99O/n+yTG2eA3IUjLxd6ndnw466ADvWcE1uJSRq4D8BOQvkLL11lt660R8QpN4Pa0wkWuee+75wG3wL7WWZ8v3/riOpftQFpCnQgqM1gEH7Be8r6NzVnY5J1uxhOy++67eU0eOJ23xEECj/u233wSKiA398i7geeqpfzWLehJgdswQWEwIZBk7ZU3xk4Ll085YtHxaocd956OFrucFM711XOp1bLu+G9xnO7fDYWP8odfGH+pWX6XmCiM60/qgE7d1fz6slVye26IIaL1fGOf+/pQjasSvU2nijPfdbRPfdFNmfJi7BqF/YJAwbt9d188dkx0s/cPvneumPf1Rztq/1RZNXaed13P9jtjcC85Sl+223cZ4K/zEGzu5bTdbU59y02d/7LqeNKXW8myCX43KCT9EUM83fou7rA6dQpDt2/ckv1rLiBHFLwOmb5+FJphLEbpvumlEzihEmy1aNA+Snh2XW0mIY7poa7zuh66j97/88isv9DRq1Cjw9NrYe2aJZxyCDvcqVcmCwyeffOJj5ot5Bi2oX331MIeruxZ+C7WBlbp9+9CiHudZ4tdp5UFd1ne59oUXXgz4jxMTlSfarf/88wcFlv327uOPPwmePdkTAOEcxQ1u+Ycc0iWnnJB7VdI2C01I/zBaXnbZVbWS9qJoI/Sjffu9pKrf5vvuy4XvTi2o73lZK8/w1uhlCX6stuoabpvmO7s3PnvVvf/xW0W1iJv8Nhvt7L4Olmh75a1na7jVJzXAckZTT34t6VTuWH2II9dIsIMWBM3XBx984BMbYL1beeXQC0DXK+f9tFiUg6BezngW+2xYi5kcJWxCrkPQpMiyXHIc4Sy+tBv0R30R9KVufbdpaSJ+P56L56VvKwbfb1JBCCfOf+21167V16T6WY4x+aIwQYESxy5Le/qafO9P6nD+s88+8+OBDgGQ82m3WECIW8Q7Yfvta1qhaEsUTyTqI2GfFUPAEFj8CNR37EzzxGRO/+S//wviv1d0TVYKlb1prq9P3R9+/Nm9/+nXbp2mK7nGKyxbZ1OB3One/+xrt2agPMBlfkmXfOP398E61csvv3yNx8s3J9eoVOBHfWkCb7AvvpjnY5jzzacFbl/rFMrkefPmB3Nvs1phWFQWpTFJXTEOlKrUF4e0z5HEM6Vtw+o3LAKlogl4STxBMN7Fv1/dg3zffb5vPImGGoLvRjGXWlDvNqq9++jT93T/KmZ/3WYbuLuOebTg85aKOArepEJOpsVi5MhbA9fjhSXpHUqNY445qiRtWSOlQyAtTZTuztZSPgReVy5y/YIl3si/QAgEikJiCy+7LAyZGDq0pvdGvvbsuCFgCJQeARs7S49ppbdYbjQhK6yAK3mUttlmK280YI4hPJM11Ckkxy2Fktk3FvwpNxzkuWy75BAwmgixzySo3/D0Ve62x4ctubdXjzvrpHf5mjHiiJBJi8Xbb78drBP9VC42JGop3V7Tpmu6nXbayTVvvmG6C612gyOQliYa/IHsBh4BnaCQA7jAYW2R0qXLgd6ajqeGFUPAEFj8CNjYufgxL/c7lhtNMGccd1wPHzYn2MXnkoEDT/fKYDlfim254VCKPlkb9UPAaCLEDyt9aos6l1aiVb0Yazp9M+IAhbAYFoKEbQUBowlBovy25LkYO/bBIAnNG15IJ+Pr5pu3DpLw7eb222/f8ntgeyJDYClCIO3Yufbuty9F6Cz5rn4yPX8S4oZ6urQ00VDPodvFxZ9EuE88MTOXT4XlhJlLiOveaqstdfWS7JcjDiXpmDWSGQGjiQi6TIL6u/Pedqc/2KNiXOAR0occcKPbcPUwAVfU/dp7RhwRJoZFhIXthQgYTVQGJbBcpGTUr4wntqc0BKobARs7q/v9ZuldudME1jxid0udxyWOVbnjEH9e+93wCBhNRBhn
EtTlctzgp8wd5z79/MMGSTAn98myJeFWs6brub1a7p9b472Ydow4IpQMiwgL2wsRMJowSjAEDAFDID0CNnamx6zarzCaCN+w4VDtlJ6+f0YTEWb1EtSjZqpnz4gjepeGRYSF7YUIGE0YJRgChoAhkB4BGzvTY1btVxhNhG/YcKh2Sk/fP6OJCDMT1CMs/J4RRwSIYRFhYXshAkYTRgmGgCFgCKRHwMbO9JhV+xVGE+EbNhyqndLT989oIsQsczK59JBXzhVGHNG7MiwiLGwvRMBowijBEDAEDIH0CNjYmR6zar/CaCJ8w4ZDtVN6+v4ZTUSYmUU9wsLvGXFEgBgWERa2FyJgNGGUYAgYAoZAegRs7EyPWbVfYTQRvmHDodopPX3/jCYizExQj7Dwe0YcESCGRYSF7YUIGE0YJRgChoAhkB4BGzvTY1btVxhNhG/YcKh2Sk/fP6OJELNffvkl2zrq6SGvnCuMOKJ3ZVhEWNheiIDRhFGCIWAIGALpEbCxMz1m1X6F0UT4hg2Haqf09P0zmggxsxj1BNox4ohAMSwiLGwvRMBowijBEDAEDIH0CNjYmR6zar/CaCJ8w4ZDtVN6+v4ZTUSYmet7hIXfM+KIADEsIixsL0TAaMIowRAwBAyB9AjY2Jkes2q/wmgifMOGQ7VTevr+GU2EmJnrewLtGHFEoBgWERa2FyJgNGGUYAgYAoZAegRs7EyPWbVfYTQRvmHDodopPX3/jCZCzGoI6uus0yw9klV4xcKF3/heNWnSuAp7l65LhkU6vJaG2kYTS8Nbtj4aAoZAqRGwsbPUiFZ+e0YT4Ts0HCqflkvdA6OJENFf/epXUTI5E9RtwIh/aPahxBGx30YTRgOGgCFgCKRHwMbO9JhV+xVGE+EbNhyqndLT989oIsIsF6NugroNGBFZGBZxLOy30YTRgCFgCBgCWREwxjMrctV7ndFE+G4Nh+ql8aw9M5oIkVtmmWXMoh4nIiOOCBHDIsLC9kIEjCaMEgwBQ8AQSI+AjZ3pMav2K4wmwjdsOFQ7pafvn9FEhJlZ1CMs/J4RRwSIYRFhYXshAkYTRgmGgCFgCKRHwMbO9JhV+xVGE+EbNhyqndLT989oIsTMkskl0I4RRwSKYRFhYXshAkYTRgmGgCFgCKRHwMbO9JhV+xVGE+EbNhyqndLT989oIsTMXN8TaMeIIwLFsIiwsL0QAaMJowRDwBAwBNIjYGNnesyq/QqjifANGw7VTunp+2c0EWJmgnoC7RhxRKAYFhEWthciYDRhlGAIGAKGQHoEbOxMj1m1X2E0Eb5hw6HaKT19/4wmQsxsebYE2jHiiEAxLCIsbC9EwGjCKMEQMAQMgfQI2NiZHrNqv8JoInzDhkO1U3r6/hlNhJhZjHoC7RhxRKAYFhEWthciYDRhlGAIGAKGQHoEbOxMj1m1X2E0Eb5hw6HaKT19/4wmQszM9T2Bdow4IlAMiwgL2wsRMJowSjAEDAFDID0CNnamx6zarzCaCN+w4VDtlJ6+f0YTIWYmqCfQjhFHBIphEWFheyECRhNGCYaAIWAIpEfAxs70mFX7FUYT4Rs2HKqd0tP3z2gixMwE9QTaMeKIQDEsIixsL0TAaMIowRAwBAyB9AjY2Jkes2q/wmgifMOGQ7VTevr+GU2EmFmMegLtGHFEoBgWERa2FyJgNGGUYAgYAoZAegRs7EyPWbVfYTQRvmHDodopPX3/jCZCzMyinkA7RhwRKIZFhIXthQgYTRglGAKGgCGQHgEbO9NjVu1XGE2Eb9hwqHZKT98/o4kQMxPUE2jHiCMCxbCIsLC9EAGjCaMEQ8AQMATSI2BjZ3rMqv0Ko4nwDRsO1U7p6ftnNBFiVlLX959//tm98cYb7uOPP3bz5s1zK664ovvNb37jNttsM/frX/+64Fv63//+59555x334YcfujXXXNO1aNHCrb766gWv+fHHH917773n3n33Xbf88su7jTbayK299toO7UN9SimJ
A0wWLlzomjRp4li0vtJKKbGotL6nfd633nrLffDBB26DDTZwzZs3T3t5xdQvF5pg8JoxY4Zju/POO7tGjRpVDIb2oIaAIbD0IZBl7HzmmWf8vNKuXTu3yiqrLH2gVXmPs9BENUJSLTiUC1/yxRdfuJdfftmtuuqqbquttqpIkqkWmqgv+MiOy7zzznu/0NA66zTL3N5TTz3lJk6c6BCe44WbdOrUye26667xU/73nDlz3J133lnr3C677OI6d+5c6zgH/vOf/7hhw4a577//vsb59dZbz/Xo0cML7jVOpPhRX+L44Ycf3D//+U8vRHz55Ze5Ozdt2tRtuOGGrkOHDokT7quvvuoeffTRXP18O7vvvrvbeuut3ddff+1uvvnmfNUSj/fq1SsVNvXFIvEhqvTg6NGj3ezZs91OO+3kDjjggCrtpQsUT9/4vjVp0niJ9pGxZtCgQf4Z/vrXv7qVV155iT6P3dwQMAQMgUIIpB074R+GDBnim9x+++1dly5dCjVv5yoQgbQ0UYFdLOqRqwWHcuFLXnzxRXf33Xd7w+fJJ59c1Dsot0pZaOKOO+5wn3/+eZ1d2WSTTdw+++xTqx6y5QMPPOC+/fZbL2chb5VDKYmgjqCNwE1ZZ511XLNmzbx1nE5LOfzww2tpdv71r3+5UaNG+SorrbSSa9Wqlb/uk08+8ccACSFfFyavyy67zCsFUAJsvvnm7rvvvnOvv/66r8b9TzzxRH1Jqv0sxCE3+Oqrr9z111/vPQrkWHyL5e+oo45yEIouTz75pBs3bpw+lLjfvn17t9dee7n58+e7Sy65JLFOvoMINng6FFvqg0Wx96iWeiaoF36T0DYaXqzfe+yxR+HKRZwtlwmxiEe1KoaAIWAIpFZy/vTTT+6CCy7wTOPBBx/s2rRpYyhWGQLGY4UvtFpwKBe+ZGkV1C+66CK3YMGCOkeJ3/3ud6579+416mFwfvDBB3PHttlmG3fYYYflfi/JnZII6uPHj/cu5zDguHlLQbNx+eWX+58I4meddZac8ttrr73WffTRR45zp5xyihcicR256aab3Ntvv+3dxREucW2X8tBDD7nHH3/c//zzn//sfvvb3/r9adOmuYcfftjvY1XHfT5LyTpgMKkOHTrUiRWdl4wSYeONN/Yanrlz57qnn346R0S9e/d266+/fu4RRVAHv27duuWOx3ewzOMCx/1w+9cFRcE999zjD/3xj3+sIZQTEoBLdprQgKxY6GdaWvZNUC/8pkeOHOmVaf/3f//n9t9//8KVizhbLhNiEY9qVQwBQ8AQSC2oAxnz/DfffFODrzIoqwcB47HCd1ktOJQLX7K0CuqEXmO4zVduu+02R6j1Djvs4A466CBfjfH1rrvucv/+97/9bwzAhC2Xi6DOs5REUM8HCseFQWdfW3SJY7/00ks57LS2mIkJS7FoRfQ5hHg0zAC9xRZbuCOOOMJfzx9t1Y+fy1UqYifrgPHII4+4qVOn+jsQT8b/eEGIp88Aj0X9uOOOy1URQZ0Y/ayuKhrTs88+2zVuXD/35KxY5DpVxTu8Qz5oKcUK6vHr5PpCW/+hBoqWNEqWQu3V51xWmpBxoFhBvS6c6jshMpbwX7/DYnCp67mKaYM6pWon6X5Z+5bUlh0zBAyB0iCQdews5u6F5oi0Y03a+sU8n9VJRqAUNNGQ76sh29aIlAIH2kv7vGnr13WPfHxJ2vukrc+cTxEecWkV1D0Ief68+eabuXDhM844IxeCfOONN+YMw4QXIbC/9NJLZSOo804bXFDXlu4zzzwzl1hOBFMw1TGmWMW5RkrLli3d0Ucf7X/iSn/FFVf4/a5du/oYAn6QyAsrvBQs8Oeee678TLXNMmDwkZx33nk+Zj4ugMdv/sILL+Ss3qeeemouaZ7gUYmCOkqKZ5991m233XZu7733rtHl4cOHezd94kGIrZdCLD/eFgxIffv2dbhkt+WiAAAfbElEQVSd1LcNtGn33Xefw+sArwpd
Jk2a5MC+devW7sADD9SnvDKJaznO+XyFZ501a5Z34SaRIUIeyeOISSexWb4YdbxGJkyY4BMtEvsCfZJPAWUOSRB1wTuFMJK2bdv6uOvp06f7ZELUgTb2228/n6BRX7M499N+H9A13zM5FWTykeSSKKS0twwJJcGQgRKlFolQ+J623XZb7w2i+5lvQqQOkxQ5MyiHHnpojTATBmDwJeyG5+H9EXKDS/6yyy7rr5E/5MHAS4Xx57XXXvPt/ve///XvHW8YlIhrrbWWVK9zC81Dh3jXELoC/ZBwk/wd9FHKLbfc4gj/gQZQbMTLlClTvHcOXkPa+4Z+Pf/88z6pJ/hAL3x3hNvwXRAWdOyxx8abs9+GgCGwGBBIO3bySDfccINjzMEo0TzwiKPwnTNP8P0zdjDWMR9RGM9w1yQZL4noGCsYSxkD8D6EEU0as0iEiscf4yIJcPFypG1i46+++mrfNvyKJe30UJTsTxaa4OYYZQgn471juGIeZXyHP4C3oGBZFI/WY445xq277rr+uP6TNNdgLJs5c6Yjb5LwOcx3WCCZP+HTmJeYn0pVsuLA/ZlLhXaZN4XWsZpqnlOeFfrm+8FrF4Mg8zCJqAktIcdQUgEHeBO+D74nvg8wIRRV8OY6zZecdtpp3vuX+Z5r8JaF34PPTDKiwR/QD8GdfsD/bLnllt4wmfRcr7zySo5nkjmfcGH2l8YY9SSM5BjjGPSBnHLIIYfIYTc8kFH4hjCcrrHGGt66vtQJ6mJJg0gHDhyYA0dc2CHG888/3x/XrvKrrbaa/wD5CPr06ePPa4G8f//+fsJhUCHhCh8fE5DExWN5F+1S7qZF7GQZMPiIr7vuOt/68ccfX0v4it+WgYVCAiwRDipZUEc4uPfeex3vjMFJio6j33TTTWsICQhjTBLEzONpUYo2EKouvvhif/u4R8Hf/vY3/zFCbyhVhDYYHM855xx/DeEXfKj5CsLOc889V+u0DPQI5PFkcqLZrHXRogN/+MMffIiEnP/HP/7hhUImVdpLKgiOKLCWREn7fcAo5kuSqD1smDTRbOYr8XAWPSFqRR8DLG5MlH333bdGEkuUHpMnT068BWEqvAtdzg2UfSSsZEJ+//339Sm/z3s//fTTi0pkh4KGiUK+fSZ6Jgcp2tOAZ+RZUTgNGDBAquS2Qsu6fzDlJEFJKoTfsCJHfZSASe3aMUPAECgegbRjJy1feOGFnrdB0GIFHYrEUsI/IIzFk+oytsCMPvHEE76+/oNAx5iiM8ijpEYpieIyXmTs4DjjoVasxuva7/QIZKEJlCrwm/K+4nOJ5imuueYabyDQ84s8JcKjJCvs16+fF1Y5R0Iu8snEC/MdygBWZ4rzOfG6aX9nwYF7wPdfeeWVNeZSfe/4cyJjMA/DPySVpCTWGBBQmOUrKMsRpimaL8nHNzCvY5zS3xLGPvhh5umkgpIExYMumtfRx9lHiQdPVclzflaaiGPBb3KYIYtStDWd35xjnIO+KfCP5SSo81wNalEnhprkahTi1zt27Oj3+YNgh3DGZAOjTZGYdZKlsbwAFlB9Xgs9CP0I/yLw81HwUdIuRVvv/YEi/2QhDoQ3hDgKQt8KK6xQ5N2iaiKoown/y1/+Ep2I7emPO3bKa1klnCAuqMbrFvO7WCywlqIYoWjhSxgKjkNsgwcPzgnIWBZhJFg6gkSDpWiD+xA2gTCkJyutAKIOg6RolxkYR4wY4WlJK5Kop4sWhLC+7rbbbt47hI+cD1sGfj0xMPhKqAYDJ5YOrMQI4AhVbEm8qN+3COrcm4QXaP6gczSnWE7QAC/JwbdYmtDYsS8KuySGgffDZAvjgdWIGHY03Gg/sRowUUI/J510khdeaU9PiCKoY2mSFSTwcuBdSEFLTXwSZc899/QWAb4lGBLCFiiSqNH/CP6IoM5vrsHCxDMybiFMs6/ft1yXtBX64Z7kp+C9o2Qk34YoMmTMYuwj3wUFxRcKMCnQDOMkReprRSEKMazo
WOphqMaMGZNT+CxJupHnt60hsLQikGXsLCSogyPfNJYg+Aa8fhCyRIBj/sDzkPmDczL+aaYfBSJzJlt4LYQOVqdhDiWUTyumTVAvPeVmoQkRpOFhSIiFdZb3x3H4GWgB7wcKKxAxByDMx3NEMffAP+v6coxrmfOY32gft2HmVlEKFTvv0U4xJQsOzJ/wDXicYPDB6wSLNd4G8NP8p+h8UBIiCx5gB58BrWNQEFrXdE7bePHyTeGRwreDsgLFPTwsMg68CddgBNJ8CfemPlZ9lPK8i2mLvIXjybXhc+R56QeGGN4p9eW4NlZ8+umn7qqrruIW/nuF36EvPBeyiBgsK3nOz0ITHpCEP3iWwGcWs3rGUiWo80HDbKLxgtHEzVWsx+CIcMSgghUc67gwstTFsomrMASqLe64F4tLK0IfDK24wjMwffbZZ7ks8gg/MMNpSxbiEKth0mDIB87AkVSYGEXwpq98rHWVQoqAJRmjLh8CSezEfVzeMe+UwbBnz55+UKGPounV2shStCED3o477phzcRe6kedAEYRQRhGFQdwdxp9Uf+R5sWpg3dBFvAM4picwJhIERAR2Bl5519TT1+BRAp1TRFCHlhBA9TeDC9Wtt97q62X1GPEX1+NPlu+D2xUS1BFU+YZgKPn2NU56HNHvTU+I4ISwevvtt/ueodzgfeqCBYI6Se8ZNz/GG60U5FomX+6vw2+kTaEzJm+SWtZVZPBPaov3yn1wcxOXOHHTIpSkrXIx5Dl5Xp21lEylKMVguBhLhZZ4JhKlsEoGjEIlT9p14WvnDYFyRyDL2FmXoC5KSum7jDMID3iO6flDxmDCbAgJoogCk/pY2rVHWdzKx3iox2a5p22zI5CFJsSjCoU2im8psvoR7xIPMQrzCu+NgmeqdtNG0EPgw1ValqFirkA41TTiLw7+YMn/+9//7n9qPkfO12ebBQdtCNSJpeU5sIKj5MelnTA1CnwXcyKGPeZLKWDH90JB8cVcTBHeBH5MEl77E8Ef2sEwhkAtfKzmS5JWrWJJZZQe2mDBd4ZhDVlB2pF7sMXSznPr5GbCt8IzIVtp4yCepchePEslz/lZaELjJvsyxvE7Pl5KHb2VMVTjrc8v7n3oo0Es6hAcQhqu6gwaWDDR9ugikwaExPreuOBAWKL9EuZTC+pamGXw4UPEwiTMrH4h4hqv71nMfhbiEEFdayblXriVIeQlFS1Q6L6BWb6CxTrfZLkkBXUZOGQAhwbkWTt37uy1fDJw6cFMKx5K0YZYyPUAJQM2YQnkMkAjKkv4iReHVjDEsdcDab7QBlkWQvofb0P/xt0ey7hYTfXgIYL673//e3fkkUfqy3y8tLj2izW1RoXF8CPL98FjyfeuJyh5XFHoxL1u5Lx4zeCe9Kc//ckf1jSENlmW1YhrqqUNaJFrtLJIzumQCc2Msg+jQ1wnmlhdiFfDEo+HBO7vdRVtqeA7QJGkGYX49eKNIopMOS+MO7QBjVBEidShQwdvBZG6shWlgv4m5JxtDQFDYPEgkGXslO89yfU9aezBCk7OGAQReCldZH4lJwdLxFIkzAZPphNOOEFX9/taOazHxloV7UAmBLLQBDwMvDW8MTHpJE8WBW/SQwhPgScg9Sna7V14CQwLCIyUJMGX42JMKYbPoX6xJQsOeGRC05qfK/Z+uh79RuiGt4Jv1fmvRLDGgzJp3W3dDvuaL9EWcKknObi0ol1bx1HCxPl/4TV0KJwkQMvHM4nHciXP+VloQnDWW5QpyEY607s+H98vN0G9wZLJSUcBQE8wGpD777/fJ6VAU4W7Ci6oGkhpAyuoxD1r19a2gZUJtxAIEaEc4haC5j7aBVvft679LMQhz8Uz8KHpgtVfJ7rjHNZlis5oL4J6fT6sJSmoo7lEIJbnl5hj3H6IpYXhkIFGcg2gvCE2Skop2mDQRfhnwGXLZMbkg+YR13YUQkxS+hz3L8SEaFdk3Meg2XgRQTQ+gfEcTCgokVDaMJDH
S5KgDn3HE/Np7bhMrvG2Gvp3lu+DZxJ8kgR1PAq0VjreB4nFknwGnNcToq6f9B753nDvLKbArMK0UmgLzJOUM6IUjFvh/YUJf+gf1gjcr6TQHyzsjHtyTzmnNfwoAmDKRekHTUO/Yi2DJqGzfLkLZFyUb1PuYVtDwBBYfAhkGTsLCeo6f4/0Ap4IYUALAnJOhAQtqIsgkjQuc10+Jaa0adv6IZCFJpgHhgcJsDQvAZ+MwI4CWHtF8HQSnwvfIu7vkq+FkDxJvCvzC9doLz9+SxG+PM7nyPms2yw4EMrBPJzkJVfoOZA1SIiHhx3zcrxoQV14ANzRwbeuovkSzdfJdaKw17iLN7HUKbQVQV6eK8kCz/XiJVjJc34Wmohjx7smJISS9D7i9fktNF4uFnX4vJJb1MV6Q4dxr9LZjDkmRVxK5DdMK0H+Yi2WCURrhkWQk2vY6nhjGXw4zgSXpWQhDmJpcVWlSOx8vnvDUKNEYKuVGJUuqOt+IUQ89thjPhO6xIqLJpaJAld0GApioLACSilFG7Ql2kYEF4gc1yFRAgl9YpEk+zh1kxgeeSa2hFQQC0VhgBQa9QcW/ZFJQ09gWM6ZUKEPKVhRmTARtnDdougBRLTfS5OgLtbufF4NIhSDmSSe1BOiYMsWlzW97CHHtMYa5ZAkEuScFJgUNPNYz3Fnp/CuEdSTNOPyTMUK6rTHM6N0IFYNN0LoXQoKLXIY6GcjzAGLFrk90JwT9sO3E2dMxJtDx55Ku2zFylbJk7buj+0bApWIQBbeoqEFdVlWNCmZJhhrd2fGw6S5rxLfRbk8cxaa4NkJKUXYRAmrlb+c056a/MYjUFYlEn5Z3N61oEebwjfrFYloQwo5p+BbNJ8j5+qzzYLDqFGjfBZ24e2Kub946kpd5m+MOHgkYECiaEFdeBPhY+W6fFvNl2i+TuonCeoiVFMHHiReUCZghCMnAV4y8LTyXPl4JrLHjx07Nmc4i7dZCb+z0ITuF3SPgQbDnA6F1XWS9stNUOcZSyqoa+Ebd2cyKOYrOgEbdbTQym8hRLIpMphQtLsOv+NaYLHaac0h9dKULMShP86krJH6/jquRn/IlS6o00eJpeFdsvwFsU4yuYv7MoMgAxOJrpLcq0rRhgx80AcfK27ExwbLUpFoS6z5aMvQQiPE5HMZlveGQCWaaMI0WAInXiSJnZ7ARJijLjSMJWO55Zbzl+rkeZoOlkZBXeLHWa6O//GC0of/JDnCdZ2ivzkEXLxZWI6EomPZ+Q0N4IFA0dlt/YECf0otqOtbQVN4nRA2w5ZCchssYVJYCgZmRNzfhWmPfzfE5pNsMClcgrYEXxPUBVnbGgKLH4EsvIV885o/krCYJAVzWou6TjaGkSHudithfaAlc/niR65675iFJuJooEzGcgj/Ld6acYs4/NisWbM8T44bN27evOt4HgPxbksK9+I+WHSZezWfE3+eLL+z4CC0nvQdJD0D7u3ktqLAp2MMkaViOSYKby2oS9gkdeMejlwTL5ov0Xyd1EsS1CUZbJJHrlwX3+KdhxItH89EsmIs9ZU852ehCY1TUvJxfT7ffrkJ6vCvJRPURTii83FGOQkQ/dFoqzl1tRZXT1Cck2QXEDUTB1Y2Ch8IgwzbuADvKxT5JytxiLWL54onZZFb45ZNPC6MuXbj5Xw1COqixUO5guVQJ9qSd4qwSgZa3huThLYggkMp2hBXdQQcJhfiwWVCEqs9lgFc8XmuYoQ38QhI0sxpjwo9gYn1XgQt+idFlqPjtx7Qq11Q14o3wUJiJ8GJJJB8Q1J4X2j/yWKq48SSJkRhRrg27gYuFoSksUmEZq7DBV1cyhlfSmVRx2LPM5PgUlulGIRhHtCaS64NnoPCObGE0B8s7EkWfGHcuSYeoy/MDOcqedLm+a0YApWMQBbeoqEFde0tFvfUiXswMh7qsauS30W5PHtamoCHJC8TfBMCquafdOhjPHGcvGeMWORIITdBUsI48eJiDtar
rDAX4eUqVmfN55QCy7Q4cE8xuvCshL8yv+mCcQi3f1zW2waCdqF8CzrEQwvqEi6CUQe+XuQN7sO7gF+DvyR+nRw6SXyJfqYkQV1fk5RDB6MOPCbfHrISRXhLeFjek+aZ4FkwHFV6AtksNCFYQ68oo3g3aWm13AR1vvGSCOraOg4jTjbKpIJ7iSYosYBTl+ReuH1AZLgZoxmEMHGj1oMRWkEYcopegglrGhoUCoTL8kRZSlbiIKaUTNx8dPQRlxQyhMuz0x8IAO0ZJe5KUw2CetzjQVx26S8fDu8SfChJ2a85Xoo2aAfNr6xTreOBOCdWe/ahMRgQKQzs99xzj6dFXKiFXrW3CK5lWOQ5x+Q4PHBvx22MogcFHXukaRIaIG+BYFFfQR3MUACBMZ4DEqOG0oPEQkxUBx54oH8+JhcmXJ4bSzT5IdKUrN+HJDdBQcWEh7uZFFHi8BtmkfGD98JYwIQky6ZoS7Ke3AQ/+o/1mCVKeDd68pYJknswEbMsIN8m90D7zNjBJIwnj0zG0AXn07i+56MfUfTgvkY/hK50XGCSt4ZMyIJHfAk5+kO/CeGAsaYQXoGLP7gyUdEf8DJB3cNjfwyBJYJAlrGzoQV1gJDQGPYZn5sHykq84VCOytjBOcZDxiErpUMgLU3A08DbULTimt+isGVu4V3JPMY5ihi5ZC6RxM3h2fAvfAxzlcRuowzA6oxQzBwi9KD5HH191v20OHAf5mYRSBGkEXLJ5YLinaWdZdlV8abUuWp0mBh9xqUfmqdoQV0vh4rHGrwfGdY1bwLeeOwh3yTxJb7RRX+ED4nzpBKuCb68F3GB55nh7Qhv0HknJAcUzWqeCQMocgZZ4imVPOdnoQnf6eCPlknrCkeWa2SbT1DPx2fnO14q/hv+riSCumTOlo7m28ZdO4kdHTZsWE5g4WNDs8WHRolbhzjGB8L9JC4HDSEfB8cpca2wP5jiT32Ig48HV1V5Fj5grIQIRXIs3zOKoF7Xo8atbrq+1qguznXU9TNIsjaOaeGU3+Kiy348jopjUkrRBmtJioAXX8ZE3P24H0IsiUKkiKsTv3ViMT4WhGtxU+bdwtSIMgDaZVDVExgTgGQSpT3qQAdcg6Aqwr0ImtTJYlHXjJaO+9fKClz3+VbEnZp7JS01x/FCJev3oQdO2qf/elkRSXgm98ZyjJApJZ7vIt+EyEQly5ExaSOsC3Mpa6jSJu9vlVVWKTjewOzwvtII6vnoR7thcW8YIPqAoE7B0o43Qbzo+HrOkceD544XnhPFpwjrcp7JHaUSTEslT9rSH9saApWKQJaxc3EI6uCJQhDGUngvjjFfEbKF8pPCeChjqT9gf+qNQBaa0LwN74O5BN4D/o+SL/xSe70meWZJZ5hz4GNpUxcs8cyvGCA0n6PrZN3PggP3Qg4gPxRzKYV+YYEWOkbppFczEM866sK/8Z9+Mifzn3a0oE49SWbLPkV4vfCXqxE6mY8vkbr5BHUMfTybYM57RfBHCKQgwGPMRJ6Qovk+jmmeibkexUMlz/lZaYJ3z7gJn53FuzqfoK7x1nx2vuOl5L9LIqgjbBNzXFdJyp7MoIDGSDPlECWMOdb5pAJRw5RKMi7q8JExeMjyE0nXFXMsK3FI2wwSWGRFoyXH2SKcIKAiIMWLaEPjx+O/C8VT85GjYaRgGWQQqk/JgoUk60jKE4CWE2wohbRcpWhDx4ezagCDqxRojXgkSlwZRIIWViRg0COhCrQoBbqDoaFt0TjTT2iVGGGE0fjAgFUCdzLR1tIWkwd0gEBJ0ViIMkMPBL5S8IfkdHglUHTWd9yiiFligEIby8RNkXAMHdtNH1iugkEs3nd/UR1/stAETaLowI0MhZQoreLKJPCDWcQSLIXwCcINUMDpgmeALCWj8aOOtlK3bt3ae7dwHHxw+SP0Qr8P3MdQ5sjaqdSlSLyeVtiEZ1zOjS6+RFIh+mHpQJgsmXxpiwmZvrEygrjcyz1kK5aQeIiQnNdb
sMX6wXvGqo4CQJQElTxp6z7aviFQiQhkGTslblasgvRbFM06tEzwEEEgKakm3lUwlfkSxzE+4o3E2IhXFvMG/AxML+VcE9QF5pJts9AEcylCN15+MpfyQPCXGHJYNzypMOcjPFCSQsDi12BIYC5hXmLuQUEsnnHlIqjzzPAL5LBhfXIR2JlXWVKVeVW8WqkLXvBYmj9nDkc4HzNmjPciSeKL4F3hXfg+pMAj4vJOIlgphfgS6siScto6LtdipIRng0/gXUkhtxJyDXxKvJBDAr5JlDTIQcStM9cjcFbynJ/l2wAfCSsFC4xgOg9BHL+k3+KdHTf65uOz8x0vFf9NP0oiqCd1Nu0xBgUYbAhLXHfraoOPDgUBHyWuInSoviUrccTvy2BKXBD/IRQY5rQEE29zcf8uFRaL+7nrez8GeyYnPcDH22RgpE6SdTNel9/QKtcwKdRXgZLUPhMEJS7scV++D12gTeprJYQ+X2i/vjTBvXkmsI0/l9yXZxNtcLw/Uqe+WywDKGxQ4OR7jqz3qIt+OE8eBZQ8MFf1LSg7GTvpB4qJeBHFV1J+gHhd+20IGAINg0B9x86GeCoEcRFaGDviY6EovJMU7w3xPEtbm/WlCQwGKH4Jd4q/uyxYIuwyL8KDJyXNFaVxIY/ILPetLw7cE96CeRX+qi5eG6UU1nis1gjcxRbhTRqCb9DPgNAOn8R7LYYHQn6iPs9VCjlIP8uS2i8FTZT62fPx2fmOl4r/LhtBvdSAZm2vHIkja1/qe51hUV8Eq+96o4nye6eSNZYnwxMD10cYa7w55syZ4y0vnEuyFHDciiFgCDQ8AuU4dsJI4jmE4IKV7+CDD/bCAYw/nkcTJkzwAkBS4rGGR6z671BuNCErrIA8eZSw5qLQx3sVr0/cwClxLzZ/sB5/yg2HenTFLi0RAkYTIZAYtUxQjxGVEUcEiGERYWF7IQJGE+VJCTpBIk+IVh3mWwrursSbFqOdl2tsawgYAqVDoFzHzhkzZniXW+lpfOzASkf4D1srpUWg3GgCC/3QoUNruF7H6QF38l133bWkQJQbDiXtnDWWCQGjiQg2E9QjLPyeEUcEiGERYWF7IQJGE+VLCVg8ZgWrYuD+h5COJYQ4dTLcY2W3YggYAksOgXIeO0mSyjKZxFtKnC+Jq7Cyd+rUqSRu1UsO+fK9cznSBLlwyOVC2IPkU8GdnNw3JJSL53IpBbrliEMp+mVtZEfAaCLEjpAOE9RjdGTEEQFiWERY2F6IgNFEZVACyeRYRsaKIWAIlAcClTJ24g6/3HLLFczRUh6IVv5TlDtNICQguJci/r3Q2yp3HAo9u51rGASMJkJczfU9gb6MOCJQDIsIC9sLETCaMEowBAwBQyA9AjZ2pses2q8wmgjfsOFQ7ZSevn9GEyFmJqgn0I4RRwSKYRFhYXshAkYTRgmGgCFgCKRHwMbO9JhV+xVGE+EbNhyqndLT989oIsSMHBHm+h6jHyOOCBDDIsLC9kIEjCaMEgwBQ8AQSI+AjZ3pMav2K4wmwjdsOFQ7pafvn9FEiJnFqCfQjhFHBIphEWFheyECRhNGCYaAIWAIpEfAxs70mFX7FUYT4Rs2HKqd0tP3z2gixMxc3xNox4gjAsWwiLCwvRABowmjBEPAEDAE0iNgY2d6zKr9CqOJ8A0bDtVO6en7ZzQRYmaCegLtGHFEoBgWERa2FyJgNGGUYAgYAoZAegRs7EyPWbVfYTQRvmHDodopPX3/jCZCzExQT6AdI44IFMMiwsL2QgSMJowSDAFDwBBIj4CNnekxq/YrjCbCN2w4VDulp++f0USImcWoJ9COEUcEimERYWF7IQJGE0YJhoAhYAikR8DGzvSYVfsVRhPhGzYcqp3S0/fPaCLEzCzqCbRjxBGBYlhEWNheiIDRhFGCIWAIGALpEbCxMz1m1X6F0UT4hg2Haqf09P0zmggxM0E9gXaMOCJQDIsIC9sLETCaMEowBAwBQyA9AjZ2pses
2q8wmgjfsOFQ7ZSevn9GEyFmNVzfN9xw/fRI2hWGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFQUgSWeeed936hRRPUS4qrNWYIGAKGgCFgCBgChoAhYAgYAoaAIWAIpEbALOqpIbMLDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDIGGQ8AE9YbD1lo2BAwBQ8AQMAQMAUPAEDAEDAFDwBAwBFIj8PPPPztzfU8Nm11gCBgChoAhYAgYAoaAIWAIGAKGgCFgCDQcAiaoNxy21rIhYAgYAoaAIWAIGAKGgCFgCBgChoAhkBoBE9RTQ2YXGAKGgCFgCBgChoAhYAgYAoaAIWAIGAINh4AJ6g2HrbVsCBgChoAhYAgYAoaAIWAIGAKGgCFgCKRGwAT11JDZBYaAIWAIGAKGgCFgCBgChoAhYAgYAoZAwyBgyeQaBldr1RAwBAwBQ8AQMAQMAUPAEDAEDAFDwBDIjIBZ1DNDZxcaAoaAIWAIGAKGgCFgCBgChoAhYAgYAqVHwAT10mNqLRoChoAhYAgYAoaAIWAIGAKGgCFgCBgCmRD45ZdfbB31TMjZRYaAIWAIGAKGgCFgCBgChoAhYAgYAoZAAyBgMeoNAKo1aQgYAoaAIWAIGAKGgCFgCBgChoAhYAjUBwFzfa8PenatIWAIGAKGgCFgCBgChoAhYAgYAoaAIVBCBHB9/3+99AWhH05avQAAAABJRU5ErkJggg=="}}, {"type": "text", "text": "Do you think we could set up rewrites for the JS and CSS? This basePath method does the job, but we end up with two failed requests for so it impacts page load times"}]}, "uuid": "924fbd38-7ef9-4907-91fd-ade65d44ff0b", "timestamp": "2025-10-04T12:32:34.402Z"} diff --git a/dev-docs/messages/user/user.json b/dev-docs/messages/user/user.json new file mode 100644 index 00000000..d29789c9 --- /dev/null +++ b/dev-docs/messages/user/user.json @@ -0,0 +1,12 @@ +{ + "type": "user", + "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", + "timestamp": "2025-09-29T17:07:46.135Z", + "uuid": "39ea49bc-8cc9-4ec3-b598-4d75428d7c5e", + "parentUuid": null, + "isSidechain": false, + "message": { + "role": "user", + "content": "Oh, I just found out that this is not supported by Chrome :(\\\n\\\nThis is the relevant CSS:\\\n... 
[truncated]" + } +} \ No newline at end of file diff --git a/dev-docs/messages/user/user.jsonl b/dev-docs/messages/user/user.jsonl new file mode 100644 index 00000000..a23ffbe2 --- /dev/null +++ b/dev-docs/messages/user/user.jsonl @@ -0,0 +1 @@ +{"parentUuid": null, "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "b25638d7-b104-4f06-a797-70ac33d069ed", "version": "1.0.128", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": "Oh, I just found out that this is not supported by Chrome :(\\\n\\\nThis is the relevant CSS:\\\n\\\nul#models li span {\n display: ruby-base;\n font-size: 0.7em;\n margin: 0 0.15em;\n}\nul#models li code {\n display: ruby-text;\n font-size: 2em;\n letter-spacing: 0.05em;\n}\n\\\nCan you please help rewriting this to use proper HTML ruby elements?"}, "uuid": "39ea49bc-8cc9-4ec3-b598-4d75428d7c5e", "timestamp": "2025-09-29T17:07:46.135Z", "thinkingMetadata": {"level": "none", "disabled": false, "triggers": []}} diff --git a/dev-docs/messages/user/user_command.json b/dev-docs/messages/user/user_command.json new file mode 100644 index 00000000..10367080 --- /dev/null +++ b/dev-docs/messages/user/user_command.json @@ -0,0 +1,15 @@ +{ + "type": "user", + "message": { + "role": "user", + "content": "/model\n model\n " + }, + "parentUuid": "92757a7c-5fef-4e3f-8f26-4cd4c3069187", + "isSidechain": false, + "userType": "external", + "cwd": "/src/deep-manifest", + "sessionId": "a7da6a22-facc-4fcd-8bab-f83c87862004", + "version": "2.0.55", + "uuid": "200652a8-ed8f-40ca-9239-5a661fa2c9be", + "timestamp": "2025-11-29T15:17:28.972Z" +} \ No newline at end of file diff --git a/dev-docs/messages/user/user_command.jsonl b/dev-docs/messages/user/user_command.jsonl new file mode 100644 index 00000000..5a1fee1c --- /dev/null +++ b/dev-docs/messages/user/user_command.jsonl @@ -0,0 +1 @@ +{"type": "user", "message": {"role": "user", "content": "/model\n model\n "}, "parentUuid": 
"92757a7c-5fef-4e3f-8f26-4cd4c3069187", "isSidechain": false, "userType": "external", "cwd": "/src/deep-manifest", "sessionId": "a7da6a22-facc-4fcd-8bab-f83c87862004", "version": "2.0.55", "uuid": "200652a8-ed8f-40ca-9239-5a661fa2c9be", "timestamp": "2025-11-29T15:17:28.972Z"} diff --git a/dev-docs/messages/user/user_sidechain.json b/dev-docs/messages/user/user_sidechain.json new file mode 100644 index 00000000..e4d68708 --- /dev/null +++ b/dev-docs/messages/user/user_sidechain.json @@ -0,0 +1,12 @@ +{ + "type": "user", + "sessionId": "7864f562-717b-4d70-a1cb-b588f7826a1a", + "timestamp": "2025-10-29T16:03:05.129Z", + "uuid": "86a390e3-356f-4e9b-9584-cd5d5b9af948", + "parentUuid": null, + "isSidechain": true, + "message": { + "role": "user", + "content": "Warmup" + } +} \ No newline at end of file diff --git a/dev-docs/messages/user/user_sidechain.jsonl b/dev-docs/messages/user/user_sidechain.jsonl new file mode 100644 index 00000000..401cdb43 --- /dev/null +++ b/dev-docs/messages/user/user_sidechain.jsonl @@ -0,0 +1 @@ +{"parentUuid": null, "isSidechain": true, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "7864f562-717b-4d70-a1cb-b588f7826a1a", "version": "2.0.28", "gitBranch": "main", "agentId": "b1f5d80e", "type": "user", "message": {"role": "user", "content": "Warmup"}, "uuid": "86a390e3-356f-4e9b-9584-cd5d5b9af948", "timestamp": "2025-10-29T16:03:05.129Z"} diff --git a/dev-docs/messages/user/user_slash_command.json b/dev-docs/messages/user/user_slash_command.json new file mode 100644 index 00000000..a5c8679e --- /dev/null +++ b/dev-docs/messages/user/user_slash_command.json @@ -0,0 +1,13 @@ +{ + "type": "user", + "sessionId": "4379d1bf-ccb1-414e-a856-9791b73f3af2", + "timestamp": "2025-09-29T19:30:58.343Z", + "uuid": "3660ac37-da42-4774-9e02-ba2c931d9a85", + "parentUuid": null, + "isSidechain": false, + "isMeta": true, + "message": { + "role": "user", + "content": "Caveat: The messages below were generated by the 
user while running local commands. DO NOT respond to these messages or otherwise consider them in your response unless the user explicitly asks you to." + } +} \ No newline at end of file diff --git a/dev-docs/messages/user/user_slash_command.jsonl b/dev-docs/messages/user/user_slash_command.jsonl new file mode 100644 index 00000000..b32ff41a --- /dev/null +++ b/dev-docs/messages/user/user_slash_command.jsonl @@ -0,0 +1 @@ +{"parentUuid": null, "isSidechain": false, "userType": "external", "cwd": "/Users/dain/workspace/danieldemmel.me-next", "sessionId": "4379d1bf-ccb1-414e-a856-9791b73f3af2", "version": "1.0.128", "gitBranch": "main", "type": "user", "message": {"role": "user", "content": "Caveat: The messages below were generated by the user while running local commands. DO NOT respond to these messages or otherwise consider them in your response unless the user explicitly asks you to."}, "isMeta": true, "uuid": "3660ac37-da42-4774-9e02-ba2c931d9a85", "timestamp": "2025-09-29T19:30:58.343Z"} diff --git a/scripts/extract_message_samples.py b/scripts/extract_message_samples.py new file mode 100644 index 00000000..9eac005a --- /dev/null +++ b/scripts/extract_message_samples.py @@ -0,0 +1,740 @@ +#!/usr/bin/env python3 +"""Extract sample messages for documentation. + +This script finds examples of each message type and content type from +real session data and creates: +1. Abbreviated JSON files (readable, for documentation) +2. 
Full JSONL lines (complete data, for reference) + +The output message categories map input JSONL types to rendered HTML: + +INPUT (JSONL) -> OUTPUT (HTML css_class) +--------------------------------------------------------- +user + text content -> "user" +user + text (compacted) -> "user compacted" +user + text (slash-command)-> "user slash-command" +user + text (sidechain) -> "user sidechain" (skipped in main) +user + tool_result -> "tool_result" (separate message) +user + tool_result error -> "tool_result error" +user + image -> "image" +assistant + text -> "assistant" +assistant + text (sidechain)-> "assistant sidechain" +assistant + thinking -> "thinking" +assistant + tool_use -> "tool_use" +system (command-name) -> "system" +system (command-output) -> "system command-output" +system (level=info) -> "system system-info" +system (level=warning) -> "system system-warning" +system (level=error) -> "system system-error" +system (hook summary) -> "system system-hook" +session header -> "session-header" +summary -> (not rendered as message) +queue-operation -> (not rendered as message) +file-history-snapshot -> (not rendered as message) +""" + +import json +import sys +from pathlib import Path +from typing import Any, Callable, TypedDict + + +class CategoryDef(TypedDict): + """Type definition for OUTPUT_CATEGORIES entries.""" + + css_class: str | None + description: str + input_type: str + subdir: str + filter: Callable[[dict[str, Any]], bool] + + +# Add project root to path to import from claude_code_log +sys.path.insert(0, str(Path(__file__).parent.parent)) + +# Smallest valid base64 PNG (8x8 transparent) +TINY_BASE64_IMAGE = "iVBORw0KGgoAAAANSUhEUgAAAAgAAAAIAQMAAAD+wSzIAAAABlBMVEX///+/v7+jQ3Y5AAAADklEQVQI12P4AIX8EAgALgAD/aNpbtEAAAAASUVORK5CYII" + + +def truncate_text(text: str, max_lines: int = 3, max_len: int = 200) -> str: + """Truncate text to a few lines.""" + if not text: + return text + lines = text.split("\n") + if len(lines) > max_lines: + text = 
"\n".join(lines[:max_lines]) + "\n... [truncated]" + if len(text) > max_len: + text = text[:max_len] + "... [truncated]" + return text + + +def abbreviate_message(msg: dict[str, Any]) -> dict[str, Any]: + """Abbreviate a message for documentation.""" + result = {} + + # Keep essential fields + for key in [ + "type", + "sessionId", + "timestamp", + "uuid", + "parentUuid", + "isSidechain", + "isMeta", + "level", + "subtype", + "content", + ]: + if key in msg: + if key == "content" and isinstance(msg[key], str): + result[key] = truncate_text(msg[key]) + else: + result[key] = msg[key] + + # Abbreviate message content + if "message" in msg: + message = msg["message"] + result["message"] = {} + + for key in ["role", "type", "model", "id"]: + if key in message: + result["message"][key] = message[key] + + if "content" in message: + content = message["content"] + if isinstance(content, str): + result["message"]["content"] = truncate_text(content) + elif isinstance(content, list): + result["message"]["content"] = [] + for item in content[:3]: # Max 3 content items + abbrev_item = abbreviate_content_item(item) + result["message"]["content"].append(abbrev_item) + if len(content) > 3: + result["message"]["content"].append( + {"_note": f"... 
+{len(content) - 3} more items"} + ) + + # Abbreviate tool use result + if "toolUseResult" in msg: + tur = msg["toolUseResult"] + # toolUseResult can be a string (for errors) or a dict + if isinstance(tur, str): + result["toolUseResult"] = truncate_text(tur) + elif isinstance(tur, dict): + result["toolUseResult"] = {} + if "type" in tur: + result["toolUseResult"]["type"] = tur["type"] + if "stdout" in tur: + result["toolUseResult"]["stdout"] = truncate_text(tur["stdout"]) + if "stderr" in tur: + result["toolUseResult"]["stderr"] = truncate_text(tur["stderr"]) + if "file" in tur: + result["toolUseResult"]["file"] = { + "filePath": tur["file"].get("filePath", ""), + "content": truncate_text(tur["file"].get("content", "")), + } + else: + result["toolUseResult"] = tur + + # Abbreviate summary + if "summary" in msg: + result["summary"] = truncate_text(msg["summary"]) + if "leafUuid" in msg: + result["leafUuid"] = msg["leafUuid"] + + return result + + +def abbreviate_content_item(item: dict[str, Any]) -> dict[str, Any]: + """Abbreviate a content item.""" + result = {"type": item.get("type", "unknown")} + + if item.get("type") == "text": + result["text"] = truncate_text(item.get("text", "")) + + elif item.get("type") == "thinking": + result["thinking"] = truncate_text(item.get("thinking", "")) + + elif item.get("type") == "tool_use": + result["id"] = item.get("id", "") + result["name"] = item.get("name", "") + inp = item.get("input", {}) + # Abbreviate input + if isinstance(inp, dict): + result["input"] = {} + for k, v in list(inp.items())[:3]: + if isinstance(v, str): + result["input"][k] = truncate_text(v, max_lines=2, max_len=100) + else: + result["input"][k] = v + if len(inp) > 3: + result["input"]["_note"] = f"... 
+{len(inp) - 3} more fields" + + elif item.get("type") == "tool_result": + result["tool_use_id"] = item.get("tool_use_id", "") + result["is_error"] = item.get("is_error", False) + content = item.get("content", "") + if isinstance(content, str): + result["content"] = truncate_text(content) + elif isinstance(content, list): + result["content"] = [{"_note": f"{len(content)} items"}] + + elif item.get("type") == "image": + source = item.get("source", {}) + result["source"] = { + "type": source.get("type", "base64"), + "media_type": source.get("media_type", "image/png"), + "data": TINY_BASE64_IMAGE + " [abbreviated]", + } + + return result + + +# Output message categories - maps to CSS classes +# Each category specifies the subdirectory where samples are written +OUTPUT_CATEGORIES: dict[str, CategoryDef] = { + # User message variants -> user/ + "user": { + "css_class": "user", + "description": "Regular user prompt", + "input_type": "user", + "subdir": "user", + "filter": lambda m: ( + m.get("type") == "user" + and not m.get("isSidechain") + and not m.get("isMeta") + and _has_text_content(m) + and not _is_compacted(m) + ), + }, + "user_compacted": { + "css_class": "user compacted", + "description": "Compacted conversation summary", + "input_type": "user", + "subdir": "user", + "filter": lambda m: ( + m.get("type") == "user" and not m.get("isSidechain") and _is_compacted(m) + ), + }, + "user_slash_command": { + "css_class": "user slash-command", + "description": "Expanded slash command prompt (isMeta=true)", + "input_type": "user", + "subdir": "user", + "filter": lambda m: (m.get("type") == "user" and m.get("isMeta")), + }, + "user_sidechain": { + "css_class": "user sidechain", + "description": "Sub-agent user prompt (usually skipped)", + "input_type": "user", + "subdir": "user", + "filter": lambda m: ( + m.get("type") == "user" + and m.get("isSidechain") + and _has_text_content(m) + and not m.get("isMeta") + ), + }, + "image": { + "css_class": "image", + "description": 
"User-attached image", + "input_type": "user", + "subdir": "user", + "filter": lambda m: (m.get("type") == "user" and _has_image(m)), + }, + "bash_input": { + "css_class": "user", + "description": "Bash command input (from background Bash tool)", + "input_type": "user", + "subdir": "user", + "filter": lambda m: (m.get("type") == "user" and _has_bash_input(m)), + }, + "bash_output": { + "css_class": "user", + "description": "Bash command output (stdout/stderr)", + "input_type": "user", + "subdir": "user", + "filter": lambda m: (m.get("type") == "user" and _has_bash_output(m)), + }, + # Assistant message variants -> assistant/ + "assistant": { + "css_class": "assistant", + "description": "Assistant text response", + "input_type": "assistant", + "subdir": "assistant", + "filter": lambda m: ( + m.get("type") == "assistant" + and not m.get("isSidechain") + and _has_text_content(m) + ), + }, + "assistant_sidechain": { + "css_class": "assistant sidechain", + "description": "Sub-agent assistant response", + "input_type": "assistant", + "subdir": "assistant", + "filter": lambda m: ( + m.get("type") == "assistant" + and m.get("isSidechain") + and _has_text_content(m) + ), + }, + "thinking": { + "css_class": "thinking", + "description": "Extended thinking content", + "input_type": "assistant", + "subdir": "assistant", + "filter": lambda m: (m.get("type") == "assistant" and _has_thinking(m)), + }, + # System message variants -> system/ + "system_command": { + "css_class": "system", + "description": "User-initiated command (e.g., /context)", + "input_type": "system", + "subdir": "system", + "filter": lambda m: ( + m.get("type") == "system" and _has_command_name(m.get("content", "")) + ), + }, + "system_command_output": { + "css_class": "system command-output", + "description": "Command output (e.g., from /context)", + "input_type": "system", + "subdir": "system", + "filter": lambda m: ( + m.get("type") == "system" and _has_command_output(m.get("content", "")) + ), + }, + 
"system_info": { + "css_class": "system system-info", + "description": "System info message", + "input_type": "system", + "subdir": "system", + "filter": lambda m: ( + m.get("type") == "system" + and m.get("level") == "info" + and not _has_command_name(m.get("content", "")) + and not _has_command_output(m.get("content", "")) + ), + }, + "system_warning": { + "css_class": "system system-warning", + "description": "System warning message", + "input_type": "system", + "subdir": "system", + "filter": lambda m: (m.get("type") == "system" and m.get("level") == "warning"), + }, + "system_error": { + "css_class": "system system-error", + "description": "System error message", + "input_type": "system", + "subdir": "system", + "filter": lambda m: (m.get("type") == "system" and m.get("level") == "error"), + }, + "system_hook": { + "css_class": "system system-hook", + "description": "Hook execution summary", + "input_type": "system", + "subdir": "system", + "filter": lambda m: ( + m.get("type") == "system" and m.get("subtype") == "stop_hook_summary" + ), + }, + # Non-rendered types -> system/ + "summary": { + "css_class": None, + "description": "Session summary (not rendered as message)", + "input_type": "summary", + "subdir": "system", + "filter": lambda m: m.get("type") == "summary", + }, + "queue_operation": { + "css_class": None, + "description": "Queue operation (not rendered as message)", + "input_type": "queue-operation", + "subdir": "system", + "filter": lambda m: m.get("type") == "queue-operation", + }, + "file_history_snapshot": { + "css_class": None, + "description": "File history snapshot (not rendered as message)", + "input_type": "file-history-snapshot", + "subdir": "system", + "filter": lambda m: m.get("type") == "file-history-snapshot", + }, +} + + +def _has_text_content(msg: dict) -> bool: + """Check if message has text content.""" + content = msg.get("message", {}).get("content", []) + if isinstance(content, str): + return True + if isinstance(content, list): 
+ return any(item.get("type") == "text" for item in content) + return False + + +def _has_tool_result(msg: dict) -> bool: + """Check if message has tool_result content.""" + content = msg.get("message", {}).get("content", []) + if isinstance(content, list): + return any(item.get("type") == "tool_result" for item in content) + return False + + +def _has_tool_result_error(msg: dict) -> bool: + """Check if message has tool_result with is_error=True.""" + content = msg.get("message", {}).get("content", []) + if isinstance(content, list): + return any( + item.get("type") == "tool_result" and item.get("is_error") + for item in content + ) + return False + + +def _has_image(msg: dict) -> bool: + """Check if message has image content.""" + content = msg.get("message", {}).get("content", []) + if isinstance(content, list): + return any(item.get("type") == "image" for item in content) + return False + + +def _has_thinking(msg: dict) -> bool: + """Check if message has thinking content.""" + content = msg.get("message", {}).get("content", []) + if isinstance(content, list): + return any(item.get("type") == "thinking" for item in content) + return False + + +def _has_tool_use(msg: dict) -> bool: + """Check if message has tool_use content.""" + content = msg.get("message", {}).get("content", []) + if isinstance(content, list): + return any(item.get("type") == "tool_use" for item in content) + return False + + +def _is_compacted(msg: dict) -> bool: + """Check if message is a compacted conversation.""" + content = msg.get("message", {}).get("content", []) + if isinstance(content, list): + for item in content: + if item.get("type") == "text": + text = item.get("text", "") + if "(compacted conversation)" in text: + return True + return False + + +def _has_command_name(content: str) -> bool: + """Check if system message has command-name tag.""" + return "" in content + + +def _has_command_output(content: str) -> bool: + """Check if system message has command output tag.""" + return 
"" in content + + +def _has_bash_input(msg: dict[str, Any]) -> bool: + """Check if user message has bash-input tag.""" + content = msg.get("message", {}).get("content", "") + if isinstance(content, str): + return "" in content + return False + + +def _has_bash_output(msg: dict[str, Any]) -> bool: + """Check if user message has bash-stdout tag.""" + content = msg.get("message", {}).get("content", "") + if isinstance(content, str): + return "" in content + return False + + +def find_samples(data_dirs: list[Path]) -> dict[str, list[dict]]: + """Find sample messages for each output category.""" + samples: dict[str, list[dict]] = {cat: [] for cat in OUTPUT_CATEGORIES} + + for data_dir in data_dirs: + for jsonl_file in data_dir.rglob("*.jsonl"): + try: + with open(jsonl_file) as f: + for line in f: + line = line.strip() + if not line: + continue + try: + msg = json.loads(line) + except json.JSONDecodeError: + continue + + # Check each category + for cat_name, cat_def in OUTPUT_CATEGORIES.items(): + if len(samples[cat_name]) < 2: + try: + if cat_def["filter"](msg): + samples[cat_name].append(msg) + except Exception: + pass + + except Exception as e: + print(f"Error processing {jsonl_file}: {e}") + + return samples + + +def find_tool_result_by_id( + data_dirs: list[Path], tool_use_id: str, session_id: str | None = None +) -> dict | None: + """Find a tool_result message by its tool_use_id. 
+ + Args: + data_dirs: List of directories to search + tool_use_id: The tool_use_id to search for + session_id: If provided, only search in files matching this session + + Returns: + The tool_result message if found, None otherwise + """ + for data_dir in data_dirs: + for jsonl_file in data_dir.rglob("*.jsonl"): + # Skip agent files if we have a session_id (they're sub-agents) + if session_id and jsonl_file.stem.startswith("agent-"): + continue + try: + with open(jsonl_file) as f: + for line in f: + line = line.strip() + if not line: + continue + try: + msg = json.loads(line) + except json.JSONDecodeError: + continue + + # Filter by session if provided + if session_id and msg.get("sessionId") != session_id: + continue + + # Check for tool_result with matching tool_use_id + if msg.get("type") == "user": + content = msg.get("message", {}).get("content", []) + if isinstance(content, list): + for item in content: + if ( + item.get("type") == "tool_result" + and item.get("tool_use_id") == tool_use_id + ): + return msg + except Exception: + pass + return None + + +def find_tool_samples( + data_dirs: list[Path], +) -> tuple[dict[str, list[dict]], dict[str, list[dict]], dict[str, list[dict]]]: + """Find sample messages for each tool type, including paired results. 
+ + Returns: + Tuple of (tool_use_samples, tool_result_samples, tool_result_error_samples) + - tool_use_samples: assistant messages with tool_use content + - tool_result_samples: user messages with tool_result content (paired with tool_use) + - tool_result_error_samples: user messages with tool_result is_error=True + """ + tool_use_samples: dict[str, list[dict]] = {} + tool_result_error_samples: dict[str, list[dict]] = {} + + # Track tool_use info for later pairing: tool_use_id -> (tool_name, session_id) + tool_use_info: dict[str, tuple[str, str]] = {} + + # First pass: collect all tool_use samples and error results + for data_dir in data_dirs: + for jsonl_file in data_dir.rglob("*.jsonl"): + try: + with open(jsonl_file) as f: + for line in f: + line = line.strip() + if not line: + continue + try: + msg = json.loads(line) + except json.JSONDecodeError: + continue + + # Collect tool_use samples from assistant messages + if msg.get("type") == "assistant": + content = msg.get("message", {}).get("content", []) + if isinstance(content, list): + for item in content: + if item.get("type") == "tool_use": + tool_name = item.get("name", "") + tool_id = item.get("id", "") + session_id = msg.get("sessionId", "") + if tool_name and tool_id: + tool_use_info[tool_id] = ( + tool_name, + session_id, + ) + if tool_name not in tool_use_samples: + tool_use_samples[tool_name] = [] + if len(tool_use_samples[tool_name]) < 1: + tool_use_samples[tool_name].append(msg) + + # Collect error tool_results separately + if msg.get("type") == "user": + content = msg.get("message", {}).get("content", []) + if isinstance(content, list): + for item in content: + if item.get("type") == "tool_result": + tool_id = item.get("tool_use_id", "") + is_error = item.get("is_error", False) + + # Get tool_name from our mapping + info = tool_use_info.get(tool_id) + if info: + tool_name = info[0] + else: + # Try to infer from existing samples + tool_name = "" + + if tool_name and is_error: + if ( + tool_name + not 
in tool_result_error_samples + ): + tool_result_error_samples[ + tool_name + ] = [] + if ( + len( + tool_result_error_samples[tool_name] + ) + < 1 + ): + tool_result_error_samples[ + tool_name + ].append(msg) + + except Exception as e: + print(f"Error processing {jsonl_file}: {e}") + + # Second pass: find paired tool_results for each tool_use sample + tool_result_samples: dict[str, list[dict]] = {} + for tool_name, samples in tool_use_samples.items(): + for sample in samples: + content = sample.get("message", {}).get("content", []) + if isinstance(content, list): + for item in content: + if item.get("type") == "tool_use" and item.get("name") == tool_name: + tool_use_id = item.get("id", "") + session_id = sample.get("sessionId", "") + + # Search for the paired tool_result + paired_result = find_tool_result_by_id( + data_dirs, tool_use_id, session_id + ) + if paired_result: + if tool_name not in tool_result_samples: + tool_result_samples[tool_name] = [] + if len(tool_result_samples[tool_name]) < 1: + tool_result_samples[tool_name].append(paired_result) + break + if tool_name in tool_result_samples: + break + + return tool_use_samples, tool_result_samples, tool_result_error_samples + + +def write_sample(output_dir: Path, name: str, msg: dict) -> None: + """Write a sample message as both .json and .jsonl files.""" + out_json = output_dir / f"{name}.json" + abbreviated = abbreviate_message(msg) + with open(out_json, "w") as f: + json.dump(abbreviated, f, indent=2) + + out_jsonl = output_dir / f"{name}.jsonl" + with open(out_jsonl, "w") as f: + f.write(json.dumps(msg) + "\n") + + +def main(): + test_data = Path(__file__).parent.parent / "test" / "test_data" + data_dirs = [ + test_data / "real_projects", # Only use real_projects as instructed + ] + + output_dir = Path(__file__).parent.parent / "dev-docs" / "messages" + + # Create subdirectories + for subdir in ["user", "assistant", "system", "tools"]: + (output_dir / subdir).mkdir(parents=True, exist_ok=True) + + 
print("Finding samples from real_projects...") + samples = find_samples(data_dirs) + tool_use_samples, tool_result_samples, tool_result_error_samples = ( + find_tool_samples(data_dirs) + ) + + # Write samples for each category to appropriate subdirectory + for cat_name, messages in samples.items(): + if not messages: + print(f" {cat_name}: NO SAMPLES FOUND") + continue + + cat_def = OUTPUT_CATEGORIES[cat_name] + subdir = cat_def["subdir"] + target_dir = output_dir / subdir + + write_sample(target_dir, cat_name, messages[0]) + print(f" {subdir}/{cat_name}: wrote .json, .jsonl") + + # Write tool_use samples (assistant messages) -> tools/ToolName-tool_use + tools_dir = output_dir / "tools" + for tool_name, messages in sorted(tool_use_samples.items()): + if not messages: + continue + write_sample(tools_dir, f"{tool_name}-tool_use", messages[0]) + print(f" tools/{tool_name}-tool_use: wrote .json, .jsonl") + + # Write tool_result samples (user messages) -> tools/ToolName-tool_result + # These are now properly paired with the tool_use samples + for tool_name, messages in sorted(tool_result_samples.items()): + if not messages: + continue + write_sample(tools_dir, f"{tool_name}-tool_result", messages[0]) + # Verify pairing by checking tool_use_id + tool_use_id = None + for item in ( + tool_use_samples.get(tool_name, [{}])[0] + .get("message", {}) + .get("content", []) + ): + if item.get("type") == "tool_use" and item.get("name") == tool_name: + tool_use_id = item.get("id") + break + result_tool_use_id = None + for item in messages[0].get("message", {}).get("content", []): + if item.get("type") == "tool_result": + result_tool_use_id = item.get("tool_use_id") + break + paired = "✓ paired" if tool_use_id == result_tool_use_id else "✗ not paired" + print(f" tools/{tool_name}-tool_result: wrote .json, .jsonl ({paired})") + + # Write tool_result_error samples -> tools/ToolName-tool_result_error + for tool_name, messages in sorted(tool_result_error_samples.items()): + if not 
messages: + continue + write_sample(tools_dir, f"{tool_name}-tool_result_error", messages[0]) + print(f" tools/{tool_name}-tool_result_error: wrote .json, .jsonl") + + print(f"\nWrote samples to {output_dir}") + + +if __name__ == "__main__": + main() diff --git a/scripts/generate_style_guide.py b/scripts/generate_style_guide.py index 86203771..7ce4af5e 100755 --- a/scripts/generate_style_guide.py +++ b/scripts/generate_style_guide.py @@ -5,17 +5,21 @@ This script creates a comprehensive HTML file that demonstrates how different types of Claude transcript messages are rendered, serving both as a test and as documentation for the visual design. + +Session 1: Hand-crafted examples showing various formatting scenarios +Session 2: Auto-generated from dev-docs/messages samples showing all message types """ +import copy import json import sys import tempfile +import uuid as uuid_module +from datetime import datetime, timedelta, timezone from pathlib import Path -from claude_code_log.converter import ( - convert_jsonl_to_html, - generate_projects_index_html, -) +from claude_code_log.converter import convert_jsonl_to_html +from claude_code_log.html.renderer import generate_projects_index_html def create_style_guide_data(): @@ -355,6 +359,290 @@ def create_index_style_guide_data(): ] +def load_jsonl_sample(file_path: Path) -> dict | None: + """Load a single message from a JSONL file.""" + try: + with open(file_path, encoding="utf-8") as f: + line = f.readline().strip() + if line: + return json.loads(line) + except (OSError, json.JSONDecodeError) as e: + print(f" ⚠️ Warning: Could not load {file_path}: {e}") + return None + + +def create_sample_messages_session() -> list[dict]: + """Create a session from all message samples in dev-docs/messages. + + This function: + 1. Loads all .jsonl samples from dev-docs/messages/ + 2. Transforms them in-memory with consistent sessionId, uuid, parentUuid + 3. Orders them logically (user→assistant→tools, sidechain under Task) + 4. 
Duplicates tools with isSidechain=True (except Task itself) + + Returns: + List of transformed message dictionaries for a single session + """ + script_dir = Path(__file__).parent + samples_dir = script_dir.parent / "dev-docs" / "messages" + + session_id = "sample_messages_session" + base_time = datetime(2025, 7, 1, 12, 0, 0, tzinfo=timezone.utc) + time_counter = [0] # Use list for mutable closure + + def new_uuid() -> str: + return str(uuid_module.uuid4()) + + def new_timestamp() -> str: + time_counter[0] += 1 + dt = base_time + timedelta(seconds=time_counter[0] * 10) + return dt.strftime("%Y-%m-%dT%H:%M:%S.000Z") + + def transform_message( + msg: dict, + *, + uuid: str, + parent_uuid: str | None, + is_sidechain: bool = False, + tool_id_map: dict[str, str] | None = None, + ) -> dict: + """Transform a message with new identifiers.""" + result = copy.deepcopy(msg) + result["sessionId"] = session_id + result["uuid"] = uuid + result["parentUuid"] = parent_uuid + result["timestamp"] = new_timestamp() + result["isSidechain"] = is_sidechain + + # Update tool_use ids + if tool_id_map is not None: + content = result.get("message", {}).get("content", []) + if isinstance(content, list): + for item in content: + if item.get("type") == "tool_use" and "id" in item: + old_id = item["id"] + new_id = f"toolu_{new_uuid()[:24]}" + item["id"] = new_id + tool_id_map[old_id] = new_id + elif item.get("type") == "tool_result" and "tool_use_id" in item: + old_id = item["tool_use_id"] + if old_id in tool_id_map: + item["tool_use_id"] = tool_id_map[old_id] + + return result + + # Collect samples by category + user_samples: dict[str, dict] = {} + assistant_samples: dict[str, dict] = {} + system_samples: dict[str, dict] = {} + tool_samples: dict[str, dict] = {} # key: "ToolName-tool_use" etc. 
+ + # Expected sample names for validation + expected_user = { + "user", + "user_slash_command", + "user_command", + "command_output", + "image", + } + expected_assistant = {"assistant", "thinking"} + expected_system = {"system_info", "summary"} + # Tools are discovered dynamically + + # Load user samples + user_dir = samples_dir / "user" + if user_dir.exists(): + for jsonl_file in user_dir.glob("*.jsonl"): + name = jsonl_file.stem + msg = load_jsonl_sample(jsonl_file) + if msg: + user_samples[name] = msg + if name not in expected_user and name not in {"user_sidechain"}: + print(f" ⚠️ Warning: Unknown user sample type: {name}") + + # Load assistant samples + assistant_dir = samples_dir / "assistant" + if assistant_dir.exists(): + for jsonl_file in assistant_dir.glob("*.jsonl"): + name = jsonl_file.stem + msg = load_jsonl_sample(jsonl_file) + if msg: + assistant_samples[name] = msg + if name not in expected_assistant and name not in {"assistant_sidechain"}: + print(f" ⚠️ Warning: Unknown assistant sample type: {name}") + + # Load system samples + system_dir = samples_dir / "system" + if system_dir.exists(): + for jsonl_file in system_dir.glob("*.jsonl"): + name = jsonl_file.stem + msg = load_jsonl_sample(jsonl_file) + if msg: + system_samples[name] = msg + if name not in expected_system and name not in { + "queue_operation", + "file_history_snapshot", + }: + print(f" ⚠️ Warning: Unknown system sample type: {name}") + + # Load tool samples + tools_dir = samples_dir / "tools" + if tools_dir.exists(): + for jsonl_file in tools_dir.glob("*.jsonl"): + name = jsonl_file.stem + msg = load_jsonl_sample(jsonl_file) + if msg: + tool_samples[name] = msg + + # Build the message list + messages: list[dict] = [] + last_uuid: str | None = None + tool_id_map: dict[str, str] = {} + + # Helper to add a message + def add_message( + msg: dict, *, is_sidechain: bool = False, update_last: bool = True + ) -> str: + nonlocal last_uuid + uuid = new_uuid() + transformed = transform_message( 
+ msg, + uuid=uuid, + parent_uuid=last_uuid, + is_sidechain=is_sidechain, + tool_id_map=tool_id_map, + ) + messages.append(transformed) + if update_last: + last_uuid = uuid + return uuid + + # === MAIN CHAIN MESSAGES === + + # 1. User messages + print(" Loading user samples...") + if "user" in user_samples: + add_message(user_samples["user"]) + if "user_slash_command" in user_samples: + add_message(user_samples["user_slash_command"]) + if "user_command" in user_samples: + add_message(user_samples["user_command"]) + if "command_output" in user_samples: + add_message(user_samples["command_output"]) + + # 2. Assistant message + print(" Loading assistant samples...") + if "assistant" in assistant_samples: + add_message(assistant_samples["assistant"]) + + # 3. System info + print(" Loading system samples...") + if "system_info" in system_samples: + add_message(system_samples["system_info"]) + + # 4. Image message (user) + if "image" in user_samples: + add_message(user_samples["image"]) + + # 5. 
Main chain tools (except Task which comes later) + print(" Loading tool samples...") + + # Get unique tool names (excluding Task, exit_plan_mode variants, and error results) + tool_names = set() + for name in tool_samples: + if name.endswith("-tool_use"): + tool_name = name[:-9] # Remove "-tool_use" + # Skip Task (handled separately) and exit_plan_mode (duplicate of ExitPlanMode) + if tool_name not in {"Task", "exit_plan_mode"}: + tool_names.add(tool_name) + + # Sort tools alphabetically + sorted_tools = sorted(tool_names) + + # Add each tool (use + result pair) + for tool_name in sorted_tools: + use_key = f"{tool_name}-tool_use" + result_key = f"{tool_name}-tool_result" + error_key = f"{tool_name}-tool_result_error" + + if use_key in tool_samples: + # Add tool_use + add_message(tool_samples[use_key]) + + # Add tool_result (prefer success, fallback to error) + if result_key in tool_samples: + add_message(tool_samples[result_key]) + elif error_key in tool_samples: + add_message(tool_samples[error_key]) + + # === TASK TOOL WITH SIDECHAIN === + print(" Loading Task tool with sidechain...") + + task_use_key = "Task-tool_use" + task_result_key = "Task-tool_result" + + if task_use_key in tool_samples: + # Add Task tool_use + task_uuid = add_message(tool_samples[task_use_key]) + + # === SIDECHAIN MESSAGES (appear under Task) === + + # Save main chain position + main_chain_last_uuid = last_uuid + + # Start sidechain from the Task message + last_uuid = task_uuid + + # Add sidechain user + if "user_sidechain" in user_samples: + add_message(user_samples["user_sidechain"], is_sidechain=True) + elif "user" in user_samples: + # Duplicate regular user as sidechain + add_message(user_samples["user"], is_sidechain=True) + + # Add sidechain assistant + if "assistant_sidechain" in assistant_samples: + add_message(assistant_samples["assistant_sidechain"], is_sidechain=True) + elif "assistant" in assistant_samples: + add_message(assistant_samples["assistant"], is_sidechain=True) + + # 
Add thinking (always sidechain in practice) + if "thinking" in assistant_samples: + add_message(assistant_samples["thinking"], is_sidechain=True) + + # Add sidechain tools (all except Task - no nested Tasks) + for tool_name in sorted_tools: + use_key = f"{tool_name}-tool_use" + result_key = f"{tool_name}-tool_result" + error_key = f"{tool_name}-tool_result_error" + + if use_key in tool_samples: + # Add tool_use as sidechain + add_message(tool_samples[use_key], is_sidechain=True) + + # Add tool_result as sidechain + if result_key in tool_samples: + add_message(tool_samples[result_key], is_sidechain=True) + elif error_key in tool_samples: + add_message(tool_samples[error_key], is_sidechain=True) + + # Restore main chain position + last_uuid = main_chain_last_uuid + + # Add Task tool_result (back on main chain) + if task_result_key in tool_samples: + add_message(tool_samples[task_result_key]) + + # === SUMMARY (at the end) === + if "summary" in system_samples: + summary = copy.deepcopy(system_samples["summary"]) + summary["leafUuid"] = last_uuid + messages.append(summary) + + print(f" ✓ Loaded {len(messages)} messages from samples") + return messages + + def generate_style_guide(): """Generate the complete style guide HTML files.""" script_dir = Path(__file__).parent @@ -363,17 +651,27 @@ def generate_style_guide(): print("🎨 Generating Claude Code Log Style Guide...") - # Generate transcript style guide + # Generate transcript style guide with two sessions: + # Session 1: Hand-crafted examples + # Session 2: Auto-generated from dev-docs/messages samples print(" 📝 Creating transcript style guide...") + + print(" Session 1: Hand-crafted examples...") style_guide_data = create_style_guide_data() + print(" Session 2: Message samples from dev-docs/messages...") + sample_messages_data = create_sample_messages_session() + + # Combine both sessions + all_data = style_guide_data + sample_messages_data + with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) 
jsonl_file = temp_path / "style_guide.jsonl" - # Write style guide data + # Write combined style guide data with open(jsonl_file, "w", encoding="utf-8") as f: - for entry in style_guide_data: + for entry in all_data: f.write(json.dumps(entry, ensure_ascii=False) + "\n") # Convert to HTML diff --git a/scripts/style_guide_output/index_style_guide.html b/scripts/style_guide_output/index_style_guide.html index 0e85dece..90f647ec 100644 --- a/scripts/style_guide_output/index_style_guide.html +++ b/scripts/style_guide_output/index_style_guide.html @@ -1,5 +1,5 @@ - + @@ -54,10 +54,15 @@ --answer-accent: #4caf50; --answer-bg: #f0fff4; + /* Priority palette (purple intensity - darker = more urgent) */ + --priority-600: #7c3aed; + --priority-400: #a78bfa; + --priority-300: #c4b5fd; + /* Priority colors for todos */ - --priority-high: #dc3545; - --priority-medium: #ffc107; - --priority-low: #28a745; + --priority-high: var(--priority-600); + --priority-medium: var(--priority-400); + --priority-low: var(--priority-300); /* Status colors */ --status-in-progress: #fff3cd; diff --git a/scripts/style_guide_output/transcript_style_guide.html b/scripts/style_guide_output/transcript_style_guide.html index 9472d732..0d29cb9f 100644 --- a/scripts/style_guide_output/transcript_style_guide.html +++ b/scripts/style_guide_output/transcript_style_guide.html @@ -1,5 +1,5 @@ - + @@ -39,23 +39,69 @@ /* Slightly transparent variants (55 = ~33% opacity) */ --highlight-light: #e3f2fd55; + /* Solid colors for message types */ + --user-color: #ff9800; + --user-dimmed: #ff980066; + --assistant-color: #9c27b0; + --system-color: #d98100; + --system-warning-color: #2196f3; + --system-error-color: #f44336; + --tool-use-color: #4caf50; + + /* Question/answer tool colors */ + --question-accent: #f5a623; + --question-bg: #fffbf0; + --answer-accent: #4caf50; + --answer-bg: #f0fff4; + + /* Priority palette (purple intensity - darker = more urgent) */ + --priority-600: #7c3aed; + --priority-400: #a78bfa; + 
--priority-300: #c4b5fd; + + /* Priority colors for todos */ + --priority-high: var(--priority-600); + --priority-medium: var(--priority-400); + --priority-low: var(--priority-300); + + /* Status colors */ + --status-in-progress: #fff3cd; + --status-completed: #d4edda; + + /* Plan/todo accent color */ + --plan-accent: #6c63ff; + --todo-accent: #4169e1; + /* Solid colors for text and accents */ + --text-primary: #333; --text-muted: #666; --text-secondary: #495057; + /* Border colors */ + --border-light: #e0e0e0; + --border-separator: #f0f3f6; + + /* Background colors */ + --bg-card: #ffffff66; + --bg-hover: #f8f9fa; + --bg-neutral: #f8f9fa; + + /* Layout spacing */ + --message-padding: 1em; + /* Font families */ --font-monospace: 'Fira Code', 'Monaco', 'Consolas', 'SF Mono', 'Inconsolata', 'Droid Sans Mono', 'Source Code Pro', 'Ubuntu Mono', 'Cascadia Code', 'Menlo', monospace; --font-ui: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif; } body { - font-family: var(--font-monospace); + font-family: var(--font-ui); line-height: 1.5; max-width: 1200px; margin: 0 auto; padding: 10px; background: linear-gradient(90deg, #f3d6d2, #f1dcce, #f0e4ca, #eeecc7, #e3ecc3, #d5eac0, #c6e8bd, #b9e6bc, #b6e3c5, #b3e1cf); - color: #333; + color: var(--text-primary); } h1 { @@ -87,6 +133,12 @@ line-height: 1.5; } +/* Summary stats layout (used in index.html) */ +.summary-stats-flex { + display: flex; + gap: 3em; +} + /* Common card styling */ .card-base { background-color: #ffffff66; @@ -135,7 +187,7 @@ /* Timestamps */ .timestamp { font-size: 0.85em; - color: #666; + color: var(--text-muted); font-weight: normal; } @@ -143,8 +195,8 @@ .floating-btn { position: fixed; right: 20px; - background-color: #e8f4fd66; - color: #666; + background-color: var(--session-bg-dimmed); + color: var(--text-muted); border: none; border-radius: 50%; width: 50px; @@ -167,7 +219,7 @@ } .floating-btn:visited { - color: #666; + color: var(--text-muted); } /* Floating buttons positioning 
*/ @@ -189,8 +241,7 @@ /* Message and content styles */ .message { margin-bottom: 1em; - margin-left: 1em; - padding: 1em; + padding: var(--message-padding); border-radius: 8px; border-left: var(--white-dimmed) 2px solid; background-color: var(--highlight-light); @@ -198,8 +249,225 @@ border-top: var(--white-dimmed) 1px solid; border-bottom: #00000017 1px solid; border-right: #00000017 1px solid; + position: relative; +} + +/* Message with fold bar: remove bottom padding */ +.message:has(.fold-bar) { + padding-bottom: 0; +} + +/* Horizontal Fold Bar - integrated into message box */ +.fold-bar { + display: flex; + margin: 1em calc(-1 * var(--message-padding)) 0; + height: 28px; + border-radius: 0 0 8px 8px; + overflow: hidden; + transition: all 0.2s ease; +} + +.fold-bar-section { + flex: 1; + display: flex; + align-items: center; + justify-content: center; + gap: 0.4em; + cursor: pointer; + user-select: none; + font-size: 0.9em; + font-weight: 500; + padding: 0.4em; + transition: all 0.2s ease; + border-bottom: 2px solid transparent; + background: linear-gradient(to bottom, #f8f8f844, #f0f0f0); +} + +/* Show border only when folded (content is hidden) */ +.fold-bar-section.folded { + border-bottom-style: solid; + border-bottom-width: 2px; +} + +.fold-bar-section:hover { + background: linear-gradient(to bottom, #fff, #f5f5f5); + transform: translateY(1px); +} + +.fold-bar-section:active { + transform: translateY(0); +} + +/* Left section: fold one level */ +.fold-one-level { + border-right: 1px solid rgba(0, 0, 0, 0.1); +} + +/* Full-width single button when counts are equal */ +.fold-bar-section.full-width { + border-right: none; +} + +/* Icon styling */ +.fold-icon { + font-size: 1.1em; + line-height: 1; +} + +.fold-count { + font-weight: 600; + min-width: 1.5em; + text-align: center; +} + +.fold-label { + color: var(--text-muted); + font-size: 0.9em; +} + +/* Border colors matching message types - only shown when folded */ +.fold-bar[data-border-color="user"] 
.fold-bar-section.folded, +.fold-bar[data-border-color="user compacted"] .fold-bar-section.folded, +.fold-bar[data-border-color="user sidechain"] .fold-bar-section.folded, +.fold-bar[data-border-color="user compacted sidechain"] .fold-bar-section.folded { + border-bottom-color: var(--user-color); +} + +.fold-bar[data-border-color="user slash-command"] .fold-bar-section.folded, +.fold-bar[data-border-color="user slash-command sidechain"] .fold-bar-section.folded, +.fold-bar[data-border-color="user command-output"] .fold-bar-section.folded { + border-bottom-color: var(--user-dimmed); +} + +.fold-bar[data-border-color="assistant"] .fold-bar-section.folded, +.fold-bar[data-border-color="assistant sidechain"] .fold-bar-section.folded { + border-bottom-color: var(--assistant-color); +} + +.fold-bar[data-border-color="system"] .fold-bar-section.folded { + border-bottom-color: var(--system-color); +} + +.fold-bar[data-border-color="system-warning"] .fold-bar-section.folded { + border-bottom-color: var(--system-warning-color); +} + +.fold-bar[data-border-color="system-error"] .fold-bar-section.folded { + border-bottom-color: var(--system-error-color); +} + +.fold-bar[data-border-color="system-info"] .fold-bar-section.folded { + border-bottom-color: var(--info-dimmed); +} + +.fold-bar[data-border-color="tool_use"] .fold-bar-section.folded, +.fold-bar[data-border-color="tool_use sidechain"] .fold-bar-section.folded { + border-bottom-color: var(--tool-use-color); +} + +.fold-bar[data-border-color="tool_result"] .fold-bar-section.folded, +.fold-bar[data-border-color="tool_result sidechain"] .fold-bar-section.folded { + border-bottom-color: var(--success-dimmed); +} + +.fold-bar[data-border-color="tool_result error"] .fold-bar-section.folded, +.fold-bar[data-border-color="tool_result error sidechain"] .fold-bar-section.folded { + border-bottom-color: var(--error-dimmed); +} + +.fold-bar[data-border-color="thinking"] .fold-bar-section.folded, 
+.fold-bar[data-border-color="thinking sidechain"] .fold-bar-section.folded { + border-bottom-color: var(--assistant-dimmed); +} + +.fold-bar[data-border-color="image"] .fold-bar-section.folded, +.fold-bar[data-border-color="image sidechain"] .fold-bar-section.folded { + border-bottom-color: var(--info-dimmed); +} + +.fold-bar[data-border-color="unknown"] .fold-bar-section.folded, +.fold-bar[data-border-color="unknown sidechain"] .fold-bar-section.folded { + border-bottom-color: var(--neutral-dimmed); +} + +.fold-bar[data-border-color="bash-input"] .fold-bar-section.folded { + border-bottom-color: var(--user-color); +} + +.fold-bar[data-border-color="bash-output"] .fold-bar-section.folded { + border-bottom-color: var(--user-dimmed); +} + +.fold-bar[data-border-color="session-header"] .fold-bar-section.folded { + border-bottom-color: var(--system-warning-color); +} + +/* Sidechain (sub-assistant) fold-bar styling - dashed border when folded */ +.sidechain .fold-bar-section.folded { + border-bottom-style: dashed; +} + +/* ======================================== + CONVERSATION STRUCTURE - Margin Hierarchy + ======================================== */ + +/* Right-aligned messages (user-initiated, right margin 0, left margin 33%) */ +.user:not(.compacted), +.bash-input, +.bash-output, +.system:not(.system-info):not(.system-warning):not(.system-error) { + margin-left: 33%; + margin-right: 0; +} + +/* System error messages (assistant-generated) */ +.system-error { + margin-left: 0; + margin-right: 8em; } +/* Left-aligned messages (assistant-generated) with progressive indentation */ +/* Base assistant messages */ +.assistant, +.thinking { + margin-left: 0; + margin-right: 8em; +} + +/* Tool messages (nested under assistant) */ +.tool_use, +.tool_result { + margin-left: 2em; + margin-right: 6em; +} + +/* System warnings/info at tool level (e.g., hook notifications) */ +.system-warning, +.system-info { + margin-left: 2em; + margin-right: 6em; +} + +/* Sidechain messages 
(sub-assistant hierarchy) */ +/* Note: .sidechain.user (Sub-assistant prompt) is no longer produced + since it duplicates the Task tool input prompt */ + +/* Sub-assistant response and thinking (nested under Task tool result) */ +.sidechain.assistant, +.sidechain.thinking { + margin-left: 4em; + margin-right: 4em; +} + +/* Sub-assistant tools (nested below sub-assistant) */ +.sidechain.tool_use, +.sidechain.tool_result { + margin-left: 6em; + margin-right: 2em; +} + +/* ======================================== */ + /* Message header info styling */ .header-info { display: flex; @@ -217,7 +485,7 @@ .pair-duration { font-size: 80%; - color: #666; + color: var(--text-muted); font-style: italic; } @@ -227,17 +495,14 @@ } /* Paired message styling */ -.message.paired-message { +.message.pair_first { margin-bottom: 0; -} - -.message.paired-message.pair_first { border-bottom-left-radius: 0; border-bottom-right-radius: 0; border-bottom: none; } -.message.paired-message.pair_last { +.message.pair_last { margin-top: 0; margin-bottom: 1em; border-top-left-radius: 0; @@ -245,8 +510,9 @@ border-top: 1px solid #00000011; } -.message.paired-message.pair_middle { +.message.pair_middle { margin-top: 0; + margin-bottom: 0; border-radius: 0; border-top: 1px solid #00000011; border-bottom: none; @@ -259,43 +525,107 @@ /* Message type styling */ .user { - border-left-color: #ff9800; - margin-left: 0; + border-left-color: var(--user-color); +} + +/* Steering user messages (out-of-band input while agent is working) */ +.user.steering { + border-left-color: var(--user-dimmed); + opacity: 0.7; +} + +/* Slash command prompts (isMeta=true, LLM-generated content) */ +.user.slash-command { + border-left-color: var(--user-dimmed); + opacity: 0.85; } .assistant { - border-left-color: #9c27b0; + border-left-color: var(--assistant-color); } /* Dimmed assistant when paired with thinking */ -.assistant.paired-message { +.assistant.pair_last { border-left-color: var(--assistant-dimmed); } .system { - 
border-left-color: #d98100; - margin-left: 0; + border-left-color: var(--system-color); } .system-warning { - border-left-color: #2196f3; + border-left-color: var(--system-warning-color); background-color: var(--highlight-semi); - margin-left: 2em; /* Extra indent - assistant-initiated */ } .system-error { - border-left-color: #f44336; + border-left-color: var(--system-error-color); background-color: var(--error-semi); - margin-left: 0; } .system-info { border-left-color: var(--info-dimmed); background-color: var(--highlight-dimmed); - margin-left: 2em; /* Extra indent - assistant-initiated */ +} + +.system-info .header, +.system-info .content { font-size: 80%; } +/* Hook summary styling */ +.system-hook { + border-left-color: var(--warning-dimmed); + background-color: var(--highlight-dimmed); + font-size: 90%; +} + +.hook-summary { + cursor: pointer; +} + +.hook-summary summary { + display: flex; + align-items: center; + gap: 0.5em; +} + +.hook-details { + margin-top: 0.5em; + padding: 0.5em; + background-color: var(--code-bg); + border-radius: 4px; +} + +.hook-commands { + margin-bottom: 0.5em; +} + +.hook-commands code { + display: block; + padding: 0.25em 0.5em; + font-size: 0.85em; + word-break: break-all; + white-space: pre-wrap; +} + +.hook-errors { + margin-top: 0.5em; +} + +.hook-error { + margin: 0.25em 0; + padding: 0.5em; + background-color: var(--error-semi); + border-left: 3px solid var(--system-error-color); + font-size: 0.85em; + white-space: pre-wrap; + word-wrap: break-word; + overflow-x: auto; + max-height: 300px; + overflow-y: auto; +} + /* Command output styling */ .command-output { background-color: #1e1e1e11; @@ -313,18 +643,27 @@ line-height: 1.4; white-space: pre-wrap; word-wrap: break-word; - color: #333; + color: var(--text-primary); overflow-x: auto; } -/* Bash command styling */ +.command-output-content td { + border-left: 1px dotted #bbb; + padding: 0 1em; +} +.command-output-content th { + text-align: left; + padding: 0 1em; +} + +/* 
Bash command styling (user-initiated, right-aligned) */ .bash-input { - background-color: #1e1e1e08; - border-left-color: #4caf50; + background-color: var(--highlight-light); + border-left-color: var(--user-color); } .bash-prompt { - color: #4caf50; + color: var(--user-color); font-weight: bold; font-size: 1.1em; margin-right: 8px; @@ -334,49 +673,45 @@ font-family: var(--font-monospace); font-size: 0.95em; color: #2c3e50; - background-color: #f8f9fa; + background-color: var(--bg-neutral); padding: 2px 6px; border-radius: 3px; } -/* Bash output styling */ +/* Bash output styling (user-initiated, right-aligned) */ .bash-output { - background-color: var(--neutral-dimmed); - border-left-color: #607d8b; + background-color: var(--highlight-light); + border-left-color: var(--user-dimmed); } -.bash-stdout { - background-color: #1e1e1e05; - padding: 12px; +.bash-output pre.bash-stdout, +.bash-output pre.bash-stderr { + padding: 8px; border-radius: 4px; - border: 1px solid #00000011; - margin: 8px 0; + margin: 4px 0; font-family: var(--font-monospace); - font-size: 0.9em; - line-height: 1.4; - white-space: pre-wrap; - word-wrap: break-word; - color: #333; + font-size: 80%; + line-height: 1.3; + white-space: pre; overflow-x: auto; + overflow-y: auto; + max-height: 300px; +} + +.bash-output pre.bash-stdout { + background-color: #1e1e1e05; + border: 1px solid #00000011; + color: var(--text-primary); } -.bash-stderr { +.bash-output pre.bash-stderr { background-color: #ffebee; - padding: 12px; - border-radius: 4px; border: 1px solid #ffcdd2; - margin: 8px 0; - font-family: var(--font-monospace); - font-size: 0.9em; - line-height: 1.4; - white-space: pre-wrap; - word-wrap: break-word; color: #c62828; - overflow-x: auto; } .bash-empty { - color: #999; + color: var(--text-muted); font-style: italic; } @@ -405,13 +740,11 @@ } .tool_use { - border-left-color: #4caf50; - margin-left: 2em; /* Extra indent - assistant-initiated */ + border-left-color: var(--tool-use-color); } 
.tool_result { border-left-color: var(--success-dimmed); - margin-left: 2em; /* Extra indent - assistant-initiated */ } .tool_result.error { @@ -431,20 +764,6 @@ border-left-style: dashed; } -/* Sidechain indentation hierarchy */ -.sidechain.user { - margin-left: 3em; /* Sub-assistant Prompt - nested below Task tool use (2em) */ -} - -.sidechain.assistant { - margin-left: 4em; /* Sub-assistant - nested below prompt (3em) */ -} - -.sidechain.tool_use, -.sidechain.tool_result { - margin-left: 5em; /* Sub-assistant tools - nested below assistant (4em) */ -} - .sidechain .sidechain-indicator { color: var(--text-muted); font-size: 0.9em; @@ -460,8 +779,8 @@ } /* Full purple when thinking is paired (as pair_first) */ -.thinking.paired-message.pair_first { - border-left-color: #9c27b0; +.thinking.pair_first { + border-left-color: var(--assistant-color); } .image { @@ -487,10 +806,16 @@ font-size: 1.2em; } +.session-subtitle { + font-size: 0.9em; + color: var(--text-muted); + margin-top: 4px; +} + /* IDE notification styling */ .ide-notification { background-color: var(--ide-notification-dimmed); - border-left: #9c27b0 2px solid; + border-left: var(--assistant-color) 2px solid; padding: 8px 12px; margin: 8px 0; border-radius: 4px; @@ -510,13 +835,13 @@ } .ide-selection-collapsible summary:hover { - color: #333; + color: var(--text-primary); } .ide-selection-content { margin-top: 8px; padding: 8px; - background-color: #f8f9fa; + background-color: var(--bg-neutral); border-radius: 3px; border: 1px solid #dee2e6; font-family: var(--font-monospace); @@ -544,10 +869,25 @@ /* Assistant and Thinking content styling */ .assistant .content, .thinking-text, -.user.compacted .content { +.user.compacted .content, +.markdown { font-family: var(--font-ui); } +.markdown table { + border-collapse: collapse; +} + +.markdown td { + padding: .5em; + border-top: 1px solid #bbb; +} + +.markdown th { + padding: .5em; + border-bottom: 1px solid #888; +} + /* Code block styling */ pre > code { 
display: block; @@ -594,7 +934,7 @@ .tool-param-structured { margin: 0; - background-color: #f8f9fa; + background-color: var(--bg-neutral); padding: 4px; border-radius: 3px; } @@ -609,7 +949,11 @@ } .tool-param-collapsible summary:hover { - color: #333; + color: var(--text-primary); +} + +.tool-param-collapsible[open] > summary { + display: none; } .tool-param-full { @@ -618,7 +962,7 @@ } .tool-params-empty { - color: #999; + color: var(--text-muted); font-style: italic; } @@ -707,13 +1051,21 @@ margin-top: 4px; } -/* Tool result preview content with gradient fade */ -.tool_result .preview-content { +/* Tool use/result/bash-output preview content with gradient fade */ +.tool_use .preview-content, +.tool_result .preview-content, +.bash-output .preview-content { opacity: 0.7; mask-image: linear-gradient(to bottom, black 80%, transparent 100%); -webkit-mask-image: linear-gradient(to bottom, black 80%, transparent 100%); } +/* Fix list rendering in preview-content inside summary */ +/* Mistune wraps list content in

tags which causes line breaks */ +.preview-content li > p:first-child { + display: inline; +} + /* Hide preview content when details/collapsible is open */ .collapsible-details[open] .preview-content, .collapsible-code[open] .preview-content { @@ -792,15 +1144,25 @@ .ansi-dim { opacity: 0.7; } .ansi-italic { font-style: italic; } .ansi-underline { text-decoration: underline; } + +/* Image styling */ +.tool-result-image, +.uploaded-image { + max-width: 100%; + height: auto; + border: 1px solid var(--border-light); + border-radius: 4px; + margin: 10px 0; +} /* Session navigation styles */ .navigation { - background-color: #f8f9fa66; + background-color: var(--bg-neutral); border-radius: 8px; padding: 16px; margin-bottom: 24px; box-shadow: -7px -7px 10px #eeeeee44, 7px 7px 10px #00000011; - border-left: #ffffff66 1px solid; - border-top: #ffffff66 1px solid; + border-left: var(--white-dimmed) 1px solid; + border-top: var(--white-dimmed) 1px solid; border-bottom: #00000017 1px solid; border-right: #00000017 1px solid; } @@ -808,7 +1170,7 @@ .navigation h2 { margin: 0 0 12px 0; font-size: 1.2em; - color: #495057; + color: var(--text-secondary); } .session-nav { @@ -819,11 +1181,11 @@ .session-link { padding: 8px 12px; - background-color: #ffffff66; + background-color: var(--white-dimmed); border: 1px solid #dee2e6; border-radius: 4px; text-decoration: none; - color: #495057; + color: var(--text-secondary); transition: background-color 0.2s; } @@ -852,7 +1214,7 @@ .project-sessions h4 { margin: 0 0 10px 0; font-size: 0.9em; - color: #495057; + color: var(--text-secondary); font-weight: 600; } @@ -874,11 +1236,11 @@ .combined-transcript-link { display: inline-block; padding: 8px 12px; - background-color: #ffffff66; + background-color: var(--white-dimmed); border: 1px solid #dee2e6; border-radius: 4px; text-decoration: none; - color: #495057; + color: var(--text-secondary); font-weight: 500; transition: background-color 0.2s; } @@ -887,15 +1249,26 @@ background-color: 
#ffffff99; text-decoration: none; } + +/* Navigation hints */ +.nav-hint { + font-size: 0.75em; +} + +/* Session preview styling */ +.session-preview { + font-size: 0.75em; + line-height: 1.3; +} /* Filter toolbar and controls */ .filter-toolbar { - background-color: #f8f9fa66; + background-color: var(--neutral-dimmed); border-radius: 8px; padding: 12px 16px; margin-bottom: 16px; box-shadow: -7px -7px 10px #eeeeee44, 7px 7px 10px #00000011; - border-left: #ffffff66 1px solid; - border-top: #ffffff66 1px solid; + border-left: var(--white-dimmed) 1px solid; + border-top: var(--white-dimmed) 1px solid; border-bottom: #00000017 1px solid; border-right: #00000017 1px solid; display: none; @@ -924,14 +1297,14 @@ .toolbar-header h3 { margin: 0; font-size: 1em; - color: #495057; + color: var(--text-secondary); font-weight: 600; } .close-toolbar-btn { background: none; border: none; - color: #666; + color: var(--text-muted); cursor: pointer; font-size: 20px; padding: 5px; @@ -947,8 +1320,8 @@ } .close-toolbar-btn:hover { - background-color: #ffffff66; - color: #333; + background-color: var(--white-dimmed); + color: var(--text-primary); } .filter-section { @@ -972,7 +1345,7 @@ border: 1px solid #dee2e6; border-radius: 20px; background-color: transparent; - color: #495057; + color: var(--text-secondary); font-size: 0.85em; font-weight: 500; cursor: pointer; @@ -993,32 +1366,32 @@ } .filter-toggle.active:hover { - background-color: #ffffff66; + background-color: var(--white-dimmed); } /* Color-coded filter buttons */ .filter-toggle[data-type="user"] { - border-color: #ff9800; + border-color: var(--user-color); border-width: 2px; } .filter-toggle[data-type="system"] { - border-color: #d98100; + border-color: var(--system-color); border-width: 2px; } .filter-toggle[data-type="tool"] { - border-color: #4caf50; + border-color: var(--tool-use-color); border-width: 2px; } .filter-toggle[data-type="assistant"] { - border-color: #9c27b0; + border-color: var(--assistant-color); 
border-width: 2px; } .filter-toggle[data-type="thinking"] { - border-color: #9c27b066; + border-color: var(--assistant-dimmed); border-width: 2px; } @@ -1031,7 +1404,7 @@ padding: 4px 8px; border: 1px solid #dee2e6; border-radius: 4px; - background-color: #ffffff66; + background-color: var(--white-dimmed); color: #6c757d; font-size: 0.75em; cursor: pointer; @@ -1057,25 +1430,25 @@ } /* TodoWrite tool styling */ .todo-write { - background-color: #f0f8ff66; - border-left: #4169e188 3px solid; - font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Helvetica Neue', Arial, sans-serif; + background-color: var(--highlight-dimmed); + border-left: var(--todo-accent) 3px solid; + font-family: var(--font-ui); } .tool-header { font-weight: 600; margin-bottom: 12px; - color: #2c3e50; + color: var(--text-secondary); font-size: 1.1em; } .todo-list { - background-color: #ffffff66; + background-color: var(--bg-card); border-radius: 6px; padding: 8px; box-shadow: -7px -7px 10px #eeeeee44, 7px 7px 10px #00000011; - border-left: #ffffff66 1px solid; - border-top: #ffffff66 1px solid; + border-left: var(--bg-card) 1px solid; + border-top: var(--bg-card) 1px solid; border-bottom: #00000017 1px solid; border-right: #00000017 1px solid; } @@ -1085,7 +1458,7 @@ align-items: center; gap: 8px; padding: 8px 4px; - border-bottom: 1px solid #f0f3f6; + border-bottom: 1px solid var(--border-separator); transition: background-color 0.2s ease; } @@ -1094,21 +1467,17 @@ } .todo-item:hover { - background-color: #f8f9fa; -} - -.todo-item.completed { - opacity: 0.7; + background-color: var(--bg-hover); } .todo-item.completed .todo-content { text-decoration: line-through; - color: #6c757d; + color: var(--text-muted); + opacity: 0.7; } -.todo-item input[type="checkbox"] { - margin: 0; - cursor: default; +.todo-item.pending .todo-content { + font-weight: normal; } .todo-status { @@ -1118,7 +1487,7 @@ .todo-content { flex: 1; - color: #333; + color: var(--text-primary); font-weight: 500; 
font-size: 90%; font-family: var(--font-ui); @@ -1126,30 +1495,120 @@ .todo-id { font-size: 0.8em; - color: #6c757d; + color: var(--text-muted); font-weight: normal; } /* Priority-based left border colors */ .todo-item.high { - border-left: 3px solid #dc3545; + border-left: 3px solid var(--priority-high); } .todo-item.medium { - border-left: 3px solid #ffc107; + border-left: 3px solid var(--priority-medium); } .todo-item.low { - border-left: 3px solid #28a745; + border-left: 3px solid var(--priority-low); } /* Status-based background tints */ .todo-item.in_progress { - background-color: #fff3cd; + background-color: var(--status-in-progress); } .todo-item.completed { - background-color: #d4edda; + background-color: var(--status-completed); +} + +/* AskUserQuestion tool styling */ +.askuserquestion-content { + font-family: var(--font-ui); + padding: 8px 0; +} + +.question-block { + margin-bottom: 16px; + padding: 12px; + background-color: var(--question-bg); + border-radius: 6px; + border-left: 3px solid var(--question-accent); +} + +.question-block:last-child { + margin-bottom: 0; +} + +/* Answered questions in result (lighter, success-tinted) */ +.question-block.answered { + background-color: var(--answer-bg); + border-left-color: var(--answer-accent); +} + +.question-header { + font-size: 0.85em; + font-weight: 600; + color: var(--text-muted); + text-transform: uppercase; + letter-spacing: 0.5px; + margin-bottom: 4px; +} + +.question-text { + font-size: 1.1em; + font-weight: 500; + color: var(--text-primary); + line-height: 1.4; + margin-bottom: 8px; +} + +.answer-text { + font-size: 1.05em; + font-weight: 600; + color: var(--answer-accent); + line-height: 1.4; + padding-left: 4px; +} + +.question-options-hint { + font-size: 0.8em; + color: var(--text-muted); + font-style: italic; + margin-bottom: 6px; +} + +.question-options { + list-style: none; + padding: 0; + margin: 0; +} + +.question-option { + padding: 8px 12px; + margin: 4px 0; + background-color: #fff; + 
border: 1px solid var(--border-light); + border-radius: 4px; +} + +.question-option strong { + color: var(--text-primary); +} + +.question-option .option-desc { + color: var(--text-muted); + font-size: 0.9em; +} + +/* ExitPlanMode tool styling */ +.plan-content { + font-family: var(--font-ui); +} + +.plan-content.markdown { + background-color: var(--bg-neutral); + border-radius: 6px; + padding: 12px; } /* Timeline-specific styles for vis-timeline */ @@ -1195,13 +1654,13 @@ .vis-labelset .vis-label { font-size: 12px !important; font-weight: 500 !important; - color: #495057 !important; + color: var(--text-secondary) !important; } /* Timeline items styling */ .vis-item { border-radius: 4px !important; - border: 1px solid #ddd !important; + border: 1px solid var(--border-light) !important; font-size: 11px !important; /* Stuck item workaround, see: https://github.com/visjs/vis-timeline/issues/494#issuecomment-1638974075 */ transform: scale(0); @@ -1219,7 +1678,7 @@ /* Message type specific styling */ .vis-item.timeline-item-user { background-color: #e3f2fd !important; - border-color: #2196f3 !important; + border-color: var(--system-warning-color) !important; } .vis-item.timeline-item-assistant { @@ -1264,12 +1723,12 @@ /* Timeline axis styling */ .vis-time-axis { - border-top: 1px solid #ddd !important; + border-top: 1px solid var(--border-light) !important; } .vis-time-axis .vis-text { font-size: 11px !important; - color: #666 !important; + color: var(--text-muted) !important; } /* Timeline navigation controls */ @@ -1314,7 +1773,7 @@ right: 45px; top: 50%; transform: translateY(-50%); - color: #999; + color: var(--text-muted); font-size: 0.75em; pointer-events: none; transition: opacity 0.2s; @@ -1349,7 +1808,7 @@ background: #ffffff88; border: 1px solid #dee2e6; border-radius: 20px; - color: #495057; + color: var(--text-secondary); font-size: 0.85em; font-family: inherit; transition: all 0.2s; @@ -1357,13 +1816,13 @@ .search-input:focus { outline: none; - 
border-color: #2196f3; + border-color: var(--system-warning-color); background: #ffffff99; box-shadow: 0 0 0 2px rgba(33, 150, 243, 0.2); } .search-input::placeholder { - color: #666; + color: var(--text-muted); } .search-clear { @@ -1371,7 +1830,7 @@ right: 15px; background: none; border: none; - color: #666; + color: var(--text-muted); cursor: pointer; font-size: 18px; padding: 5px; @@ -1381,8 +1840,8 @@ } .search-clear:hover { - color: #333; - background: #ffffff66; + color: var(--text-primary); + background: var(--white-dimmed); } .search-clear.visible { @@ -1393,7 +1852,7 @@ .search-results-info { margin-top: 8px; font-size: 12px; - color: #666; + color: var(--text-muted); display: none; } @@ -1421,11 +1880,11 @@ align-items: center; gap: 4px; cursor: pointer; - color: #666; + color: var(--text-muted); } .search-option-inline:hover { - color: #333; + color: var(--text-primary); } .search-option-inline input[type="checkbox"] { @@ -1437,7 +1896,7 @@ background: #ffffff88; border: 1px solid #dee2e6; border-radius: 4px; - color: #333; + color: var(--text-primary); padding: 4px 8px; cursor: pointer; font-size: 12px; @@ -1446,7 +1905,7 @@ .search-nav-btn:hover:not(:disabled) { background: #ffffff99; - border-color: #2196f3; + border-color: var(--system-warning-color); transform: translateY(-1px); } @@ -1480,7 +1939,7 @@ display: flex; align-items: center; gap: 5px; - color: #666; + color: var(--text-muted); transition: color 0.2s; } @@ -1495,18 +1954,18 @@ } .search-option:hover { - color: #333; + color: var(--text-primary); } /* Index Page Search Results */ .search-results-panel { margin: 20px 0; padding: 20px; - background-color: #ffffff66; + background-color: var(--white-dimmed); border-radius: 8px; box-shadow: -7px -7px 10px #eeeeee44, 7px 7px 10px #00000011; - border-left: #ffffff66 1px solid; - border-top: #ffffff66 1px solid; + border-left: var(--white-dimmed) 1px solid; + border-top: var(--white-dimmed) 1px solid; border-bottom: #00000017 1px solid; 
border-right: #00000017 1px solid; display: none; @@ -1534,7 +1993,7 @@ .search-close-btn { background: none; border: none; - color: #666; + color: var(--text-muted); cursor: pointer; font-size: 20px; padding: 5px; @@ -1543,8 +2002,8 @@ } .search-close-btn:hover { - color: #333; - background: #ffffff66; + color: var(--text-primary); + background: var(--white-dimmed); } .search-result-group { @@ -1563,7 +2022,7 @@ .search-result-count { background: #ffffff88; - color: #666; + color: var(--text-muted); padding: 2px 8px; border-radius: 12px; font-size: 11px; @@ -1599,14 +2058,14 @@ .search-result-session { font-size: 12px; - color: #666; + color: var(--text-muted); margin-bottom: 5px; font-weight: 500; } .search-result-excerpt { font-size: 13px; - color: #495057; + color: var(--text-secondary); line-height: 1.4; } @@ -1621,7 +2080,7 @@ .search-result-meta { margin-top: 8px; font-size: 11px; - color: #666; + color: var(--text-muted); display: flex; gap: 15px; padding-top: 5px; @@ -1634,7 +2093,7 @@ .search-no-results { text-align: center; padding: 30px; - color: #666; + color: var(--text-muted); font-size: 14px; background-color: #ffffff44; border-radius: 6px; @@ -1645,7 +2104,7 @@ .search-loading { text-align: center; padding: 20px; - color: #666; + color: var(--text-muted); background-color: #ffffff44; border-radius: 6px; } @@ -2109,14 +2568,14 @@

Claude Transcript - style_guide

let displayContent = content ?? messageTypeGroups[messageType].content; // Check for sidechain context regardless of primary message type + // Note: Sidechain user messages (Sub-assistant prompts) are now skipped + // since they duplicate the Task tool input prompt if (classList.includes('sidechain')) { // Override group for sidechain messages, but preserve the content messageType = 'sidechain'; // For sidechain messages, prefix with appropriate icon based on original type - if (classList.includes('user')) { - displayContent = '📝 ' + (content ?? 'Sub-assistant prompt'); - } else if (classList.includes('assistant')) { + if (classList.includes('assistant')) { displayContent = '🔗 ' + (content ?? 'Sub-assistant response'); } else if (classList.includes('tool_use')) { displayContent = '🔗 ' + (content ?? 'Sub-assistant tool use'); @@ -2513,20 +2972,113 @@

🔍 Search & Filter

+ + + + + +
-
+
Session: 📋 **Summary Example** - This style guide demonstrated all major message types: user messages, assistant responses, tool usage (both successful and error cases), system commands, command outputs, long text handling, special character support, and this summary. The transcript viewer properly renders markdown, handles tool interactions, manages text wrapping, and supports full Unicode character sets. All visual elements maintain consistent styling and readability. • style_gu
- + +
+ + +
+ + 3 users +
+
+ ▼▼ + 4 assistants, 3 users, 2 more total +
+ +
+
-
+
🤷 User
@@ -2550,12 +3102,27 @@

🔍 Search & Filter

print("Hello, world!") return 42 ```
+ +
+ + +
+ + 3 assistants +
+
+ ▼▼ + 3 assistants, 2 tools total +
+ +
+
-
+
🤖 Assistant
@@ -2565,7 +3132,7 @@

🔍 Search & Filter

-

🤖 Assistant Message Example

+

🤖 Assistant Message Example

This is a basic assistant response. It supports the same markdown formatting:

Headers work too

Subheaders

@@ -2609,13 +3176,14 @@

Subheaders

  • Responsive design
  • Clean typography
  • -
    +
    +
    -
    +
    🤖 Assistant
    @@ -2627,15 +3195,26 @@

    Subheaders

    -

    🛠️ Tool Use Example

    +

    🛠️ Tool Use Example

    Let me create a file to demonstrate tool usage:

    -
    +
    + +
    + + +
    + + 1 tool +
    + +
    +
    -
    +
    📝 Edit /tmp/style_guide_example.py
    @@ -2646,12 +3225,13 @@

    Subheaders

    +#!/usr/bin/env python3
    +"""
    +Style Guide Example File
    +
    +This file demonstrates how tool usage appears in transcripts.
    +"""
    +
    +import os
    +import sys
    +from typing import List, Dict, Any
    +
    +
    +class StyleGuideExample:
    + """Example class for the style guide."""
    +
    + def __init__(self, name: str):
    + self.name = name
    + self.items: List[Dict[str, Any]] = []
    +
    + def add_item(self, item: Dict[str, Any]) -> None:
    + """Add an item to the collection."""
    + self.items.append(item)
    + print(f"Added item: {item.get('name', 'Unknown')}")
    +
    + def process_all(self) -> None:
    + """Process all items in the collection."""
    + for i, item in enumerate(self.items):
    + print(f"Processing item {i+1}: {item}")
    +
    +
    +if __name__ == "__main__":
    + example = StyleGuideExample("Demo")
    + example.add_item({"name": "Sample", "value": 42})
    + example.add_item({"name": "Test", "value": 100})
    + example.process_all()
    + print("Style guide example completed!")
    +
    -
    +
    @@ -2662,12 +3242,13 @@

    Subheaders

    File created successfully at: /tmp/style_guide_example.py
    +
    -
    +
    🤖 Assistant
    @@ -2679,15 +3260,26 @@

    Subheaders

    -

    ⚠️ Tool Error Example

    +

    ⚠️ Tool Error Example

    Let me demonstrate what happens when a tool encounters an error:

    -
    +
    + +
    + + +
    + + 1 tool +
    + +
    +
    -
    +
    🛠️ Bash This command will fail to demonstrate error handling
    @@ -2698,12 +3290,13 @@

    Subheaders

    nonexistent_command --invalid-flag
    +
    -
    +
    🚨 Error
    @@ -2716,14 +3309,15 @@

    Subheaders

    bash: line 1: nonexistent_command: command not found
     Error: Command 'nonexistent_command' not found in PATH
     Exit code: 127
    +
    -
    +
    - ⚙️ System + 🤷 Slash Command
    2025-06-14 10:02:00 @@ -2731,36 +3325,21 @@

    Subheaders

    -
    Command: style-guide-test
    Args: --format json --output /tmp/test.json
    -
    -
    - - Content -
    This is the command content with JSON data: -{ - "name": "Style Guide Test", - "version": "1.0.0", - "description": "Demonstrates command content ...
    -
    -
    - This is the command content with JSON data: +
    style-guide-test
    Args: --format json --output /tmp/test.json
    Content:
    This is the command content with JSON data:
     {
       "name": "Style Guide Test",
       "version": "1.0.0",
       "description": "Demonstrates command content formatting"
    -}
    -            
    -
    -
    -
    +}
    +
    -
    +
    - System + 🤷 Command Output
    2025-06-14 10:02:10 @@ -2768,7 +3347,7 @@

    Subheaders

    -
    Command Output:
    Style Guide Test Results:
    +        
    Style Guide Test Results:
     ✓ Message rendering: PASSED
     ✓ Markdown processing: PASSED
     ✓ Tool use formatting: PASSED
    @@ -2784,12 +3363,13 @@ 

    Subheaders

    Status: ALL_TESTS_PASSED Style guide generation completed successfully! 🎉
    +
    -
    +
    🤷 User
    @@ -2808,12 +3388,23 @@

    Subheaders

    Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. The layout should remain clean and readable even with this amount of text content. Line breaks and paragraph spacing should be preserved appropriately.
    + +
    + + +
    + + 1 assistant +
    + +
    +
    -
    +
    🤖 Assistant
    @@ -2823,7 +3414,7 @@

    Subheaders

    -

    🌍 Special Characters & Unicode Example

    +

    🌍 Special Characters & Unicode Example

    The transcript viewer supports full Unicode:

    Accented characters: café, naïve, résumé, piñata
    Non-Latin scripts: 中文, العربية, русский, ελληνικά, हिन्दी
    @@ -2841,63 +3432,2875 @@

    Subheaders

    print(f"Mathematical: √16 = {16**0.5}")

    All characters should render correctly without breaking the layout!

    -
    +
    +
    - - - - - 🔝 - - diff --git a/test/README.md b/test/README.md index 5d9b40b9..6a307f78 100644 --- a/test/README.md +++ b/test/README.md @@ -9,7 +9,7 @@ Representative JSONL files covering all message types and edge cases: **Note**: After the module split, import paths have changed: - `from claude_code_log.parser import load_transcript, extract_text_content` -- `from claude_code_log.renderer import generate_html, format_timestamp` +- `from claude_code_log.html.renderer import generate_html, format_timestamp` - `from claude_code_log.converter import convert_jsonl_to_html` ### `representative_messages.jsonl` diff --git a/test/__snapshots__/test_snapshot_html.ambr b/test/__snapshots__/test_snapshot_html.ambr index b84f2ece..c7d4aab1 100644 --- a/test/__snapshots__/test_snapshot_html.ambr +++ b/test/__snapshots__/test_snapshot_html.ambr @@ -2131,14 +2131,14 @@ font-weight: 500; padding: 0.4em; transition: all 0.2s ease; - border-bottom: 2px solid; + border-bottom: 2px solid transparent; background: linear-gradient(to bottom, #f8f8f844, #f0f0f0); } - /* Double-line effect when folded */ + /* Show border only when folded (content is hidden) */ .fold-bar-section.folded { - border-bottom-style: double; - border-bottom-width: 4px; + border-bottom-style: solid; + border-bottom-width: 2px; } .fold-bar-section:hover { @@ -2177,92 +2177,86 @@ font-size: 0.9em; } - /* Border colors matching message types */ - .fold-bar[data-border-color="user"] .fold-bar-section, - .fold-bar[data-border-color="user compacted"] .fold-bar-section, - .fold-bar[data-border-color="user sidechain"] .fold-bar-section, - .fold-bar[data-border-color="user compacted sidechain"] .fold-bar-section { + /* Border colors matching message types - only shown when folded */ + .fold-bar[data-border-color="user"] .fold-bar-section.folded, + .fold-bar[data-border-color="user compacted"] .fold-bar-section.folded, + .fold-bar[data-border-color="user sidechain"] .fold-bar-section.folded, + 
.fold-bar[data-border-color="user compacted sidechain"] .fold-bar-section.folded { border-bottom-color: var(--user-color); } - .fold-bar[data-border-color="user slash-command"] .fold-bar-section, - .fold-bar[data-border-color="user slash-command sidechain"] .fold-bar-section { + .fold-bar[data-border-color="user slash-command"] .fold-bar-section.folded, + .fold-bar[data-border-color="user slash-command sidechain"] .fold-bar-section.folded, + .fold-bar[data-border-color="user command-output"] .fold-bar-section.folded { border-bottom-color: var(--user-dimmed); } - .fold-bar[data-border-color="assistant"] .fold-bar-section, - .fold-bar[data-border-color="assistant sidechain"] .fold-bar-section { + .fold-bar[data-border-color="assistant"] .fold-bar-section.folded, + .fold-bar[data-border-color="assistant sidechain"] .fold-bar-section.folded { border-bottom-color: var(--assistant-color); } - .fold-bar[data-border-color="system"] .fold-bar-section, - .fold-bar[data-border-color="system command-output"] .fold-bar-section { + .fold-bar[data-border-color="system"] .fold-bar-section.folded { border-bottom-color: var(--system-color); } - .fold-bar[data-border-color="system-warning"] .fold-bar-section { + .fold-bar[data-border-color="system-warning"] .fold-bar-section.folded { border-bottom-color: var(--system-warning-color); } - .fold-bar[data-border-color="system-error"] .fold-bar-section { + .fold-bar[data-border-color="system-error"] .fold-bar-section.folded { border-bottom-color: var(--system-error-color); } - .fold-bar[data-border-color="system-info"] .fold-bar-section { + .fold-bar[data-border-color="system-info"] .fold-bar-section.folded { border-bottom-color: var(--info-dimmed); } - .fold-bar[data-border-color="tool_use"] .fold-bar-section, - .fold-bar[data-border-color="tool_use sidechain"] .fold-bar-section { + .fold-bar[data-border-color="tool_use"] .fold-bar-section.folded, + .fold-bar[data-border-color="tool_use sidechain"] .fold-bar-section.folded { 
border-bottom-color: var(--tool-use-color); } - .fold-bar[data-border-color="tool_result"] .fold-bar-section, - .fold-bar[data-border-color="tool_result sidechain"] .fold-bar-section { + .fold-bar[data-border-color="tool_result"] .fold-bar-section.folded, + .fold-bar[data-border-color="tool_result sidechain"] .fold-bar-section.folded { border-bottom-color: var(--success-dimmed); } - .fold-bar[data-border-color="tool_result error"] .fold-bar-section, - .fold-bar[data-border-color="tool_result error sidechain"] .fold-bar-section { + .fold-bar[data-border-color="tool_result error"] .fold-bar-section.folded, + .fold-bar[data-border-color="tool_result error sidechain"] .fold-bar-section.folded { border-bottom-color: var(--error-dimmed); } - .fold-bar[data-border-color="thinking"] .fold-bar-section, - .fold-bar[data-border-color="thinking sidechain"] .fold-bar-section { + .fold-bar[data-border-color="thinking"] .fold-bar-section.folded, + .fold-bar[data-border-color="thinking sidechain"] .fold-bar-section.folded { border-bottom-color: var(--assistant-dimmed); } - .fold-bar[data-border-color="image"] .fold-bar-section, - .fold-bar[data-border-color="image sidechain"] .fold-bar-section { + .fold-bar[data-border-color="image"] .fold-bar-section.folded, + .fold-bar[data-border-color="image sidechain"] .fold-bar-section.folded { border-bottom-color: var(--info-dimmed); } - .fold-bar[data-border-color="unknown"] .fold-bar-section, - .fold-bar[data-border-color="unknown sidechain"] .fold-bar-section { + .fold-bar[data-border-color="unknown"] .fold-bar-section.folded, + .fold-bar[data-border-color="unknown sidechain"] .fold-bar-section.folded { border-bottom-color: var(--neutral-dimmed); } - .fold-bar[data-border-color="bash-input"] .fold-bar-section { + .fold-bar[data-border-color="bash-input"] .fold-bar-section.folded { border-bottom-color: var(--user-color); } - .fold-bar[data-border-color="bash-output"] .fold-bar-section { + .fold-bar[data-border-color="bash-output"] 
.fold-bar-section.folded { border-bottom-color: var(--user-dimmed); } - .fold-bar[data-border-color="session-header"] .fold-bar-section { + .fold-bar[data-border-color="session-header"] .fold-bar-section.folded { border-bottom-color: var(--system-warning-color); } - /* Sidechain (sub-assistant) fold-bar styling */ - .sidechain .fold-bar-section { - border-bottom-style: dashed; - border-bottom-width: 2px; - } - + /* Sidechain (sub-assistant) fold-bar styling - dashed border when folded */ .sidechain .fold-bar-section.folded { border-bottom-style: dashed; - border-bottom-width: 4px; } /* ======================================== @@ -2353,17 +2347,14 @@ } /* Paired message styling */ - .message.paired-message { + .message.pair_first { margin-bottom: 0; - } - - .message.paired-message.pair_first { border-bottom-left-radius: 0; border-bottom-right-radius: 0; border-bottom: none; } - .message.paired-message.pair_last { + .message.pair_last { margin-top: 0; margin-bottom: 1em; border-top-left-radius: 0; @@ -2371,8 +2362,9 @@ border-top: 1px solid #00000011; } - .message.paired-message.pair_middle { + .message.pair_middle { margin-top: 0; + margin-bottom: 0; border-radius: 0; border-top: 1px solid #00000011; border-bottom: none; @@ -2405,7 +2397,7 @@ } /* Dimmed assistant when paired with thinking */ - .assistant.paired-message { + .assistant.pair_last { border-left-color: var(--assistant-dimmed); } @@ -2639,7 +2631,7 @@ } /* Full purple when thinking is paired (as pair_first) */ - .thinking.paired-message.pair_first { + .thinking.pair_first { border-left-color: var(--assistant-color); } @@ -2734,6 +2726,20 @@ font-family: var(--font-ui); } + .markdown table { + border-collapse: collapse; + } + + .markdown td { + padding: .5em; + border-top: 1px solid #bbb; + } + + .markdown th { + padding: .5em; + border-bottom: 1px solid #888; + } + /* Code block styling */ pre > code { display: block; @@ -4938,7 +4944,7 @@ -
    +
    📝 Edit /tmp/decorator_example.py
    @@ -4955,7 +4961,7 @@ -
    +
    @@ -5031,7 +5037,7 @@ -
    +
    🛠️ Bash Run the decorator example to show output
    @@ -5048,7 +5054,7 @@ -
    +
    @@ -6861,14 +6867,14 @@ font-weight: 500; padding: 0.4em; transition: all 0.2s ease; - border-bottom: 2px solid; + border-bottom: 2px solid transparent; background: linear-gradient(to bottom, #f8f8f844, #f0f0f0); } - /* Double-line effect when folded */ + /* Show border only when folded (content is hidden) */ .fold-bar-section.folded { - border-bottom-style: double; - border-bottom-width: 4px; + border-bottom-style: solid; + border-bottom-width: 2px; } .fold-bar-section:hover { @@ -6907,92 +6913,86 @@ font-size: 0.9em; } - /* Border colors matching message types */ - .fold-bar[data-border-color="user"] .fold-bar-section, - .fold-bar[data-border-color="user compacted"] .fold-bar-section, - .fold-bar[data-border-color="user sidechain"] .fold-bar-section, - .fold-bar[data-border-color="user compacted sidechain"] .fold-bar-section { + /* Border colors matching message types - only shown when folded */ + .fold-bar[data-border-color="user"] .fold-bar-section.folded, + .fold-bar[data-border-color="user compacted"] .fold-bar-section.folded, + .fold-bar[data-border-color="user sidechain"] .fold-bar-section.folded, + .fold-bar[data-border-color="user compacted sidechain"] .fold-bar-section.folded { border-bottom-color: var(--user-color); } - .fold-bar[data-border-color="user slash-command"] .fold-bar-section, - .fold-bar[data-border-color="user slash-command sidechain"] .fold-bar-section { + .fold-bar[data-border-color="user slash-command"] .fold-bar-section.folded, + .fold-bar[data-border-color="user slash-command sidechain"] .fold-bar-section.folded, + .fold-bar[data-border-color="user command-output"] .fold-bar-section.folded { border-bottom-color: var(--user-dimmed); } - .fold-bar[data-border-color="assistant"] .fold-bar-section, - .fold-bar[data-border-color="assistant sidechain"] .fold-bar-section { + .fold-bar[data-border-color="assistant"] .fold-bar-section.folded, + .fold-bar[data-border-color="assistant sidechain"] .fold-bar-section.folded { 
border-bottom-color: var(--assistant-color); } - .fold-bar[data-border-color="system"] .fold-bar-section, - .fold-bar[data-border-color="system command-output"] .fold-bar-section { + .fold-bar[data-border-color="system"] .fold-bar-section.folded { border-bottom-color: var(--system-color); } - .fold-bar[data-border-color="system-warning"] .fold-bar-section { + .fold-bar[data-border-color="system-warning"] .fold-bar-section.folded { border-bottom-color: var(--system-warning-color); } - .fold-bar[data-border-color="system-error"] .fold-bar-section { + .fold-bar[data-border-color="system-error"] .fold-bar-section.folded { border-bottom-color: var(--system-error-color); } - .fold-bar[data-border-color="system-info"] .fold-bar-section { + .fold-bar[data-border-color="system-info"] .fold-bar-section.folded { border-bottom-color: var(--info-dimmed); } - .fold-bar[data-border-color="tool_use"] .fold-bar-section, - .fold-bar[data-border-color="tool_use sidechain"] .fold-bar-section { + .fold-bar[data-border-color="tool_use"] .fold-bar-section.folded, + .fold-bar[data-border-color="tool_use sidechain"] .fold-bar-section.folded { border-bottom-color: var(--tool-use-color); } - .fold-bar[data-border-color="tool_result"] .fold-bar-section, - .fold-bar[data-border-color="tool_result sidechain"] .fold-bar-section { + .fold-bar[data-border-color="tool_result"] .fold-bar-section.folded, + .fold-bar[data-border-color="tool_result sidechain"] .fold-bar-section.folded { border-bottom-color: var(--success-dimmed); } - .fold-bar[data-border-color="tool_result error"] .fold-bar-section, - .fold-bar[data-border-color="tool_result error sidechain"] .fold-bar-section { + .fold-bar[data-border-color="tool_result error"] .fold-bar-section.folded, + .fold-bar[data-border-color="tool_result error sidechain"] .fold-bar-section.folded { border-bottom-color: var(--error-dimmed); } - .fold-bar[data-border-color="thinking"] .fold-bar-section, - .fold-bar[data-border-color="thinking sidechain"] 
.fold-bar-section { + .fold-bar[data-border-color="thinking"] .fold-bar-section.folded, + .fold-bar[data-border-color="thinking sidechain"] .fold-bar-section.folded { border-bottom-color: var(--assistant-dimmed); } - .fold-bar[data-border-color="image"] .fold-bar-section, - .fold-bar[data-border-color="image sidechain"] .fold-bar-section { + .fold-bar[data-border-color="image"] .fold-bar-section.folded, + .fold-bar[data-border-color="image sidechain"] .fold-bar-section.folded { border-bottom-color: var(--info-dimmed); } - .fold-bar[data-border-color="unknown"] .fold-bar-section, - .fold-bar[data-border-color="unknown sidechain"] .fold-bar-section { + .fold-bar[data-border-color="unknown"] .fold-bar-section.folded, + .fold-bar[data-border-color="unknown sidechain"] .fold-bar-section.folded { border-bottom-color: var(--neutral-dimmed); } - .fold-bar[data-border-color="bash-input"] .fold-bar-section { + .fold-bar[data-border-color="bash-input"] .fold-bar-section.folded { border-bottom-color: var(--user-color); } - .fold-bar[data-border-color="bash-output"] .fold-bar-section { + .fold-bar[data-border-color="bash-output"] .fold-bar-section.folded { border-bottom-color: var(--user-dimmed); } - .fold-bar[data-border-color="session-header"] .fold-bar-section { + .fold-bar[data-border-color="session-header"] .fold-bar-section.folded { border-bottom-color: var(--system-warning-color); } - /* Sidechain (sub-assistant) fold-bar styling */ - .sidechain .fold-bar-section { - border-bottom-style: dashed; - border-bottom-width: 2px; - } - + /* Sidechain (sub-assistant) fold-bar styling - dashed border when folded */ .sidechain .fold-bar-section.folded { border-bottom-style: dashed; - border-bottom-width: 4px; } /* ======================================== @@ -7083,17 +7083,14 @@ } /* Paired message styling */ - .message.paired-message { + .message.pair_first { margin-bottom: 0; - } - - .message.paired-message.pair_first { border-bottom-left-radius: 0; border-bottom-right-radius: 0; 
border-bottom: none; } - .message.paired-message.pair_last { + .message.pair_last { margin-top: 0; margin-bottom: 1em; border-top-left-radius: 0; @@ -7101,8 +7098,9 @@ border-top: 1px solid #00000011; } - .message.paired-message.pair_middle { + .message.pair_middle { margin-top: 0; + margin-bottom: 0; border-radius: 0; border-top: 1px solid #00000011; border-bottom: none; @@ -7135,7 +7133,7 @@ } /* Dimmed assistant when paired with thinking */ - .assistant.paired-message { + .assistant.pair_last { border-left-color: var(--assistant-dimmed); } @@ -7369,7 +7367,7 @@ } /* Full purple when thinking is paired (as pair_first) */ - .thinking.paired-message.pair_first { + .thinking.pair_first { border-left-color: var(--assistant-color); } @@ -7464,6 +7462,20 @@ font-family: var(--font-ui); } + .markdown table { + border-collapse: collapse; + } + + .markdown td { + padding: .5em; + border-top: 1px solid #bbb; + } + + .markdown th { + padding: .5em; + border-bottom: 1px solid #888; + } + /* Code block styling */ pre > code { display: block; @@ -9559,13 +9571,13 @@
    -
    +
    - 4 users + 5 users
    -
    +
    ▼▼ - 4 users, 2 assistants, 3 more total + 5 users, 2 assistants, 2 more total
    @@ -9675,13 +9687,9 @@
    -
    +
    - 1 tool, 1 system, 1 more -
    -
    - ▼▼ - 2 tools, 1 system, 1 more total + 1 tool
    @@ -9691,7 +9699,7 @@ -
    +
    🛠️ FailingTool
    @@ -9713,7 +9721,7 @@ -
    +
    🚨 Error
    @@ -9730,9 +9738,9 @@ -
    +
    - ⚙️ System + 🤷 Slash Command
    2025-06-14 11:02:10 @@ -9740,7 +9748,7 @@
    -
    Command: test-command
    Args: --verbose --output /tmp/test.log
    Content:
    This is the actual command content with some JSON structure and escaped characters: "quotes" and 
    +          
    test-command
    Args: --verbose --output /tmp/test.log
    Content:
    This is the actual command content with some JSON structure and escaped characters: "quotes" and 
       line breaks
       
    @@ -9749,9 +9757,9 @@ -
    +
    - System + 🤷 Command Output
    2025-06-14 11:02:20 @@ -9759,19 +9767,33 @@
    -
    Command Output:
    Command output here:
    +          
    Command output here:
       Line 1 of output
       Line 2 of output
       Some data: 12345
       Status: SUCCESS
       Timestamp: 2025-06-14T11:02:20Z
    +
    + + +
    + + 1 assistant +
    +
    + ▼▼ + 1 assistant, 1 tool total +
    + +
    +
    -
    +
    🤖 Assistant
    @@ -9801,7 +9823,7 @@ -
    +
    🛠️ MultiEdit
    @@ -9811,30 +9833,7 @@
    -
    - - - - - - - - - -
    file_path/tmp/complex_example.py
    edits -
    - [ - { - "old_string": "", - "new_string": "#!/usr/bin/env python3\n\"\"\"\nComplex example with ... -
    [
    -    {
    -      "old_string": "",
    -      "new_string": "#!/usr/bin/env python3\n\"\"\"\nComplex example with multiple operations.\n\"\"\"\n\nimport json\nimport sys\nfrom typing import List, Dict, Any\n\ndef process_data(items: List[Dict[str, Any]]) -> None:\n    \"\"\"\n    Process a list of data items.\n    \"\"\"\n    for item in items:\n        print(f\"Processing: {item['name']}\")\n        if item.get('active', False):\n            print(f\"  Status: Active\")\n        else:\n            print(f\"  Status: Inactive\")\n\nif __name__ == \"__main__\":\n    sample_data = [\n        {\"name\": \"Item 1\", \"active\": True},\n        {\"name\": \"Item 2\", \"active\": False},\n        {\"name\": \"Item 3\", \"active\": True}\n    ]\n    process_data(sample_data)"
    -    }
    -  ]
    -
    -
    +
    📝 /tmp/complex_example.py
    Applying 1 edits
    Edit #1
    +#!/usr/bin/env python3
    +"""
    +Complex example with multiple operations.
    +"""
    +
    +import json
    +import sys
    +from typing import List, Dict, Any
    +
    +def process_data(items: List[Dict[str, Any]]) -> None:
    + """
    + Process a list of data items.
    + """
    + for item in items:
    + print(f"Processing: {item['name']}")
    + if item.get('active', False):
    + print(f" Status: Active")
    + else:
    + print(f" Status: Inactive")
    +
    +if __name__ == "__main__":
    + sample_data = [
    + {"name": "Item 1", "active": True},
    + {"name": "Item 2", "active": False},
    + {"name": "Item 3", "active": True}
    + ]
    + process_data(sample_data)
    @@ -9907,35 +9906,36 @@
    -
    - - broken_todo -
    - -
    - 🔄 - Implement core functionality - #2 -
    - -
    - - Add comprehensive tests - #3 -
    - -
    - - Write user documentation - #4 -
    - -
    - - Perform code review - #5 -
    - +
    + + broken_todo + +
    + +
    + 🔄 + Implement core functionality + #2 +
    + +
    + + Add comprehensive tests + #3 +
    + +
    + + Write user documentation + #4 +
    + +
    + + Perform code review + #5 +
    +
    @@ -11693,14 +11693,14 @@ font-weight: 500; padding: 0.4em; transition: all 0.2s ease; - border-bottom: 2px solid; + border-bottom: 2px solid transparent; background: linear-gradient(to bottom, #f8f8f844, #f0f0f0); } - /* Double-line effect when folded */ + /* Show border only when folded (content is hidden) */ .fold-bar-section.folded { - border-bottom-style: double; - border-bottom-width: 4px; + border-bottom-style: solid; + border-bottom-width: 2px; } .fold-bar-section:hover { @@ -11739,92 +11739,86 @@ font-size: 0.9em; } - /* Border colors matching message types */ - .fold-bar[data-border-color="user"] .fold-bar-section, - .fold-bar[data-border-color="user compacted"] .fold-bar-section, - .fold-bar[data-border-color="user sidechain"] .fold-bar-section, - .fold-bar[data-border-color="user compacted sidechain"] .fold-bar-section { + /* Border colors matching message types - only shown when folded */ + .fold-bar[data-border-color="user"] .fold-bar-section.folded, + .fold-bar[data-border-color="user compacted"] .fold-bar-section.folded, + .fold-bar[data-border-color="user sidechain"] .fold-bar-section.folded, + .fold-bar[data-border-color="user compacted sidechain"] .fold-bar-section.folded { border-bottom-color: var(--user-color); } - .fold-bar[data-border-color="user slash-command"] .fold-bar-section, - .fold-bar[data-border-color="user slash-command sidechain"] .fold-bar-section { + .fold-bar[data-border-color="user slash-command"] .fold-bar-section.folded, + .fold-bar[data-border-color="user slash-command sidechain"] .fold-bar-section.folded, + .fold-bar[data-border-color="user command-output"] .fold-bar-section.folded { border-bottom-color: var(--user-dimmed); } - .fold-bar[data-border-color="assistant"] .fold-bar-section, - .fold-bar[data-border-color="assistant sidechain"] .fold-bar-section { + .fold-bar[data-border-color="assistant"] .fold-bar-section.folded, + .fold-bar[data-border-color="assistant sidechain"] .fold-bar-section.folded { 
border-bottom-color: var(--assistant-color); } - .fold-bar[data-border-color="system"] .fold-bar-section, - .fold-bar[data-border-color="system command-output"] .fold-bar-section { + .fold-bar[data-border-color="system"] .fold-bar-section.folded { border-bottom-color: var(--system-color); } - .fold-bar[data-border-color="system-warning"] .fold-bar-section { + .fold-bar[data-border-color="system-warning"] .fold-bar-section.folded { border-bottom-color: var(--system-warning-color); } - .fold-bar[data-border-color="system-error"] .fold-bar-section { + .fold-bar[data-border-color="system-error"] .fold-bar-section.folded { border-bottom-color: var(--system-error-color); } - .fold-bar[data-border-color="system-info"] .fold-bar-section { + .fold-bar[data-border-color="system-info"] .fold-bar-section.folded { border-bottom-color: var(--info-dimmed); } - .fold-bar[data-border-color="tool_use"] .fold-bar-section, - .fold-bar[data-border-color="tool_use sidechain"] .fold-bar-section { + .fold-bar[data-border-color="tool_use"] .fold-bar-section.folded, + .fold-bar[data-border-color="tool_use sidechain"] .fold-bar-section.folded { border-bottom-color: var(--tool-use-color); } - .fold-bar[data-border-color="tool_result"] .fold-bar-section, - .fold-bar[data-border-color="tool_result sidechain"] .fold-bar-section { + .fold-bar[data-border-color="tool_result"] .fold-bar-section.folded, + .fold-bar[data-border-color="tool_result sidechain"] .fold-bar-section.folded { border-bottom-color: var(--success-dimmed); } - .fold-bar[data-border-color="tool_result error"] .fold-bar-section, - .fold-bar[data-border-color="tool_result error sidechain"] .fold-bar-section { + .fold-bar[data-border-color="tool_result error"] .fold-bar-section.folded, + .fold-bar[data-border-color="tool_result error sidechain"] .fold-bar-section.folded { border-bottom-color: var(--error-dimmed); } - .fold-bar[data-border-color="thinking"] .fold-bar-section, - .fold-bar[data-border-color="thinking sidechain"] 
.fold-bar-section { + .fold-bar[data-border-color="thinking"] .fold-bar-section.folded, + .fold-bar[data-border-color="thinking sidechain"] .fold-bar-section.folded { border-bottom-color: var(--assistant-dimmed); } - .fold-bar[data-border-color="image"] .fold-bar-section, - .fold-bar[data-border-color="image sidechain"] .fold-bar-section { + .fold-bar[data-border-color="image"] .fold-bar-section.folded, + .fold-bar[data-border-color="image sidechain"] .fold-bar-section.folded { border-bottom-color: var(--info-dimmed); } - .fold-bar[data-border-color="unknown"] .fold-bar-section, - .fold-bar[data-border-color="unknown sidechain"] .fold-bar-section { + .fold-bar[data-border-color="unknown"] .fold-bar-section.folded, + .fold-bar[data-border-color="unknown sidechain"] .fold-bar-section.folded { border-bottom-color: var(--neutral-dimmed); } - .fold-bar[data-border-color="bash-input"] .fold-bar-section { + .fold-bar[data-border-color="bash-input"] .fold-bar-section.folded { border-bottom-color: var(--user-color); } - .fold-bar[data-border-color="bash-output"] .fold-bar-section { + .fold-bar[data-border-color="bash-output"] .fold-bar-section.folded { border-bottom-color: var(--user-dimmed); } - .fold-bar[data-border-color="session-header"] .fold-bar-section { + .fold-bar[data-border-color="session-header"] .fold-bar-section.folded { border-bottom-color: var(--system-warning-color); } - /* Sidechain (sub-assistant) fold-bar styling */ - .sidechain .fold-bar-section { - border-bottom-style: dashed; - border-bottom-width: 2px; - } - + /* Sidechain (sub-assistant) fold-bar styling - dashed border when folded */ .sidechain .fold-bar-section.folded { border-bottom-style: dashed; - border-bottom-width: 4px; } /* ======================================== @@ -11915,17 +11909,14 @@ } /* Paired message styling */ - .message.paired-message { + .message.pair_first { margin-bottom: 0; - } - - .message.paired-message.pair_first { border-bottom-left-radius: 0; border-bottom-right-radius: 
0; border-bottom: none; } - .message.paired-message.pair_last { + .message.pair_last { margin-top: 0; margin-bottom: 1em; border-top-left-radius: 0; @@ -11933,8 +11924,9 @@ border-top: 1px solid #00000011; } - .message.paired-message.pair_middle { + .message.pair_middle { margin-top: 0; + margin-bottom: 0; border-radius: 0; border-top: 1px solid #00000011; border-bottom: none; @@ -11967,7 +11959,7 @@ } /* Dimmed assistant when paired with thinking */ - .assistant.paired-message { + .assistant.pair_last { border-left-color: var(--assistant-dimmed); } @@ -12201,7 +12193,7 @@ } /* Full purple when thinking is paired (as pair_first) */ - .thinking.paired-message.pair_first { + .thinking.pair_first { border-left-color: var(--assistant-color); } @@ -12296,6 +12288,20 @@ font-family: var(--font-ui); } + .markdown table { + border-collapse: collapse; + } + + .markdown td { + padding: .5em; + border-top: 1px solid #bbb; + } + + .markdown th { + padding: .5em; + border-bottom: 1px solid #888; + } + /* Code block styling */ pre > code { display: block; @@ -14638,7 +14644,7 @@ -
    +
    📝 Edit /tmp/decorator_example.py
    @@ -14655,7 +14661,7 @@ -
    +
    @@ -14731,7 +14737,7 @@ -
    +
    🛠️ Bash Run the decorator example to show output
    @@ -14748,7 +14754,7 @@ -
    +
    @@ -16561,14 +16567,14 @@ font-weight: 500; padding: 0.4em; transition: all 0.2s ease; - border-bottom: 2px solid; + border-bottom: 2px solid transparent; background: linear-gradient(to bottom, #f8f8f844, #f0f0f0); } - /* Double-line effect when folded */ + /* Show border only when folded (content is hidden) */ .fold-bar-section.folded { - border-bottom-style: double; - border-bottom-width: 4px; + border-bottom-style: solid; + border-bottom-width: 2px; } .fold-bar-section:hover { @@ -16607,92 +16613,86 @@ font-size: 0.9em; } - /* Border colors matching message types */ - .fold-bar[data-border-color="user"] .fold-bar-section, - .fold-bar[data-border-color="user compacted"] .fold-bar-section, - .fold-bar[data-border-color="user sidechain"] .fold-bar-section, - .fold-bar[data-border-color="user compacted sidechain"] .fold-bar-section { + /* Border colors matching message types - only shown when folded */ + .fold-bar[data-border-color="user"] .fold-bar-section.folded, + .fold-bar[data-border-color="user compacted"] .fold-bar-section.folded, + .fold-bar[data-border-color="user sidechain"] .fold-bar-section.folded, + .fold-bar[data-border-color="user compacted sidechain"] .fold-bar-section.folded { border-bottom-color: var(--user-color); } - .fold-bar[data-border-color="user slash-command"] .fold-bar-section, - .fold-bar[data-border-color="user slash-command sidechain"] .fold-bar-section { + .fold-bar[data-border-color="user slash-command"] .fold-bar-section.folded, + .fold-bar[data-border-color="user slash-command sidechain"] .fold-bar-section.folded, + .fold-bar[data-border-color="user command-output"] .fold-bar-section.folded { border-bottom-color: var(--user-dimmed); } - .fold-bar[data-border-color="assistant"] .fold-bar-section, - .fold-bar[data-border-color="assistant sidechain"] .fold-bar-section { + .fold-bar[data-border-color="assistant"] .fold-bar-section.folded, + .fold-bar[data-border-color="assistant sidechain"] .fold-bar-section.folded { 
border-bottom-color: var(--assistant-color); } - .fold-bar[data-border-color="system"] .fold-bar-section, - .fold-bar[data-border-color="system command-output"] .fold-bar-section { + .fold-bar[data-border-color="system"] .fold-bar-section.folded { border-bottom-color: var(--system-color); } - .fold-bar[data-border-color="system-warning"] .fold-bar-section { + .fold-bar[data-border-color="system-warning"] .fold-bar-section.folded { border-bottom-color: var(--system-warning-color); } - .fold-bar[data-border-color="system-error"] .fold-bar-section { + .fold-bar[data-border-color="system-error"] .fold-bar-section.folded { border-bottom-color: var(--system-error-color); } - .fold-bar[data-border-color="system-info"] .fold-bar-section { + .fold-bar[data-border-color="system-info"] .fold-bar-section.folded { border-bottom-color: var(--info-dimmed); } - .fold-bar[data-border-color="tool_use"] .fold-bar-section, - .fold-bar[data-border-color="tool_use sidechain"] .fold-bar-section { + .fold-bar[data-border-color="tool_use"] .fold-bar-section.folded, + .fold-bar[data-border-color="tool_use sidechain"] .fold-bar-section.folded { border-bottom-color: var(--tool-use-color); } - .fold-bar[data-border-color="tool_result"] .fold-bar-section, - .fold-bar[data-border-color="tool_result sidechain"] .fold-bar-section { + .fold-bar[data-border-color="tool_result"] .fold-bar-section.folded, + .fold-bar[data-border-color="tool_result sidechain"] .fold-bar-section.folded { border-bottom-color: var(--success-dimmed); } - .fold-bar[data-border-color="tool_result error"] .fold-bar-section, - .fold-bar[data-border-color="tool_result error sidechain"] .fold-bar-section { + .fold-bar[data-border-color="tool_result error"] .fold-bar-section.folded, + .fold-bar[data-border-color="tool_result error sidechain"] .fold-bar-section.folded { border-bottom-color: var(--error-dimmed); } - .fold-bar[data-border-color="thinking"] .fold-bar-section, - .fold-bar[data-border-color="thinking sidechain"] 
.fold-bar-section { + .fold-bar[data-border-color="thinking"] .fold-bar-section.folded, + .fold-bar[data-border-color="thinking sidechain"] .fold-bar-section.folded { border-bottom-color: var(--assistant-dimmed); } - .fold-bar[data-border-color="image"] .fold-bar-section, - .fold-bar[data-border-color="image sidechain"] .fold-bar-section { + .fold-bar[data-border-color="image"] .fold-bar-section.folded, + .fold-bar[data-border-color="image sidechain"] .fold-bar-section.folded { border-bottom-color: var(--info-dimmed); } - .fold-bar[data-border-color="unknown"] .fold-bar-section, - .fold-bar[data-border-color="unknown sidechain"] .fold-bar-section { + .fold-bar[data-border-color="unknown"] .fold-bar-section.folded, + .fold-bar[data-border-color="unknown sidechain"] .fold-bar-section.folded { border-bottom-color: var(--neutral-dimmed); } - .fold-bar[data-border-color="bash-input"] .fold-bar-section { + .fold-bar[data-border-color="bash-input"] .fold-bar-section.folded { border-bottom-color: var(--user-color); } - .fold-bar[data-border-color="bash-output"] .fold-bar-section { + .fold-bar[data-border-color="bash-output"] .fold-bar-section.folded { border-bottom-color: var(--user-dimmed); } - .fold-bar[data-border-color="session-header"] .fold-bar-section { + .fold-bar[data-border-color="session-header"] .fold-bar-section.folded { border-bottom-color: var(--system-warning-color); } - /* Sidechain (sub-assistant) fold-bar styling */ - .sidechain .fold-bar-section { - border-bottom-style: dashed; - border-bottom-width: 2px; - } - + /* Sidechain (sub-assistant) fold-bar styling - dashed border when folded */ .sidechain .fold-bar-section.folded { border-bottom-style: dashed; - border-bottom-width: 4px; } /* ======================================== @@ -16783,17 +16783,14 @@ } /* Paired message styling */ - .message.paired-message { + .message.pair_first { margin-bottom: 0; - } - - .message.paired-message.pair_first { border-bottom-left-radius: 0; border-bottom-right-radius: 
0; border-bottom: none; } - .message.paired-message.pair_last { + .message.pair_last { margin-top: 0; margin-bottom: 1em; border-top-left-radius: 0; @@ -16801,8 +16798,9 @@ border-top: 1px solid #00000011; } - .message.paired-message.pair_middle { + .message.pair_middle { margin-top: 0; + margin-bottom: 0; border-radius: 0; border-top: 1px solid #00000011; border-bottom: none; @@ -16835,7 +16833,7 @@ } /* Dimmed assistant when paired with thinking */ - .assistant.paired-message { + .assistant.pair_last { border-left-color: var(--assistant-dimmed); } @@ -17069,7 +17067,7 @@ } /* Full purple when thinking is paired (as pair_first) */ - .thinking.paired-message.pair_first { + .thinking.pair_first { border-left-color: var(--assistant-color); } @@ -17164,6 +17162,20 @@ font-family: var(--font-ui); } + .markdown table { + border-collapse: collapse; + } + + .markdown td { + padding: .5em; + border-top: 1px solid #bbb; + } + + .markdown th { + padding: .5em; + border-bottom: 1px solid #888; + } + /* Code block styling */ pre > code { display: block; @@ -19368,7 +19380,7 @@ -
    +
    📝 Edit /tmp/decorator_example.py
    @@ -19385,7 +19397,7 @@ -
    +
    @@ -19461,7 +19473,7 @@ -
    +
    🛠️ Bash Run the decorator example to show output
    @@ -19478,7 +19490,7 @@ -
    +
    diff --git a/test/test_ansi_colors.py b/test/test_ansi_colors.py index 99c7e60f..4a1da013 100644 --- a/test/test_ansi_colors.py +++ b/test/test_ansi_colors.py @@ -1,6 +1,6 @@ """Tests for ANSI color code conversion to HTML.""" -from claude_code_log.renderer import _convert_ansi_to_html +from claude_code_log.html.ansi_colors import convert_ansi_to_html class TestAnsiColorConversion: @@ -9,55 +9,55 @@ class TestAnsiColorConversion: def test_standard_colors(self): """Test standard ANSI color codes.""" text = "\x1b[31mRed text\x1b[0m" - result = _convert_ansi_to_html(text) + result = convert_ansi_to_html(text) assert 'Red text' in result text = "\x1b[32mGreen\x1b[0m and \x1b[34mBlue\x1b[0m" - result = _convert_ansi_to_html(text) + result = convert_ansi_to_html(text) assert 'Green' in result assert 'Blue' in result def test_bright_colors(self): """Test bright ANSI color codes.""" text = "\x1b[91mBright red\x1b[0m" - result = _convert_ansi_to_html(text) + result = convert_ansi_to_html(text) assert 'Bright red' in result def test_background_colors(self): """Test background color codes.""" text = "\x1b[41mRed background\x1b[0m" - result = _convert_ansi_to_html(text) + result = convert_ansi_to_html(text) assert 'Red background' in result def test_text_styles(self): """Test text style codes (bold, italic, etc).""" text = "\x1b[1mBold text\x1b[0m" - result = _convert_ansi_to_html(text) + result = convert_ansi_to_html(text) assert 'Bold text' in result text = "\x1b[3mItalic\x1b[0m and \x1b[4mUnderline\x1b[0m" - result = _convert_ansi_to_html(text) + result = convert_ansi_to_html(text) assert 'Italic' in result assert 'Underline' in result def test_rgb_colors(self): """Test RGB color codes.""" text = "\x1b[38;2;255;0;0mRGB Red\x1b[0m" - result = _convert_ansi_to_html(text) + result = convert_ansi_to_html(text) assert 'style="color: rgb(255, 0, 0)"' in result text = "\x1b[48;2;0;255;0mRGB Green Background\x1b[0m" - result = _convert_ansi_to_html(text) + result = 
convert_ansi_to_html(text) assert 'style="background-color: rgb(0, 255, 0)"' in result def test_combined_styles(self): """Test combinations of colors and styles.""" text = "\x1b[1;31mBold Red\x1b[0m" - result = _convert_ansi_to_html(text) + result = convert_ansi_to_html(text) assert 'class="ansi-red ansi-bold"' in result text = "\x1b[4;34;43mUnderlined Blue on Yellow\x1b[0m" - result = _convert_ansi_to_html(text) + result = convert_ansi_to_html(text) assert "ansi-blue" in result assert "ansi-bg-yellow" in result assert "ansi-underline" in result @@ -65,17 +65,17 @@ def test_combined_styles(self): def test_reset_codes(self): """Test various reset codes.""" text = "\x1b[31mRed\x1b[39m Normal" - result = _convert_ansi_to_html(text) + result = convert_ansi_to_html(text) assert 'Red Normal' in result text = "\x1b[1mBold\x1b[22m Normal" - result = _convert_ansi_to_html(text) + result = convert_ansi_to_html(text) assert 'Bold Normal' in result def test_html_escaping(self): """Test that HTML special characters are escaped.""" text = "\x1b[31m\x1b[0m" - result = _convert_ansi_to_html(text) + result = convert_ansi_to_html(text) assert "<script>" in result assert "</script>" in result assert " & \"quotes\"", - "status": "pending", - "priority": "high", - } - ] - }, + todo_input = TodoWriteInput( + todos=[ + TodoWriteItem( + id="1", + content="Fix & \"quotes\"", + status="pending", + priority="high", + ) + ] ) - html = format_todowrite_content(tool_use) + html = format_todowrite_content(todo_input) # Check that HTML is escaped assert "<script>" in html @@ -122,23 +109,18 @@ def test_format_todowrite_html_escaping(self): def test_format_todowrite_invalid_status_priority(self): """Test TodoWrite formatting with invalid status/priority values.""" - tool_use = ToolUseContent( - type="tool_use", - id="toolu_invalid", - name="TodoWrite", - input={ - "todos": [ - { - "id": "1", - "content": "Test invalid values", - "status": "unknown_status", - "priority": "unknown_priority", - } - ] 
- }, + todo_input = TodoWriteInput( + todos=[ + TodoWriteItem( + id="1", + content="Test invalid values", + status="unknown_status", + priority="unknown_priority", + ) + ] ) - html = format_todowrite_content(tool_use) + html = format_todowrite_content(todo_input) # Should use default emojis for unknown values assert "⏳" in html # default status emoji @@ -247,7 +229,7 @@ def test_todowrite_vs_regular_tool_use(self): ) # Test both through the main format function - from claude_code_log.renderer import format_tool_use_content + from claude_code_log.html import format_tool_use_content regular_html = format_tool_use_content(regular_tool) todowrite_html = format_tool_use_content(todowrite_tool) diff --git a/test/test_toggle_functionality.py b/test/test_toggle_functionality.py index 8c26199e..9ca59788 100644 --- a/test/test_toggle_functionality.py +++ b/test/test_toggle_functionality.py @@ -5,9 +5,9 @@ AssistantTranscriptEntry, AssistantMessage, UsageInfo, - parse_content_item, ) -from claude_code_log.renderer import generate_html +from claude_code_log.parser import parse_content_item +from claude_code_log.html.renderer import generate_html class TestToggleFunctionality: diff --git a/test/test_tool_result_image_rendering.py b/test/test_tool_result_image_rendering.py index 409c9a0d..961de78a 100644 --- a/test/test_tool_result_image_rendering.py +++ b/test/test_tool_result_image_rendering.py @@ -1,6 +1,6 @@ """Test image rendering within tool results.""" -from claude_code_log.renderer import format_tool_result_content +from claude_code_log.html import format_tool_result_content from claude_code_log.models import ToolResultContent diff --git a/test/test_user_renderer.py b/test/test_user_renderer.py new file mode 100644 index 00000000..ed4fb38d --- /dev/null +++ b/test/test_user_renderer.py @@ -0,0 +1,450 @@ +"""Tests for user message parsing and rendering. 
+ +Split into: +- Parsing tests: test parse_compacted_summary(), parse_user_memory() +- Content model tests: test parse_user_message_content() +- HTML rendering tests: test full pipeline from JSONL to HTML +""" + +import json +import tempfile +from pathlib import Path + + +from claude_code_log.converter import load_transcript +from claude_code_log.html.renderer import generate_html +from claude_code_log.html.user_formatters import ( + format_compacted_summary_content, + format_user_memory_content, + format_user_text_model_content, +) +from claude_code_log.models import ( + CompactedSummaryContent, + TextContent, + UserMemoryContent, + UserTextContent, +) +from claude_code_log.parser import ( + COMPACTED_SUMMARY_PREFIX, + parse_compacted_summary, + parse_user_memory, + parse_user_message_content, +) + + +# ============================================================================= +# Parsing Tests - parse_compacted_summary() +# ============================================================================= + + +class TestParseCompactedSummary: + """Tests for parse_compacted_summary() parser function (takes content list).""" + + def test_parse_compacted_summary_detected(self): + """Test that compacted summary is detected and content combined.""" + text = ( + f"{COMPACTED_SUMMARY_PREFIX}. The conversation is summarized below:\n" + "Summary content here." + ) + content_list = [TextContent(type="text", text=text)] + + result = parse_compacted_summary(content_list) + + assert result is not None + assert isinstance(result, CompactedSummaryContent) + assert result.summary_text == text + + def test_parse_compacted_summary_not_detected(self): + """Test that regular text is not detected as compacted summary.""" + text = "This is a regular user message." 
+ content_list = [TextContent(type="text", text=text)] + + result = parse_compacted_summary(content_list) + + assert result is None + + def test_parse_compacted_summary_empty_list(self): + """Test that empty content list returns None.""" + result = parse_compacted_summary([]) + assert result is None + + def test_parse_compacted_summary_combines_multiple_texts(self): + """Test that multiple text items are combined with double newlines.""" + first_text = f"{COMPACTED_SUMMARY_PREFIX}. Part 1." + second_text = "Part 2." + third_text = "Part 3." + content_list = [ + TextContent(type="text", text=first_text), + TextContent(type="text", text=second_text), + TextContent(type="text", text=third_text), + ] + + result = parse_compacted_summary(content_list) + + assert result is not None + expected = "\n\n".join([first_text, second_text, third_text]) + assert result.summary_text == expected + + +# ============================================================================= +# Parsing Tests - parse_user_memory() +# ============================================================================= + + +class TestParseUserMemory: + """Tests for parse_user_memory() parser function.""" + + def test_parse_user_memory_detected(self): + """Test that user memory input tag is detected correctly.""" + text = "Memory content from CLAUDE.md" + + result = parse_user_memory(text) + + assert result is not None + assert isinstance(result, UserMemoryContent) + assert result.memory_text == "Memory content from CLAUDE.md" + + def test_parse_user_memory_with_surrounding_text(self): + """Test memory tag extraction from mixed content.""" + text = "Some prefix The actual memory suffix" + + result = parse_user_memory(text) + + assert result is not None + assert result.memory_text == "The actual memory" + + def test_parse_user_memory_multiline(self): + """Test multiline memory content.""" + memory_content = "Line 1\nLine 2\nLine 3" + text = f"{memory_content}" + + result = parse_user_memory(text) + + 
assert result is not None + assert result.memory_text == memory_content + + def test_parse_user_memory_not_detected(self): + """Test that regular text without tag returns None.""" + text = "Regular text without memory tag." + + result = parse_user_memory(text) + + assert result is None + + def test_parse_user_memory_strips_whitespace(self): + """Test that memory content whitespace is stripped.""" + text = " \n Content with spaces \n " + + result = parse_user_memory(text) + + assert result is not None + assert result.memory_text == "Content with spaces" + + +# ============================================================================= +# Content Model Tests - parse_user_message_content() +# ============================================================================= + + +class TestParseUserMessageContentCompacted: + """Tests for parse_user_message_content() handling compacted summaries.""" + + def test_compacted_summary_single_text_item(self): + """Test compacted summary with single text content item.""" + text = f"{COMPACTED_SUMMARY_PREFIX}. The conversation summary." + content_list = [TextContent(type="text", text=text)] + + content_model = parse_user_message_content(content_list) + + assert content_model is not None + assert isinstance(content_model, CompactedSummaryContent) + assert content_model.summary_text == text + + def test_compacted_summary_multiple_text_items(self): + """Test compacted summary with multiple text content items combines all.""" + first_text = f"{COMPACTED_SUMMARY_PREFIX}. Summary part 1." + second_text = "Summary part 2." + third_text = "Summary part 3." 
+ content_list = [ + TextContent(type="text", text=first_text), + TextContent(type="text", text=second_text), + TextContent(type="text", text=third_text), + ] + + content_model = parse_user_message_content(content_list) + + assert content_model is not None + assert isinstance(content_model, CompactedSummaryContent) + # All text items should be combined with double newlines + expected = "\n\n".join([first_text, second_text, third_text]) + assert content_model.summary_text == expected + + +class TestParseUserMessageContentMemory: + """Tests for parse_user_message_content() handling user memory.""" + + def test_user_memory_detected(self): + """Test user memory content is detected and returned.""" + text = "CLAUDE.md content here" + content_list = [TextContent(type="text", text=text)] + + content_model = parse_user_message_content(content_list) + + assert content_model is not None + assert isinstance(content_model, UserMemoryContent) + assert content_model.memory_text == "CLAUDE.md content here" + + +class TestParseUserMessageContentRegular: + """Tests for parse_user_message_content() handling regular user text.""" + + def test_regular_text(self): + """Test regular user text without special markers.""" + text = "Hello, please help me with this code." 
+ content_list = [TextContent(type="text", text=text)] + + content_model = parse_user_message_content(content_list) + + assert content_model is not None + assert isinstance(content_model, UserTextContent) + assert content_model.text == text + assert content_model.ide_notifications is None + + def test_empty_content_list(self): + """Test empty content list returns None.""" + content_list = [] + + content_model = parse_user_message_content(content_list) + + assert content_model is None + + +# ============================================================================= +# Formatting Tests - format_compacted_summary_content() +# ============================================================================= + + +class TestFormatCompactedSummaryContent: + """Tests for format_compacted_summary_content() formatter function.""" + + def test_format_compacted_summary_basic(self): + """Test basic compacted summary formatting.""" + content = CompactedSummaryContent(summary_text="Summary:\n- Point 1\n- Point 2") + + html = format_compacted_summary_content(content) + + # Should render as markdown (not preformatted) + assert "
      " in html or "
    • " in html # Markdown list rendering + assert "Point 1" in html + assert "Point 2" in html + + def test_format_compacted_summary_collapsible(self): + """Test that long compacted summaries are collapsible.""" + # Create long content that exceeds threshold + long_summary = "Summary:\n" + "\n".join([f"- Point {i}" for i in range(50)]) + content = CompactedSummaryContent(summary_text=long_summary) + + html = format_compacted_summary_content(content) + + # Should be collapsible for long content + assert "" in html + assert "
    " in html + assert "CLAUDE.md content" in html + + def test_format_user_memory_escapes_html(self): + """Test that HTML characters are escaped.""" + content = UserMemoryContent(memory_text="") + + html = format_user_memory_content(content) + + assert "<script>" in html + assert "