diff --git a/.github/workflows/claude.yml b/.github/workflows/claude.yml index 345dbe12..5700ae16 100644 --- a/.github/workflows/claude.yml +++ b/.github/workflows/claude.yml @@ -136,5 +136,5 @@ jobs: claude_args: | --allowedTools "Bash,Edit,Replace,NotebookEditCell,MultiEdit,Write,Read,Grep,Glob,LS,WebSearch,WebFetch,Task,TodoWrite,BashOutput,KillBash" --max-turns 100 - --model claude-sonnet-4-20250514 + --model claude-sonnet-4-5-20250929 diff --git a/README.md b/README.md index eddcbc12..38770207 100644 --- a/README.md +++ b/README.md @@ -222,3 +222,4 @@ git update-index --assume-unchanged frontend/src-tauri/gen/apple/maple.xcodeproj # When you need to commit changes to this file, use: git update-index --no-assume-unchanged frontend/src-tauri/gen/apple/maple.xcodeproj/project.pbxproj ``` + diff --git a/docs/conversations-api-implementation.md b/docs/conversations-api-implementation.md new file mode 100644 index 00000000..24838940 --- /dev/null +++ b/docs/conversations-api-implementation.md @@ -0,0 +1,1489 @@ +# Conversations/Responses API Implementation Guide + +## Overview + +This document describes the implementation of OpenAI's Conversations/Responses API in Maple's UnifiedChat component, building on the foundation established in [unified-chat-refactor.md](./unified-chat-refactor.md). + +## Background + +The Conversations/Responses API provides server-side conversation state management, replacing our previous localStorage-based approach. 
This migration enables: + +- **Server-managed state**: Conversations persist across devices and sessions +- **Streaming responses**: Real-time AI responses via Server-Sent Events (SSE) +- **Stateless client**: No localStorage dependencies, pure server-driven state +- **Automatic context management**: Server handles conversation history and token limits + +## Key Differences from POC + +While the proof-of-concept in `responses-poc` demonstrates the API capabilities, our production implementation differs in several key ways: + +1. **Component Architecture**: All logic stays in UnifiedChat.tsx (no Context providers) +2. **Title Generation**: Backend generates titles automatically (no frontend generation) +3. **Tool Support**: No web search or tool calling in initial implementation +4. **State Management**: Direct state in component, no external state libraries + +## Technical Architecture + +### API Endpoints + +The OpenSecret backend provides OpenAI-compatible endpoints: + +- **Conversations API**: + - `POST /v1/conversations` - Create conversation + - `GET /v1/conversations/{id}` - Get conversation + - `PATCH /v1/conversations/{id}` - Update metadata + - `DELETE /v1/conversations/{id}` - Delete conversation + - `GET /v1/conversations/{id}/items` - List conversation items + - `GET /v1/conversations` - List all conversations (custom extension) + +- **Responses API**: + - `POST /v1/responses` - Create response (with streaming) + - `GET /v1/responses/{id}` - Get response status + - `DELETE /v1/responses/{id}` - Delete response + +### Client Setup + +The SDK provides a custom fetch wrapper that handles: +- JWT authentication +- Session key encryption/decryption +- Automatic token refresh +- SSE streaming decryption + +### Streaming Architecture + +Responses use Server-Sent Events with the following event types: +- `response.created` - Response initiated +- `response.in_progress` - Processing started +- `response.output_item.added` - New output item +- 
`response.content_part.added` - Content part added +- `response.output_text.delta` - Text chunk +- `response.output_text.done` - Text complete +- `response.content_part.done` - Part complete +- `response.output_item.done` - Item complete +- `response.completed` - Response finished + +## Implementation Plan + +### Phase 1: Infrastructure Setup ✅ COMPLETE +1. ✅ Update OpenAI package to v5.20.0 (matched with SDK version) +2. ✅ OpenAI client already configured with custom fetch via context +3. ✅ Types added for conversations and responses + +### Phase 2: Core Functionality ✅ COMPLETE +4. ✅ Implement conversation creation (lazy creation on first message) +5. ✅ Add message sending with streaming (using responses.create API) +6. ✅ Handle response streaming and display (all SSE events working) + +### Phase 3: State Management ✅ COMPLETE +7. ✅ Add polling for conversation updates (5-second interval with cursor-based pagination) +8. ✅ Implement conversation loading from URL (loads on mount and URL changes) +9. ✅ Handle conversation switching (new chat clears conversation state) +10. ✅ Message deduplication (by ID and content signature) +11. ✅ LastSeenItemId tracking (React state-based cursor, no localStorage) +12. ✅ Handle browser navigation (back/forward button support) + +### Phase 4: Integration ✅ COMPLETE +13. ✅ Update Sidebar to fetch from API (using OpenSecret SDK's listConversations) +14. ✅ Remove localStorage dependencies for chat data (all chat data now from API) +15. ✅ Add error handling and recovery (404 handling, network errors, streaming failures) +16. ✅ Fix redundant API calls (removed immediate polling after load) +17. 
✅ Improve conversation switching reliability (proper state tracking) + +## Completed Implementation Summary + +### What Was Built (Phases 1-4 Complete) + +The UnifiedChat component and Sidebar now have full Conversations/Responses API integration with the following features: + +#### ✅ Core Conversation Management +- **Lazy conversation creation** - Conversations only created on first message to avoid clutter +- **URL-based routing** - Uses query parameters (`?conversation_id=xxx`) for conversation state +- **Automatic conversation loading** - Loads existing conversation when URL contains conversation_id +- **Event-based communication** - Listens for `newchat` and `conversationselected` events from sidebar + +#### ✅ Streaming Implementation +- **Full SSE support** - Handles all streaming events from the responses API +- **Local to server ID mapping** - Smoothly transitions from local UUIDs to server-assigned IDs +- **Real-time text accumulation** - Shows text as it streams in +- **Status tracking** - Messages have `streaming`, `complete`, or `error` states +- **Abort controller** - Can cancel in-flight requests (foundation for future cancel button) + +#### ✅ Polling & Synchronization +- **5-second polling interval** - Checks for new messages every 5 seconds +- **Cursor-based pagination** - Uses `after` parameter with `lastSeenItemId` for efficient polling +- **React state-based cursor** - No localStorage/sessionStorage, pure component state +- **Message deduplication** - Prevents duplicates using both ID and content signature matching +- **Cross-device sync** - Enables conversation continuity across devices + +#### ✅ Error Handling +- **404 recovery** - Clears invalid conversation IDs and starts fresh +- **Network error display** - Shows user-friendly error messages +- **Silent polling failures** - Polling errors don't interrupt user experience +- **Streaming error handling** - Gracefully handles streaming failures + +#### ✅ State Management +- **No localStorage for 
chat data** - All conversation state comes from the API +- **Proper cleanup** - Clears state when switching conversations or starting new chats +- **TypeScript compliant** - Fully typed with proper error handling + +### What Still Needs Work (Phase 4) + +1. **Sidebar Integration** - Currently still uses localStorage, needs to fetch from API +2. **Legacy Chat Migration** - Strategy for handling old localStorage-based chats +3. **Advanced Features** - Model selection, token management, file attachments, etc. + +### Key Technical Decisions Made + +1. **Cursor-based Polling with React State** + - The `lastSeenItemId` is stored in component state, not localStorage + - Resets on page refresh (loads all messages fresh) + - Updates after each poll and after streaming completes + - Simple and effective for maintaining position during a session + +2. **Message ID Management** + - User messages get client-side UUIDs immediately + - Assistant messages start with local UUIDs, then swap to server IDs + - Deduplication handles the transition gracefully + - Prevents flicker and maintains smooth UX + +3. **Error Recovery Strategy** + - 404s clear the conversation and remove invalid ID from URL + - Network errors show user message but don't break the app + - Polling failures are silent (logged to console only) + - Streaming errors mark the message with error state + +4. **Event-Driven Architecture** + - Custom events for sidebar communication (`newchat`, `conversationselected`) + - Avoids prop drilling and complex state management + - Keeps components loosely coupled + +## Implementation Details + +### 1. 
Package Updates + +Update OpenAI SDK to support conversations API: + +```json +{ + "dependencies": { + "openai": "^5.20.0" // Updated from 4.56.1 to match SDK version exactly + } +} +``` + +The v5+ SDK includes full support for: +- Conversations API (create, retrieve, update, delete, list items) +- Responses API (create with streaming, retrieve, delete) +- Proper TypeScript types for all new endpoints +- Streaming iterator support for SSE events + +### 2. OpenAI Client Configuration + +Create client with OpenSecret's custom fetch from the SDK: + +```typescript +import { createCustomFetch } from "@opensecret/react"; +import OpenAI from "openai"; + +// Get API URL from environment +const API_URL = import.meta.env.VITE_OPEN_SECRET_API_URL || "https://api.opensecret.cloud"; + +// Create OpenAI client with custom fetch +const openai = new OpenAI({ + baseURL: `${API_URL}/v1/`, + dangerouslyAllowBrowser: true, // Required for browser usage + apiKey: "not-needed", // Auth handled by custom fetch + defaultHeaders: { + "Accept-Encoding": "identity" // Disable compression for SSE + }, + fetch: createCustomFetch() // SDK's custom fetch handles auth & encryption +}); +``` + +The custom fetch from `@opensecret/react` handles: +- JWT token injection from localStorage +- Automatic token refresh on 401 +- Session key encryption/decryption for E2E encryption +- Proper error handling with retry logic + +### 3. 
Complete Type Definitions + +Based on the actual API responses and SDK implementation: + +```typescript +// Core conversation types +interface Conversation { + id: string; + object: "conversation"; + created_at: number; // Unix timestamp + metadata?: { + title?: string; // Auto-generated by backend + [key: string]: any; + }; +} + +// Conversation item types +interface ConversationItem { + id: string; + type: "message" | "web_search_call"; // Extensible for tools + object?: string; + role?: "user" | "assistant" | "system"; + status?: "completed" | "in_progress"; + content?: Array<{ + type: "text" | "input_text"; + text?: string; + }>; + created_at?: number; +} + +// Message type for UI rendering +interface Message { + id: string; + role: "user" | "assistant" | "system"; + content: string; + timestamp: number; + status: "complete" | "streaming" | "error"; + isStreaming?: boolean; +} + +// Response streaming event types +interface ResponseEvent { + type: + | "response.created" + | "response.in_progress" + | "response.output_item.added" + | "response.content_part.added" + | "response.output_text.delta" + | "response.output_text.done" + | "response.content_part.done" + | "response.output_item.done" + | "response.completed" + | "response.failed" + | "error"; + + // Event-specific fields + sequence_number?: number; + delta?: string; // For text deltas + item_id?: string; // ID of the item being streamed + item?: any; // Full item for added events + response?: { // Full response object for created/completed + id: string; + status: "in_progress" | "completed" | "failed"; + model?: string; + usage?: { + input_tokens: number; + output_tokens: number; + total_tokens: number; + }; + output?: Array; + }; + error?: { + message: string; + type: string; + code?: string; + }; +} + +// API list response format +interface ConversationListResponse { + object: "list"; + data: Conversation[]; + first_id?: string; + last_id?: string; + has_more: boolean; +} + +// Conversation items cursor 
pagination +interface ConversationItemsPage { + data: ConversationItem[]; + has_more: boolean; + first_id?: string; + last_id?: string; +} +``` + +### 4. Complete Conversation Lifecycle Management + +Managing conversation lifecycle is critical for a seamless user experience. Here's the complete implementation: + +```typescript +// Full conversation lifecycle implementation +const ConversationManager = () => { + const [conversation, setConversation] = useState(null); + const [messages, setMessages] = useState([]); + const [lastSeenItemId, setLastSeenItemId] = useState(); + + // 1. CREATE NEW CONVERSATION + const createConversation = async () => { + try { + // Don't pre-create conversations - wait for first message + // This avoids empty conversations cluttering the list + setConversation(null); + setMessages([]); + setLastSeenItemId(undefined); + + // Clear URL parameter for new chat + const params = new URLSearchParams(window.location.search); + params.delete("conversation_id"); + const newUrl = params.toString() ? `/?${params}` : "/"; + window.history.replaceState({}, "", newUrl); + } catch (error) { + handleAPIError(error, "Create conversation"); + } + }; + + // 2. LAZY CONVERSATION CREATION ON FIRST MESSAGE + const ensureConversation = async (): Promise => { + if (conversation?.id) { + return conversation.id; + } + + // Create conversation on demand + const newConv = await openai.conversations.create({ + metadata: { + // Backend will auto-generate title from first message + // No need to set title here + } + }); + + setConversation({ + id: newConv.id, + object: "conversation", + created_at: newConv.created_at, + metadata: newConv.metadata + }); + + // Update URL with new conversation ID + const params = new URLSearchParams(window.location.search); + params.set("conversation_id", newConv.id); + window.history.replaceState({}, "", `/?${params}`); + + return newConv.id; + }; + + // 3. 
LOAD EXISTING CONVERSATION + const loadConversation = async (conversationId: string) => { + try { + // Fetch conversation metadata + const conv = await openai.conversations.retrieve(conversationId); + setConversation(conv); + + // Fetch all conversation items + const itemsResponse = await openai.conversations.items.list(conversationId, { + limit: 100 // Get up to 100 most recent items + }); + + // Convert items to messages + const loadedMessages: Message[] = []; + + for (const item of itemsResponse.data) { + if (item.type === "message" && item.role && item.content) { + let text = ""; + if (Array.isArray(item.content)) { + for (const part of item.content) { + if (part.type === "text" || part.type === "input_text") { + text += part.text || ""; + } + } + } else if (typeof item.content === "string") { + text = item.content; + } + + loadedMessages.push({ + id: item.id, + role: item.role as "user" | "assistant", + content: text, + timestamp: item.created_at ? item.created_at * 1000 : Date.now(), + status: "complete" + }); + } + } + + setMessages(loadedMessages); + + // Set last seen ID for polling + if (itemsResponse.data.length > 0) { + const lastItem = itemsResponse.data[itemsResponse.data.length - 1]; + setLastSeenItemId(lastItem.id); + } + + // Update URL if needed + const params = new URLSearchParams(window.location.search); + if (params.get("conversation_id") !== conversationId) { + params.set("conversation_id", conversationId); + window.history.replaceState({}, "", `/?${params}`); + } + } catch (error: any) { + if (error.status === 404) { + // Conversation doesn't exist - clear and start fresh + console.log("Conversation not found, starting new"); + createConversation(); + } else { + handleAPIError(error, "Load conversation"); + } + } + }; + + // 4. 
DELETE CONVERSATION + const deleteConversation = async (conversationId: string) => { + try { + await openai.conversations.delete(conversationId); + + // If deleting current conversation, start fresh + if (conversation?.id === conversationId) { + createConversation(); + } + } catch (error) { + handleAPIError(error, "Delete conversation"); + } + }; + + // 5. HANDLE URL CHANGES (on mount and popstate) + useEffect(() => { + const handleUrlChange = () => { + const params = new URLSearchParams(window.location.search); + const conversationId = params.get("conversation_id"); + + if (conversationId && conversationId !== conversation?.id) { + // Load the conversation from URL + loadConversation(conversationId); + } else if (!conversationId && conversation?.id) { + // URL cleared - start new conversation + createConversation(); + } + }; + + // Initial load + handleUrlChange(); + + // Listen for browser back/forward + window.addEventListener("popstate", handleUrlChange); + return () => window.removeEventListener("popstate", handleUrlChange); + }, []); + + // 6. HANDLE NEW CHAT EVENT FROM SIDEBAR + useEffect(() => { + const handleNewChat = () => { + createConversation(); + }; + + window.addEventListener("newchat", handleNewChat); + return () => window.removeEventListener("newchat", handleNewChat); + }, []); + + return { + conversation, + messages, + ensureConversation, + loadConversation, + deleteConversation, + createConversation + }; +}; +``` + +#### Conversation State Transitions + +```mermaid +graph TD + A[No Conversation] -->|User types message| B[Create Conversation] + B --> C[Conversation Active] + C -->|User sends message| D[Add to Conversation] + C -->|User clicks New Chat| A + C -->|User selects different chat| E[Load Conversation] + E --> C + C -->|User deletes chat| A + C -->|Page refresh| F[Reload from URL] + F --> C +``` + +#### Important Lifecycle Considerations + +1. **Lazy Creation**: Don't create conversations until first message +2. 
**URL Sync**: Always keep URL in sync with active conversation +3. **Browser Navigation**: Handle back/forward buttons properly +4. **Cross-Device**: Polling ensures continuity across devices +5. **Error Recovery**: Handle missing conversations gracefully +6. **Title Generation**: Backend generates titles automatically from first message + +### 5. Detailed Streaming Event Handling + +The responses API uses Server-Sent Events (SSE) for streaming. Here's the complete event flow and handling: + +```typescript +// Complete streaming implementation based on POC +const sendMessage = async (userInput: string, conversationId: string) => { + // Create abort controller for cancellation + const abortController = new AbortController(); + + try { + // Create streaming response + const stream = await openai.responses.create({ + model: "ibnzterrell/Meta-Llama-3.3-70B-Instruct-AWQ-INT4", // Or user's selected model + conversation: conversationId, + input: [{ role: "user", content: userInput }], + stream: true, // Enable streaming + store: true, // Store in conversation history + background: true, // Continue processing in background + signal: abortController.signal + }); + + // Initialize assistant message + // NOTE: The POC used local UUID -> server ID swap pattern + // This is complicated and error-prone (see alternatives below) + const localAssistantId = crypto.randomUUID(); + let serverItemId: string | undefined; + let accumulatedContent = ""; + + const assistantMessage: Message = { + id: localAssistantId, + role: "assistant", + content: "", + timestamp: Date.now(), + status: "streaming" + }; + + // Add to messages immediately + setMessages(prev => [...prev, assistantMessage]); + + // Process streaming events + for await (const event of stream) { + switch (event.type) { + case "response.created": + // Response object created, contains initial metadata + console.log("Response started:", event.response?.id); + break; + + case "response.in_progress": + // Processing has begun + 
break; + + case "response.output_item.added": + // New output item added (message or tool call) + if (event.item?.type === "message" && event.item_id) { + serverItemId = event.item_id; + // PROBLEMATIC: Update local message ID to server ID + // This causes re-renders and complicates message tracking + setMessages(prev => prev.map(msg => + msg.id === localAssistantId + ? { ...msg, id: serverItemId || msg.id } + : msg + )); + } + break; + + case "response.content_part.added": + // Content part initialized (before text starts) + break; + + case "response.output_text.delta": + // Text chunk received - this is the main event for streaming text + if (event.delta) { + accumulatedContent += event.delta; + // Update message content in real-time + setMessages(prev => prev.map(msg => + msg.id === (serverItemId || localAssistantId) + ? { ...msg, content: accumulatedContent } + : msg + )); + } + break; + + case "response.output_text.done": + // Text streaming complete for this part + break; + + case "response.content_part.done": + // Content part finished + break; + + case "response.output_item.done": + // Output item complete + if (event.item?.type === "message") { + // Finalize the message + setMessages(prev => prev.map(msg => + msg.id === (serverItemId || localAssistantId) + ? { ...msg, status: "complete" } + : msg + )); + } + break; + + case "response.completed": + // Entire response complete, includes usage stats + if (event.response?.usage) { + console.log("Token usage:", event.response.usage); + } + setIsGenerating(false); + // Update last seen item ID for polling + if (serverItemId) { + setLastSeenItemId(serverItemId); + } + break; + + case "response.failed": + case "error": + // Handle streaming errors + console.error("Streaming error:", event.error); + setMessages(prev => prev.map(msg => + msg.id === (serverItemId || localAssistantId) + ? 
{ ...msg, status: "error" } + : msg + )); + setIsGenerating(false); + break; + } + } + } catch (error) { + if (error.name !== 'AbortError') { + console.error("Failed to send message:", error); + throw error; + } + } +}; + +// Cancel generation function +const cancelGeneration = () => { + abortController?.abort(); + setIsGenerating(false); +}; +``` + +#### Event Sequence Details + +The typical event sequence for a streaming response: + +1. **response.created** - Initial response object with ID and metadata +2. **response.in_progress** - Processing begins +3. **response.output_item.added** - Message item added to response +4. **response.content_part.added** - Text content part initialized +5. **response.output_text.delta** - Multiple events with text chunks +6. **response.output_text.done** - Text generation complete +7. **response.content_part.done** - Content part finalized +8. **response.output_item.done** - Message item complete +9. **response.completed** - Full response done with usage stats + +#### Important Streaming Considerations + +- **Server Item IDs**: Messages get server-assigned IDs during streaming +- **Local vs Server IDs**: Start with local UUID, replace with server ID when available +- **Abort Handling**: Use AbortController for clean cancellation +- **Error Recovery**: Handle network interruptions gracefully +- **Token Usage**: Track usage from completed event for billing + +#### Problems with Local ID → Server ID Pattern + +The POC's approach of swapping IDs has several issues: + +1. **Complex State Updates**: Requires finding and updating messages by temporary ID +2. **Deduplication Issues**: Polling might see same message with different IDs +3. **React Re-rendering**: Changing keys causes unnecessary re-renders +4. 
**Race Conditions**: Polling might return before ID swap completes + +#### Alternative Approaches to Consider + +**Option 1: Use Array Index Instead of ID** +```typescript +// Track by position, not ID +const assistantMessageIndex = messages.length; +setMessages(prev => [...prev, assistantMessage]); +// Update by index, not ID +setMessages(prev => prev.map((msg, idx) => + idx === assistantMessageIndex ? {...msg, content: newContent} : msg +)); +``` + +**Option 2: Compound Key** +```typescript +// Use both IDs as a compound key +const messageKey = `${localId}_${serverId || 'pending'}`; +``` + +**Option 3: Separate Streaming Message State** +```typescript +const [streamingMessage, setStreamingMessage] = useState(null); +const [persistedMessages, setPersistedMessages] = useState([]); +// Only add to persisted when complete with server ID +``` + +**Option 4: Use Response ID from First Event** +```typescript +// response.created event includes response.id immediately +// Could use this instead of waiting for item_id +``` + +**Recommendation**: Further investigation needed to determine the cleanest approach. The array index method might be simplest but needs testing with the polling mechanism. + +### 6. Comprehensive Polling Mechanism + +The polling system ensures conversation continuity across sessions and devices. 
This is critical for: +- **Mid-stream refreshes**: User refreshes while AI is responding +- **Cross-device sync**: Continue conversation from another device +- **Background updates**: Catch responses that completed after network issues +- **Missed events**: Recover from temporary disconnections + +```typescript +// Complete polling implementation based on POC +const pollForNewItems = useCallback(async () => { + if (!conversationId || !openai) return; + + try { + // Fetch items after the last seen ID + const response = await openai.conversations.items.list(conversationId, { + after: lastSeenItemId, + limit: 100 // Get up to 100 new items + }); + + if (response.data.length > 0) { + // Convert API items to UI messages + const newMessages: Message[] = []; + + for (const item of response.data) { + if (item.type === "message" && item.role && item.content) { + // Extract text content from content array + let text = ""; + if (Array.isArray(item.content)) { + for (const part of item.content) { + if (typeof part === "object" && part.text) { + text += part.text; + } + } + } + + const message: Message = { + id: item.id, + role: item.role as "user" | "assistant", + content: text, + timestamp: item.created_at ? 
item.created_at * 1000 : Date.now(), + status: "complete" + }; + + newMessages.push(message); + } + } + + if (newMessages.length > 0) { + // Merge new messages with deduplication + setMessages(prev => { + const existingIds = new Set(prev.map(m => m.id)); + const existingSignatures = new Set( + prev.map(m => `${m.role}:${m.content.substring(0, 100)}`) + ); + + const uniqueNewMessages = newMessages.filter(m => { + // Skip if we already have this ID + if (existingIds.has(m.id)) return false; + + // Skip if we have a message with same role and similar content + const signature = `${m.role}:${m.content.substring(0, 100)}`; + if (existingSignatures.has(signature)) return false; + + return true; + }); + + if (uniqueNewMessages.length === 0) return prev; + + // Replace local messages with server versions when they match + const updatedMessages = prev.map(msg => { + // If this is a local message (UUID format) + if (msg.id.includes("-") && msg.id.length === 36) { + const serverVersion = uniqueNewMessages.find( + newMsg => newMsg.role === msg.role && newMsg.content === msg.content + ); + if (serverVersion) { + // Remove from unique list to avoid duplication + uniqueNewMessages.splice(uniqueNewMessages.indexOf(serverVersion), 1); + return { ...msg, id: serverVersion.id }; + } + } + return msg; + }); + + return [...updatedMessages, ...uniqueNewMessages]; + }); + + // Update last seen item ID + const lastItem = response.data[response.data.length - 1]; + if (lastItem?.id) { + setLastSeenItemId(lastItem.id); + } + + // Check if we're no longer generating + if (isGenerating && newMessages.some(m => m.role === "assistant")) { + setIsGenerating(false); + } + } + } + } catch (error) { + console.error("Polling error:", error); + // Don't throw - polling should fail silently + } +}, [conversationId, lastSeenItemId, isGenerating, openai]); + +// Set up polling interval +useEffect(() => { + if (!conversationId || !openai) return; + + // Poll immediately on mount/change + 
pollForNewItems(); + + // Then set up interval for every 5 seconds + const intervalId = setInterval(pollForNewItems, 5000); + + return () => clearInterval(intervalId); +}, [conversationId, openai, pollForNewItems]); +``` + +#### Polling Strategy Details + +1. **Cursor-based Pagination**: Uses `after` parameter with last seen item ID +2. **Immediate Poll**: Polls immediately when conversation loads or changes +3. **5-Second Interval**: Balances freshness with server load +4. **Silent Failures**: Polling errors don't interrupt user experience +5. **Automatic Stop**: Clears interval on unmount or conversation change + +#### Message Deduplication + +The polling system includes sophisticated deduplication: + +```typescript +// Deduplication strategy +const deduplicateMessages = (existing: Message[], incoming: Message[]) => { + // 1. Check by ID (server-assigned IDs) + const existingIds = new Set(existing.map(m => m.id)); + + // 2. Check by content signature (for local messages not yet synced) + const existingSignatures = new Set( + existing.map(m => `${m.role}:${m.content.substring(0, 100)}`) + ); + + // 3. Filter incoming messages + const unique = incoming.filter(m => { + if (existingIds.has(m.id)) return false; + + const signature = `${m.role}:${m.content.substring(0, 100)}`; + if (existingSignatures.has(signature)) return false; + + return true; + }); + + // 4. Replace local IDs with server IDs when messages match + const updated = existing.map(msg => { + if (msg.id.includes("-")) { // Local UUID + const serverMatch = unique.find( + u => u.role === msg.role && u.content === msg.content + ); + if (serverMatch) { + unique.splice(unique.indexOf(serverMatch), 1); + return { ...msg, id: serverMatch.id }; + } + } + return msg; + }); + + return [...updated, ...unique]; +}; +``` + +### 7. 
Simple Error Handling + +Keep error handling straightforward - just show what went wrong: + +```typescript +// Basic error handling +const handleAPIError = (error: any) => { + console.error("API Error:", error); + + if (error.status === 401) { + // Auth failed - let the SDK handle refresh + // If we still get 401, redirect to login + window.location.href = "/login"; + return; + } + + if (error.status === 404) { + // Conversation not found - clear and start fresh + setError("Conversation not found"); + const params = new URLSearchParams(window.location.search); + params.delete("conversation_id"); + window.history.replaceState({}, "", "/"); + return; + } + + if (error.name === "AbortError") { + // User cancelled - don't show error + return; + } + + // Show generic error for everything else + setError(error.message || "Something went wrong. Please try again."); + setIsGenerating(false); +}; + +// Simple usage +const sendMessage = async (input: string) => { + try { + // ... send message code ... + } catch (error) { + handleAPIError(error); + } +}; +``` + +#### Error Display in UI + +```typescript +// Simple error state +const [error, setError] = useState(null); + +// Show error banner +{error && ( +
+  <div className="error-banner">
+    {error}
+  </div>
+)} + +// Auto-clear after 10 seconds +useEffect(() => { + if (error) { + const timer = setTimeout(() => setError(null), 10000); + return () => clearTimeout(timer); + } +}, [error]); +``` + +**Note**: No retry logic or exponential backoff for now. Keep it simple - if something fails, show an error and let the user try again manually. + +### 8. State Structure + +Maintain minimal, server-driven state: + +```typescript +const [conversation, setConversation] = useState(null); +const [messages, setMessages] = useState([]); +const [isStreaming, setIsStreaming] = useState(false); +const [lastSeenItemId, setLastSeenItemId] = useState(); +``` + +### 9. Sidebar Integration Updates + +The Sidebar component needs significant updates to work with the conversations API instead of localStorage: + +```typescript +// Current sidebar uses localStorage +const currentImplementation = { + storage: "localStorage", + key: "maple_chats", + format: "JSON array of chat objects" +}; + +// New implementation with conversations API +const SidebarWithConversationsAPI = () => { + const [conversations, setConversations] = useState([]); + const [isLoading, setIsLoading] = useState(true); + + // Fetch conversations from API + const loadConversations = async () => { + try { + setIsLoading(true); + + // Use custom list function from SDK (non-standard OpenAI extension) + const response = await fetch(`${API_URL}/v1/conversations?limit=50`, { + headers: { + Authorization: `Bearer ${localStorage.getItem("access_token")}`, + "Content-Type": "application/json" + } + }); + + if (!response.ok) throw new Error("Failed to fetch conversations"); + + const data: ConversationListResponse = await response.json(); + + // Sort by created_at descending (newest first) + const sorted = data.data.sort((a, b) => b.created_at - a.created_at); + setConversations(sorted); + } catch (error) { + console.error("Failed to load conversations:", error); + setConversations([]); + } finally { + setIsLoading(false); + } + }; + + // 
Load on mount + useEffect(() => { + loadConversations(); + }, []); + + // Refresh when conversation changes + useEffect(() => { + const handleConversationUpdate = () => { + loadConversations(); + }; + + window.addEventListener("conversationupdated", handleConversationUpdate); + return () => window.removeEventListener("conversationupdated", handleConversationUpdate); + }, []); + + // Handle conversation selection + const selectConversation = (conversationId: string) => { + // Update URL to load conversation + const params = new URLSearchParams(window.location.search); + params.set("conversation_id", conversationId); + window.history.replaceState({}, "", `/?${params}`); + + // Dispatch event for UnifiedChat to handle + window.dispatchEvent(new CustomEvent("conversationselected", { + detail: { conversationId } + })); + }; + + // Handle new chat + const createNewChat = () => { + // Clear URL parameter + const params = new URLSearchParams(window.location.search); + params.delete("conversation_id"); + window.history.replaceState({}, "", params.toString() ? `/?${params}` : "/"); + + // Dispatch event for UnifiedChat + window.dispatchEvent(new Event("newchat")); + }; + + // Handle delete + const deleteConversation = async (conversationId: string, e: React.MouseEvent) => { + e.stopPropagation(); // Don't select the conversation + + try { + await openai.conversations.delete(conversationId); + + // Remove from local state + setConversations(prev => prev.filter(c => c.id !== conversationId)); + + // If deleting active conversation, start new + const currentId = new URLSearchParams(window.location.search).get("conversation_id"); + if (currentId === conversationId) { + createNewChat(); + } + } catch (error) { + console.error("Failed to delete conversation:", error); + } + }; + + return ( +
+    <div className="sidebar">
+      <button onClick={createNewChat}>New Chat</button>
+
+      {isLoading ? (
+        <div className="sidebar-loading">Loading…</div>
+      ) : (
+        <div className="conversation-list">
+          {conversations.map(conv => (
+            <div
+              key={conv.id}
+              onClick={() => selectConversation(conv.id)}
+              className="conversation-item"
+            >
+              <div className="conversation-title">
+                {conv.metadata?.title || "Untitled Chat"}
+              </div>
+              <div className="conversation-date">
+                {new Date(conv.created_at * 1000).toLocaleDateString()}
+              </div>
+              <button onClick={(e) => deleteConversation(conv.id, e)}>
+                Delete
+              </button>
+            </div>
+          ))}
+        </div>
+      )}
+    </div>
+ ); +}; +``` + +#### Key Sidebar Changes + +1. **Remove localStorage**: No more `localStorage.getItem("maple_chats")` +2. **API Fetching**: Load conversations from `/v1/conversations` endpoint +3. **Real-time Updates**: Refresh list when conversations change +4. **Event-based Communication**: Use custom events to communicate with UnifiedChat +5. **Server-side Deletion**: Delete through API, not just local state +6. **Title Display**: Use `metadata.title` from backend-generated titles + +#### Staged Implementation Plan + +**Stage 1: Basic API Integration (No Cache)** +- Fetch 50 most recent conversations +- Simple list display +- Fetch on every sidebar open + +**Stage 2: Add Pagination** +```typescript +const [hasMore, setHasMore] = useState(true); +const [cursor, setCursor] = useState(); + +const loadMore = async () => { + const response = await fetch( + `${API_URL}/v1/conversations?limit=20&after=${cursor}` + ); + // Append to existing list +}; +``` + +**Stage 3: Add Caching** +```typescript +// Simple in-memory cache +const conversationCache = useRef<{ + data: Conversation[]; + timestamp: number; +}>({ data: [], timestamp: 0 }); + +// Use cache if fresh (< 30 seconds old) +if (Date.now() - cache.timestamp < 30000) { + return cache.data; +} +``` + +**Stage 4: Archived Legacy Chats** +```typescript +// Sidebar layout: +// [New Chat Button] +// [Active Conversations - paginated] +// ... scrollable area ... +// [Archived Chats] <- Click to show old localStorage chats +// [Account/Plan/Credits] + +const ArchivedSection = () => { + const [showArchived, setShowArchived] = useState(false); + const legacyChats = localStorage.getItem("maple_chats"); + + if (!legacyChats) return null; + + return ( +
+    <div className="archived-section">
+      <button onClick={() => setShowArchived(!showArchived)}>
+        Archived Chats
+      </button>
+      {showArchived && (
+        <div className="archived-list">
+          {/* Show old chats in read-only mode */}
+        </div>
+      )}
+    </div>
+ ); +}; +``` + +**Note**: Keep archived chats separate from new conversations to avoid mixing different data structures and breaking pagination. + +### 10. Migration from localStorage + +Complete removal of localStorage for chat data: + +```typescript +// REMOVE these localStorage operations: +localStorage.getItem("maple_chats") +localStorage.setItem("maple_chats", JSON.stringify(chats)) +localStorage.removeItem(`chat_${chatId}`) + +// KEEP these for auth (managed by SDK): +localStorage.getItem("access_token") +localStorage.getItem("refresh_token") +localStorage.getItem("user_id") +``` + +#### Handling Legacy Chats + +**Important**: Old chats are stored in KV store, not just localStorage. + +```typescript +// Legacy chat loading (one-time on sidebar mount) +const loadLegacyChats = async () => { + // Load ONCE from KV store on initial sidebar mount + const oldChats = await fetchLegacyChatsFromKV(); + + // Display in archived section (read-only) + setArchivedChats(oldChats); + + // Never write back to KV store + // Never update these chats + // They're frozen in time +}; +``` + +**Key Points**: +- Old chats exist in KV store (backend) +- Load them ONCE when sidebar first opens +- Never cache them in localStorage again +- Never write to them again +- Read-only archive for reference only +- Eventually can be removed entirely once users are comfortable with new system + +## Testing Strategy + +1. **Conversation Creation**: Verify new conversations are created properly +2. **Streaming**: Test response streaming and text accumulation +3. **Polling**: Verify updates are detected and merged correctly +4. **URL Management**: Ensure conversation IDs persist in URL +5. **Cross-Device**: Test conversation continuation across devices +6. **Error Recovery**: Verify graceful handling of network issues + +## Performance Considerations + +1. **No Caching**: As per requirements, no client-side caching +2. **Polling Frequency**: 5-second interval balances freshness vs load +3. 
**Stream Processing**: Process events efficiently without blocking UI +4. **Message Rendering**: Use React keys properly for smooth updates + +## Security Notes + +- All encryption handled by SDK's custom fetch +- JWT authentication automatic +- No sensitive data in localStorage +- Server validates all conversation access + +## References and Sources + +This implementation guide was developed by analyzing multiple sources across the OpenSecret ecosystem. Here are all the key references that provided the knowledge for this implementation: + +### 1. OpenSecret Backend Documentation +- **File**: `/Users/tony/Dev/OpenSecret/opensecret/docs/responses-implementation.md` +- **Content**: Detailed backend implementation of conversations/responses API +- **Key Insights**: + - Backend auto-generates titles from first message + - Supports SSE streaming with encryption + - Implements OpenAI-compatible endpoints + +### 2. OpenSecret SDK Implementation +- **Directory**: `/Users/tony/Dev/OpenSecret/OpenSecret-SDK/` +- **Key Files**: + - `src/lib/api.ts` - Core API functions including conversations/responses endpoints + - `src/lib/ai.ts` - Custom fetch wrapper with encryption support + - `src/lib/test/integration/ai.test.ts` - Comprehensive integration tests showing API usage + - `package.json` - Shows OpenAI v5.20.0+ dependency requirement +- **Key Insights**: + - Custom fetch handles JWT auth and encryption automatically + - Includes conversation CRUD operations + - Supports both streaming and non-streaming responses + - Has pagination support for conversation items + +### 3. 
Proof of Concept Implementation +- **Directory**: `/Users/tony/Dev/Personal/responses-poc/` +- **Key Files**: + - `frontend/src/contexts/ConversationContext.tsx` - Complete conversation management implementation + - `frontend/src/hooks/useConversation.ts` - React hook for conversation state + - `frontend/src/lib/openai-client.ts` - OpenAI client configuration + - `frontend/src/lib/streaming.ts` - SSE stream processing + - `frontend/src/components/ChatInterface.tsx` - UI implementation +- **Key Insights**: + - Polling mechanism for cross-device sync + - Message deduplication strategy + - Streaming event handling patterns + - Local vs server ID management + +### 4. Current Maple Project Structure +- **Directory**: `/Users/tony/Dev/OpenSecret/maple/` +- **Key Files**: + - `frontend/src/components/UnifiedChat.tsx` - Current monolithic chat component + - `frontend/src/components/Sidebar.tsx` - Existing sidebar using localStorage + - `docs/unified-chat-refactor.md` - Original refactor documentation + - `frontend/package.json` - Shows current dependencies (OpenAI v4.56.1) +- **Key Insights**: + - Monolithic component design philosophy + - Query parameter-based routing (no navigation) + - Event-based communication between components + +### 5. Integration Test Insights + +From the SDK integration tests (`ai.test.ts`), we learned: + +1. **Conversation Creation Pattern**: + ```typescript + const conversation = await openai.conversations.create({ + metadata: { title: "Test" } + }); + ``` + +2. **Streaming Response Pattern**: + ```typescript + const stream = await openai.responses.create({ + model: "model-name", + conversation: conversationId, + input: "user input", + stream: true, + store: true + }); + ``` + +3. **Event Types Sequence**: + - response.created + - response.in_progress + - response.output_item.added + - response.output_text.delta (multiple) + - response.completed + +4. 
**Pagination Pattern**: + ```typescript + const items = await openai.conversations.items.list(conversationId, { + after: lastSeenItemId, + limit: 100 + }); + ``` + +### 6. API Endpoint Analysis + +From the SDK and backend documentation: + +- **Base URL**: `https://api.opensecret.cloud/v1/` +- **Authentication**: JWT Bearer token in Authorization header +- **Encryption**: Handled transparently by SDK's custom fetch +- **Non-standard Extensions**: + - `GET /v1/conversations` - List conversations (not in standard OpenAI API) + - Background processing support via `background: true` parameter + +### 7. Key Design Decisions from Analysis + +1. **No Context Providers**: Keep everything in UnifiedChat (from refactor doc) +2. **No Caching**: Pure server-driven state (from requirements) +3. **Polling Required**: For cross-device sync and refresh recovery (from POC) +4. **Lazy Conversation Creation**: Create on first message only (from POC) +5. **Backend Title Generation**: No frontend title generation needed (from backend doc) + +### 8. Implementation Patterns Discovered + +1. **Message Deduplication**: Check by ID and content signature +2. **Error Recovery**: Exponential backoff for transient failures +3. **Stream Handling**: Process events in switch statement +4. **URL Management**: Use replaceState to avoid navigation +5. **Event Communication**: Custom events between Sidebar and UnifiedChat + +## Features to Migrate from Old Implementation + +After analyzing the old codebase (`index.backup.tsx`, `_auth.chat.$chatId.tsx`, `ChatBox.tsx`), here are the feature statuses: + +### ✅ Completed Features + +1. **Model Selection** ✅ + - ModelSelector component integrated + - Billing tier restrictions working + - Model switching mid-conversation supported + +2. **Token Management** ✅ + - HANDLED BY BACKEND - Intelligent compression on server-side + - No frontend token counting needed + - Backend automatically manages context limits + +3. 
**Voice Input** ✅ + - Audio recording with RecordRTC + - Whisper transcription working + - Recording overlay UI implemented + +4. **File Attachments** ✅ + - Document upload (.pdf, .txt, .md) + - Image attachments for multimodal + - Tauri integration for PDF parsing + +5. **Markdown Rendering** ✅ + - Code syntax highlighting + - LaTeX math rendering + - Copy code blocks + - Thinking tags stripping + +6. **Streaming Indicators** ✅ + - Visual feedback during generation + - Different implementation but working well + +### ❌ Features Still Needed + +1. **Text-to-Speech (TTS)** - POSTPONED + - API currently broken, will implement when fixed + - Play button on assistant messages + - AudioManager for playback control + - Pro/Team/Max tier requirement + +2. **Scroll Improvements** + - Scroll-to-bottom button when scrolled up + - Better auto-scroll logic matching old behavior: + - Auto-scroll on user message send + - Auto-scroll when streaming starts + - Maintain position when not at bottom + - Location: `_auth.chat.$chatId.tsx:530-607` + +3. 
**System Prompts** + - Coming via new API + - Will need collapsible UI when implemented + +### ✅ UI/UX Features Already Implemented + +- **Message Actions** ✅ - Copy button on messages +- **Keyboard Shortcuts** ✅ - Enter to send, Shift+Enter for newline +- **Auto-resize Textarea** ✅ - Dynamic height adjustment +- **Mobile Optimizations** ✅ - Responsive layout with new chat button + +### ✅ Business Logic Features (Already Working) + +- **Billing Integration** ✅ - Proactive status loading, upgrade prompts +- **Team Management** ✅ - Dialog available in main app +- **API Key Management** ✅ - Dialog available in main app +- **Query Parameter Handling** ✅ - Working in main index.tsx +- **Chat Compression** ✅ - Handled by backend automatically +- **Multimodal Support** ✅ - Images and documents working +- **Verification Modal** ✅ - Working in main app +- **Chat Session Management** ✅ - Via Conversations API + +### Implementation Summary + +**✅ COMPLETED:** +- Conversations/Responses API integration +- Model selection with billing +- Voice input and transcription +- File attachments (images & documents) +- Markdown rendering +- Streaming indicators +- Mobile optimizations +- Token management (backend-handled) +- All business logic features + +**❌ REMAINING:** +1. **Scroll improvements** - Better auto-scroll and scroll-to-bottom button +2. **TTS** - Postponed until API is fixed +3. **System prompts** - Coming via new API +4. **Draft persistence** - Nice-to-have for preventing data loss + +The refactor is essentially feature-complete except for scroll UX improvements! + +## Completed Features (as of Jan 2025) + +### Phase 1-4 Core Implementation ✅ +- Full Conversations/Responses API integration +- Server-side conversation management +- SSE streaming with real-time text accumulation +- 5-second polling for cross-device sync +- URL-based conversation routing +- Message deduplication +- Error recovery and 404 handling + +### Phase 2 Features Completed +1. 
**Model Selection** ✅ + - ModelSelector component integrated in both input areas + - Support for model switching mid-conversation + - Auto-switching to vision models when images are added + - Billing tier restrictions (Pro/Starter/Team) + - Upgrade prompts for restricted models + +2. **Billing Integration** ✅ + - Proactive billing status fetching on app load + - Billing status cached in LocalState + - ModelSelector respects billing tiers + - AccountMenu shows correct plan status + +## Next Steps + +1. **Implement scroll improvements** - Add scroll-to-bottom button and better auto-scroll behavior +2. **System prompts via API** - Implement when backend API is ready +3. **TTS integration** - Add when Kokoro API is fixed +4. **Draft persistence** - Optional localStorage backup for unsent messages +5. **Clean up old code** - Remove ChatBox.tsx, useChatSession.ts, index.backup.tsx +6. **Deprecate old chat routes** - Make _auth.chat.$chatId.tsx read-only for archived chats \ No newline at end of file diff --git a/docs/unified-chat-refactor.md b/docs/unified-chat-refactor.md new file mode 100644 index 00000000..c603e038 --- /dev/null +++ b/docs/unified-chat-refactor.md @@ -0,0 +1,335 @@ +# Unified Chat Refactor - Phase 1 + +## Overview + +This document describes the initial refactor of Maple's chat interface in preparation for migrating from the current localStorage-based chat system to OpenAI's Conversations/Responses API. + +## Motivation + +The existing chat architecture had several pain points: + +1. **Scattered State Management**: Chat state was distributed across multiple components and routes: + - `frontend/src/routes/index.tsx` - Home page with ChatBox + - `frontend/src/routes/_auth.chat.$chatId.tsx` - Individual chat route + - `frontend/src/components/ChatBox.tsx` - Shared chat input component + - Complex prop drilling and state synchronization between these components + +2. 
**Complex Routing Logic**: The system required careful coordination between routes, with state being passed through navigation params, leading to: + - Difficult debugging when state got out of sync + - Re-rendering and remounting issues on navigation + - Complex URL management logic + +3. **Preparation for API Migration**: The upcoming switch to OpenAI's Conversations/Responses API requires a simpler architecture that can handle: + - Server-side conversation state + - Streaming responses + - No dependency on localStorage for chat history + +## Architectural Decisions + +### 1. Monolithic Component Design + +We created a single `UnifiedChat` component that contains all chat functionality: + +```typescript +// frontend/src/components/UnifiedChat.tsx +export function UnifiedChat() { + // ALL chat state lives here + const [messages, setMessages] = useState([]); + const [input, setInput] = useState(""); + const [isGenerating, setIsGenerating] = useState(false); + // ... +} +``` + +**Rationale**: +- Following the principle "Premature abstraction is the root of all evil" +- Colocated code is easier to debug and understand +- No state synchronization bugs between components +- Similar to how large tech companies (Meta, etc.) handle complex components + +### 2. URL Management Without Navigation + +Instead of using TanStack Router navigation (which causes remounting), we use browser-native `window.history.replaceState()`: + +```javascript +// Update URL without any navigation/reload +const usp = new URLSearchParams(window.location.search); +usp.set("conversation_id", newChatId); +window.history.replaceState(null, "", `/?${usp.toString()}`); +``` + +**Benefits**: +- No component remounting +- No state loss +- URL updates for shareability/bookmarking +- No "route not found" errors (query params don't need routes) + +### 3. 
Query Parameters Over Route Parameters + +We use `?conversation_id=xxx` instead of `/chat/xxx`: + +- **Before**: `/chat/123` - Requires route file, causes navigation +- **After**: `/?conversation_id=123` - No route needed, just URL update + +This approach avoids the need for route configuration while maintaining URL-based state. + +### 4. Preserved Existing Infrastructure + +We maintained backward compatibility: +- Old `/chat/$chatId` routes still work +- Existing Sidebar component is reused +- Auth logic and modals (team setup, API keys) remain functional +- Search parameters for callbacks (`team_setup`, `credits_success`) preserved + +## Implementation Details + +### File Structure + +**New Files**: +- `frontend/src/components/UnifiedChat.tsx` - The unified chat component +- `frontend/src/routes/index.backup.tsx` - Backup of original index + +**Modified Files**: +- `frontend/src/routes/index.tsx` - Simplified to show Marketing or UnifiedChat based on auth +- `frontend/src/components/Sidebar.tsx` - Updated "New Chat" to clear conversation_id + +### State Management + +Currently using local React state with mocked responses: + +```typescript +// Mock AI response - will be replaced with OpenAI conversations API +setTimeout(() => { + const assistantMessage: Message = { + id: `msg-${Date.now()}-ai`, + role: "assistant", + content: "Hello world! This is a mocked response...", + timestamp: Date.now() + }; + setMessages(prev => [...prev, assistantMessage]); +}, 1000); +``` + +This will be replaced with actual API calls in Phase 2. + +### New Chat Flow + +1. User clicks "New Chat" in sidebar +2. Sidebar clears `conversation_id` from URL +3. Dispatches 'newchat' event +4. UnifiedChat listens and clears messages +5. Input field gets focus + +## Benefits Achieved + +1. **Simplified Codebase**: ~250 lines in one file vs ~500+ lines across multiple files +2. **No State Synchronization Issues**: Single source of truth +3. 
**Better Performance**: No unnecessary re-renders or navigation +4. **Easier Debugging**: All logic in one place +5. **Ready for API Migration**: Clean foundation for OpenAI integration + +## Next Steps (Phase 2) + +1. **OpenAI Conversations API Integration**: + - Replace mock responses with actual API calls + - Implement streaming responses + - Handle conversation creation and management + +2. **Remove localStorage Dependency**: + - Migrate chat history to server-side storage + - Update Sidebar to fetch from API instead of localStorage + +3. **Error Handling & Edge Cases**: + - Handle API failures gracefully + - Implement retry logic + - Add loading states for conversation fetching + +## Design Philosophy + +This refactor follows the principle of **"Make it work, make it right, make it fast"**: + +1. **Make it work**: Single component with all functionality (current state) +2. **Make it right**: Will be achieved with API integration +3. **Make it fast**: Can optimize/split components later if needed + +By avoiding premature optimization and keeping everything in one place, we've created a maintainable foundation that can evolve as requirements become clearer. + +## Technical Decisions Explained + +### Why Not Cache Conversations? + +We explicitly decided against caching for now: +- Most users work on one conversation at a time +- API is fast enough that loading isn't painful +- Adds complexity that may not be needed +- Can be added later if users report performance issues + +### Why Query Parameters? + +- No route configuration needed +- Works immediately without router setup +- Prevents "route not found" errors +- Can be migrated to proper routes later if needed + +### Why Keep Everything in One Component? 
+ +- Based on real-world experience at major tech companies +- Easier to understand and debug +- No props drilling or state synchronization +- Can be split later when natural boundaries emerge + +## Current Implementation Status + +### ✅ Features Successfully Implemented + +The UnifiedChat component now includes these fully working features: + +#### Core Chat Functionality +- **Conversations/Responses API Integration** - Full server-side state management with OpenAI-compatible endpoints +- **Streaming responses** - Real-time SSE event handling for all response types +- **Message deduplication** - Smart ID management using server-assigned IDs with smooth local-to-server transitions +- **URL-based conversation routing** - Query parameter approach (`?conversation_id=xxx`) avoiding route configuration +- **5-second polling** - Automatic synchronization for cross-device conversations +- **Conversation lifecycle** - Lazy creation, loading from URL, switching between conversations + +#### User Interface +- **Modern ChatGPT-style UI** - Clean aesthetics with hover states and subtle backgrounds +- **Auto-scrolling** - Intelligent scroll on new messages (user and assistant) +- **Copy to clipboard** - One-click copy for assistant messages +- **React.memo optimization** - MessageList component prevents re-renders during input +- **Responsive sidebar** - Mobile-friendly with toggle button +- **Centered input for new chats** - Beautiful welcome screen with logo and prompt +- **Fixed input for active chats** - Standard chat interface when conversation is active +- **Mobile new chat button** - Quick access button in mobile header when in a conversation +- **Consistent mobile UI** - Aligned headers and consistent button styling across sidebar and main chat + +#### Multimodal Support +- **Image attachments** - Support for JPEG, PNG, WebP up to 10MB +- **Document parsing** - PDF, TXT, MD support (PDF requires Tauri) + - Fixed Tauri command: Uses `extract_document_content` instead of 
`parse_document` + - Simplified JSON format: Documents stored as `{ document: { filename, text_content } }` + - Removed unnecessary `status` and `errors` fields from document structure + - Proper markdown rendering with document preview button +- **Attachment preview** - Visual previews with remove capability +- **Auto model switching** - Automatically selects vision-capable models when images added +- **Plus button dropdown** - Clean attachment interface +- **Proper OpenAI format** - Uses `input_text`, `input_image`, `output_text` content types + +#### Billing & Access Control +- **Tier-based features** - Starter (images), Pro/Team (documents) +- **Upgrade prompts** - Contextual dialogs when accessing restricted features +- **Model selector integration** - Shows available models based on user's plan + +#### Error Handling +- **404 recovery** - Gracefully handles non-existent conversations +- **Network error display** - User-friendly error messages +- **Silent polling failures** - Doesn't interrupt user experience +- **Attachment validation** - File type and size checks with clear feedback + +### ✅ Recently Implemented Features + +#### Voice Recording (Completed December 2024) +- **Voice recording** - Microphone input with RecordRTC +- **Whisper transcription** - Convert speech to text via OpenSecret API +- **Recording overlay** - Visual feedback with waveform animation +- **Proper overlay positioning** - Covers only input area, not full page +- **Access control** - Requires Pro/Team tier and Whisper model availability +- **Error handling** - Clear messages for permission issues + +### ❌ Features Not Yet Migrated + +These features exist in the old components but haven't been implemented in UnifiedChat: + +#### TTS Features (Postponed - API not working) +- **Text-to-Speech (TTS)** - Kokoro voice synthesis with play/stop controls +- **Auto-play TTS** - Automatic playback for voice-initiated messages +- **Audio manager** - Prevents multiple TTS playing simultaneously 
+ +#### ✅ Scroll Behavior (Completed December 2024) +- **Smart auto-scroll logic** - Improved scrolling that matches old behavior: + - Instant scroll to bottom on initial chat load + - Auto-scroll when user sends a message + - Auto-scroll slightly (100px) when assistant starts streaming + - No auto-scroll while streaming (lets user read at their pace) + - Auto-scroll when new messages arrive from polling (e.g., after refresh) + - Maintains scroll position when user has scrolled up +- **User scroll detection** - Tracks if user is within 100px of bottom +- **Scroll-to-bottom button** - Could be added but not currently implemented + +#### System Prompt (Coming Soon via API) +- **System prompt support** - Will be handled via new API, not frontend input +- **Collapsible display** - Will need UI for showing system prompts when implemented + +#### UI/UX Features +- **Draft message persistence** - localStorage backup of unsent messages + +#### Advanced Features +- **Document metadata tracking** - Preserve filename and full content +- **Multi-file selection** - Batch image uploads +- **Message-specific actions** - Per-message TTS controls + +### 🎯 Feature Prioritization + +Based on user value and implementation complexity: + +#### High Priority (Essential) +1. ✅ **Voice Input** - COMPLETED! Recording and transcription working +2. ✅ **Token Management** - HANDLED BY BACKEND! Intelligent compression on server-side +3. ✅ **Streaming indicators** - COMPLETED! Different implementation but working well +4. ✅ **Scroll behavior** - COMPLETED! Smart auto-scrolling with user detection +5. **TTS** - Postponed until API is fixed + +#### Medium Priority (Nice to Have) +6. **System prompt support** - Coming via new API +7. **Draft persistence** - Prevents data loss on refresh + +#### Low Priority (Already Done or Not Needed) +9. ✅ **Mobile new chat button** - Already implemented +10. ✅ **Token warnings** - Not needed, backend handles compression automatically +11. 
**Message-specific TTS controls** - Will implement when TTS API is fixed + +### 🏗️ Architecture Improvements Achieved + +The refactor has delivered significant architectural improvements: + +1. **Single Component Architecture** - All logic in UnifiedChat.tsx, no prop drilling +2. **Server-Driven State** - No localStorage dependencies for chat data +3. **Clean URL Management** - Query parameters avoid complex routing +4. **Optimized Rendering** - Strategic use of React.memo prevents unnecessary re-renders +5. **Proper Error Boundaries** - Graceful handling of API failures +6. **Event-Based Communication** - Clean integration with sidebar via custom events +7. **Abort Controllers** - Proper cleanup of in-flight requests +8. **Resource Management** - Proper cleanup of object URLs and event listeners + +### 📊 Comparison with Old Architecture + +| Aspect | Old Implementation | New UnifiedChat | +|--------|-------------------|----------------| +| **Files** | 3+ components, multiple routes | Single component | +| **State Management** | Props, localStorage, context | Local React state + API | +| **Chat Persistence** | localStorage | Server-side via API | +| **Routing** | `/chat/:chatId` with route files | `?conversation_id=xxx` query params | +| **Message IDs** | Client-generated only | Server-assigned with local fallback | +| **Polling** | None | 5-second interval with cursor | +| **Code Complexity** | ~500+ lines across files | ~1276 lines in one file | +| **Debugging** | Difficult (scattered logic) | Easy (colocated code) | + +### 🚀 Next Steps + +1. **Implement Voice Features** - Add recording and TTS for accessibility +2. **Add Token Management** - Implement counting and compression +3. **Enhance UX** - Add scroll-to-bottom and streaming indicators +4. **Performance Optimization** - Consider splitting component if it grows much larger +5. 
**Testing** - Add comprehensive tests for the unified component + +## Conclusion + +The UnifiedChat refactor has successfully achieved its primary goals: +- ✅ Simplified architecture with single component +- ✅ Full Conversations/Responses API integration +- ✅ Removed localStorage dependencies for chat data +- ✅ Maintained all essential functionality +- ✅ Improved performance with React.memo +- ✅ Created foundation for future enhancements + +While some features from the old implementation haven't been migrated yet, the core chat experience is fully functional and the architecture is much cleaner. The missing features are primarily UX enhancements that can be added incrementally based on user feedback and priorities. diff --git a/frontend/bun.lock b/frontend/bun.lock index 581a3f12..355be79a 100644 --- a/frontend/bun.lock +++ b/frontend/bun.lock @@ -4,7 +4,7 @@ "": { "name": "maple", "dependencies": { - "@opensecret/react": "1.4.3", + "@opensecret/react": "1.5.0", "@radix-ui/react-alert-dialog": "^1.1.1", "@radix-ui/react-avatar": "^1.1.0", "@radix-ui/react-dialog": "^1.1.1", @@ -28,7 +28,7 @@ "clsx": "^2.1.1", "gpt-tokenizer": "^3.0.1", "lucide-react": "^0.436.0", - "openai": "^4.56.1", + "openai": "5.20.0", "react": "^18.3.1", "react-dom": "^18.3.1", "react-markdown": "^9.0.1", @@ -221,7 +221,7 @@ "@nodelib/fs.walk": ["@nodelib/fs.walk@1.2.8", "", { "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" } }, "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg=="], - "@opensecret/react": ["@opensecret/react@1.4.3", "", { "dependencies": { "@peculiar/x509": "^1.12.2", "@stablelib/base64": "^2.0.0", "@stablelib/chacha20poly1305": "^2.0.0", "@stablelib/random": "^2.0.0", "cbor2": "^1.7.0", "tweetnacl": "^1.0.3", "zod": "^3.23.8" }, "peerDependencies": { "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" } }, 
"sha512-lsBsPRM9tsY9C8y7hHxLe8MOKlvNVI2B3X0XLWUqn/Prm51iAl5anWsRbEhKBvvNvWjlW/gfgHBfx+i2B0tvAw=="], + "@opensecret/react": ["@opensecret/react@1.5.0", "", { "dependencies": { "@peculiar/x509": "^1.12.2", "@stablelib/base64": "^2.0.0", "@stablelib/chacha20poly1305": "^2.0.0", "@stablelib/random": "^2.0.0", "cbor2": "^1.7.0", "tweetnacl": "^1.0.3", "zod": "^3.23.8" }, "peerDependencies": { "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" } }, "sha512-fcN4IYvlom1e2pdGeMSmQ572V4fOALn/0AshyIut0EZjfgMU7camxi1JjXX7lJtgSUyW3Nys7Kiej3wo5sXIwQ=="], "@peculiar/asn1-cms": ["@peculiar/asn1-cms@2.3.15", "", { "dependencies": { "@peculiar/asn1-schema": "^2.3.15", "@peculiar/asn1-x509": "^2.3.15", "@peculiar/asn1-x509-attr": "^2.3.15", "asn1js": "^3.0.5", "tslib": "^2.8.1" } }, "sha512-B+DoudF+TCrxoJSTjjcY8Mmu+lbv8e7pXGWrhNp2/EGJp9EEcpzjBCar7puU57sGifyzaRVM03oD5L7t7PghQg=="], @@ -469,8 +469,6 @@ "@types/node": ["@types/node@22.13.4", "", { "dependencies": { "undici-types": "~6.20.0" } }, "sha512-ywP2X0DYtX3y08eFVx5fNIw7/uIv8hYUKgXoK8oayJlLnKcRfEYCxWMVE1XagUdVtCJlZT1AU4LXEABW+L1Peg=="], - "@types/node-fetch": ["@types/node-fetch@2.6.12", "", { "dependencies": { "@types/node": "*", "form-data": "^4.0.0" } }, "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA=="], - "@types/prop-types": ["@types/prop-types@15.7.14", "", {}, "sha512-gNMvNH49DJ7OJYv+KAKn0Xp45p8PLl6zo2YnvDIbTd4J6MER2BmWN49TG7n9LvkyihINxeKW8+3bfS2yDC9dzQ=="], "@types/react": ["@types/react@18.3.18", "", { "dependencies": { "@types/prop-types": "*", "csstype": "^3.0.2" } }, "sha512-t4yC+vtgnkYjNSKlFx1jkAhH8LgTo2N/7Qvi83kdEaUtMDiwpbLAktKDaAMlRcJ5eSxZkH74eEGt1ky31d7kfQ=="], @@ -505,14 +503,10 @@ "@vitejs/plugin-react": ["@vitejs/plugin-react@4.3.4", "", { "dependencies": { "@babel/core": "^7.26.0", "@babel/plugin-transform-react-jsx-self": "^7.25.9", "@babel/plugin-transform-react-jsx-source": "^7.25.9", "@types/babel__core": "^7.20.5", "react-refresh": "^0.14.2" 
}, "peerDependencies": { "vite": "^4.2.0 || ^5.0.0 || ^6.0.0" } }, "sha512-SCCPBJtYLdE8PX/7ZQAs1QAZ8Jqwih+0VBLum1EGqmCCQal+MIUqLCzj3ZUy8ufbC0cAM4LRlSTm7IQJwWT4ug=="], - "abort-controller": ["abort-controller@3.0.0", "", { "dependencies": { "event-target-shim": "^5.0.0" } }, "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg=="], - "acorn": ["acorn@8.14.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA=="], "acorn-jsx": ["acorn-jsx@5.3.2", "", { "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ=="], - "agentkeepalive": ["agentkeepalive@4.6.0", "", { "dependencies": { "humanize-ms": "^1.2.1" } }, "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ=="], - "ajv": ["ajv@6.12.6", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g=="], "ansi-regex": ["ansi-regex@6.1.0", "", {}, "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA=="], @@ -533,8 +527,6 @@ "asn1js": ["asn1js@3.0.5", "", { "dependencies": { "pvtsutils": "^1.3.2", "pvutils": "^1.1.3", "tslib": "^2.4.0" } }, "sha512-FVnvrKJwpt9LP2lAMl8qZswRNm3T4q9CON+bxldk2iwk3FFpuwhx2FfinyitizWHsVYyaY+y5JzDR0rCMV5yTQ=="], - "asynckit": ["asynckit@0.4.0", "", {}, "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="], - "autoprefixer": ["autoprefixer@10.4.20", "", { "dependencies": { "browserslist": "^4.23.3", "caniuse-lite": "^1.0.30001646", "fraction.js": "^4.3.7", "normalize-range": "^0.1.2", "picocolors": "^1.0.1", "postcss-value-parser": "^4.2.0" }, 
"peerDependencies": { "postcss": "^8.1.0" }, "bin": { "autoprefixer": "bin/autoprefixer" } }, "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g=="], "babel-dead-code-elimination": ["babel-dead-code-elimination@1.0.9", "", { "dependencies": { "@babel/core": "^7.23.7", "@babel/parser": "^7.23.6", "@babel/traverse": "^7.23.7", "@babel/types": "^7.23.6" } }, "sha512-JLIhax/xullfInZjtu13UJjaLHDeTzt3vOeomaSUdO/nAMEL/pWC/laKrSvWylXMnVWyL5bpmG9njqBZlUQOdg=="], @@ -553,8 +545,6 @@ "bun-types": ["bun-types@1.2.2", "", { "dependencies": { "@types/node": "*", "@types/ws": "~8.5.10" } }, "sha512-RCbMH5elr9gjgDGDhkTTugA21XtJAy/9jkKe/G3WR2q17VPGhcquf9Sir6uay9iW+7P/BV0CAHA1XlHXMAVKHg=="], - "call-bind-apply-helpers": ["call-bind-apply-helpers@1.0.2", "", { "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" } }, "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ=="], - "callsites": ["callsites@3.1.0", "", {}, "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ=="], "camelcase-css": ["camelcase-css@2.0.1", "", {}, "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA=="], @@ -585,8 +575,6 @@ "color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], - "combined-stream": ["combined-stream@1.0.8", "", { "dependencies": { "delayed-stream": "~1.0.0" } }, "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg=="], - "comma-separated-tokens": ["comma-separated-tokens@2.0.3", "", {}, "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg=="], "commander": ["commander@4.1.1", "", {}, "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA=="], @@ -607,8 +595,6 @@ "deep-is": ["deep-is@0.1.4", "", {}, 
"sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ=="], - "delayed-stream": ["delayed-stream@1.0.0", "", {}, "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ=="], - "dequal": ["dequal@2.0.3", "", {}, "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="], "detect-node-es": ["detect-node-es@1.1.0", "", {}, "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ=="], @@ -621,8 +607,6 @@ "dlv": ["dlv@1.1.3", "", {}, "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA=="], - "dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="], - "eastasianwidth": ["eastasianwidth@0.2.0", "", {}, "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="], "electron-to-chromium": ["electron-to-chromium@1.5.101", "", {}, "sha512-L0ISiQrP/56Acgu4/i/kfPwWSgrzYZUnQrC0+QPFuhqlLP1Ir7qzPPDVS9BcKIyWTRU8+o6CC8dKw38tSWhYIA=="], @@ -631,14 +615,6 @@ "entities": ["entities@4.5.0", "", {}, "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw=="], - "es-define-property": ["es-define-property@1.0.1", "", {}, "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g=="], - - "es-errors": ["es-errors@1.3.0", "", {}, "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="], - - "es-object-atoms": ["es-object-atoms@1.1.1", "", { "dependencies": { "es-errors": "^1.3.0" } }, "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA=="], - - "es-set-tostringtag": ["es-set-tostringtag@2.1.0", "", { "dependencies": { 
"es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="], - "esbuild": ["esbuild@0.21.5", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.21.5", "@esbuild/android-arm": "0.21.5", "@esbuild/android-arm64": "0.21.5", "@esbuild/android-x64": "0.21.5", "@esbuild/darwin-arm64": "0.21.5", "@esbuild/darwin-x64": "0.21.5", "@esbuild/freebsd-arm64": "0.21.5", "@esbuild/freebsd-x64": "0.21.5", "@esbuild/linux-arm": "0.21.5", "@esbuild/linux-arm64": "0.21.5", "@esbuild/linux-ia32": "0.21.5", "@esbuild/linux-loong64": "0.21.5", "@esbuild/linux-mips64el": "0.21.5", "@esbuild/linux-ppc64": "0.21.5", "@esbuild/linux-riscv64": "0.21.5", "@esbuild/linux-s390x": "0.21.5", "@esbuild/linux-x64": "0.21.5", "@esbuild/netbsd-x64": "0.21.5", "@esbuild/openbsd-x64": "0.21.5", "@esbuild/sunos-x64": "0.21.5", "@esbuild/win32-arm64": "0.21.5", "@esbuild/win32-ia32": "0.21.5", "@esbuild/win32-x64": "0.21.5" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw=="], "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], @@ -667,8 +643,6 @@ "esutils": ["esutils@2.0.3", "", {}, "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g=="], - "event-target-shim": ["event-target-shim@5.0.1", "", {}, "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ=="], - "extend": ["extend@3.0.2", "", {}, "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="], "fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="], @@ -693,12 +667,6 @@ "foreground-child": 
["foreground-child@3.3.0", "", { "dependencies": { "cross-spawn": "^7.0.0", "signal-exit": "^4.0.1" } }, "sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg=="], - "form-data": ["form-data@4.0.2", "", { "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", "es-set-tostringtag": "^2.1.0", "mime-types": "^2.1.12" } }, "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w=="], - - "form-data-encoder": ["form-data-encoder@1.7.2", "", {}, "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A=="], - - "formdata-node": ["formdata-node@4.4.1", "", { "dependencies": { "node-domexception": "1.0.0", "web-streams-polyfill": "4.0.0-beta.3" } }, "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ=="], - "fraction.js": ["fraction.js@4.3.7", "", {}, "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew=="], "fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], @@ -707,12 +675,8 @@ "gensync": ["gensync@1.0.0-beta.2", "", {}, "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg=="], - "get-intrinsic": ["get-intrinsic@1.2.7", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.0.0", "function-bind": "^1.1.2", "get-proto": "^1.0.0", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "math-intrinsics": "^1.1.0" } }, "sha512-VW6Pxhsrk0KAOqs3WEd0klDiF/+V7gQOpAvY1jVU/LHmaD/kQO4523aiJuikX/QAKYiW6x8Jh+RJej1almdtCA=="], - "get-nonce": ["get-nonce@1.0.1", "", {}, "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q=="], - "get-proto": ["get-proto@1.0.1", "", { "dependencies": { "dunder-proto": 
"^1.0.1", "es-object-atoms": "^1.0.0" } }, "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g=="], - "get-tsconfig": ["get-tsconfig@4.10.0", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-kGzZ3LWWQcGIAmg6iWvXn0ei6WDtV26wzHRMwDSzmAbcXrTEXxHy6IehI6/4eT6VRKyMP1eF1VqwrVUmE/LR7A=="], "glob": ["glob@10.4.5", "", { "dependencies": { "foreground-child": "^3.1.0", "jackspeak": "^3.1.2", "minimatch": "^9.0.4", "minipass": "^7.1.2", "package-json-from-dist": "^1.0.0", "path-scurry": "^1.11.1" }, "bin": { "glob": "dist/esm/bin.mjs" } }, "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg=="], @@ -723,18 +687,12 @@ "goober": ["goober@2.1.16", "", { "peerDependencies": { "csstype": "^3.0.10" } }, "sha512-erjk19y1U33+XAMe1VTvIONHYoSqE4iS7BYUZfHaqeohLmnC0FdxEh7rQU+6MZ4OajItzjZFSRtVANrQwNq6/g=="], - "gopd": ["gopd@1.2.0", "", {}, "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg=="], - "gpt-tokenizer": ["gpt-tokenizer@3.0.1", "", {}, "sha512-5jdaspBq/w4sWw322SvQj1Fku+CN4OAfYZeeEg8U7CWtxBz+zkxZ3h0YOHD43ee+nZYZ5Ud70HRN0ANcdIj4qg=="], "graphemer": ["graphemer@1.4.0", "", {}, "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag=="], "has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="], - "has-symbols": ["has-symbols@1.1.0", "", {}, "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ=="], - - "has-tostringtag": ["has-tostringtag@1.0.2", "", { "dependencies": { "has-symbols": "^1.0.3" } }, "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw=="], - "hasown": ["hasown@2.0.2", "", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ=="], 
"hast-util-from-dom": ["hast-util-from-dom@5.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "hastscript": "^9.0.0", "web-namespaces": "^2.0.0" } }, "sha512-N+LqofjR2zuzTjCPzyDUdSshy4Ma6li7p/c3pA78uTwzFgENbgbUrm2ugwsOdcjI1muO+o6Dgzp9p8WHtn/39Q=="], @@ -763,8 +721,6 @@ "html-url-attributes": ["html-url-attributes@3.0.1", "", {}, "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ=="], - "humanize-ms": ["humanize-ms@1.2.1", "", { "dependencies": { "ms": "^2.0.0" } }, "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ=="], - "ignore": ["ignore@5.3.2", "", {}, "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g=="], "import-fresh": ["import-fresh@3.3.1", "", { "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" } }, "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ=="], @@ -841,8 +797,6 @@ "markdown-table": ["markdown-table@3.0.4", "", {}, "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw=="], - "math-intrinsics": ["math-intrinsics@1.1.0", "", {}, "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g=="], - "mdast-util-find-and-replace": ["mdast-util-find-and-replace@3.0.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "escape-string-regexp": "^5.0.0", "unist-util-is": "^6.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg=="], "mdast-util-from-markdown": ["mdast-util-from-markdown@2.0.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "mdast-util-to-string": "^4.0.0", "micromark": "^4.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-decode-string": "^2.0.0", 
"micromark-util-normalize-identifier": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "unist-util-stringify-position": "^4.0.0" } }, "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA=="], @@ -939,10 +893,6 @@ "micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="], - "mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], - - "mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], - "minimatch": ["minimatch@3.1.2", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw=="], "minipass": ["minipass@7.1.2", "", {}, "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw=="], @@ -955,10 +905,6 @@ "natural-compare": ["natural-compare@1.4.0", "", {}, "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw=="], - "node-domexception": ["node-domexception@1.0.0", "", {}, "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ=="], - - "node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="], - "node-releases": ["node-releases@2.0.19", "", {}, "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw=="], "normalize-path": ["normalize-path@3.0.0", "", {}, 
"sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA=="], @@ -969,7 +915,7 @@ "object-hash": ["object-hash@3.0.0", "", {}, "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw=="], - "openai": ["openai@4.85.1", "", { "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", "node-fetch": "^2.6.7" }, "peerDependencies": { "ws": "^8.18.0", "zod": "^3.23.8" }, "optionalPeers": ["ws", "zod"], "bin": { "openai": "bin/cli" } }, "sha512-jkX2fntHljUvSH3MkWh4jShl10oNkb+SsCj4auKlbu2oF4KWAnmHLNR5EpnUHK1ZNW05Rp0fjbJzYwQzMsH8ZA=="], + "openai": ["openai@5.20.0", "", { "peerDependencies": { "ws": "^8.18.0", "zod": "^3.23.8" }, "optionalPeers": ["ws", "zod"], "bin": { "openai": "bin/cli" } }, "sha512-Bmc2zLM/YWgFrDpXr9hwXqGGDdMmMpE9+qoZPsaHpn0Y/Qk1Vu26hNqXo7+nHdli+sLsXINvS1f8kR3NKhGKmA=="], "optionator": ["optionator@0.9.4", "", { "dependencies": { "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", "levn": "^0.4.1", "prelude-ls": "^1.2.1", "type-check": "^0.4.0", "word-wrap": "^1.2.5" } }, "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g=="], @@ -1131,8 +1077,6 @@ "to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="], - "tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="], - "trim-lines": ["trim-lines@3.0.1", "", {}, "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg=="], "trough": ["trough@2.2.0", "", {}, "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw=="], @@ -1199,14 +1143,8 @@ "web-namespaces": 
["web-namespaces@2.0.1", "", {}, "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ=="], - "web-streams-polyfill": ["web-streams-polyfill@4.0.0-beta.3", "", {}, "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug=="], - - "webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="], - "webpack-virtual-modules": ["webpack-virtual-modules@0.6.2", "", {}, "sha512-66/V2i5hQanC51vBQKPH4aI8NMAcBW59FVBs+rC7eGHupMyfn34q7rZIE+ETlJ+XTevqfUhVVBgSUNSW2flEUQ=="], - "whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="], - "which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="], "word-wrap": ["word-wrap@1.2.5", "", {}, "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA=="], @@ -1289,8 +1227,6 @@ "mdast-util-find-and-replace/escape-string-regexp": ["escape-string-regexp@5.0.0", "", {}, "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw=="], - "openai/@types/node": ["@types/node@18.19.76", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-yvR7Q9LdPz2vGpmpJX5LolrgRdWvB67MJKDPSgIIzpFbaf9a1j/f5DnLp5VDyHGMR0QZHlTr1afsD87QCXFHKw=="], - "parse-entities/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="], "path-scurry/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="], @@ -1333,8 +1269,6 @@ 
"glob/minimatch/brace-expansion": ["brace-expansion@2.0.1", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA=="], - "openai/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="], - "string-width-cjs/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], "tsx/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.23.1", "", { "os": "aix", "cpu": "ppc64" }, "sha512-6VhYk1diRqrhBAqpJEdjASR/+WVRtfjpqKuNw11cLiaWpAT/Uu+nokB+UJnevzy/P9C/ty6AOe0dwueMrGh/iQ=="], diff --git a/frontend/package.json b/frontend/package.json index 56ae1ffa..06232c9f 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,11 +1,12 @@ { "name": "maple", "private": true, - "version": "1.3.2", + "version": "1.99.0", "type": "module", "scripts": { "dev": "vite", "build": "tsc -b && vite build", + "build:fast": "vite build", "lint": "eslint './src/**/*.{ts,tsx}'", "preview": "vite preview", "add": "bunx shadcn-ui@latest add", @@ -16,7 +17,7 @@ "mdast-util-gfm-autolink-literal": "2.0.0" }, "dependencies": { - "@opensecret/react": "1.4.3", + "@opensecret/react": "1.5.0", "@radix-ui/react-alert-dialog": "^1.1.1", "@radix-ui/react-avatar": "^1.1.0", "@radix-ui/react-dialog": "^1.1.1", @@ -40,7 +41,7 @@ "clsx": "^2.1.1", "gpt-tokenizer": "^3.0.1", "lucide-react": "^0.436.0", - "openai": "^4.56.1", + "openai": "5.20.0", "react": "^18.3.1", "react-dom": "^18.3.1", "react-markdown": "^9.0.1", diff --git a/frontend/public/maple-leaf-and-maple-black.png b/frontend/public/maple-leaf-and-maple-black.png new file mode 100644 index 00000000..df602c78 Binary files /dev/null and b/frontend/public/maple-leaf-and-maple-black.png differ diff --git a/frontend/public/maple-leaf-and-maple-white.png 
b/frontend/public/maple-leaf-and-maple-white.png new file mode 100644 index 00000000..d2fca301 Binary files /dev/null and b/frontend/public/maple-leaf-and-maple-white.png differ diff --git a/frontend/src-tauri/Cargo.lock b/frontend/src-tauri/Cargo.lock index 3f280145..177a94e5 100644 --- a/frontend/src-tauri/Cargo.lock +++ b/frontend/src-tauri/Cargo.lock @@ -2733,7 +2733,7 @@ checksum = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4" [[package]] name = "maple" -version = "1.3.2" +version = "1.99.0" dependencies = [ "anyhow", "axum", diff --git a/frontend/src-tauri/Cargo.toml b/frontend/src-tauri/Cargo.toml index 47de9978..3dbbacbe 100644 --- a/frontend/src-tauri/Cargo.toml +++ b/frontend/src-tauri/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "maple" -version = "1.3.2" +version = "1.99.0" description = "Maple AI" authors = ["tony@opensecret.cloud"] license = "MIT" diff --git a/frontend/src-tauri/capabilities/default.json b/frontend/src-tauri/capabilities/default.json index d3d3ad81..e5a1a41e 100644 --- a/frontend/src-tauri/capabilities/default.json +++ b/frontend/src-tauri/capabilities/default.json @@ -62,6 +62,9 @@ }, { "url": "https://*.zaprite.com/*" + }, + { + "url": "https://opensecret.cloud/*" } ] }, diff --git a/frontend/src-tauri/capabilities/mobile-android.json b/frontend/src-tauri/capabilities/mobile-android.json index 18eb3f9c..0b6da8a6 100644 --- a/frontend/src-tauri/capabilities/mobile-android.json +++ b/frontend/src-tauri/capabilities/mobile-android.json @@ -65,6 +65,9 @@ { "url": "https://*.zaprite.com/*" }, + { + "url": "https://opensecret.cloud/*" + }, { "url": "https://github.com/*" }, diff --git a/frontend/src-tauri/capabilities/mobile-ios.json b/frontend/src-tauri/capabilities/mobile-ios.json index 283d8ae8..601b11fc 100644 --- a/frontend/src-tauri/capabilities/mobile-ios.json +++ b/frontend/src-tauri/capabilities/mobile-ios.json @@ -27,6 +27,9 @@ }, { "url": "https://*.zaprite.com/*" + }, + { + "url": 
"https://opensecret.cloud/*" } ] }, diff --git a/frontend/src-tauri/gen/apple/maple_iOS/Info.plist b/frontend/src-tauri/gen/apple/maple_iOS/Info.plist index 7a7a1606..56305b98 100644 --- a/frontend/src-tauri/gen/apple/maple_iOS/Info.plist +++ b/frontend/src-tauri/gen/apple/maple_iOS/Info.plist @@ -15,9 +15,9 @@ CFBundlePackageType APPL CFBundleShortVersionString - 1.3.2 + 1.99.0 CFBundleVersion - 1.3.2 + 1.99.0 LSRequiresIPhoneOS UILaunchStoryboardName diff --git a/frontend/src-tauri/gen/apple/project.yml b/frontend/src-tauri/gen/apple/project.yml index 0033d46c..df7785d2 100644 --- a/frontend/src-tauri/gen/apple/project.yml +++ b/frontend/src-tauri/gen/apple/project.yml @@ -52,8 +52,8 @@ targets: - UIInterfaceOrientationPortraitUpsideDown - UIInterfaceOrientationLandscapeLeft - UIInterfaceOrientationLandscapeRight - CFBundleShortVersionString: 1.3.2 - CFBundleVersion: 1.3.2 + CFBundleShortVersionString: 1.99.0 + CFBundleVersion: 1.99.0 entitlements: path: maple_iOS/maple_iOS.entitlements scheme: diff --git a/frontend/src-tauri/tauri.conf.json b/frontend/src-tauri/tauri.conf.json index 49e7fa26..bad290b7 100644 --- a/frontend/src-tauri/tauri.conf.json +++ b/frontend/src-tauri/tauri.conf.json @@ -1,7 +1,7 @@ { "$schema": "../node_modules/@tauri-apps/cli/config.schema.json", "productName": "Maple", - "version": "1.3.2", + "version": "1.99.0", "identifier": "cloud.opensecret.maple", "build": { "frontendDist": "../dist", @@ -68,7 +68,7 @@ "developmentTeam": "X773Y823TN" }, "android": { - "versionCode": 1003002002 + "versionCode": 1099000001 }, "windows": { "certificateThumbprint": null, diff --git a/frontend/src/ai/OpenAIContext.tsx b/frontend/src/ai/OpenAIContext.tsx index 72fa43a5..3cead47c 100644 --- a/frontend/src/ai/OpenAIContext.tsx +++ b/frontend/src/ai/OpenAIContext.tsx @@ -24,7 +24,8 @@ export const OpenAIProvider = ({ children }: { children: React.ReactNode }) => { defaultHeaders: { "Accept-Encoding": "identity" }, - fetch: aiCustomFetch + fetch: 
aiCustomFetch, + maxRetries: 0 // Disable automatic retries }); return {children}; diff --git a/frontend/src/components/AccountDialog.tsx b/frontend/src/components/AccountDialog.tsx index 117dba6e..a2b41e12 100644 --- a/frontend/src/components/AccountDialog.tsx +++ b/frontend/src/components/AccountDialog.tsx @@ -24,11 +24,13 @@ import { CheckCircle, XCircle, Trash } from "lucide-react"; import { ChangePasswordDialog } from "./ChangePasswordDialog"; import { useLocalState } from "@/state/useLocalState"; import { DeleteAccountDialog } from "./DeleteAccountDialog"; +import { PreferencesDialog } from "./PreferencesDialog"; export function AccountDialog() { const os = useOpenSecret(); const [isChangePasswordOpen, setIsChangePasswordOpen] = useState(false); const [isDeleteAccountOpen, setIsDeleteAccountOpen] = useState(false); + const [isPreferencesOpen, setIsPreferencesOpen] = useState(false); const [verificationStatus, setVerificationStatus] = useState<"unverified" | "pending">( "unverified" ); @@ -108,6 +110,16 @@ export function AccountDialog() {
+ {isEmailUser && ( - )} -
- ); -} - -export default function Component({ - onSubmit, - startTall, - messages = [], - isStreaming = false, - onCompress, - isSummarizing = false, - imageConversionError -}: { - onSubmit: ( - input: string, - systemPrompt?: string, - images?: File[], - documentText?: string, - documentMetadata?: { filename: string; fullContent: string }, - sentViaVoice?: boolean - ) => void | Promise; - startTall?: boolean; - messages?: ChatMessage[]; - isStreaming?: boolean; - onCompress?: () => void; - isSummarizing?: boolean; - imageConversionError?: string | null; -}) { - const [inputValue, setInputValue] = useState(""); - const [systemPromptValue, setSystemPromptValue] = useState(""); - const [isSystemPromptExpanded, setIsSystemPromptExpanded] = useState(false); - const { - billingStatus, - setBillingStatus, - draftMessages, - setDraftMessage, - clearDraftMessage, - model, - setModel, - availableModels, - hasWhisperModel - } = useLocalState(); - - const supportsVision = MODEL_CONFIG[model]?.supportsVision || false; - const [images, setImages] = useState([]); - const [imageUrls, setImageUrls] = useState>(new Map()); - const [uploadedDocument, setUploadedDocument] = useState<{ - original: DocumentResponse; - parsed: ParsedDocument; - cleanedText: string; - } | null>(null); - const [isUploadingDocument, setIsUploadingDocument] = useState(false); - const [documentError, setDocumentError] = useState(null); - const [imageError, setImageError] = useState(null); - const fileInputRef = useRef(null); - const documentInputRef = useRef(null); - const [upgradeDialogOpen, setUpgradeDialogOpen] = useState(false); - const [upgradeFeature, setUpgradeFeature] = useState<"image" | "voice" | "document">("image"); - const os = useOpenSecret(); - const isTauriEnv = isTauri(); - - // Audio recording state - const [isRecording, setIsRecording] = useState(false); - const [isTranscribing, setIsTranscribing] = useState(false); - const [isProcessingSend, setIsProcessingSend] = useState(false); - 
const [audioError, setAudioError] = useState(null); - const recorderRef = useRef(null); - const streamRef = useRef(null); - - // Find the first vision-capable model the user has access to - const findFirstVisionModel = () => { - // Check if user has Pro/Team access - if (!hasProTeamAccess) return null; - - // Find first model that supports vision - for (const modelId of availableModels.map((m) => m.id)) { - const modelConfig = MODEL_CONFIG[modelId]; - if (modelConfig?.supportsVision) { - // Check if user has access to this model - const needsStarter = modelConfig.requiresStarter; - const needsPro = modelConfig.requiresPro; - - // If no special requirements, or user meets requirements - if (!needsStarter && !needsPro) return modelId; - if ( - needsStarter && - (freshBillingStatus?.product_name?.toLowerCase().includes("starter") || - freshBillingStatus?.product_name?.toLowerCase().includes("pro") || - freshBillingStatus?.product_name?.toLowerCase().includes("max") || - freshBillingStatus?.product_name?.toLowerCase().includes("team")) - ) { - return modelId; - } - if (needsPro && hasProTeamAccess) return modelId; - } - } - return null; - }; - - const handleAddImages = (e: React.ChangeEvent) => { - if (!e.target.files) return; - - const supportedTypes = ["image/jpeg", "image/jpg", "image/png", "image/webp"]; - const maxSizeInBytes = 10 * 1024 * 1024; // 10MB for images - const errors: string[] = []; - - const validFiles = Array.from(e.target.files).filter((file) => { - // Check file type - if (!supportedTypes.includes(file.type.toLowerCase())) { - return false; - } - - // Check file size - if (file.size > maxSizeInBytes) { - const sizeInMB = (file.size / (1024 * 1024)).toFixed(2); - errors.push(`${file.name} is too large (${sizeInMB}MB)`); - return false; - } - - return true; - }); - - if (validFiles.length < e.target.files.length) { - const skippedCount = e.target.files.length - validFiles.length; - const typeErrors = e.target.files.length - validFiles.length - 
errors.length; - - if (errors.length > 0) { - setImageError(`${errors.join(", ")}. Max size is 10MB per image.`); - } else if (typeErrors > 0) { - setImageError( - `${skippedCount} file(s) skipped. Only JPEG, PNG, and WebP images are supported.` - ); - } - // Clear error after 5 seconds - setTimeout(() => setImageError(null), 5000); - } else { - setImageError(null); - } - - // Create object URLs for the new images - const newUrlMap = new Map(imageUrls); - validFiles.forEach((file) => { - if (!newUrlMap.has(file)) { - newUrlMap.set(file, URL.createObjectURL(file)); - } - }); - setImageUrls(newUrlMap); - setImages((prev) => [...prev, ...validFiles]); - }; - - const removeImage = (idx: number) => { - setImages((prev) => { - const fileToRemove = prev[idx]; - // Revoke the object URL when removing the image - const url = imageUrls.get(fileToRemove); - if (url) { - URL.revokeObjectURL(url); - setImageUrls((prevUrls) => { - const newUrls = new Map(prevUrls); - newUrls.delete(fileToRemove); - return newUrls; - }); - } - return prev.filter((_, i) => i !== idx); - }); - // Clear any image errors when removing images - setImageError(null); - }; - - // Helper function to read text file and format as ParsedDocument - const processTextFileLocally = async (file: File): Promise => { - return new Promise((resolve, reject) => { - const reader = new FileReader(); - - reader.onload = (event) => { - const content = event.target?.result as string; - - // Create a ParsedDocument structure matching the expected format - const parsedDocument: ParsedDocument = { - document: { - filename: file.name, - text_content: content - }, - status: "completed", - errors: [] - }; - - resolve(parsedDocument); - }; - - reader.onerror = () => { - reject(new Error("Failed to read file")); - }; - - reader.readAsText(file); - }); - }; - - const handleDocumentUpload = async (e: React.ChangeEvent) => { - if (!e.target.files || e.target.files.length === 0) return; - - const file = e.target.files[0]; - - // Check 
file size (10MB limit for local processing) - const maxSizeInBytes = 10 * 1024 * 1024; // 10MB - if (file.size > maxSizeInBytes) { - const sizeInMB = (file.size / (1024 * 1024)).toFixed(2); - setDocumentError(`File too large (${sizeInMB}MB). Maximum size is 10MB.`); - e.target.value = ""; // Reset input - return; - } - - setIsUploadingDocument(true); - setDocumentError(null); - - try { - let parsed: ParsedDocument; - let result: DocumentResponse | undefined; - - // Use the existing isTauriEnv state instead of checking again - if ( - isTauriEnv && - (file.type === "application/pdf" || - file.name.endsWith(".pdf") || - file.type === "text/plain" || - file.name.endsWith(".txt") || - file.name.endsWith(".md")) - ) { - // Process documents locally using Rust in Tauri - const { invoke } = await import("@tauri-apps/api/core"); - - // Convert file to base64 - const reader = new FileReader(); - const base64Data = await new Promise((resolve, reject) => { - reader.onload = () => { - const base64 = (reader.result as string).split(",")[1]; // Remove data:type;base64, prefix - resolve(base64); - }; - reader.onerror = reject; - reader.readAsDataURL(file); - }); - - // Determine file type - let fileType = file.type; - if (file.name.endsWith(".pdf")) fileType = "pdf"; - else if (file.name.endsWith(".txt")) fileType = "txt"; - else if (file.name.endsWith(".md")) fileType = "md"; - - // Call Rust function to extract content - const rustResponse = await invoke("extract_document_content", { - fileBase64: base64Data, - filename: file.name, - fileType: fileType - }); - - // Convert Rust response to ParsedDocument format - parsed = { - document: { - filename: rustResponse.document.filename, - text_content: rustResponse.document.text_content - }, - status: rustResponse.status, - errors: [] - }; - } else if ( - file.type === "text/plain" || - file.name.endsWith(".txt") || - file.name.endsWith(".md") - ) { - // Process text files locally in browser - parsed = await 
processTextFileLocally(file); - } else { - // PDF files in browser are not supported without Tauri - setDocumentError("PDF files can only be processed in the desktop app"); - e.target.value = ""; // Reset input - return; - // REMOVED: Cloud API fallback for document processing - // result = await os.uploadDocumentWithPolling(file); - // parsed = JSON.parse(result.text) as ParsedDocument; - } - - // Extract content - // const content = parsed.document.text_content || ""; - - // Create a cleaned version of the parsed document - const cleanedParsed = { - ...parsed, - document: { - ...parsed.document, - text_content: parsed.document.text_content - ? parsed.document.text_content.replace(/!\[Image\]\([^)]+\)/g, "") - : parsed.document.text_content - } - }; - - // Always provide a valid original-like payload; Tauri-local paths have no server result - const originalResponse: DocumentResponse = - result ?? - ({ - text: JSON.stringify(parsed), - filename: file.name, - size: file.size - } as DocumentResponse); - - setUploadedDocument({ - original: originalResponse, - parsed: parsed, - cleanedText: JSON.stringify(cleanedParsed) // Store the cleaned JSON as a string - }); - } catch (error) { - console.error("Document upload failed:", error); - if (error instanceof Error) { - if (error.message.includes("exceeds maximum limit")) { - setDocumentError("File too large. Maximum size is 10MB."); - } else if (error.message.includes("401")) { - setDocumentError("Authentication required. Please log in to upload documents."); - } else if (error.message.includes("403")) { - setDocumentError("Usage limit exceeded. Please upgrade your plan."); - } else { - setDocumentError("Failed to process document. 
Please try again."); - } - } else { - setDocumentError("An unexpected error occurred."); - } - } finally { - setIsUploadingDocument(false); - if (e.target) e.target.value = ""; - } - }; - - const removeDocument = () => { - setUploadedDocument(null); - setDocumentError(null); - }; - - // Audio recording functions - const startRecording = async () => { - // Prevent duplicate starts - if (isRecording || isTranscribing) return; - - try { - // Check if getUserMedia is available - if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) { - setAudioError( - "Microphone access is blocked. Please check your browser permissions or disable Lockdown Mode for this site (Settings > Safari > Advanced > Lockdown Mode)." - ); - setTimeout(() => setAudioError(null), 8000); // Longer timeout for this important message - return; - } - - const stream = await navigator.mediaDevices.getUserMedia({ - audio: { - echoCancellation: false, // Disable to reduce processing overhead - noiseSuppression: true, - autoGainControl: false, // Disable AGC to prevent audio ducking - sampleRate: 16000 // Lower sample rate to match output - } - }); - - streamRef.current = stream; - - // Create RecordRTC instance configured for WAV - const recorder = new RecordRTC(stream, { - type: "audio", - mimeType: "audio/wav", - recorderType: RecordRTC.StereoAudioRecorder, - numberOfAudioChannels: 1, // Mono audio for smaller file size - desiredSampRate: 16000 // 16kHz is good for speech - }); - - recorderRef.current = recorder; - recorder.startRecording(); - setIsRecording(true); - setAudioError(null); // Clear any previous errors - } catch (error) { - console.error("Failed to start recording:", error); - const err = error as Error & { name?: string }; - console.error("Error name:", err.name); - console.error("Error message:", err.message); - - // Handle different error types - if (err.name === "NotAllowedError" || err.name === "PermissionDeniedError") { - setAudioError( - "Microphone access denied. 
Please enable microphone permissions in Settings > Maple." - ); - } else if (err.name === "NotFoundError" || err.name === "DevicesNotFoundError") { - setAudioError("No microphone found. Please check your device."); - } else if (err.name === "NotReadableError" || err.name === "TrackStartError") { - setAudioError("Microphone is already in use by another app."); - } else { - // Include error details for debugging - setAudioError( - `Failed to access microphone: ${err.name || "Unknown error"} - ${err.message || "Please try again"}` - ); - } - - // Clear error after 5 seconds - setTimeout(() => setAudioError(null), 5000); - } - }; - - const stopRecording = (shouldSend: boolean = false) => { - if (recorderRef.current && isRecording) { - // Only hide immediately if canceling, keep visible if sending - if (!shouldSend) { - setIsRecording(false); - } else { - setIsProcessingSend(true); // Show processing state - } - - recorderRef.current.stopRecording(async () => { - // Safely get blob (recorder might be null by now) - const blob = recorderRef.current?.getBlob(); - - if (!blob || blob.size === 0) { - console.error("No audio recorded or empty recording"); - if (shouldSend) { - setAudioError("No audio was recorded. Please try again."); - setTimeout(() => setAudioError(null), 5000); - } - // Still need to clean up - if (streamRef.current) { - streamRef.current.getTracks().forEach((track) => track.stop()); - streamRef.current = null; - } - recorderRef.current = null; - setIsProcessingSend(false); - setIsRecording(false); - return; - } - - // Create a proper WAV file - const audioFile = new File([blob], "recording.wav", { - type: "audio/wav" - }); - - if (shouldSend) { - setIsTranscribing(true); - try { - const result = await os.transcribeAudio(audioFile, "whisper-large-v3"); - - // Set the transcribed text - const transcribedText = result.text.trim(); - - if (transcribedText) { - // Directly submit without updating the input field - const newValue = inputValue ? 
`${inputValue} ${transcribedText}` : transcribedText; - - if (newValue.trim()) { - // Wait for onSubmit to complete (in case it returns a Promise for navigation) - await onSubmit( - newValue.trim(), - messages.length === 0 ? systemPromptValue.trim() || undefined : undefined, - images, - uploadedDocument?.cleanedText, - uploadedDocument - ? { - filename: uploadedDocument.parsed.document.filename, - fullContent: uploadedDocument.parsed.document.text_content || "" - } - : undefined, - true // sentViaVoice flag - ); - - // Clear the input and other states - setInputValue(""); - imageUrls.forEach((url) => URL.revokeObjectURL(url)); - setImageUrls(new Map()); - setImages([]); - setUploadedDocument(null); - setDocumentError(null); - setImageError(null); - } - } - } catch (error) { - console.error("Transcription failed:", error); - setAudioError("Failed to transcribe audio. Please try again."); - // Clear error after 5 seconds - setTimeout(() => setAudioError(null), 5000); - } finally { - setIsTranscribing(false); - setIsProcessingSend(false); - setIsRecording(false); // Hide overlay after send is complete - } - } - - // Clean up - if (streamRef.current) { - streamRef.current.getTracks().forEach((track) => track.stop()); - streamRef.current = null; - } - recorderRef.current = null; - }); - } - }; - - const toggleRecording = () => { - if (isRecording) { - stopRecording(); - } else { - startRecording(); - } - }; - - const handleRecordingSend = () => { - stopRecording(true); - }; - - const handleRecordingCancel = () => { - if (recorderRef.current && isRecording) { - setIsRecording(false); // Hide overlay immediately - - recorderRef.current.stopRecording(() => { - // Clean up without transcribing - if (streamRef.current) { - streamRef.current.getTracks().forEach((track) => track.stop()); - streamRef.current = null; - } - recorderRef.current = null; - }); - } - }; - - const [isFocused, setIsFocused] = useState(false); - const inputRef = useRef(null); - const systemPromptRef = 
useRef(null); - const lastDraftRef = useRef(""); - const previousChatIdRef = useRef(undefined); - const currentInputRef = useRef(""); - - // Get the chatId from the current route state - const router = useRouter(); - const chatId = router.state.matches.find((m) => m.routeId === ChatRoute.id)?.params?.chatId as - | string - | undefined; - - const { data: freshBillingStatus } = useQuery({ - queryKey: ["billingStatus"], - queryFn: async () => { - const billingService = getBillingService(); - const status = await billingService.getBillingStatus(); - setBillingStatus(status); - return status; - } - }); - - // Use the centralized function for mobile detection directly - const isMobilePlatform = isMobile(); - - // Check if we're in Tauri environment on component mount - - // Check if user can use system prompts (paid users only - exclude free plans) - const canUseSystemPrompt = - freshBillingStatus && !freshBillingStatus.product_name?.toLowerCase().includes("free"); - - // Check if system prompt can be edited (only for new chats) - const canEditSystemPrompt = canUseSystemPrompt && messages.length === 0; - - // Check if user has access to Pro/Team/Max features (Pro, Max, or Team plan) - const hasProTeamAccess = - freshBillingStatus && - (freshBillingStatus.product_name?.toLowerCase().includes("pro") || - freshBillingStatus.product_name?.toLowerCase().includes("max") || - freshBillingStatus.product_name?.toLowerCase().includes("team")); - - // Check if user has access to Starter features (Starter plan and above) - const hasStarterAccess = - freshBillingStatus && - (freshBillingStatus.product_name?.toLowerCase().includes("starter") || - freshBillingStatus.product_name?.toLowerCase().includes("pro") || - freshBillingStatus.product_name?.toLowerCase().includes("max") || - freshBillingStatus.product_name?.toLowerCase().includes("team")); - - const canUseImages = hasStarterAccess; - const canUseVoice = hasProTeamAccess; - const canUseDocuments = hasProTeamAccess; - - const 
handleSubmit = (e?: React.FormEvent) => { - e?.preventDefault(); - - // Allow submission if there's text input, images, or a document - const hasContent = inputValue.trim() || images.length > 0 || uploadedDocument; - if (!hasContent || isSubmitDisabled) return; - - // Clear the drafts when submitting - if (chatId) { - try { - clearDraftMessage(chatId); - lastDraftRef.current = ""; - currentInputRef.current = ""; - } catch (error) { - console.error("Failed to clear draft messages:", error); - // Continue with submission even if draft clearing fails - } - } - - // Only pass system prompt if this is the first message - const isFirstMessage = messages.length === 0; - onSubmit( - inputValue.trim(), - isFirstMessage ? systemPromptValue.trim() || undefined : undefined, - images, - uploadedDocument?.cleanedText, // Now contains the full JSON with cleaned text_content - uploadedDocument - ? { - filename: uploadedDocument.parsed.document.filename, - fullContent: uploadedDocument.parsed.document.text_content || "" - } - : undefined - ); - setInputValue(""); - - // Clean up image URLs when clearing images - imageUrls.forEach((url) => URL.revokeObjectURL(url)); - setImageUrls(new Map()); - setImages([]); - - setUploadedDocument(null); - setDocumentError(null); - setImageError(null); - - // Re-focus input after submitting (desktop only) - if (!isMobilePlatform) { - setTimeout(() => { - inputRef.current?.focus(); - }, 0); - } - }; - - const handleKeyDown = (e: React.KeyboardEvent) => { - if (e.key === "Enter") { - if (isMobilePlatform || e.shiftKey || isStreaming) { - // On mobile, when Shift is pressed, or when streaming, allow newline - return; - } else if (isSubmitDisabled || !inputValue.trim()) { - // Prevent form submission when disabled or empty input - e.preventDefault(); - return; - } else { - // On desktop without Shift and not streaming, submit the form - e.preventDefault(); - handleSubmit(); - } - } - }; - - // Auto-resize effect for main input - useEffect(() => { - if 
(inputRef.current) { - inputRef.current.style.height = "auto"; - inputRef.current.style.height = `${inputRef.current.scrollHeight}px`; - } - }, [inputValue]); - - // Auto-resize effect for system prompt - useEffect(() => { - if (systemPromptRef.current) { - systemPromptRef.current.style.height = "auto"; - systemPromptRef.current.style.height = `${systemPromptRef.current.scrollHeight}px`; - } - }, [systemPromptValue]); - - // Debounce input for token calculations to avoid lag while typing - const debouncedInputValue = useDebounce(inputValue, 300); - - // Calculate token usage percentage - const totalTokens = useMemo( - () => calculateTotalTokens(messages, debouncedInputValue), - [messages, debouncedInputValue] - ); - const tokenLimit = getModelTokenLimit(model); - const tokenPercentage = (totalTokens / tokenLimit) * 100; - const isAt99Percent = tokenPercentage >= 99; - - // Update current input ref when input value changes - useEffect(() => { - currentInputRef.current = inputValue; - }, [inputValue]); - - // Handle draft loading and saving only on chat switches - useEffect(() => { - // 1. Save drafts from previous chat before switching - if (previousChatIdRef.current && previousChatIdRef.current !== chatId) { - const oldChatId = previousChatIdRef.current; - - // Save message draft - const currentInput = currentInputRef.current.trim(); - if (currentInput !== "") { - setDraftMessage(oldChatId, currentInput); - } else { - clearDraftMessage(oldChatId); - } - } - - // 2. 
Load drafts for new chat - if (chatId) { - try { - // Load message draft - const draft = draftMessages.get(chatId) || ""; - setInputValue(draft); - lastDraftRef.current = draft; - currentInputRef.current = draft; - - // Reset system prompt for new chat - setSystemPromptValue(""); - setIsSystemPromptExpanded(false); - } catch (error) { - console.error("Failed to load draft messages:", error); - setInputValue(""); - setSystemPromptValue(""); - lastDraftRef.current = ""; - currentInputRef.current = ""; - } - } - - // 3. Update the previous chat ID - previousChatIdRef.current = chatId; - }, [chatId, draftMessages, setDraftMessage, clearDraftMessage]); - - // Determine when the submit button should be disabled - const isSubmitDisabled = - (freshBillingStatus !== undefined && - (!freshBillingStatus.can_chat || - (freshBillingStatus.chats_remaining !== null && - freshBillingStatus.chats_remaining <= 0))) || - isStreaming || - isAt99Percent; - - // Disable the input box only when the user is out of chats or when streaming - const isInputDisabled = - (freshBillingStatus !== undefined && - (!freshBillingStatus.can_chat || - (freshBillingStatus.chats_remaining !== null && - freshBillingStatus.chats_remaining <= 0))) || - isStreaming; - - // Auto-focus effect - runs on mount, when chat ID changes, and after streaming completes - useEffect(() => { - // Skip auto-focus on mobile to prevent keyboard popup - if (isMobilePlatform) { - return; - } - - // Skip if user is already focused on an input elsewhere - if (document.activeElement?.matches("input, textarea")) { - return; - } - - // Short delay to ensure DOM is ready - const timer = setTimeout(() => { - // Only focus if input isn't disabled - if (inputRef.current && !isInputDisabled) { - inputRef.current.focus(); - } - }, 100); - - return () => clearTimeout(timer); - }, [chatId, isStreaming, isInputDisabled, isMobilePlatform]); // Re-run when chat ID changes, streaming completes, or input state changes - - // Cleanup effect for 
object URLs - useEffect(() => { - return () => { - // Revoke all object URLs when component unmounts - imageUrls.forEach((url) => URL.revokeObjectURL(url)); - }; - }, [imageUrls]); - - // Cleanup audio recording on unmount - useEffect(() => { - return () => { - // Stop any active recording and release microphone - if (streamRef.current) { - streamRef.current.getTracks().forEach((track) => track.stop()); - streamRef.current = null; - } - // Clean up recorder - if (recorderRef.current && isRecording) { - recorderRef.current.stopRecording(() => { - recorderRef.current = null; - }); - } - }; - }, []); - - // No longer need token calculation or plan type check since we removed the hard limit - // Just keeping the TokenWarning component which handles its own calculations - const placeholderText = (() => { - if (isAt99Percent) { - return "Chat is too long to continue."; - } - if (billingStatus === null || freshBillingStatus === undefined) - return "Type your message here..."; - if (freshBillingStatus.can_chat === false) { - return "You've used up all your messages. Upgrade to continue."; - } - return "Type your message here..."; - })(); - - return ( -
- {isRecording && ( - - )} -
- {/* Simple System Prompt Section - just a gear button and input when expanded */} -
-
- -
- - {isSystemPromptExpanded && ( -