diff --git a/.claude/settings.json b/.claude/settings.json new file mode 100644 index 0000000..23e05b0 --- /dev/null +++ b/.claude/settings.json @@ -0,0 +1,5 @@ +{ + "skills": { + "paths": [".claude/skills"] + } +} diff --git a/.claude/skills/commit-msg.md b/.claude/skills/commit-msg.md new file mode 100644 index 0000000..0f5bf9a --- /dev/null +++ b/.claude/skills/commit-msg.md @@ -0,0 +1,141 @@ +# Commit Message Protocol + +Drafts a correctly structured commit message for this project. Invoke after completing +a unit of work and before running `git commit`. + +Triggered by: `/commit-msg` + +--- + +## Message structure + +### Subject line +``` +type(scope): imperative summary in present tense +``` +- Max 72 characters +- Imperative mood: "add", "fix", "update" — not "added", "fixing", "updates" +- Types: `feat`, `fix`, `chore`, `docs`, `refactor`, `review` +- Scope: the subsystem, component, or file area affected (e.g. `ptt`, `panel`, `settings`) +- No trailing period +- **Never** include `Co-Authored-By` lines + +### Body format +- Leave one blank line between subject and body +- Group related changes under a plain-text **section label** followed by a colon + (e.g. `Cursor companion:`, `Settings store:`, `Voice catalogue:`) +- Each point starts with `- ` and uses an em dash `—` to separate the change + from its rationale or mechanism +- Explain the **why** and **how**, not just the what +- Wrap all lines at ~72 characters +- Separate section groups with a blank line +- No bullet sub-nesting — keep it flat + +### What belongs in the body +- Non-obvious decisions or constraints +- Failure modes the change addresses +- Subtle invariants a future reader would need to understand the diff +- Anything that would look arbitrary without context + +### What does NOT belong +- Restatement of what the diff already shows +- References to the current task, issue number, or caller + ("added for the X flow", "handles the case from issue #123") — these rot +- `Co-Authored-By` lines — ever + +--- + +## Step 1 — Gather context + +Run the following to understand what changed: +``` +git diff --cached --stat +git diff --cached +git status +``` + +If nothing is staged, note it and ask the developer which files to include before drafting. + +--- + +## Step 2 — Draft the message + +Using the structure above, produce a complete commit message. Show it in a code block +so the developer can copy it directly. + +### Choosing the type +| Situation | Type | +|-----------|------| +| New user-facing capability | `feat` | +| Bug fix or regression | `fix` | +| Build, tooling, deps, version bumps | `chore` | +| Documentation only | `docs` | +| Internal restructure, no behaviour change | `refactor` | +| Code review follow-up changes | `review` | + +### Choosing the scope +Use the primary subsystem or component name affected. Examples from this codebase: +`ptt`, `panel`, `overlay`, `settings`, `voice`, `chats`, `cursor`, `ipc`, `types`, `build` + +If multiple unrelated subsystems changed, consider whether this should be split into +multiple commits. If the changes are coherent, use the broadest accurate scope or omit +scope parentheses. 
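As a quick sanity pass, the subject-line rules above can be checked mechanically before the Step 3 review. A minimal bash sketch — the `subject` value is a hypothetical draft, not project history:

```bash
# Hypothetical drafted subject; replace with the actual draft.
subject='fix(ptt): release mic when push-to-talk loses focus'

# Rule: subject must be 72 characters or fewer.
[ "${#subject}" -le 72 ] || echo "FAIL: subject is ${#subject} chars (max 72)"

# Rule: no trailing period on the subject.
case "$subject" in *.) echo "FAIL: subject ends with a period" ;; esac

# Rule: Co-Authored-By is never allowed anywhere in the message.
printf '%s\n' "$subject" | grep -qi 'co-authored-by' && echo "FAIL: Co-Authored-By present"
```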
+ +--- + +## Step 3 — Validate before presenting + +Self-check before showing the draft: +- [ ] Subject ≤ 72 characters +- [ ] Present tense imperative subject +- [ ] No `Co-Authored-By` line present +- [ ] Body lines wrap at ~72 characters +- [ ] Rationale present for any non-obvious change (em dash format) +- [ ] No task/issue references in the body +- [ ] Section labels are plain text (no `##` markdown headers) + +--- + +## Step 4 — Present and iterate + +Show the draft. Ask: "Does this capture everything, or should any section be adjusted?" + +If the developer requests changes, revise and re-validate before presenting again. + +--- + +## Example (from this project's git history) + +``` +feat(sprint/v1.3.0): cursor companion, voice catalogue, chat export + +Cursor companion: +- Add CursorCompanion.tsx — floats near the system cursor so the + overlay stays visible without covering active UI elements. +- Wire IPC channel for position updates — renderer polls on a 100ms + interval to keep latency imperceptible without saturating the main + process. + +Voice catalogue: +- Extend VoiceTab with per-voice preview — lets users audition a voice + before committing; avoids settings round-trips. +- Add voice metadata to shared types — ElevenLabs model ID and category + fields needed downstream for the API call builder. + +Chat export: +- Add export-to-clipboard action in ChatsTab — markdown format chosen + for portability; plain text would lose structural context. +- Persist export timestamp in settings-store — prevents duplicate + exports on accidental re-trigger. +``` + +--- + +## Hard constraints (always enforce) + +- **Never include `Co-Authored-By`** — this project explicitly prohibits it +- **Never generate a subject line over 72 characters** — truncate scope or + rephrase rather than exceed the limit +- **Never use markdown in the commit body** — no `##`, `**bold**`, or backtick + code fences; plain text only +- **Never reference issue numbers, PR numbers, or task IDs** in the body diff --git a/.claude/skills/merge-flow.md b/.claude/skills/merge-flow.md new file mode 100644 index 0000000..d8c00de --- /dev/null +++ b/.claude/skills/merge-flow.md @@ -0,0 +1,99 @@ +# Merge Flow Protocol + +Enforces the project's GitFlow PR routing rules. Invoke before opening any pull request +or when unsure where a branch should target. + +Triggered by: `/merge-flow` + +--- + +## Routing rules (canonical) + +``` +feature/* → dev (integration; all features stabilise here) +fix/* → master (hotfix) + dev (backport) +sprint/vX → dev (then dev → master via release/*) +release/vX → master (tagged) + dev +dev → master (via release/* only — never direct) +``` + +**master is a production release trigger, not an integration branch.** +Every merge to master = a versioned installer build fires in CI. +The project head gates master exclusively via PR review. 
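The routing table above reduces to a prefix match. A minimal sketch of the same rules as a shell check — illustrative only, since the skill performs this classification conversationally:

```bash
# Classify the current branch and print its correct PR target per the GitFlow rules.
branch=$(git rev-parse --abbrev-ref HEAD)
case "$branch" in
  feature/*)  echo "PR target: dev" ;;
  sprint/*)   echo "PR target: dev" ;;
  fix/*)      echo "PR target: master (hotfix), then backport to dev" ;;
  release/*)  echo "PR target: master (tagged), then sync back to dev" ;;
  dev)        echo "dev never targets master directly — cut a release/* branch" ;;
  master)     echo "master originates no PRs" ;;
  *)          echo "Non-standard branch name: $branch — confirm intent before opening a PR" ;;
esac
```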
+ +--- + +## Step 1 — Identify current branch + +Run: `git rev-parse --abbrev-ref HEAD` + +Classify the branch by prefix: + +| Branch pattern | Correct PR target | Notes | +|----------------|-------------------|-------| +| `feature/*` | `dev` | Standard feature work | +| `sprint/vX.X.X`| `dev` | Port/selective rebase off master | +| `fix/*` | `master` + `dev` | Hotfix: merge to master (tagged), backport to dev | +| `release/vX.X.X`| `master` + `dev` | RC branch: merge to master (tagged), sync dev | +| `dev` | Never directly | Only via a `release/*` branch | +| `master` | Never | Production; no outbound PRs | +| anything else | Warn + confirm | Non-standard name — clarify intent | + +--- + +## Step 2 — Validate the target + +If the developer states or implies a PR target, check it against the table above. + +### If target is correct: +Confirm: "Branch `[name]` → `[target]` is correct per GitFlow rules. Proceed." + +### If target is `master` and branch is NOT `fix/*` or `release/*`: +Block and warn: +> "Direct merge to master is not allowed for `[branch-type]` branches. +> master is a production release trigger gated by the project head. +> Correct target for `[branch]` is `dev`. +> Open the PR against `dev` instead." + +### If target is `dev` and branch is `fix/*`: +Warn: +> "Hotfixes must merge to `master` first (with a version tag), then backport to `dev`. +> If you merge only to `dev`, the fix will not ship until the next release cycle." + +--- + +## Step 3 — Pre-PR checklist + +Before the developer opens the PR, run through: + +1. **Branch is aligned** — suggest running `/sprint-align` if branch is `sprint/vX.X.X` +2. **No direct commits to master or dev** — confirm work is on a proper branch +3. **Version bump** — for `release/*` branches only: confirm `package.json` version + is updated before merging to master +4. **Changelog** — `.changelog/` is gitignored and local only; do not attempt + `git add .changelog/` +5. **Commit messages** — suggest `/commit-msg` if any commits on this branch + need to be cleaned up before the PR + +Report checklist status: which items pass, which need attention. + +--- + +## Step 4 — PR description guidance + +Remind the developer: +- PR title should follow the same conventional prefix as commits: `feat(scope): ...` +- PR body should summarise the WHY, not just the what +- For `release/*` → `master` PRs: include the version tag that will be applied post-merge +- The project head reviews all PRs into master; do not merge without approval + +--- + +## Hard constraints (always enforce) + +- **Never create, merge, or push PRs to master autonomously** +- **Never suggest bypassing the project head's review gate** +- **Never commit directly to `master` or `dev`** — always branch +- If asked to "just push to master directly", refuse and explain the release trigger risk: + > "Every master merge fires a CI build and ships a versioned installer. + > Unreviewed code reaching master is a production incident, not a shortcut." diff --git a/.claude/skills/new-branch.md b/.claude/skills/new-branch.md new file mode 100644 index 0000000..7eb1c39 --- /dev/null +++ b/.claude/skills/new-branch.md @@ -0,0 +1,110 @@ +# New Branch Protocol + +Creates a correctly named, correctly based branch for any GitFlow work type. +Invoke before starting any new unit of work. 
+ +Triggered by: `/new-branch` + +--- + +## Branch types, bases, and naming + +| Work type | Branch pattern | Base branch | Example | +|-----------|---------------|-------------|---------| +| New feature or experiment | `feature/` | `dev` | `feature/voice-preview` | +| Bug fix on live production | `fix/` | `master` | `fix/ptt-mic-stuck` | +| Release candidate / QA | `release/vX.X.X` | `dev` | `release/v1.2.0` | +| Upstream redesign port | `sprint/vX.X.X` | `master` | `sprint/v1.2.0` | + +**Rules:** +- `feature/*` and `release/*` always base off `dev` — never master +- `fix/*` always base off `master` — hotfixes must ship without carrying unreleased dev work +- `sprint/*` always base off `master` — selective port after upstream redesign +- Descriptions use kebab-case, no version numbers in feature names +- Keep names short and unambiguous (3–5 words max) + +--- + +## Step 1 — Clarify intent + +Ask the developer: +> "What are you building? (feature / hotfix / release candidate / sprint port)" + +If the developer describes the work without naming a type, infer from context: +- "add X feature" → `feature/*` off `dev` +- "fix a bug in prod" → `fix/*` off `master` +- "start the next sprint" → `sprint/vX.X.X` off `master` +- "prep a release" → `release/vX.X.X` off `dev` + +Confirm the classification before proceeding: +> "This sounds like a `feature/*` branch. Correct?" + +--- + +## Step 2 — Confirm base branch is current + +Run: `git fetch origin --quiet --prune` + +Then check the intended base is up to date locally: +``` +git log --oneline HEAD..origin/ | head -5 +``` + +If the local base is behind origin, warn: +> "Your local `[base]` is behind origin by [N] commits. Update it first: +> `git checkout [base] && git pull --ff-only`" + +Do not proceed until the developer confirms the base is current. + +--- + +## Step 3 — Suggest branch name + +Propose a name following the pattern. Use the developer's description, kebab-cased: + +> "Suggested branch name: `feature/voice-preview` +> Base: `dev` +> Command: `git checkout -b feature/voice-preview dev`" + +Ask: "Does this name work, or would you like to adjust it?" + +--- + +## Step 4 — Output the exact command + +Once name and base are confirmed, output the single command to run: + +``` +git checkout -b +``` + +**Do not run this command.** Present it for the developer to execute. + +After the developer confirms the branch is created, suggest running `/sprint-align` +for `sprint/*` branches, or remind them that `/merge-flow` will validate the PR +target when they're ready to open a PR. + +--- + +## Step 5 — Post-creation reminder + +Once the branch exists, state: +> "Branch `[name]` created off `[base]`. When you're ready: +> - `/commit-msg` — draft a structured commit message +> - `/merge-flow` — confirm your PR target before opening a PR" + +For `sprint/*` branches specifically, add: +> "Run `/sprint-align` now to confirm this branch is current with master." 
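Steps 2 and 4 reduce to two short commands. A sketch assuming the base is `dev` and the agreed name is `feature/voice-preview` — both values are placeholders for whatever Step 1 and Step 3 settle on:

```bash
base=dev
branch=feature/voice-preview

# Step 2 — confirm the local base is current before branching off it.
git fetch origin --quiet --prune
behind=$(git rev-list --count "$base..origin/$base")
if [ "$behind" -gt 0 ]; then
  echo "Local $base is behind origin by $behind commit(s): run 'git checkout $base && git pull --ff-only' first"
fi

# Step 4 — the command presented to the developer (not executed by the skill).
echo "git checkout -b $branch $base"
```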
+ +--- + +## Hard constraints (always enforce) + +- **Never base a `feature/*` branch off `master`** — unreleased features + must not bypass the `dev` integration gate +- **Never base a `fix/*` branch off `dev`** — hotfixes must not carry + unreleased dev work into production +- **Never create branches directly on `master` or `dev`** — those branches + receive merges, they do not originate work +- **Never run `git checkout -b` autonomously** — always present the command + and let the developer execute it diff --git a/.claude/skills/sprint-align.md b/.claude/skills/sprint-align.md new file mode 100644 index 0000000..9710781 --- /dev/null +++ b/.claude/skills/sprint-align.md @@ -0,0 +1,124 @@ +# Sprint Alignment Protocol + +Invoke this skill at the START of every sprint branch before writing any code. +Triggered by: `/sprint-align` + +--- + +## Step 1 — Identify branch context + +Run: `git rev-parse --abbrev-ref HEAD` + +If the branch name does not match the pattern `sprint/vX.X.X`, display a warning: +> "Current branch does not follow the sprint/vX.X.X naming convention. Confirm this is +> the intended sprint branch before proceeding." + +Ask the developer to confirm or correct before continuing. + +--- + +## Step 2 — Run the alignment script + +Execute: `bash scripts/sprint-start.sh` + +Run this from the repository root. Capture the full output and exit code. + +If the script fails to run (bash not found, wrong working directory, permission error): +- Report the exact error +- Tell the developer to run manually: `bash scripts/sprint-start.sh` from the repo root +- Do not proceed until the output is available + +--- + +## Step 3 — Interpret output + +### Exit code 0 — ALIGNED + +Report to the developer: +> "Sprint branch is aligned with master. No unmerged master commits detected. +> Merge base: [SHA from output]. Sprint-only commits: [count]. Safe to begin work." + +Proceed to Step 5. + +### Exit code 1 — DIVERGED + +Summarise in natural language (do NOT paste raw script output verbatim): +- How many commits master has that this sprint branch does not +- The commit list from the `COMMITS ON master NOT IN SPRINT` section +- Which files changed, from the `FILE CHANGES` section + +Example summary: +> "Master has 3 commits this sprint branch does not include: +> - abc1234 fix(ptt): mic stuck on after release +> - def5678 feat(panel): redesign layout +> - ghi9012 chore: update dependencies +> +> Affected files: src/renderer/components/OverlayApp.tsx (+12/-5), +> src/main/index.ts (+8/-2), package.json (+1/-1)" + +Then proceed to Step 4. + +--- + +## Step 4 — Decision prompt (DIVERGED only) + +Present the developer with four options: + +> "Master has [N] commits this sprint branch does not include. +> How would you like to proceed? +> +> [R] Rebase — `git rebase master` (recommended: linear history, clean sprint base) +> [M] Merge — `git merge master` (preserves branch topology) +> [S] Skip — proceed without aligning (not recommended; state your reason) +> [A] Abort — I need to review the diff manually first" + +### If [R] or [M]: +Provide the exact command but **do not execute it**. Wait for the developer to confirm they +have run it, then re-run `bash scripts/sprint-start.sh` to verify alignment (Step 2). +Do not proceed to Step 5 until exit code is 0. + +### If [S] (Skip): +Record the developer's stated reason in session context. 
Display a persistent warning +at the top of all subsequent responses this session: +> "[WARN] Sprint branch skipped master alignment — [stated reason]" + +### If [A] (Abort): +Suggest the developer inspect divergence manually: +``` +git log --oneline master ^HEAD | head -20 +git diff --stat master...HEAD +``` +Stop the skill. Await further instruction. + +--- + +## Step 5 — Session context memo + +After alignment is confirmed (exit code 0), record in working memory: +- Sprint branch name +- Master HEAD SHA at time of check (`git rev-parse master`) +- Date/time of alignment check + +Reference this context if the developer later asks "are we aligned with master?" without +re-running the script. + +--- + +## Hard constraints (always enforce) + +- **Never push to master** under any circumstances +- **Never execute `git rebase` or `git merge` on the developer's behalf** — instruct and wait +- **Never skip the fetch step** inside the script — stale local refs cause false ALIGNED results +- If mid-sprint the developer asks to re-run alignment, run it again; the script handles + non-zero sprint-only commit counts correctly and will still report ALIGNED if master + hasn't moved since the last check + +--- + +## GitFlow context (why this matters) + +Per this project's branching strategy, `sprint/vX.X.X` branches are selective ports off +`master` after upstream redesigns. They must start from master HEAD. A diverged sprint +branch means the project head has continued work on master that this branch was not +rebased onto — discovered late, this causes conflict-heavy merges across already-modified +files. This skill catches that at the session start, before the first line of code is written. diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..e4e10b7 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,8 @@ +# Normalize line endings +* text=auto + +# Force LF for shell scripts — CRLF causes \r: command not found in bash +*.sh text eol=lf + +# Force LF for markdown skill files read by Claude Code +.claude/skills/*.md text eol=lf diff --git a/.gitignore b/.gitignore index 1c2ffcf..b86e31d 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,9 @@ release/ .DS_Store Thumbs.db *.log + +# Local-only files — never commit +CLAUDE.md +.changelog/ +run.bat +setup.bat diff --git a/package.json b/package.json index 2e296e2..c8c84c2 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "flicky", - "version": "1.0.3", + "version": "1.2.0", "description": "Cross-platform AI companion — voice-driven, screen-aware assistant with a pointing cursor overlay", "main": "dist/main/main/index.js", "author": { diff --git a/scripts/sprint-start.sh b/scripts/sprint-start.sh new file mode 100644 index 0000000..50ccf81 --- /dev/null +++ b/scripts/sprint-start.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# sprint-start.sh — Pre-sprint alignment check +# Usage: bash scripts/sprint-start.sh [sprint-branch] [base-branch] +# Defaults: current branch vs master +set -euo pipefail + +SPRINT_BRANCH="${1:-$(git rev-parse --abbrev-ref HEAD)}" +BASE_BRANCH="${2:-master}" + +echo "Sprint branch : $SPRINT_BRANCH" +echo "Base branch : $BASE_BRANCH" +echo "" + +# Fetch to ensure base ref reflects remote state +git fetch origin "$BASE_BRANCH" --quiet --prune 2>/dev/null || { + echo "[WARN] Could not fetch origin/$BASE_BRANCH — comparing against local ref only." 
+} + +AHEAD_COUNT=$(git rev-list --count "$SPRINT_BRANCH".."$BASE_BRANCH") +SPRINT_ONLY=$(git rev-list --count "$BASE_BRANCH".."$SPRINT_BRANCH") +MERGE_BASE=$(git merge-base "$BASE_BRANCH" "$SPRINT_BRANCH") + +echo "=== STATUS ===" +if [ "$AHEAD_COUNT" -eq 0 ]; then + echo "ALIGNED" + echo "Sprint branch is up to date with $BASE_BRANCH." + echo "Merge base : $MERGE_BASE" + echo "Sprint-only commits: $SPRINT_ONLY" + exit 0 +else + echo "DIVERGED" + echo "$BASE_BRANCH has $AHEAD_COUNT commit(s) not present in this sprint branch." + echo "Sprint-only commits: $SPRINT_ONLY" + echo "Merge base : $MERGE_BASE" + echo "" + echo "=== COMMITS ON $BASE_BRANCH NOT IN SPRINT ===" + git log --oneline "$SPRINT_BRANCH".."$BASE_BRANCH" + echo "" + echo "=== FILE CHANGES ($BASE_BRANCH vs SPRINT) ===" + git diff --stat "$SPRINT_BRANCH"..."$BASE_BRANCH" + exit 1 +fi diff --git a/src/main/companion-manager.ts b/src/main/companion-manager.ts index 92b5d60..7c8be60 100644 --- a/src/main/companion-manager.ts +++ b/src/main/companion-manager.ts @@ -1,6 +1,7 @@ import { app, systemPreferences } from 'electron'; import { ClaudeAPI } from './services/claude-api'; import { OpenAIAPI } from './services/openai-api'; +import { OllamaAPI } from './services/ollama-api'; import { ElevenLabsTTS } from './services/elevenlabs-tts'; import { createTranscriptionProvider, type TranscriptionProvider } from './services/transcription'; import { captureAllDisplays } from './services/screen-capture'; @@ -50,6 +51,7 @@ export class CompanionManager { private claude: ClaudeAPI; private openai: OpenAIAPI; + private ollama: OllamaAPI; private tts: ElevenLabsTTS; private context: ContextManager; private transcriptionProvider: TranscriptionProvider | null = null; @@ -77,6 +79,7 @@ export class CompanionManager { this.callbacks = callbacks; this.claude = new ClaudeAPI(); this.openai = new OpenAIAPI(); + this.ollama = new OllamaAPI(); this.tts = new ElevenLabsTTS(); this.context = new ContextManager(); @@ -480,6 +483,34 @@ export class CompanionManager { mindOptions, mindCallbacks, ); + } else if (settings.mindProvider === 'ollama') { + const connections = (settings.localConnections ?? []).filter((c) => c.enabled); + const conn = connections[0]; + if (!conn) { + mindCallbacks.onError(new Error('No enabled local connection. Add one in Mind → Local.')); + return; + } + const bearerToken = keyStore.getApiKey(`local_${conn.id}`) ?? undefined; + let model: string; + if (conn.activeModelId) { + model = conn.activeModelId; + } else if (conn.modelIds.length > 0) { + model = conn.modelIds[0]; + } else { + const discovered = await this.ollama.getModels(conn.url, bearerToken); + model = discovered[0] ?? 'llama3'; + } + const fullModelId = conn.prefixId ? 
`${conn.prefixId}${model}` : model; + await this.ollama.streamChat( + result.text, + this.lastScreenshots, + this.context.getMessagesForSend(), + fullModelId, + { replyTone: mindOptions.replyTone, signal: mindOptions.signal }, + mindCallbacks, + conn.url, + bearerToken, + ); } else { await this.claude.streamChat( result.text, diff --git a/src/main/index.ts b/src/main/index.ts index efeb170..28ec65f 100644 --- a/src/main/index.ts +++ b/src/main/index.ts @@ -2,9 +2,13 @@ import { app, BrowserWindow, Tray, Menu, globalShortcut, screen, ipcMain, shell, import path from 'path'; import { CompanionManager } from './companion-manager'; import { createPanelWindow, createOverlayWindow, createStreamWindow } from './windows'; -import { IPC, type StreamVisibility, type StreamWindowBounds } from '../shared/types'; +import { IPC, type StreamVisibility, type StreamWindowBounds, type LocalConnection } from '../shared/types'; import { AUDIO_IPC } from './services/audio-capture'; import * as chatHistory from './services/chat-history-store'; +import * as settingsStore from './services/settings-store'; +import { setApiKey, getApiKey, deleteApiKey } from './services/key-store'; +import { OllamaAPI } from './services/ollama-api'; +import { randomUUID } from 'crypto'; // Prevent multiple instances const gotLock = app.requestSingleInstanceLock(); @@ -281,6 +285,82 @@ app.whenReady().then(() => { ipcMain.on(IPC.DELETE_API_KEY, (_e, name) => companion.deleteApiKey(name)); ipcMain.handle(IPC.GET_API_KEY_STATUS, () => companion.getApiKeyStatus()); + // Local Connection Management + const ollamaAPI = new OllamaAPI(); + + function emitLocalConnections(): void { + const settings = companion.getSettings(); + sendToPanel(IPC.SETTINGS_CHANGED, settings); + } + + ipcMain.handle(IPC.GET_LOCAL_CONNECTIONS, () => { + return settingsStore.get('localConnections') ?? []; + }); + + ipcMain.handle(IPC.ADD_LOCAL_CONNECTION, (_e, conn: Omit) => { + const connections = settingsStore.get('localConnections') ?? []; + const newConn: LocalConnection = { ...conn, id: randomUUID() }; + settingsStore.set('localConnections', [...connections, newConn]); + emitLocalConnections(); + return newConn; + }); + + ipcMain.handle(IPC.UPDATE_LOCAL_CONNECTION, (_e, id: string, patch: Partial) => { + const connections = settingsStore.get('localConnections') ?? []; + const updated = connections.map((c) => (c.id === id ? { ...c, ...patch, id } : c)); + settingsStore.set('localConnections', updated); + emitLocalConnections(); + }); + + ipcMain.handle(IPC.DELETE_LOCAL_CONNECTION, (_e, id: string) => { + const connections = settingsStore.get('localConnections') ?? 
[]; + settingsStore.set('localConnections', connections.filter((c) => c.id !== id)); + try { deleteApiKey(`local_${id}`); } catch { /* key may not exist */ } + emitLocalConnections(); + }); + + ipcMain.handle(IPC.TEST_LOCAL_CONNECTION, (_e, url: string, bearerToken?: string) => { + return ollamaAPI.testConnection(url, bearerToken); + }); + + ipcMain.handle(IPC.SET_LOCAL_CONNECTION_KEY, (_e, id: string, token: string) => { + setApiKey(`local_${id}`, token); + }); + + ipcMain.handle(IPC.DELETE_LOCAL_CONNECTION_KEY, (_e, id: string) => { + try { deleteApiKey(`local_${id}`); } catch { /* key may not exist */ } + }); + + // Ollama Model Management + ipcMain.handle(IPC.GET_OLLAMA_MODELS, (_e, url: string, bearerToken?: string) => { + return ollamaAPI.getModelDetails(url, bearerToken); + }); + + ipcMain.on(IPC.PULL_OLLAMA_MODEL, (event, url: string, modelTag: string, bearerToken?: string) => { + const controller = new AbortController(); + ollamaAPI.pullModel( + url, + modelTag, + bearerToken, + (progress) => { event.sender.send(IPC.OLLAMA_PULL_PROGRESS, progress); }, + controller.signal, + ).then(() => { + event.sender.send(IPC.OLLAMA_PULL_COMPLETE, { model: modelTag }); + }).catch((err: Error) => { + if (err.name !== 'AbortError') { + event.sender.send(IPC.OLLAMA_PULL_ERROR, { error: err.message }); + } + }); + }); + + ipcMain.handle(IPC.DELETE_OLLAMA_MODEL, (_e, url: string, modelName: string, bearerToken?: string) => { + return ollamaAPI.deleteModel(url, modelName, bearerToken); + }); + + ipcMain.handle(IPC.CREATE_OLLAMA_MODEL, (_e, url: string, modelTag: string, modelfileJson: string, bearerToken?: string) => { + return ollamaAPI.createModel(url, modelTag, modelfileJson, bearerToken); + }); + // Audio capture: relay chunks from overlay renderer to companion ipcMain.on(AUDIO_IPC.AUDIO_CHUNK, (_e, buffer: Buffer) => { companion.handleAudioChunk(buffer); diff --git a/src/main/services/key-store.ts b/src/main/services/key-store.ts index a232e88..a3eac11 100644 --- a/src/main/services/key-store.ts +++ b/src/main/services/key-store.ts @@ -18,7 +18,8 @@ import { writeFileAtomic } from './fs-util'; */ const KEY_NAMES = ['anthropic', 'openai', 'elevenlabs', 'groq'] as const; -export type ApiKeyName = (typeof KEY_NAMES)[number]; +export type NamedApiKey = (typeof KEY_NAMES)[number]; +export type ApiKeyName = NamedApiKey | string; interface KeyFile { encryptedKeys: Record; // base64-encoded ciphertext @@ -83,7 +84,7 @@ export function deleteApiKey(name: ApiKeyName): void { writeKeyFile(data); } -export function getKeyStatus(): Record { +export function getKeyStatus(): Record { return { anthropic: hasApiKey('anthropic'), openai: hasApiKey('openai'), diff --git a/src/main/services/ollama-api.ts b/src/main/services/ollama-api.ts new file mode 100644 index 0000000..30b2cec --- /dev/null +++ b/src/main/services/ollama-api.ts @@ -0,0 +1,356 @@ +import type { + ConversationTurn, + ScreenCapture, + ReplyTone, + OllamaModelInfo, + OllamaPullProgress, +} from '../../shared/types'; +import { buildSystemPrompt } from './prompts'; + +const CONNECTION_TIMEOUT_MS = 3_000; + +// Model families that support vision/multimodal input. 
+const VISION_FAMILIES = [ + 'llava', 'bakllava', 'moondream', 'cogvlm', 'minicpm-v', + 'llava-llama3', 'llava-phi3', 'granite3.2-vision', + 'qwen2-vl', 'qwen2.5-vl', 'qwen3-vl', + 'llava-v1.6', 'llava-v1.5', + 'gemma3', 'gemma4', 'mistral-small3.1', 'devstral', +]; + +export function isVisionModel(modelName: string): boolean { + const lower = modelName.toLowerCase(); + return VISION_FAMILIES.some((f) => lower.includes(f)); +} + +export interface OllamaStreamCallbacks { + onChunk: (text: string) => void; + onComplete: (fullText: string, usage?: { inputTokens: number; outputTokens: number }) => void; + onError: (error: Error) => void; +} + +export interface OllamaChatOptions { + replyTone: ReplyTone; + signal?: AbortSignal; +} + +export interface OllamaTestResult { + ok: boolean; + latencyMs: number; + modelCount?: number; + error?: string; +} + +function authHeaders(bearerToken?: string): Record { + if (bearerToken) return { Authorization: `Bearer ${bearerToken}` }; + return {}; +} + +// Strip trailing /v1 so callers can safely append /v1/... without doubling. +// e.g. https://api.x.ai/v1 → https://api.x.ai +// http://localhost:11434 → http://localhost:11434 +function normalizeBase(url: string): string { + return url.replace(/\/v1\/?$/, '').replace(/\/$/, ''); +} + +async function timedFetch( + url: string, + init: RequestInit, + timeoutMs: number, +): Promise { + const controller = new AbortController(); + const timer = setTimeout(() => controller.abort(), timeoutMs); + const signal = init.signal + ? AbortSignal.any([init.signal as AbortSignal, controller.signal]) + : controller.signal; + try { + const res = await fetch(url, { ...init, signal }); + clearTimeout(timer); + return res; + } catch (err) { + clearTimeout(timer); + throw err; + } +} + +export class OllamaAPI { + // ── Connection health ─────────────────────────────────────────────── + + async testConnection(url: string, bearerToken?: string): Promise { + const base = normalizeBase(url); + const start = Date.now(); + try { + // Try Ollama-native endpoint first; fall back to OpenAI-compat for external providers. + let response = await timedFetch( + `${base}/api/tags`, + { headers: authHeaders(bearerToken) }, + CONNECTION_TIMEOUT_MS, + ); + if (response.status === 404) { + response = await timedFetch( + `${base}/v1/models`, + { headers: authHeaders(bearerToken) }, + CONNECTION_TIMEOUT_MS, + ); + } + const latencyMs = Date.now() - start; + if (!response.ok) return { ok: false, latencyMs, error: `HTTP ${response.status}` }; + const data = await response.json() as { + models?: OllamaModelInfo[]; + data?: { id: string }[]; + }; + const modelCount = data.models?.length ?? data.data?.length ?? 0; + return { ok: true, latencyMs, modelCount }; + } catch (err) { + const latencyMs = Date.now() - start; + const msg = err instanceof Error ? err.message : String(err); + return { ok: false, latencyMs, error: msg.includes('abort') ? 
'Connection timed out' : msg }; + } + } + + // ── Model listing ─────────────────────────────────────────────────── + + async getModels(url: string, bearerToken?: string): Promise { + const base = normalizeBase(url); + try { + let response = await timedFetch( + `${base}/api/tags`, + { headers: authHeaders(bearerToken) }, + CONNECTION_TIMEOUT_MS, + ); + if (response.status === 404) { + response = await timedFetch( + `${base}/v1/models`, + { headers: authHeaders(bearerToken) }, + CONNECTION_TIMEOUT_MS, + ); + if (!response.ok) return []; + const data = await response.json() as { data?: { id: string }[] }; + return (data.data ?? []).map((m) => m.id); + } + if (!response.ok) return []; + const data = await response.json() as { models?: OllamaModelInfo[] }; + return (data.models ?? []).map((m) => m.name); + } catch { + return []; + } + } + + async getModelDetails(url: string, bearerToken?: string): Promise { + const base = normalizeBase(url); + try { + let response = await timedFetch( + `${base}/api/tags`, + { headers: authHeaders(bearerToken) }, + CONNECTION_TIMEOUT_MS, + ); + if (response.status === 404) { + response = await timedFetch( + `${base}/v1/models`, + { headers: authHeaders(bearerToken) }, + CONNECTION_TIMEOUT_MS, + ); + if (!response.ok) return []; + const data = await response.json() as { data?: { id: string }[] }; + return (data.data ?? []).map((m) => ({ name: m.id })); + } + if (!response.ok) return []; + const data = await response.json() as { models?: OllamaModelInfo[] }; + return data.models ?? []; + } catch { + return []; + } + } + + // ── Model pull (streaming NDJSON) ─────────────────────────────────── + + async pullModel( + url: string, + modelTag: string, + bearerToken: string | undefined, + onProgress: (p: OllamaPullProgress) => void, + signal?: AbortSignal, + ): Promise { + const response = await fetch(`${url}/api/pull`, { + method: 'POST', + headers: { 'Content-Type': 'application/json', ...authHeaders(bearerToken) }, + body: JSON.stringify({ name: modelTag, stream: true }), + signal, + }); + if (!response.ok) { + const errText = await response.text(); + throw new Error(`Pull failed ${response.status}: ${errText}`); + } + const reader = response.body?.getReader(); + if (!reader) throw new Error('No response body'); + const decoder = new TextDecoder(); + let buffer = ''; + while (true) { + const { done, value } = await reader.read(); + if (done) break; + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split('\n'); + buffer = lines.pop() ?? 
''; + for (const line of lines) { + const trimmed = line.trim(); + if (!trimmed) continue; + try { + const event = JSON.parse(trimmed) as OllamaPullProgress; + onProgress(event); + } catch { /* skip malformed */ } + } + } + } + + // ── Model delete ──────────────────────────────────────────────────── + + async deleteModel(url: string, modelName: string, bearerToken?: string): Promise { + const response = await timedFetch( + `${url}/api/delete`, + { + method: 'DELETE', + headers: { 'Content-Type': 'application/json', ...authHeaders(bearerToken) }, + body: JSON.stringify({ name: modelName }), + }, + CONNECTION_TIMEOUT_MS, + ); + if (!response.ok) { + const errText = await response.text(); + throw new Error(`Delete failed ${response.status}: ${errText}`); + } + } + + // ── Model create (from modelfile JSON) ────────────────────────────── + + async createModel( + url: string, + modelTag: string, + modelfileJson: string, + bearerToken?: string, + ): Promise { + let body: Record; + try { + body = JSON.parse(modelfileJson) as Record; + body.model = modelTag; + } catch { + throw new Error('Invalid JSON in modelfile field.'); + } + const response = await timedFetch( + `${url}/api/create`, + { + method: 'POST', + headers: { 'Content-Type': 'application/json', ...authHeaders(bearerToken) }, + body: JSON.stringify(body), + }, + 30_000, + ); + if (!response.ok) { + const errText = await response.text(); + throw new Error(`Create failed ${response.status}: ${errText}`); + } + } + + // ── Chat (inference) ──────────────────────────────────────────────── + + async streamChat( + prompt: string, + screenshots: ScreenCapture[], + history: ConversationTurn[], + model: string, + options: OllamaChatOptions, + callbacks: OllamaStreamCallbacks, + baseUrl: string, + bearerToken?: string, + ): Promise { + const systemPrompt = buildSystemPrompt(options.replyTone, { hasWebSearch: false }); + const vision = isVisionModel(model); + + const messages: Array<{ role: string; content: unknown }> = [ + { role: 'system', content: systemPrompt }, + ]; + + for (const turn of history) { + messages.push({ role: turn.role, content: turn.content }); + } + + // Only send image content for vision-capable models. + if (vision && screenshots.length > 0) { + const userContent: Array> = []; + for (let i = 0; i < screenshots.length; i++) { + const sc = screenshots[i]; + userContent.push({ + type: 'text', + text: `[screen${i}] ${sc.imageWidth}x${sc.imageHeight}px.${sc.isCursorScreen ? 
' (active screen)' : ''}`, + }); + userContent.push({ + type: 'image_url', + image_url: { url: `data:image/jpeg;base64,${sc.dataBase64}` }, + }); + } + userContent.push({ type: 'text', text: prompt }); + messages.push({ role: 'user', content: userContent }); + } else { + messages.push({ role: 'user', content: prompt }); + } + + const body = { + model, + messages, + stream: true, + stream_options: { include_usage: true }, + }; + + try { + const response = await fetch(`${normalizeBase(baseUrl)}/v1/chat/completions`, { + method: 'POST', + headers: { 'Content-Type': 'application/json', ...authHeaders(bearerToken) }, + body: JSON.stringify(body), + signal: options.signal, + }); + + if (!response.ok) { + const errText = await response.text(); + throw new Error(`Ollama error ${response.status}: ${errText}`); + } + + const reader = response.body?.getReader(); + if (!reader) throw new Error('No response body'); + + const decoder = new TextDecoder(); + let fullText = ''; + let buffer = ''; + let inputTokens = 0; + let outputTokens = 0; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split('\n'); + buffer = lines.pop() ?? ''; + for (const line of lines) { + if (!line.startsWith('data: ')) continue; + const data = line.slice(6); + if (data === '[DONE]') continue; + try { + const event = JSON.parse(data); + const chunk = event.choices?.[0]?.delta?.content; + if (typeof chunk === 'string' && chunk.length > 0) { + fullText += chunk; + callbacks.onChunk(chunk); + } + if (event.usage) { + inputTokens = event.usage.prompt_tokens ?? 0; + outputTokens = event.usage.completion_tokens ?? 0; + } + } catch { /* skip malformed SSE lines */ } + } + } + + callbacks.onComplete(fullText, { inputTokens, outputTokens }); + } catch (err) { + if (err instanceof Error && (err.name === 'AbortError' || options.signal?.aborted)) return; + callbacks.onError(err instanceof Error ? 
err : new Error(String(err))); + } + } +} diff --git a/src/main/services/settings-store.ts b/src/main/services/settings-store.ts index 544d592..8acbf97 100644 --- a/src/main/services/settings-store.ts +++ b/src/main/services/settings-store.ts @@ -12,6 +12,7 @@ import type { ReplyTone, StreamVisibility, StreamWindowBounds, + LocalConnection, } from '../../shared/types'; /** @@ -40,6 +41,8 @@ export interface StoredSettings { streamVisibility: StreamVisibility; streamWindowBounds: StreamWindowBounds | null; + localConnections: LocalConnection[]; + onboardingComplete: boolean; } @@ -64,6 +67,8 @@ const DEFAULTS: StoredSettings = { streamVisibility: 'off', streamWindowBounds: null, + localConnections: [], + onboardingComplete: false, }; diff --git a/src/preload/index.ts b/src/preload/index.ts index 78a6ef0..94fb8a5 100644 --- a/src/preload/index.ts +++ b/src/preload/index.ts @@ -16,7 +16,11 @@ import type { ChatEntry, StreamVisibility, StreamWindowBounds, + LocalConnection, + OllamaModelInfo, + OllamaPullProgress, } from '../shared/types'; +import type { OllamaTestResult } from '../main/services/ollama-api'; const api = { // ── Settings ─────────────────────────────────────────────────────── @@ -68,6 +72,47 @@ const api = { getChatHistory: (): Promise => ipcRenderer.invoke(IPC.GET_CHAT_HISTORY), clearChatHistory: (): void => ipcRenderer.send(IPC.CLEAR_CHAT_HISTORY), + // ── Local Connections ───────────────────────────────────────────────── + getLocalConnections: (): Promise => + ipcRenderer.invoke(IPC.GET_LOCAL_CONNECTIONS), + addLocalConnection: (conn: Omit): Promise => + ipcRenderer.invoke(IPC.ADD_LOCAL_CONNECTION, conn), + updateLocalConnection: (id: string, patch: Partial): Promise => + ipcRenderer.invoke(IPC.UPDATE_LOCAL_CONNECTION, id, patch), + deleteLocalConnection: (id: string): Promise => + ipcRenderer.invoke(IPC.DELETE_LOCAL_CONNECTION, id), + testLocalConnection: (url: string, bearerToken?: string): Promise => + ipcRenderer.invoke(IPC.TEST_LOCAL_CONNECTION, url, bearerToken), + getOllamaModels: (url: string, bearerToken?: string): Promise => + ipcRenderer.invoke(IPC.GET_OLLAMA_MODELS, url, bearerToken), + setLocalConnectionKey: (id: string, token: string): Promise => + ipcRenderer.invoke(IPC.SET_LOCAL_CONNECTION_KEY, id, token), + deleteLocalConnectionKey: (id: string): Promise => + ipcRenderer.invoke(IPC.DELETE_LOCAL_CONNECTION_KEY, id), + + // ── Ollama Model Management ─────────────────────────────────────────── + pullOllamaModel: (url: string, modelTag: string, bearerToken?: string): void => + ipcRenderer.send(IPC.PULL_OLLAMA_MODEL, url, modelTag, bearerToken), + onOllamaPullProgress: (cb: (p: OllamaPullProgress) => void) => { + const handler = (_e: Electron.IpcRendererEvent, p: OllamaPullProgress) => cb(p); + ipcRenderer.on(IPC.OLLAMA_PULL_PROGRESS, handler); + return () => ipcRenderer.removeListener(IPC.OLLAMA_PULL_PROGRESS, handler); + }, + onOllamaPullComplete: (cb: (info: { model: string }) => void) => { + const handler = (_e: Electron.IpcRendererEvent, info: { model: string }) => cb(info); + ipcRenderer.on(IPC.OLLAMA_PULL_COMPLETE, handler); + return () => ipcRenderer.removeListener(IPC.OLLAMA_PULL_COMPLETE, handler); + }, + onOllamaPullError: (cb: (info: { error: string }) => void) => { + const handler = (_e: Electron.IpcRendererEvent, info: { error: string }) => cb(info); + ipcRenderer.on(IPC.OLLAMA_PULL_ERROR, handler); + return () => ipcRenderer.removeListener(IPC.OLLAMA_PULL_ERROR, handler); + }, + deleteOllamaModel: (url: string, modelName: string, bearerToken?: 
string): Promise => + ipcRenderer.invoke(IPC.DELETE_OLLAMA_MODEL, url, modelName, bearerToken), + createOllamaModel: (url: string, modelTag: string, modelfileJson: string, bearerToken?: string): Promise => + ipcRenderer.invoke(IPC.CREATE_OLLAMA_MODEL, url, modelTag, modelfileJson, bearerToken), + // ── Lifecycle ────────────────────────────────────────────────────── openExternal: (url: string): void => ipcRenderer.send(IPC.OPEN_EXTERNAL, url), quit: (): void => ipcRenderer.send(IPC.QUIT_APP), diff --git a/src/renderer/components/panel/AddConnectionModal.tsx b/src/renderer/components/panel/AddConnectionModal.tsx new file mode 100644 index 0000000..0d8d327 --- /dev/null +++ b/src/renderer/components/panel/AddConnectionModal.tsx @@ -0,0 +1,272 @@ +import { useState, useEffect } from 'react'; +import type { LocalConnection } from '../../../shared/types'; +import type { OllamaTestResult } from '../../../main/services/ollama-api'; + +interface AddConnectionModalProps { + existing?: LocalConnection; + onSave: (conn: LocalConnection) => void; + onClose: () => void; + onDelete?: (conn: LocalConnection) => void; +} + +type VerifyState = 'idle' | 'pending' | 'ok' | 'error'; + +export function AddConnectionModal({ existing, onSave, onClose, onDelete }: AddConnectionModalProps) { + const [connType, setConnType] = useState<'local' | 'external'>(existing?.type ?? 'local'); + const [url, setUrl] = useState(existing?.url ?? ''); + const [bearerEnabled, setBearerEnabled] = useState(existing?.bearerEnabled ?? false); + const [bearerToken, setBearerToken] = useState(''); + const [bearerVisible, setBearerVisible] = useState(false); + const [prefixId, setPrefixId] = useState(existing?.prefixId ?? ''); + const [modelInput, setModelInput] = useState(''); + const [modelIds, setModelIds] = useState(existing?.modelIds ?? []); + const [tagInput, setTagInput] = useState(''); + const [tags, setTags] = useState(existing?.tags ?? []); + const [verifyState, setVerifyState] = useState('idle'); + const [verifyResult, setVerifyResult] = useState(null); + + // Pre-fill default URL for local type + useEffect(() => { + if (!existing && connType === 'local' && !url) { + setUrl('http://localhost:11434'); + } + }, [connType]); + + const handleVerify = async () => { + if (!url.trim()) return; + setVerifyState('pending'); + setVerifyResult(null); + try { + const token = bearerEnabled && bearerToken.trim() ? bearerToken.trim() : undefined; + const result = await window.flicky.testLocalConnection(url.trim(), token); + setVerifyResult(result); + setVerifyState(result.ok ? 'ok' : 'error'); + } catch { + setVerifyState('error'); + setVerifyResult({ ok: false, latencyMs: 0, error: 'Unexpected error' }); + } + }; + + const addModelId = () => { + const v = modelInput.trim(); + if (v && !modelIds.includes(v)) setModelIds((prev) => [...prev, v]); + setModelInput(''); + }; + + const removeModelId = (id: string) => setModelIds((prev) => prev.filter((m) => m !== id)); + + const addTag = () => { + const v = tagInput.trim(); + if (v && !tags.includes(v)) setTags((prev) => [...prev, v]); + setTagInput(''); + }; + + const removeTag = (t: string) => setTags((prev) => prev.filter((x) => x !== t)); + + const handleSave = async () => { + const trimmedUrl = url.trim(); + if (!trimmedUrl) return; + + const conn: LocalConnection = { + id: existing?.id ?? '', + type: connType, + url: trimmedUrl, + enabled: existing?.enabled ?? 
true, + bearerEnabled, + prefixId: prefixId.trim() || undefined, + modelIds, + tags, + }; + + if (existing?.id) { + await window.flicky.updateLocalConnection(existing.id, conn); + if (bearerEnabled && bearerToken.trim()) { + await window.flicky.setLocalConnectionKey(existing.id, bearerToken.trim()); + } else if (!bearerEnabled) { + await window.flicky.deleteLocalConnectionKey(existing.id); + } + onSave({ ...conn, id: existing.id }); + } else { + const saved = await window.flicky.addLocalConnection(conn); + if (bearerEnabled && bearerToken.trim()) { + await window.flicky.setLocalConnectionKey(saved.id, bearerToken.trim()); + } + onSave(saved); + } + }; + + const canSave = url.trim().length > 0; + + return ( +
+
e.stopPropagation()}> +
+ {existing ? 'Edit Connection' : 'Add Connection'} + +
+ + {/* Connection Type */} +
+
Connection Type
+
+ + +
+
+ + {/* URL */} +
+
URL
+
+ { setUrl(e.target.value); setVerifyState('idle'); }} + /> + +
+ {verifyResult && ( +
+ {verifyResult.ok + ? `Connected · ${verifyResult.latencyMs}ms${verifyResult.modelCount !== undefined ? ` · ${verifyResult.modelCount} model${verifyResult.modelCount !== 1 ? 's' : ''} available` : ''}` + : `Failed: ${verifyResult.error ?? 'unknown error'}`} +
+ )} +
+ + {/* Auth */} +
+
Auth
+
+
+ + +
+ {bearerEnabled && ( + <> + setBearerToken(e.target.value)} + /> + + + )} +
+
+ + {/* Prefix ID */} +
+
Prefix ID
+ setPrefixId(e.target.value)} + /> +
Prepended to model names at inference time.
+
+ + {/* Model IDs */} +
+
Model IDs
+
+ Leave empty to include all models from the /api/tags endpoint. +
+ {modelIds.length > 0 && ( +
+ {modelIds.map((m) => ( + + {m} + + + ))} +
+ )} +
+ setModelInput(e.target.value)} + onKeyDown={(e) => { if (e.key === 'Enter') addModelId(); }} + /> + +
+
+ + {/* Tags */} +
+
Tags
+ {tags.length > 0 && ( +
+ {tags.map((t) => ( + + {t} + + + ))} +
+ )} + setTagInput(e.target.value)} + onKeyDown={(e) => { if (e.key === 'Enter') addTag(); }} + /> +
+ +
+ {existing && onDelete && ( + + )} +
+ + +
+
+
+
+ ); +} diff --git a/src/renderer/components/panel/ConnectionRow.tsx b/src/renderer/components/panel/ConnectionRow.tsx new file mode 100644 index 0000000..53f662b --- /dev/null +++ b/src/renderer/components/panel/ConnectionRow.tsx @@ -0,0 +1,37 @@ +import type { LocalConnection } from '../../../shared/types'; + +interface ConnectionRowProps { + conn: LocalConnection; + onManage: (conn: LocalConnection) => void; + onConfigure: (conn: LocalConnection) => void; + onToggle: (conn: LocalConnection) => void; +} + +export function ConnectionRow({ conn, onManage, onConfigure, onToggle }: ConnectionRowProps) { + const displayUrl = conn.label ?? conn.url; + const activeModel = conn.activeModelId; + + return ( +
+
+
{displayUrl}
+ {activeModel && ( +
{activeModel}
+ )} +
+
+ + +
+
+ ); +} diff --git a/src/renderer/components/panel/MindTab.tsx b/src/renderer/components/panel/MindTab.tsx index d61797e..3c01557 100644 --- a/src/renderer/components/panel/MindTab.tsx +++ b/src/renderer/components/panel/MindTab.tsx @@ -8,6 +8,7 @@ import type { ReplyTone, } from '../../../shared/types'; import { ProviderKey } from './ProviderKey'; +import { OllamaSection } from './OllamaSection'; interface MindTabProps { settings: FlickySettings; @@ -56,10 +57,17 @@ const OPENAI_MODELS: Array> = [ export function MindTab({ settings }: MindTabProps) { const [providerOpen, setProviderOpen] = useState(false); - const isAnthropic = settings.mindProvider === 'anthropic'; + const provider = settings.mindProvider; + const isAnthropic = provider === 'anthropic'; + const isOpenAI = provider === 'openai'; + const isOllama = provider === 'ollama'; const setTone = (t: ReplyTone) => window.flicky.setReplyTone(t); const setDepth = (d: ReasoningDepth) => window.flicky.setReasoningDepth(d); + const providerLabel = isAnthropic ? 'Anthropic' : isOpenAI ? 'OpenAI' : 'Local'; + const providerLogoText = isAnthropic ? 'A' : isOpenAI ? 'Ai' : '⬡'; + const providerLogoClass = isAnthropic ? '' : isOpenAI ? 'openai' : 'local'; + return ( <>

@@ -79,40 +87,37 @@ export function MindTab({ settings }: MindTabProps) { className="provider-pick" onClick={() => setProviderOpen((x) => !x)} > -
- {isAnthropic ? 'A' : 'Ai'} -
- {isAnthropic ? 'Anthropic' : 'OpenAI'} +
{providerLogoText}
+ {providerLabel} {providerOpen && (
- - + {( + [ + { id: 'anthropic', label: 'Anthropic', sub: 'Claude Sonnet / Opus · built-in web search' }, + { id: 'openai', label: 'OpenAI', sub: 'GPT-5 · GPT-4o · reasoning effort' }, + { id: 'ollama', label: 'Local', sub: 'Ollama · LM Studio · vLLM · any OpenAI-compatible endpoint' }, + ] as Array<{ id: MindProvider; label: string; sub: string }> + ).map((p) => ( + + ))}
)} - {isAnthropic ? ( + {isAnthropic && ( - ) : ( + )} + {isOpenAI && ( )} -

Powers the reasoning behind every answer.

+ {!isOllama && ( +

Powers the reasoning behind every answer.

+ )} -
-
Model
-
- {isAnthropic - ? CLAUDE_MODELS.map((m) => ( - - )) - : OPENAI_MODELS.map((m) => ( - - ))} -
-
+ {isOllama ? ( + c.enabled) + } + onToggleOllama={(enabled) => { + const conns = settings.localConnections ?? []; + conns.forEach((c) => { + void window.flicky.updateLocalConnection(c.id, { enabled }); + }); + }} + /> + ) : ( + <> +
+
Model
+
+ {isAnthropic + ? CLAUDE_MODELS.map((m) => ( + + )) + : OPENAI_MODELS.map((m) => ( + + ))} +
+
-
-
Reasoning depth
-

- How much Flicky thinks before replying. -

-
- - - -
-
+
+
Reasoning depth
+

+ How much Flicky thinks before replying. +

+
+ + + +
+
+ + )}
Reply tone
diff --git a/src/renderer/components/panel/OllamaManageModal.tsx b/src/renderer/components/panel/OllamaManageModal.tsx new file mode 100644 index 0000000..1608b0e --- /dev/null +++ b/src/renderer/components/panel/OllamaManageModal.tsx @@ -0,0 +1,376 @@ +import { useState, useEffect, useRef } from 'react'; +import type { LocalConnection, OllamaModelInfo, OllamaPullProgress } from '../../../shared/types'; +import { VISION_MODEL_CATALOG } from '../../../shared/vision-models'; + +// Known vision-capable model families (mirrors ollama-api.ts constant). +const VISION_FAMILIES = [ + 'llava', 'bakllava', 'moondream', 'cogvlm', 'minicpm-v', + 'llava-llama3', 'llava-phi3', 'granite3.2-vision', + 'qwen2-vl', 'qwen2.5-vl', 'qwen3-vl', + 'llava-v1.6', 'llava-v1.5', + 'gemma3', 'gemma4', 'mistral-small3.1', 'devstral', +]; + +function isVision(name: string): boolean { + const lower = name.toLowerCase(); + return VISION_FAMILIES.some((f) => lower.includes(f)); +} + +function fmtSize(bytes?: number): string { + if (!bytes) return ''; + const gb = bytes / 1_073_741_824; + return gb >= 1 ? `${gb.toFixed(1)} GB` : `${(bytes / 1_048_576).toFixed(0)} MB`; +} + +interface OllamaManageModalProps { + conn: LocalConnection; + onClose: () => void; + onModelSelected: (modelId: string) => void; +} + +type PullState = 'idle' | 'pulling' | 'done' | 'error'; + +export function OllamaManageModal({ conn, onClose, onModelSelected }: OllamaManageModalProps) { + const [models, setModels] = useState([]); + const [loading, setLoading] = useState(true); + + // Pull + const [pullTag, setPullTag] = useState(''); + const [pullState, setPullState] = useState('idle'); + const [pullProgress, setPullProgress] = useState(null); + const [pullError, setPullError] = useState(''); + const [quickPullTag, setQuickPullTag] = useState(null); + + // Delete + const [deleteTarget, setDeleteTarget] = useState(''); + const [deleteConfirm, setDeleteConfirm] = useState(false); + const [deleting, setDeleting] = useState(false); + + // Create + const [createOpen, setCreateOpen] = useState(false); + const [createTag, setCreateTag] = useState(''); + const [createJson, setCreateJson] = useState('{\n "from": "llama3:8b"\n}'); + const [creating, setCreating] = useState(false); + const [createError, setCreateError] = useState(''); + + const bearerToken = useRef(undefined); + + const getBearer = async (): Promise => { + return bearerToken.current; + }; + + const loadModels = async () => { + setLoading(true); + const list = await window.flicky.getOllamaModels(conn.url, await getBearer()); + setModels(list); + setLoading(false); + }; + + useEffect(() => { void loadModels(); }, [conn.id]); + + // Wire pull progress events + useEffect(() => { + const offProgress = window.flicky.onOllamaPullProgress((p) => setPullProgress(p)); + const offComplete = window.flicky.onOllamaPullComplete(({ model }) => { + setPullState('done'); + setPullTag(''); + void loadModels(); + // Auto-select pulled model + onModelSelected(model); + }); + const offError = window.flicky.onOllamaPullError(({ error }) => { + setPullState('error'); + setPullError(error); + }); + return () => { offProgress(); offComplete(); offError(); }; + }, [conn.id]); + + const handlePull = (tag = pullTag.trim()) => { + if (!tag || pullState === 'pulling') return; + setPullState('pulling'); + setPullProgress(null); + setPullError(''); + window.flicky.pullOllamaModel(conn.url, tag, undefined); + }; + + const handleQuickPull = (tag: string) => { + setQuickPullTag(tag); + setPullTag(tag); + handlePull(tag); + }; + + 
const handleDelete = async () => { + if (!deleteTarget || deleting) return; + setDeleting(true); + try { + await window.flicky.deleteOllamaModel(conn.url, deleteTarget, undefined); + setDeleteTarget(''); + setDeleteConfirm(false); + if (conn.activeModelId === deleteTarget) { + onModelSelected(''); + } + await loadModels(); + } catch (err) { + console.error('Delete failed:', err); + } finally { + setDeleting(false); + } + }; + + const handleCreate = async () => { + const tag = createTag.trim(); + if (!tag || creating) return; + setCreating(true); + setCreateError(''); + try { + await window.flicky.createOllamaModel(conn.url, tag, createJson, undefined); + setCreateTag(''); + setCreateOpen(false); + await loadModels(); + } catch (err) { + setCreateError(err instanceof Error ? err.message : String(err)); + } finally { + setCreating(false); + } + }; + + const pullPercent = pullProgress?.total + ? Math.round(((pullProgress.completed ?? 0) / pullProgress.total) * 100) + : null; + + return ( +
+
e.stopPropagation()}> +
+ Manage Ollama +
{conn.url}
+ +
+ + {/* ── Installed models ─────────────────────────────────── */} +
+
Installed models
+ {loading ? ( +
Loading…
+ ) : models.length === 0 ? ( +
No models installed. Pull one below.
+ ) : ( +
+ {models.map((m) => { + const vision = isVision(m.name); + const active = conn.activeModelId === m.name; + return ( + + ); + })} +
+ )} +
+ + {/* ── Vision model catalog ─────────────────────────────── */} +
+
Vision-capable models
+
+
+ {VISION_MODEL_CATALOG.map((entry) => { + const installed = models.some((m) => m.name === entry.tag); + const isPulling = pullState === 'pulling' && quickPullTag === entry.tag; + return ( +
+
+
{entry.name}
+
{entry.description}
+
{entry.sizeLabel}
+
+
+ {installed ? ( + installed + ) : ( + + )} + +
+
+ ); + })} +
+
+ {pullState === 'pulling' && quickPullTag && pullProgress && ( +
+
{pullProgress.status}
+ {pullPercent !== null && ( + <> +
+
+
+
{pullPercent}%
+ + )} +
+ )} +
+ + {/* ── Pull a model ─────────────────────────────────────── */} +
+
Pull a model from Ollama.com
+
+ { setPullTag(e.target.value); setPullState('idle'); }} + onKeyDown={(e) => { if (e.key === 'Enter') handlePull(); }} + disabled={pullState === 'pulling'} + /> + +
+ + {pullState === 'pulling' && pullProgress && ( +
+
{pullProgress.status}
+ {pullPercent !== null && ( +
+
+
+ )} + {pullPercent !== null && ( +
{pullPercent}%
+ )} +
+ )} + {pullState === 'done' && ( +
Model pulled and selected.
+ )} + {pullState === 'error' && ( +
Error: {pullError}
+ )} +
+ To browse available models,{' '} + + . +
+
+ + {/* ── Delete a model ───────────────────────────────────── */} +
+
Delete a model
+
+ + {deleteTarget && !deleteConfirm && ( + + )} + {deleteTarget && deleteConfirm && ( + <> + + + + )} +
+
+ + {/* ── Create a model ───────────────────────────────────── */} +
+ + + {createOpen && ( + <> +
+ setCreateTag(e.target.value)} + /> +
+